content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = r"""
Created on 02/03/2020
"""
from enum import Enum
__all__ = ["UpscaleMode", "MergeMode"]
class MergeMode(Enum):
    """Selector for how two inputs are merged.

    NOTE(review): the names suggest a choice between channel concatenation
    and element-wise addition (e.g. for skip connections) -- confirm at the
    call sites that consume this enum.
    """

    Concat = 0  # merge by concatenation
    Add = 1  # merge by addition
class UpscaleMode(Enum):
    """Selector for an upscaling strategy.

    NOTE(review): the names suggest a choice between a fractionally-strided
    (transposed) convolution and plain upsampling -- confirm at the call
    sites that consume this enum.
    """

    FractionalTranspose = 0  # upscale via transposed convolution
    Upsample = 1  # upscale via plain upsampling
if __name__ == "__main__":
    # Sanity checks: an Enum member tests as a member of its own Enum only.
    assert MergeMode.Concat in MergeMode
    assert UpscaleMode.Upsample not in MergeMode
    assert UpscaleMode.Upsample in UpscaleMode
    assert MergeMode.Add not in UpscaleMode
    # assert not (0 in UpscaleMode)
    # assert not (3 in UpscaleMode)
|
nilq/baby-python
|
python
|
"""Refiner is the refinement module public interface. RefinerFactory is
what should usually be used to construct a Refiner."""
import copy
import logging
import math
import psutil
import libtbx
from dxtbx.model.experiment_list import ExperimentList
from libtbx.phil import parse
import dials.util
from dials.algorithms.refinement import DialsRefineConfigError
from dials.algorithms.refinement.constraints import ConstraintManagerFactory
from dials.algorithms.refinement.engine import AdaptLstbx, refinery_phil_str
from dials.algorithms.refinement.parameterisation import (
build_prediction_parameterisation,
)
from dials.algorithms.refinement.parameterisation import (
phil_str as parameterisation_phil_str,
)
from dials.algorithms.refinement.parameterisation.autoreduce import AutoReduce
from dials.algorithms.refinement.parameterisation.parameter_report import (
ParameterReporter,
)
from dials.algorithms.refinement.prediction.managed_predictors import (
ExperimentsPredictorFactory,
)
from dials.algorithms.refinement.refinement_helpers import ordinal_number, string_sel
from dials.algorithms.refinement.reflection_manager import ReflectionManagerFactory
from dials.algorithms.refinement.reflection_manager import (
phil_str as reflections_phil_str,
)
from dials.algorithms.refinement.restraints import RestraintsParameterisation
from dials.algorithms.refinement.target import TargetFactory
from dials.algorithms.refinement.target import phil_str as target_phil_str
from dials.array_family import flex
# Module-level logger for refinement progress and diagnostics.
logger = logging.getLogger(__name__)

# The include scope directive does not work here. For example:
#
#   include scope dials.algorithms.refinement.outlier_detection.phil_scope
#
# results in:
#
#   AttributeError: 'module' object has no attribute 'refinement'
#
# to work around this, just include external phil scopes as strings

# Substitutions spliced into the master phil string below (the workaround
# for the broken "include scope" directive described above).
format_data = {
    "reflections_phil": reflections_phil_str,
    "target_phil": target_phil_str,
    "parameterisation_phil": parameterisation_phil_str,
    "refinery_phil": refinery_phil_str,
}

# Master phil scope describing all user-configurable refinement parameters.
phil_scope = parse(
    """
refinement
.help = "Parameters to configure the refinement"
{
mp
.expert_level = 2
{
nproc = 1
.type = int(value_min=1)
.help = "The number of processes to use. Not all choices of refinement"
"engine support nproc > 1. Where multiprocessing is possible,"
"it is helpful only in certain circumstances, so this is not"
"recommended for typical use."
}
parameterisation
.help = "Parameters to control the parameterisation of experimental models"
{
%(parameterisation_phil)s
}
%(refinery_phil)s
target
.help = "Parameters to configure the target function"
.expert_level = 1
{
%(target_phil)s
}
reflections
.help = "Parameters used by the reflection manager"
{
%(reflections_phil)s
}
}
"""
    % format_data,
    process_includes=True,
)

# Conversion factor from radians to degrees, used when reporting angular RMSDs.
RAD2DEG = 180 / math.pi
def _copy_experiments_for_refining(experiments):
    """
    Make a partial copy of experiments, copying properties used in refinement.

    Any experiment property that can be altered by refinement e.g. Beam,
    Detector and Goniometer will be deep-copied, whereas anything that the
    refiner doesn't touch (e.g. Scan, ImageSet) will be left as original
    references.

    This makes it safe to pass an object into the refiner or get an object
    out of the refiner without having to worry about your copy being
    unexpectedly altered, but saves time by avoiding the copying of
    potentially expensive experiment properties (e.g. ImageSet and its
    attributes).

    Args
        experiments (Experiment or ExperimentList or Iterable[Experiment]):

    Returns:
        ExperimentList: The copied experiments in new ExperimentList
    """
    # Accept a bare, single experiment as well as any iterable of experiments
    if not hasattr(experiments, "__iter__"):
        experiments = [experiments]

    copied_experiments = ExperimentList()

    # Memoise deep copies keyed by the original object's id, so that models
    # shared between input experiments remain shared between the copies
    shared_copies = {}

    def _unique_copy(model_obj):
        # Deep-copy each distinct model object exactly once
        key = id(model_obj)
        if key not in shared_copies:
            shared_copies[key] = copy.deepcopy(model_obj)
        return shared_copies[key]

    for original in experiments:
        # Start from an inclusive shallow copy of the whole experiment
        clone = copy.copy(original)
        # Replace every 'refined' model on the clone with its unique deep copy
        for attr in ("beam", "goniometer", "detector", "crystal"):
            setattr(clone, attr, _unique_copy(getattr(original, attr)))
        copied_experiments.append(clone)

    return copied_experiments
def _trim_scans_to_observations(experiments, reflections):
    """Check the range of each scan matches the range of observed data and
    trim the scan to match if it is too wide.

    Args:
        experiments: an ExperimentList; scans may be replaced by trimmed
            deep copies in place.
        reflections: a reflection table with at least "id" and
            "xyzobs.mm.value" columns; "xyzobs.px.value" and "shoebox"
            are used when present.

    Returns:
        The (possibly modified) experiments.
    """

    # Get observed image number (or at least observed phi)
    obs_phi = reflections["xyzobs.mm.value"].parts()[2]
    try:
        obs_z = reflections["xyzobs.px.value"].parts()[2]
    except KeyError:
        obs_z = None

    # Get z_min and z_max from shoeboxes if present
    try:
        shoebox = reflections["shoebox"]
        bb = shoebox.bounding_boxes()
        z_min, z_max = bb.parts()[4:]
        # All-zero z_min means the shoebox bounds are not usable here
        if z_min.all_eq(0):
            shoebox = None
    except KeyError:
        shoebox = None

    for iexp, exp in enumerate(experiments):
        # Select only the observations belonging to this experiment
        sel = reflections["id"] == iexp
        isel = sel.iselection()
        if obs_z is not None:
            exp_z = obs_z.select(isel)
        else:
            # No pixel positions available: derive array index from phi
            exp_phi = obs_phi.select(isel)
            exp_z = exp.scan.get_array_index_from_angle(exp_phi, deg=False)

        start, stop = exp.scan.get_array_range()
        min_exp_z = flex.min(exp_z)
        max_exp_z = flex.max(exp_z)

        # If observed array range is correct, skip to next experiment
        if int(min_exp_z) == start and int(math.ceil(max_exp_z)) == stop:
            continue

        # Extend array range either by shoebox size, or 0.5 deg if shoebox not available
        if shoebox is not None:
            obs_start = flex.min(z_min.select(isel))
            obs_stop = flex.max(z_max.select(isel))
        else:
            obs_start = int(min_exp_z)
            obs_stop = int(math.ceil(max_exp_z))
            half_deg_in_images = int(math.ceil(0.5 / exp.scan.get_oscillation()[1]))
            obs_start -= half_deg_in_images
            obs_stop += half_deg_in_images

        # Convert obs_start, obs_stop from position in array range to integer image number
        if obs_start > start or obs_stop < stop:
            im_start = max(start, obs_start) + 1
            im_stop = min(obs_stop, stop)
            logger.warning(
                "The reflections for experiment {0} do not fill the scan range. The scan will be trimmed "
                "to images {{{1},{2}}} to match the range of observed data".format(
                    iexp, im_start, im_stop
                )
            )

            # Ensure the scan is unique to this experiment and set trimmed limits
            exp.scan = copy.deepcopy(exp.scan)
            # Recompute the oscillation start so it matches the new first image
            new_oscillation = (
                exp.scan.get_angle_from_image_index(im_start),
                exp.scan.get_oscillation()[1],
            )
            exp.scan.set_image_range((im_start, im_stop))
            exp.scan.set_oscillation(new_oscillation)

    return experiments
class RefinerFactory:
    """Factory class to create refiners.

    Use :meth:`from_parameters_data_experiments` to construct a configured
    :class:`Refiner` (or :class:`ScanVaryingRefiner`) from PHIL parameters,
    a reflection table and an ExperimentList.
    """

    @staticmethod
    def _filter_reflections(reflections):
        """Return a copy of the input reflections filtered to keep only
        those columns that are required by refinement"""

        cols = [
            "id",
            "miller_index",
            "panel",
            "s1",
            "xyzobs.mm.value",
            "xyzobs.px.value",
            "xyzcal.px",
            "xyzobs.mm.variance",
            "flags",
            "shoebox",
            "delpsical.weights",
        ]
        # NB xyzobs.px.value & xyzcal.px required by SauterPoon outlier rejector
        # NB delpsical.weights is used by ExternalDelPsiWeightingStrategy
        rt = flex.reflection_table()

        # copy columns to the new table. Could use the select method
        # for this except that 's1' is optional in the input so would want
        # to copy that in like this if present anyway
        for k in cols:
            if k in reflections:
                rt[k] = reflections[k]
        return rt

    @classmethod
    def from_parameters_data_experiments(cls, params, reflections, experiments):
        """Build a refiner from PHIL parameters, reflections and experiments.

        The experiments are partially copied (refinable models deep-copied)
        and the reflections are copied and filtered, so the caller's objects
        are not modified by refinement.
        """

        # TODO Checks on the input
        # E.g. does every experiment contain at least one overlapping model with at
        # least one other experiment? Are all the experiments either rotation series
        # or stills (the combination of both not yet supported)?

        # copy the experiments
        experiments = _copy_experiments_for_refining(experiments)

        # copy and filter the reflections
        reflections = cls._filter_reflections(reflections)

        return cls._build_components(params, reflections, experiments)

    @classmethod
    def _build_components(cls, params, reflections, experiments):
        """low level build: create and wire up all refinement components,
        returning a Refiner (or ScanVaryingRefiner).

        Raises:
            DialsRefineConfigError: for invalid or inconsistent input, e.g.
                no experiments, zero-width scans, or a mixture of stills and
                scans.
        """

        # Currently a refinement job can only have one parameterisation of the
        # prediction equation. This can either be of the XYDelPsi (stills) type, the
        # XYPhi (scans) type or the scan-varying XYPhi type with a varying crystal
        # model
        single_as_still = params.refinement.parameterisation.treat_single_image_as_still
        exps_are_stills = []
        for exp in experiments:
            if exp.scan is None:
                exps_are_stills.append(True)
            elif exp.scan.get_num_images() == 1:
                if single_as_still:
                    exps_are_stills.append(True)
                elif exp.scan.is_still():
                    exps_are_stills.append(True)
                else:
                    exps_are_stills.append(False)
            else:
                if exp.scan.get_oscillation()[1] <= 0.0:
                    raise DialsRefineConfigError("Cannot refine a zero-width scan")
                exps_are_stills.append(False)

        # Guard against an empty input, which would otherwise surface as an
        # obscure IndexError when taking exps_are_stills[0] below
        if not exps_are_stills:
            raise DialsRefineConfigError("No experiments provided for refinement")

        # check experiment types are consistent
        if not all(exps_are_stills[0] == e for e in exps_are_stills):
            raise DialsRefineConfigError("Cannot refine a mixture of stills and scans")
        do_stills = exps_are_stills[0]

        # If experiments are stills, ensure scan-varying refinement won't be attempted
        if do_stills:
            params.refinement.parameterisation.scan_varying = False

        # Refiner does not accept scan_varying=Auto. This is a special case for
        # doing macrocycles of refinement in dials.refine.
        if params.refinement.parameterisation.scan_varying is libtbx.Auto:
            params.refinement.parameterisation.scan_varying = False

        # Trim scans and calculate reflection block_width if required for scan-varying refinement
        if (
            params.refinement.parameterisation.scan_varying
            and params.refinement.parameterisation.trim_scan_to_observations
        ):
            experiments = _trim_scans_to_observations(experiments, reflections)

            from dials.algorithms.refinement.reflection_manager import BlockCalculator

            block_calculator = BlockCalculator(experiments, reflections)
            if params.refinement.parameterisation.compose_model_per == "block":
                reflections = block_calculator.per_width(
                    params.refinement.parameterisation.block_width, deg=True
                )
            elif params.refinement.parameterisation.compose_model_per == "image":
                reflections = block_calculator.per_image()

        logger.debug("\nBuilding reflection manager")
        logger.debug("Input reflection list size = %d observations", len(reflections))

        # create reflection manager
        refman = ReflectionManagerFactory.from_parameters_reflections_experiments(
            params.refinement.reflections, reflections, experiments, do_stills
        )

        logger.debug(
            "Number of observations that pass initial inclusion criteria = %d",
            refman.get_accepted_refs_size(),
        )
        sample_size = refman.get_sample_size()
        if sample_size > 0:
            logger.debug("Working set size = %d observations", sample_size)
        logger.debug("Reflection manager built\n")

        # configure use of sparse data types
        params = cls.config_sparse(params, experiments)
        do_sparse = params.refinement.parameterisation.sparse

        # create managed reflection predictor
        ref_predictor = ExperimentsPredictorFactory.from_experiments(
            experiments,
            force_stills=do_stills,
            spherical_relp=params.refinement.parameterisation.spherical_relp_model,
        )

        # Predict for the managed observations, set columns for residuals and set
        # the used_in_refinement flag to the predictions
        obs = refman.get_obs()
        ref_predictor(obs)
        x_obs, y_obs, phi_obs = obs["xyzobs.mm.value"].parts()
        x_calc, y_calc, phi_calc = obs["xyzcal.mm"].parts()
        obs["x_resid"] = x_calc - x_obs
        obs["y_resid"] = y_calc - y_obs
        obs["phi_resid"] = phi_calc - phi_obs

        # determine whether to do basic centroid analysis to automatically
        # determine outlier rejection block
        if params.refinement.reflections.outlier.block_width is libtbx.Auto:
            ca = refman.get_centroid_analyser()
            analysis = ca(calc_average_residuals=False, calc_periodograms=False)
        else:
            analysis = None

        # Now predictions and centroid analysis are available, so we can finalise
        # the reflection manager
        refman.finalise(analysis)

        # Create model parameterisations
        logger.debug("Building prediction equation parameterisation")
        pred_param = build_prediction_parameterisation(
            params.refinement.parameterisation, experiments, refman, do_stills
        )

        # Build a constraints manager, if requested
        cmf = ConstraintManagerFactory(params, pred_param)
        constraints_manager = cmf()

        # Test for parameters that have too little data to refine and act accordingly
        autoreduce = AutoReduce(
            params.refinement.parameterisation.auto_reduction,
            pred_param,
            refman,
            constraints_manager,
            cmf,
        )
        autoreduce()

        # if reduction was done, constraints_manager will have changed
        constraints_manager = autoreduce.constraints_manager

        # Build a restraints parameterisation (if requested).
        # Only unit cell restraints are supported at the moment.
        restraints_parameterisation = cls.config_restraints(
            params.refinement.parameterisation, pred_param
        )

        # Parameter reporting
        logger.debug("Prediction equation parameterisation built")
        logger.debug("Parameter order : name mapping")
        for i, e in enumerate(pred_param.get_param_names()):
            logger.debug("Parameter %03d : %s", i + 1, e)

        param_reporter = ParameterReporter(
            pred_param.get_detector_parameterisations(),
            pred_param.get_beam_parameterisations(),
            pred_param.get_crystal_orientation_parameterisations(),
            pred_param.get_crystal_unit_cell_parameterisations(),
            pred_param.get_goniometer_parameterisations(),
        )

        # Create target function
        logger.debug("Building target function")
        target = cls.config_target(
            params.refinement.target,
            experiments,
            refman,
            ref_predictor,
            pred_param,
            restraints_parameterisation,
            do_stills,
            do_sparse,
        )
        logger.debug("Target function built")

        # create refinery
        logger.debug("Building refinement engine")
        refinery = cls.config_refinery(params, target, pred_param, constraints_manager)
        logger.debug("Refinement engine built")

        nparam = len(pred_param)
        ndim = target.dim
        nref = len(refman.get_matches())
        logger.info(
            "There are %s parameters to refine against %s reflections in %s dimensions",
            nparam,
            nref,
            ndim,
        )

        # Estimate the memory cost of a dense Jacobian for engines that build one
        if not params.refinement.parameterisation.sparse and isinstance(
            refinery, AdaptLstbx
        ):
            dense_jacobian_gigabytes = (
                nparam * nref * ndim * flex.double.element_size()
            ) / 1e9
            avail_memory_gigabytes = psutil.virtual_memory().available / 1e9
            # Report if the Jacobian requires a large amount of storage
            if (
                dense_jacobian_gigabytes > 0.2 * avail_memory_gigabytes
                or dense_jacobian_gigabytes > 0.5
            ):
                logger.info(
                    "Storage of the Jacobian matrix requires %.1f GB",
                    dense_jacobian_gigabytes,
                )

        # build refiner interface and return
        if params.refinement.parameterisation.scan_varying:
            refiner = ScanVaryingRefiner
        else:
            refiner = Refiner
        return refiner(
            experiments, pred_param, param_reporter, refman, target, refinery
        )

    @staticmethod
    def config_sparse(params, experiments):
        """Configure whether to use sparse datatypes.

        Resolves sparse=Auto based on the number of experiments, forces
        sparse=True for the SparseLevMar engine, and disables sparse data
        types when they are incompatible with multiprocessing. Returns the
        (possibly modified) params.
        """

        # Automatic selection for sparse parameter
        if params.refinement.parameterisation.sparse == libtbx.Auto:
            if len(experiments) > 1:
                params.refinement.parameterisation.sparse = True
            else:
                params.refinement.parameterisation.sparse = False
            if params.refinement.refinery.engine == "SparseLevMar":
                params.refinement.parameterisation.sparse = True
            if params.refinement.mp.nproc > 1:
                if params.refinement.refinery.engine != "SparseLevMar":
                    # sparse vectors cannot be pickled, so can't use easy_mp here
                    params.refinement.parameterisation.sparse = False
                else:
                    pass  # but SparseLevMar requires sparse jacobian; does not implement mp
        # Check incompatible selection
        elif (
            params.refinement.parameterisation.sparse and params.refinement.mp.nproc > 1
        ):
            logger.warning(
                "Could not set sparse=True and nproc=%s", params.refinement.mp.nproc
            )
            logger.warning("Resetting sparse=False")
            params.refinement.parameterisation.sparse = False
        return params

    @staticmethod
    def config_restraints(params, pred_param):
        """Given a set of user parameters plus a model parameterisation, create
        restraints plus a parameterisation of these restraints

        Params:
            params: The input PHIL parameters
            pred_param: A PredictionParameters object

        Returns:
            A restraints parameterisation or None

        Raises:
            DialsRefineConfigError: if the tie values or sigmas do not have
                the expected six elements.
        """

        # Nothing to do if no restraints of either kind were requested
        if not any(
            (
                params.crystal.unit_cell.restraints.tie_to_target,
                params.crystal.unit_cell.restraints.tie_to_group,
            )
        ):
            return None

        # Restraints are not compatible with a scan-varying unit cell
        if params.scan_varying and not params.crystal.unit_cell.force_static:
            logger.warning("Restraints will be ignored for scan_varying=True")
            return None

        det_params = pred_param.get_detector_parameterisations()
        beam_params = pred_param.get_beam_parameterisations()
        xl_ori_params = pred_param.get_crystal_orientation_parameterisations()
        xl_uc_params = pred_param.get_crystal_unit_cell_parameterisations()
        gon_params = pred_param.get_goniometer_parameterisations()

        rp = RestraintsParameterisation(
            detector_parameterisations=det_params,
            beam_parameterisations=beam_params,
            xl_orientation_parameterisations=xl_ori_params,
            xl_unit_cell_parameterisations=xl_uc_params,
            goniometer_parameterisations=gon_params,
        )

        # Shorten params path
        cell_r = params.crystal.unit_cell.restraints

        for tie in cell_r.tie_to_target:
            if len(tie.values) != 6:
                raise DialsRefineConfigError(
                    "6 cell parameters must be provided as the tie_to_target.values."
                )
            if len(tie.sigmas) != 6:
                raise DialsRefineConfigError(
                    "6 sigmas must be provided as the tie_to_target.sigmas. "
                    "Note that individual sigmas of 0.0 will remove "
                    "the restraint for the corresponding cell parameter."
                )
            if tie.id is None:
                # get one experiment id for each parameterisation to apply to all
                tie.id = [e.get_experiment_ids()[0] for e in xl_uc_params]
            for exp_id in tie.id:
                rp.add_restraints_to_target_xl_unit_cell(exp_id, tie.values, tie.sigmas)

        for tie in cell_r.tie_to_group:
            if len(tie.sigmas) != 6:
                raise DialsRefineConfigError(
                    "6 sigmas must be provided as the tie_to_group.sigmas. "
                    "Note that individual sigmas of 0.0 will remove "
                    "the restraint for the corresponding cell parameter."
                )
            if tie.id is None:
                rp.add_restraints_to_group_xl_unit_cell(tie.target, "all", tie.sigmas)
            else:
                rp.add_restraints_to_group_xl_unit_cell(tie.target, tie.id, tie.sigmas)

        return rp

    @staticmethod
    def config_refinery(params, target, pred_param, constraints_manager):
        """Given a set of parameters, a target class, a prediction
        parameterisation class and a constraints_manager (which could be None),
        build a refinery

        Params:
            params The input parameters

        Returns:
            The refinery instance

        Raises:
            RuntimeError: if the engine name is not recognised.
        """

        # Shorten parameter path
        options = params.refinement.refinery

        # Import the requested engine lazily, as engines are independent
        if options.engine == "SimpleLBFGS":
            from dials.algorithms.refinement.engine import SimpleLBFGS as refinery
        elif options.engine == "LBFGScurvs":
            from dials.algorithms.refinement.engine import LBFGScurvs as refinery
        elif options.engine == "GaussNewton":
            from dials.algorithms.refinement.engine import (
                GaussNewtonIterations as refinery,
            )
        elif options.engine == "LevMar":
            from dials.algorithms.refinement.engine import (
                LevenbergMarquardtIterations as refinery,
            )
        elif options.engine == "SparseLevMar":
            from dials.algorithms.refinement.sparse_engine import (
                SparseLevenbergMarquardtIterations as refinery,
            )
        else:
            raise RuntimeError(
                "Refinement engine " + options.engine + " not recognised"
            )

        logger.debug("Selected refinement engine type: %s", options.engine)

        engine = refinery(
            target=target,
            prediction_parameterisation=pred_param,
            constraints_manager=constraints_manager,
            log=options.log,
            tracking=options.journal,
            max_iterations=options.max_iterations,
        )

        # Configure multiprocessing where the engine supports it
        if params.refinement.mp.nproc > 1:
            nproc = params.refinement.mp.nproc
            try:
                engine.set_nproc(nproc)
            except NotImplementedError:
                logger.warning(
                    "Could not set nproc=%s for refinement engine of type %s",
                    nproc,
                    options.engine,
                )

        return engine

    # Overload to allow subclasses of RefinerFactory to use a different
    # TargetFactory
    @staticmethod
    def config_target(
        params,
        experiments,
        reflection_manager,
        predictor,
        pred_param,
        restraints_param,
        do_stills,
        do_sparse,
    ):
        """Build the target function via the TargetFactory."""
        target = TargetFactory.from_parameters_and_experiments(
            params,
            experiments,
            reflection_manager,
            predictor,
            pred_param,
            restraints_param,
            do_stills,
            do_sparse,
        )
        return target
class Refiner:
"""Public interface for performing DIALS refinement.
Public methods:
run
rmsds
get_experiments
get_matches
get_param_reporter
parameter_correlation_plot
selection_used_for_refinement
predict_for_reflection_table
predict_for_indexed
Notes:
* The return value of run is a recorded history of the refinement
* The experiments accessor provides a copy of the experiments used by
refinement
* get_matches exposes the function of the same name from the privately
stored reflection manager
* The return value of selection_used_for_refinement is a flex.bool
"""
def __init__(
self, experiments, pred_param, param_reporter, refman, target, refinery
):
"""
Mandatory arguments:
experiments - a dxtbx ExperimentList object
pred_param - An object derived from the PredictionParameterisation class
param_reporter -A ParameterReporter object
refman - A ReflectionManager object
target - An object derived from the Target class
refinery - An object derived from the Refinery class
"""
# the experimental models
self._experiments = experiments
# refinement module main objects
self._pred_param = pred_param
self._refman = refman
self._target = target
self._refinery = refinery
# parameter reporter
self._param_report = param_reporter
# Keep track of whether this is stills or scans type refinement
self.experiment_type = refman.experiment_type
return
def get_experiments(self):
"""Return a copy of the current refiner experiments"""
return _copy_experiments_for_refining(self._experiments)
def rmsds(self):
"""Return rmsds of the current model"""
# ensure predictions for the matches are up to date
self._refinery.prepare_for_step()
return self._target.rmsds()
def rmsds_for_reflection_table(self, reflections):
"""Calculate unweighted RMSDs for the specified reflections"""
# ensure predictions for these reflections are up to date
preds = self.predict_for_reflection_table(reflections)
return self._target.rmsds_for_reflection_table(preds)
def get_matches(self):
"""Delegated to the reflection manager"""
return self._refman.get_matches()
def get_free_reflections(self):
"""Delegated to the reflection manager"""
return self._refman.get_free_reflections()
def get_param_reporter(self):
"""Get the ParameterReport object linked to this Refiner"""
return self._param_report
def get_parameter_correlation_matrix(self, step, col_select=None):
"""Return the correlation matrix between columns of the Jacobian at
the specified refinement step. The parameter col_select can be used
to select subsets of the full number of columns. The column labels
are also returned as a list of strings"""
corrmats = self._refinery.get_correlation_matrix_for_step(step)
if corrmats is None:
return None, None
all_labels = self._pred_param.get_param_names()
if col_select is None:
col_select = list(range(len(all_labels)))
sel = string_sel(col_select, all_labels)
labels = [e for e, s in zip(all_labels, sel) if s]
num_cols = len(labels)
if num_cols == 0:
return None, None
for k, corrmat in corrmats.items():
assert corrmat.is_square_matrix()
idx = flex.bool(sel).iselection()
sub_corrmat = flex.double(flex.grid(num_cols, num_cols))
for (i, x) in enumerate(idx):
for (j, y) in enumerate(idx):
sub_corrmat[i, j] = corrmat[x, y]
corrmats[k] = sub_corrmat
return (corrmats, labels)
@property
def history(self):
"""Get the refinement engine's step history"""
return self._refinery.history
def print_step_table(self):
"""print useful output about refinement steps in the form of a simple table"""
logger.info("\nRefinement steps:")
rmsd_multipliers = []
header = ["Step", "Nref"]
for (name, units) in zip(self._target.rmsd_names, self._target.rmsd_units):
if units == "mm":
header.append(name + "\n(mm)")
rmsd_multipliers.append(1.0)
elif units == "rad": # convert radians to degrees for reporting
header.append(name + "\n(deg)")
rmsd_multipliers.append(RAD2DEG)
else: # leave unknown units alone
header.append(name + "\n(" + units + ")")
rows = []
for i in range(self._refinery.history.get_nrows()):
rmsds = [
r * m
for (r, m) in zip(self._refinery.history["rmsd"][i], rmsd_multipliers)
]
rows.append(
[str(i), str(self._refinery.history["num_reflections"][i])]
+ [f"{r:.5g}" for r in rmsds]
)
logger.info(dials.util.tabulate(rows, header))
logger.info(self._refinery.history.reason_for_termination)
return
def print_out_of_sample_rmsd_table(self):
"""print out-of-sample RSMDs per step, if these were tracked"""
# check if it makes sense to proceed
if "out_of_sample_rmsd" not in self._refinery.history:
return
nref = len(self.get_free_reflections())
if nref < 10:
return # don't do anything if very few refs
logger.info("\nRMSDs for out-of-sample (free) reflections:")
rmsd_multipliers = []
header = ["Step", "Nref"]
for (name, units) in zip(self._target.rmsd_names, self._target.rmsd_units):
if units == "mm":
header.append(name + "\n(mm)")
rmsd_multipliers.append(1.0)
elif units == "rad": # convert radians to degrees for reporting
header.append(name + "\n(deg)")
rmsd_multipliers.append(RAD2DEG)
else: # leave unknown units alone
header.append(name + "\n(" + units + ")")
rows = []
for i in range(self._refinery.history.get_nrows()):
rmsds = [
r * m
for r, m in zip(
self._refinery.history["out_of_sample_rmsd"][i], rmsd_multipliers
)
]
rows.append([str(i), str(nref)] + [f"{e:.5g}" for e in rmsds])
logger.info(dials.util.tabulate(rows, header))
return
def print_exp_rmsd_table(self):
"""print useful output about refinement steps in the form of a simple table"""
logger.info("\nRMSDs by experiment:")
header = ["Exp\nid", "Nref"]
for (name, units) in zip(self._target.rmsd_names, self._target.rmsd_units):
if name == "RMSD_X" or name == "RMSD_Y" and units == "mm":
header.append(name + "\n(px)")
elif name == "RMSD_Phi" and units == "rad":
# will convert radians to images for reporting of scans
header.append("RMSD_Z" + "\n(images)")
elif units == "rad":
# will convert other angles in radians to degrees (e.g. for
# RMSD_DeltaPsi and RMSD_2theta)
header.append(name + "\n(deg)")
else: # skip other/unknown RMSDs
pass
rows = []
for iexp, exp in enumerate(self._experiments):
detector = exp.detector
px_sizes = [p.get_pixel_size() for p in detector]
it = iter(px_sizes)
px_size = next(it)
if not all(tst == px_size for tst in it):
logger.info(
"The detector in experiment %d does not have the same pixel "
+ "sizes on each panel. Skipping...",
iexp,
)
continue
px_per_mm = [1.0 / e for e in px_size]
scan = exp.scan
try:
images_per_rad = 1.0 / abs(scan.get_oscillation(deg=False)[1])
except (AttributeError, ZeroDivisionError):
images_per_rad = None
raw_rmsds = self._target.rmsds_for_experiment(iexp)
if raw_rmsds is None:
continue # skip experiments where rmsd cannot be calculated
num = self._target.get_num_matches_for_experiment(iexp)
rmsds = []
for (name, units, rmsd) in zip(
self._target.rmsd_names, self._target.rmsd_units, raw_rmsds
):
if name == "RMSD_X" and units == "mm":
rmsds.append(rmsd * px_per_mm[0])
elif name == "RMSD_Y" and units == "mm":
rmsds.append(rmsd * px_per_mm[1])
elif name == "RMSD_Phi" and units == "rad":
rmsds.append(rmsd * images_per_rad)
elif units == "rad":
rmsds.append(rmsd * RAD2DEG)
rows.append([str(iexp), str(num)] + [f"{r:.5g}" for r in rmsds])
if len(rows) > 0:
logger.info(dials.util.tabulate(rows, header))
return
def print_panel_rmsd_table(self):
"""print useful output about refinement steps in the form of a simple table"""
if len(self._experiments.scans()) > 1:
logger.warning(
"Multiple scans present. Only the first scan will be used "
"to determine the image width for reporting RMSDs"
)
scan = self._experiments.scans()[0]
try:
images_per_rad = 1.0 / abs(scan.get_oscillation(deg=False)[1])
except AttributeError:
images_per_rad = None
for idetector, detector in enumerate(self._experiments.detectors()):
if len(detector) == 1:
continue
logger.info("\nDetector %s RMSDs by panel:", idetector + 1)
header = ["Panel\nid", "Nref"]
for (name, units) in zip(self._target.rmsd_names, self._target.rmsd_units):
if name == "RMSD_X" or name == "RMSD_Y" and units == "mm":
header.append(name + "\n(px)")
elif (
name == "RMSD_Phi" and units == "rad"
): # convert radians to images for reporting of scans
header.append("RMSD_Z" + "\n(images)")
elif (
name == "RMSD_DeltaPsi" and units == "rad"
): # convert radians to degrees for reporting of stills
header.append(name + "\n(deg)")
else: # skip RMSDs that cannot be expressed in image/scan space
pass
rows = []
for ipanel, panel in enumerate(detector):
px_size = panel.get_pixel_size()
px_per_mm = [1.0 / e for e in px_size]
num = self._target.get_num_matches_for_panel(ipanel)
if num <= 0:
continue
raw_rmsds = self._target.rmsds_for_panel(ipanel)
if raw_rmsds is None:
continue # skip panels where rmsd cannot be calculated
rmsds = []
for (name, units, rmsd) in zip(
self._target.rmsd_names, self._target.rmsd_units, raw_rmsds
):
if name == "RMSD_X" and units == "mm":
rmsds.append(rmsd * px_per_mm[0])
elif name == "RMSD_Y" and units == "mm":
rmsds.append(rmsd * px_per_mm[1])
elif name == "RMSD_Phi" and units == "rad":
rmsds.append(rmsd * images_per_rad)
elif name == "RMSD_DeltaPsi" and units == "rad":
rmsds.append(rmsd * RAD2DEG)
rows.append([str(ipanel), str(num)] + [f"{r:.5g}" for r in rmsds])
if len(rows) > 0:
logger.info(dials.util.tabulate(rows, header))
return
def run(self):
"""Run refinement"""
####################################
# Do refinement and return history #
####################################
logger.debug("\nExperimental models before refinement:")
for i, beam in enumerate(self._experiments.beams()):
logger.debug(ordinal_number(i) + " " + str(beam))
for i, detector in enumerate(self._experiments.detectors()):
logger.debug(ordinal_number(i) + " " + str(detector))
for i, goniometer in enumerate(self._experiments.goniometers()):
if goniometer is None:
continue
logger.debug(ordinal_number(i) + " " + str(goniometer))
for i, scan in enumerate(self._experiments.scans()):
if scan is None:
continue
logger.debug(ordinal_number(i) + " " + str(scan))
for i, crystal in enumerate(self._experiments.crystals()):
logger.debug(ordinal_number(i) + " " + str(crystal))
self._refinery.run()
# These involve calculation, so skip them when output is quiet
if logger.getEffectiveLevel() < logging.ERROR:
self.print_step_table()
self.print_out_of_sample_rmsd_table()
self.print_exp_rmsd_table()
det_npanels = [len(d) for d in self._experiments.detectors()]
if any(n > 1 for n in det_npanels):
self.print_panel_rmsd_table()
# Perform post-run tasks to write the refined states back to the models
self._update_models()
logger.debug("\nExperimental models after refinement:")
for i, beam in enumerate(self._experiments.beams()):
logger.debug(ordinal_number(i) + " " + str(beam))
for i, detector in enumerate(self._experiments.detectors()):
logger.debug(ordinal_number(i) + " " + str(detector))
for i, goniometer in enumerate(self._experiments.goniometers()):
if goniometer is None:
continue
logger.debug(ordinal_number(i) + " " + str(goniometer))
for i, scan in enumerate(self._experiments.scans()):
if scan is None:
continue
logger.debug(ordinal_number(i) + " " + str(scan))
for i, crystal in enumerate(self._experiments.crystals()):
logger.debug(ordinal_number(i) + " " + str(crystal))
# Report on the refined parameters
logger.debug(str(self._param_report))
# Return the refinement history
return self._refinery.history
def _update_models(self):
"""Perform any extra tasks required to update the models after refinement.
Does nothing here, but used by subclasses"""
pass
def selection_used_for_refinement(self):
    """Return a flex.bool selection over the input reflection data marking
    the reflections that were used in the final refinement step."""
    matches = self._refman.get_matches()
    used = flex.bool(len(self._refman.get_indexed()), False)
    try:
        # new reflection table format for matches
        used.set_selected(matches["iobs"], True)
    except TypeError:
        # old ObsPredMatch format for matches
        for match in matches:
            used[match.iobs] = True
    return used
def predict_for_indexed(self):
    """Predict positions for all indexed reflections passed into refinement
    and set the used_in_refinement flag on the subset that contributed to
    the final step.

    Derivatives of the model states are not composed, as that is expensive
    and they are not needed outside of a refinement run."""
    reflections = self.predict_for_reflection_table(
        self._refman.get_indexed(), skip_derivatives=True
    )
    # restore the original input order before applying the selection mask
    reflections.sort("iobs")
    mask = self.selection_used_for_refinement()
    reflections.set_flags(mask, reflections.flags.used_in_refinement)
    return reflections
def predict_for_reflection_table(self, reflections, skip_derivatives=False):
    """Perform prediction for all reflections in the supplied table.

    Delegates to the target object, which has access to the predictor.
    Pass skip_derivatives=True to avoid composing state derivatives when
    they are not required."""
    return self._target.predict_for_reflection_table(reflections, skip_derivatives)
class ScanVaryingRefiner(Refiner):
    """Includes functionality to update the models with their states at
    scan-points after scan-varying refinement"""

    def _update_models(self):
        # For each experiment, copy the per-image (scan-varying) states out
        # of the prediction parameterisation into the experimental models.
        for iexp, exp in enumerate(self._experiments):
            ar_range = exp.scan.get_array_range()
            obs_image_numbers = list(range(ar_range[0], ar_range[1] + 1))

            # write scan-varying s0 vectors back to beam models
            s0_list = self._pred_param.get_varying_s0(obs_image_numbers, iexp)
            if s0_list is not None:
                exp.beam.set_s0_at_scan_points(s0_list)

            # write scan-varying setting rotation matrices back to goniometer models
            S_list = self._pred_param.get_varying_setting_rotation(
                obs_image_numbers, iexp
            )
            if S_list is not None:
                exp.goniometer.set_setting_rotation_at_scan_points(S_list)

            # write scan-varying crystal setting matrices back to crystal models
            A_list = self._pred_param.get_varying_UB(obs_image_numbers, iexp)
            if A_list is not None:
                exp.crystal.set_A_at_scan_points(A_list)

            # Calculate scan-varying errors if requested
            if self._pred_param.set_scan_varying_errors:
                # get state covariance matrices the whole range of images. We select
                # the first element of this at each image because crystal scan-varying
                # parameterisations are not multi-state
                state_cov_list = [
                    self._pred_param.calculate_model_state_uncertainties(
                        obs_image_number=t, experiment_id=iexp
                    )
                    for t in range(ar_range[0], ar_range[1] + 1)
                ]
                if "U_cov" in state_cov_list[0]:
                    u_cov_list = [e["U_cov"] for e in state_cov_list]
                else:
                    u_cov_list = None

                if "B_cov" in state_cov_list[0]:
                    b_cov_list = [e["B_cov"] for e in state_cov_list]
                else:
                    b_cov_list = None

                # return these to the model parameterisations to be set in the models
                self._pred_param.set_model_state_uncertainties(
                    u_cov_list, b_cov_list, iexp
                )
|
nilq/baby-python
|
python
|
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat.mock import patch
from ansible_collections.community.general.plugins.modules.network.onyx import onyx_bgp
from ansible_collections.community.general.tests.unit.modules.utils import set_module_args
from ..onyx_module import TestOnyxModule, load_fixture
class TestOnyxBgpModule(TestOnyxModule):
    """Unit tests for the community.general onyx_bgp module.

    Device interaction is fully mocked: the current BGP state is read from
    a fixture file and generated configuration commands are captured and
    compared rather than sent to a device.
    """

    module = onyx_bgp

    def setUp(self):
        super(TestOnyxBgpModule, self).setUp()
        # stub the device query for the existing BGP summary
        self.mock_get_config = patch.object(
            onyx_bgp.OnyxBgpModule, "_get_bgp_summary")
        self.get_config = self.mock_get_config.start()
        # stub command delivery so nothing reaches a real device
        self.mock_load_config = patch(
            'ansible_collections.community.general.plugins.module_utils.network.onyx.onyx.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        super(TestOnyxBgpModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        # canned "show" output representing the device's existing BGP config
        config_file = 'onyx_bgp_show.cfg'
        self.get_config.return_value = load_fixture(config_file)
        self.load_config.return_value = None

    def test_bgp_no_change(self):
        # arguments identical to the fixture state -> no commands generated
        neighbor = dict(remote_as=322, neighbor='10.2.3.5', multihop=255)
        set_module_args(dict(as_number=172, router_id='1.2.3.4',
                             neighbors=[neighbor],
                             networks=['172.16.1.0/24'],
                             evpn=True, fast_external_fallover=True,
                             max_paths=31, ecmp_bestpath=True,
                             ))
        self.execute_module(changed=False)

    def test_bgp_remove(self):
        set_module_args(dict(as_number=172, state='absent'))
        commands = ['no router bgp 172']
        self.execute_module(changed=True, commands=commands)

    def test_bgp_with_vrf_changed(self):
        # changing the VRF removes and recreates the BGP router
        set_module_args(dict(as_number=173, vrf='new_vrf'))
        commands = ['no router bgp 172 vrf default', 'router bgp 173 vrf new_vrf', 'exit']
        self.execute_module(changed=True, commands=commands)

    def test_bgp_change(self):
        neighbor = dict(remote_as=173, neighbor='10.2.3.4')
        set_module_args(dict(as_number=174, router_id='1.2.3.4',
                             neighbors=[neighbor],
                             evpn=False, fast_external_fallover=False,
                             max_paths=32, ecmp_bestpath=False,
                             ))
        commands = ['no router bgp 172 vrf default', 'router bgp 174 vrf default', 'exit',
                    'router bgp 174 vrf default router-id 1.2.3.4 force',
                    'router bgp 174 vrf default neighbor 10.2.3.4 remote-as 173',
                    'no router bgp 174 vrf default neighbor evpn peer-group',
                    'no router bgp 174 vrf default address-family l2vpn-evpn auto-create',
                    'router bgp 174 vrf default no bgp fast-external-fallover',
                    'router bgp 174 vrf default maximum-paths 32',
                    'router bgp 174 vrf default no bestpath as-path multipath-relax force']
        self.execute_module(changed=True, commands=commands)

    def test_bgp_add_neighbor(self):
        neighbors = [dict(remote_as=173, neighbor='10.2.3.4'),
                     dict(remote_as=175, neighbor='10.2.3.5'),
                     dict(remote_as=175, neighbor='10.2.3.6', multihop=250)]
        set_module_args(dict(as_number=172, router_id='1.2.3.4',
                             neighbors=neighbors,
                             networks=['172.16.1.0/24'],
                             evpn=True))
        commands = ['router bgp 172 vrf default neighbor 10.2.3.5 remote-as 175',
                    'router bgp 172 vrf default neighbor 10.2.3.6 remote-as 175',
                    'router bgp 172 vrf default neighbor 10.2.3.6 ebgp-multihop 250',
                    'router bgp 172 vrf default neighbor 10.2.3.6 peer-group evpn',
                    'router bgp 172 vrf default neighbor 10.2.3.4 peer-group evpn']
        self.execute_module(changed=True, commands=commands)

    def test_bgp_del_neighbor(self):
        # purge=True removes neighbors present on the device but not listed
        set_module_args(dict(as_number=172,
                             networks=['172.16.1.0/24'],
                             purge=True))
        commands = ['router bgp 172 vrf default no neighbor 10.2.3.4 remote-as 173',
                    'router bgp 172 vrf default no neighbor 10.2.3.5 remote-as 322']
        self.execute_module(changed=True, commands=commands)

    def test_bgp_add_network(self):
        neighbors = [dict(remote_as=173, neighbor='10.2.3.4')]
        set_module_args(dict(as_number=172, router_id='1.2.3.4',
                             neighbors=neighbors,
                             networks=['172.16.1.0/24', '172.16.2.0/24']))
        commands = ['router bgp 172 vrf default network 172.16.2.0 /24']
        self.execute_module(changed=True, commands=commands)

    def test_bgp_del_network(self):
        neighbors = [dict(remote_as=173, neighbor='10.2.3.4')]
        set_module_args(dict(as_number=172, neighbors=neighbors))
        commands = ['router bgp 172 no network 172.16.1.0 /24']
        self.execute_module(changed=True, commands=commands)
|
nilq/baby-python
|
python
|
import torch
from enum import Enum
from experiments import constants
class OptimizerType(Enum):
    """Optimizer selector for SingleNetworkOptimization (SGD or Adam)."""
    SGD = 0
    Adam = 1
class SchedulerType(Enum):
    """Learning-rate scheduler selector; only multi-step decay is defined."""
    MultiStep = 0
class SingleNetworkOptimization(object):
    """Bundle an optimizer, optional LR schedulers and gradient clipping
    for a single network.

    Fixes over the previous version:
    * ``raise NotImplemented`` raised a TypeError (NotImplemented is not an
      exception); now raises NotImplementedError with a message.
    * ``scheduler_steps`` no longer uses a mutable default argument.
    * uses the public ``torch.nn.utils.clip_grad_norm_`` instead of the
      private ``torch.nn.utils.clip_grad`` module path.
    """

    def __init__(self, network: torch.nn.Module, n_epochs: int,
                 lr=1e-4, weight_decay=1e-3, optimizer_type: OptimizerType = OptimizerType.SGD,
                 grad_norm_clipping=10, betas=(0.9, 0.999), enable_lr_scheduler=False,
                 gamma: float = 0.1, scheduler_steps: list = None):
        self.n_epochs = n_epochs
        self.network = network
        self.optimizer_type = optimizer_type
        if self.optimizer_type == OptimizerType.SGD:
            self.opt = torch.optim.SGD(network.parameters(), lr=lr, momentum=0.0, nesterov=False,
                                       weight_decay=weight_decay)
        elif self.optimizer_type == OptimizerType.Adam:
            self.opt = torch.optim.Adam(network.parameters(), lr=lr, weight_decay=weight_decay, betas=betas)
        else:
            # NotImplemented is a sentinel value, not an exception class
            raise NotImplementedError(f"unsupported optimizer type: {optimizer_type}")
        self.grad_norm_clipping = grad_norm_clipping
        self.enable_lr_scheduler = enable_lr_scheduler
        self.scheduler_list = []
        if self.enable_lr_scheduler:
            # avoid a shared mutable default: None means "no milestones"
            milestones = [] if scheduler_steps is None else scheduler_steps
            self.scheduler_list.append(
                torch.optim.lr_scheduler.MultiStepLR(self.opt, milestones=milestones, gamma=gamma))
        self.norm_type = 2  # L2 norm used when reporting the gradient norm

    def end_epoch(self):
        """Advance every registered LR scheduler by one epoch."""
        for scheduler in self.scheduler_list:
            scheduler.step()

    def zero_grad(self):
        """Clear accumulated gradients on the wrapped optimizer."""
        self.opt.zero_grad()

    def step(self):
        """Clip gradients (if enabled), apply an optimizer step and return
        the pre-clip gradient norm."""
        if self.grad_norm_clipping > 0:
            grad_norm = torch.nn.utils.clip_grad_norm_(self.network.parameters(),
                                                       max_norm=self.grad_norm_clipping).item()
        else:
            # NOTE(review): constants.DEVICE comes from the project-level
            # `experiments` package — verify availability at call time
            grad_norm = torch.norm(
                torch.stack(
                    [torch.norm(p.grad.detach(), self.norm_type).to(constants.DEVICE) for p in
                     self.network.parameters() if p.grad is not None]),
                self.norm_type)
        self.opt.step()
        return grad_norm
|
nilq/baby-python
|
python
|
import sys
import random
import time
from cnn import utils
import logging
import warnings
warnings.filterwarnings("ignore")
import argparse
import torch.utils
import torch.backends.cudnn as cudnn
from utils.scheduler import Scheduler
from torchvision.transforms import transforms
import torchvision.datasets as datasets
from tensorboardX import SummaryWriter
from cnn.supernet import *
from distributed import *
from utils.auto_augment import auto_augment_transform
from utils.Mixup import Mixup
from utils.loss import *
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Command-line interface for the RepNAS search/training script.
parser = argparse.ArgumentParser("RepNAS")
# data pipeline
parser.add_argument('--data', type=str, default='.', help='location of the data')
parser.add_argument('--workers', type=int, default=50, help='number of data loading workers')
parser.add_argument('--model', type=str, default='RepVGGA0', help='type of model which can be selected in [RepVGG_A0, RepVGG_A1, RepVGG_B2g4, RepVGG_B3]')
parser.add_argument('--batch_size', type=int, default=256, help='batch size')
# weight optimizer and schedules
parser.add_argument('--base_lr', type=float, default=0.1, help='init learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=1e-4, help='weight decay')
parser.add_argument('--lr_mode', type=str, default='cosine', help='[step, poly, cosine]')
parser.add_argument('--wd_mode', type=str, default='cosine', help='[step, poly, cosine]')
parser.add_argument('--warmup_lr', type=float, default=0.1, help='init warmup learning rate')
parser.add_argument('--warmup_epochs', type=int, default=0, help='number of warmup epochs')
parser.add_argument('--warmup_mode', type=str, default='constant', help='mode of warmup [constant, linear]')
# augmentation / regularisation
parser.add_argument('--mixup', action='store_true', help='using mixup')
parser.add_argument('--autoaugment', action='store_true', help='using autoaugment')
parser.add_argument('--smooth', action='store_true', help='using smooth CE')
parser.add_argument('--report_freq', type=float, default=500, help='report frequency')
parser.add_argument('--epochs', type=int, default=150, help='number of training epochs')
parser.add_argument('--resume', action='store_true', help='resume from checkpoint')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--save', type=str, default='logs', help='experiment name')
# architecture-search phases and arch-parameter optimizer
parser.add_argument('--random_epochs', type=int, default=15, help='number of random sample epochs')
parser.add_argument('--fixed_epochs', type=int, default=100, help='number of fixed epochs.')
parser.add_argument('--arch_lr', type=float, default=1e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=5e-4, help='weight decay for arch encoding')
parser.add_argument('--kept_ratio', type=float, default=0.34, help='learning rate for arch encoding')
parser.add_argument('--local_rank', type=int, default=0, help='number for current rank')
# parsed at import time: this module is intended to run as a script
args = parser.parse_args()
CLASSES = 1000  # ImageNet class count
def set_random_seed(seed=None):
    """Seed every RNG used during training: python, numpy and torch
    (CPU and all CUDA devices)."""
    for seeder in (
        random.seed,
        np.random.seed,
        torch.manual_seed,
        torch.cuda.manual_seed,
        torch.cuda.manual_seed_all,
    ):
        seeder(seed)
def main():
    """Entry point: build the data pipeline, model and optimizers, then run
    the RepNAS search/training loop with optional distributed (DDP) support.

    The run proceeds in three phases controlled by args: random path
    sampling (args.random_epochs), architecture search, and final training
    with a frozen path (args.fixed_epochs).
    """
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    current_time = time.strftime("%Y-%m-%dT%H:%M", time.localtime())
    # WORLD_SIZE is set by the distributed launcher; the global batch size
    # is split evenly across ranks
    args.distributed = False
    if 'WORLD_SIZE' in os.environ:
        args.distributed = int(os.environ['WORLD_SIZE']) > 1
        args.batch_size = args.batch_size // int(os.environ['WORLD_SIZE'])
    if args.distributed:
        gpu_id = init_dist()
    set_random_seed(args.seed)
    # tensorboard writer exists only on rank 0
    if is_master():
        Writer = SummaryWriter(log_dir=current_time)
        print(args)
    else:
        Writer = None
    cudnn.benchmark = True
    cudnn.enabled = True
    # loss selection: mixup requires soft targets; smoothing without mixup
    # uses the label-smoothing variant
    if not args.mixup and args.smooth:
        criterion_smooth = LabelSmoothingCrossEntropy().cuda()
    elif args.mixup and args.smooth:
        criterion_smooth = SoftTargetCrossEntropy().cuda()
    else:
        criterion_smooth = nn.CrossEntropyLoss().cuda()
    criterion = nn.CrossEntropyLoss().cuda()
    model = model_map[args.model]()
    model = model.cuda()
    # split parameters: architecture weights ('alphas') go to their own
    # Adam optimizer; biases and BN parameters get no weight decay
    param = []
    arch_param = []
    for key, value in model.named_parameters():
        if not value.requires_grad:
            continue
        if 'alphas' in key:
            arch_param += [{'params': [value], 'lr': args.arch_lr, 'weight_decay': args.arch_weight_decay}]
        else:
            if 'bias' in key or 'bn' in key:
                weight_decay = 0
            else:
                weight_decay = args.weight_decay
            param += [{'params': [value], 'lr': args.base_lr, 'weight_decay': weight_decay}]
    optimizer = torch.optim.SGD(
        param,
        momentum=args.momentum,
    )
    arch_optimizer = torch.optim.Adam(
        arch_param,
        betas=(0.5, 0.999),
        weight_decay=args.arch_weight_decay
    )
    current_epoch = 0
    if args.resume and os.path.exists(os.path.join(args.save, 'ckpt.pt')):
        print('loading checkpoint')
        checkpoint = torch.load(os.path.join(args.save, 'ckpt.pt'), map_location='cpu')
        current_epoch = checkpoint['epoch']
        state_dict = OrderedDict()
        for name, param in checkpoint['model'].items():
            state_dict[name] = param
        model.load_state_dict(state_dict)
        optimizer.load_state_dict(checkpoint['optimizer'])
        arch_optimizer.load_state_dict(checkpoint['arch_optimizer'])
        model.fixed_path = checkpoint['fixed_path']
    if is_master():
        print("total path = {}".format(model.original_ops))
    model.constraint = args.kept_ratio
    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[gpu_id], find_unused_parameters=True)
        model_without_ddp = model.module
    else:
        model_without_ddp = model
    traindir = os.path.join(args.data, 'train')
    validdir = os.path.join(args.data, 'val')
    # standard ImageNet normalisation constants
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    if args.autoaugment:
        transformer = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            auto_augment_transform('original', dict(translate_const=int(224*0.45), img_mean=tuple([min(255, round(255 * x)) for x in [0.485, 0.456, 0.406]]))),
            transforms.ToTensor(),
            normalize,
        ])
    else:
        transformer = transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    train_dataset = datasets.ImageFolder(
        traindir,
        transformer)
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_queue = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_dataset = datasets.ImageFolder(validdir, transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize,
    ]))
    valid_queue = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)
    mixup_fn = Mixup() if args.mixup else None
    if is_master():
        print("step num for each epoch:", len(train_queue))
    lr_scheduler = Scheduler(optimizer, len(train_queue), 'lr', args.epochs, base_value=args.base_lr)
    lr_scheduler.update(0, current_epoch)
    # the arch weight-decay schedule only spans the search phase (after the
    # random epochs and before the fixed epochs)
    wd_scheduler = Scheduler(arch_optimizer, len(train_queue),
                             'weight_decay', args.epochs - args.random_epochs - args.fixed_epochs,
                             base_value=args.arch_weight_decay)
    wd_scheduler.update(0, max(0, current_epoch - args.random_epochs))
    best_top1 = 0
    for i in range(current_epoch, args.epochs):
        if args.distributed:
            train_queue.sampler.set_epoch(i)
        if is_master():
            print('epoch {}'.format(i))
        # phase 1: sample paths randomly; snapshot weights when it ends
        if i < args.random_epochs:
            model_without_ddp.random_sample = True
        elif i == args.random_epochs and is_master():
            torch.save({'model': model_without_ddp.state_dict(),
                        'epoch': i,
                        'optimizer': optimizer.state_dict(),
                        'arch_optimizer': arch_optimizer.state_dict(),
                        'fixed_path': model_without_ddp.fixed_path
                        }, os.path.join(args.save, 'pretrained.pt'))
        else:
            model_without_ddp.random_sample = False
        # phase 3: freeze the searched path for the final training epochs
        if i >= args.epochs - args.fixed_epochs and model_without_ddp.fixed_path is None:
            model_without_ddp.fixed_mask()
            if is_master():
                print("fixed path:", model_without_ddp.fixed_path)
        train(args, train_queue, model, model_without_ddp, optimizer, arch_optimizer, lr_scheduler, wd_scheduler, i, mixup_fn, criterion_smooth, Writer)
        top1 = infer(valid_queue, model, criterion, i, Writer)
        if is_master():
            print(model_without_ddp.alphas.sigmoid())
            print("epoch:{} top1:{:3f}".format(i, top1))
            # rolling checkpoint every epoch; best checkpoint kept separately
            torch.save({'model': model_without_ddp.state_dict(),
                        'epoch': i+1,
                        'optimizer': optimizer.state_dict(),
                        'arch_optimizer': arch_optimizer.state_dict(),
                        'fixed_path': model_without_ddp.fixed_path
                        }, os.path.join(args.save, 'ckpt.pt'))
            if top1 > best_top1:
                torch.save({'model': model_without_ddp.state_dict(),
                            'epoch': i + 1,
                            'top1': top1,
                            'fixed_path': model_without_ddp.fixed_path}, os.path.join(args.save, 'best.pt'))
                best_top1 = top1
    if is_master():
        Writer.close()
def train(args, train_queue, model, model_without_ddp, optimizer, arch_optimizer, lr_scheduler, wd_scheduler, epoch, mixup_fn, criterion, Writer):
    """Run one training epoch, updating network weights and (outside the
    random-sampling and fixed-path phases) the architecture parameters.

    Fixes: uses torch.nn.utils.clip_grad_norm_ (the non-underscore
    clip_grad_norm has been deprecated and removed in modern PyTorch) and
    performs the NaN check on-device instead of iterating the tensor in
    Python.
    """
    obj = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.train()
    if mixup_fn is not None:
        mixup_fn.mixup_enabled = False
    for step, (inputs, targets) in enumerate(train_queue):
        optimizer.zero_grad()
        arch_optimizer.zero_grad()
        inputs = inputs.cuda()
        targets = targets.cuda()
        if mixup_fn is not None:
            inputs, smooth_targets = mixup_fn(inputs, targets)
        else:
            inputs, smooth_targets = inputs, targets
        logits, rank = model(inputs)
        loss = criterion(logits, smooth_targets)
        #loss += 2 * (rank.sum() / rank.numel() - args.kept_ratio) ** 2
        loss.backward()
        nn.utils.clip_grad_norm_(model_without_ddp.parameters(), 5)
        prec1, prec5 = utils.accuracy(logits, targets, topk=(1, 5))
        n = inputs.size(0)
        if args.distributed:
            dist_all_reduce_tensor(loss)
            dist_all_reduce_tensor(prec1)
            dist_all_reduce_tensor(prec5)
        obj.update(loss.item(), n)
        top1.update(prec1.item(), n)
        top5.update(prec5.item(), n)
        optimizer.step()
        # arch parameters are only trained during the search phase
        if not model_without_ddp.random_sample and model_without_ddp.fixed_path is None:
            arch_optimizer.step()
            wd_scheduler.update(step, epoch - args.random_epochs)
        lr_scheduler.update(step, epoch)
        # abort if any architecture parameter diverged to NaN
        if torch.isnan(model_without_ddp.alphas).any():
            sys.exit(1)
        if step % args.report_freq == 0 and is_master():
            # print(model.alphas.sigmoid())
            print('train step:{}\
                  lr:{:.3f} wd:{:.3f}\
                  loss:{:.3f} top1:{:.3f}\
                  top5:{:.3f}'.format(step,
                                      lr_scheduler.value,
                                      wd_scheduler.value,
                                      obj.avg, top1.avg, top5.avg))
            Writer.add_scalar('train loss', obj.avg, epoch*len(train_queue)+step)
            Writer.add_scalar('train top1 acc', top1.avg, epoch*len(train_queue)+step)
def infer(valid_queue, model, criterion, epoch, Writer):
    """Evaluate the model on the validation queue and return average top-1
    accuracy; rank 0 also logs loss/accuracy to tensorboard."""
    objs = utils.AvgrageMeter()
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    model.eval()
    with torch.no_grad():
        for step, (input, target) in enumerate(valid_queue):
            input = input.cuda()
            target = target.cuda()
            logits, _ = model(input)
            loss = criterion(logits, target)
            prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
            n = input.size(0)
            # average metrics across ranks before accumulating
            dist_all_reduce_tensor(loss)
            dist_all_reduce_tensor(prec1)
            dist_all_reduce_tensor(prec5)
            objs.update(loss.item(), n)
            top1.update(prec1.item(), n)
            top5.update(prec5.item(), n)
    if is_master():
        Writer.add_scalar('valid loss', objs.avg, epoch)
        Writer.add_scalar('valid top1 acc', top1.avg, epoch)
    return top1.avg
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import base64
import os
import subprocess
import sys
import yaml
EDITOR = os.environ.get('EDITOR', 'vi')
class NoDatesSafeLoader(yaml.SafeLoader):
    """yaml.SafeLoader subclass whose implicit resolvers can be pruned,
    so e.g. timestamp-looking strings stay plain strings."""

    @classmethod
    def remove_implicit_resolver(cls, tag_to_remove):
        """
        Remove implicit resolvers for a particular tag

        Takes care not to modify resolvers in super classes.

        We want to load datetimes as strings, not dates, because we
        go on to serialise as json which doesn't have the advanced types
        of yaml, and leads to incompatibilities down the track.
        """
        # copy-on-write: give this subclass its own resolver table so the
        # parent SafeLoader's resolvers are left untouched
        if 'yaml_implicit_resolvers' not in cls.__dict__:
            cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
        for first_letter, mappings in cls.yaml_implicit_resolvers.items():
            cls.yaml_implicit_resolvers[first_letter] = [
                (tag, regexp)
                for tag, regexp in mappings
                if tag != tag_to_remove
            ]
def repr_str(dumper, data):
    """Represent multi-line strings as YAML block literals ('|'); delegate
    everything else to the dumper's original str representer."""
    if '\n' not in data:
        return dumper.orig_represent_str(data)
    return dumper.represent_scalar(u'tag:yaml.org,2002:str', data, style='|')
def decode(secret):
    """Replace the base64-encoded values under the secret's 'data' key with
    decoded utf8 strings, in place. Returns the (mutated) secret."""
    if 'data' in secret:
        plaintext = {}
        for key, value in secret['data'].items():
            plaintext[key] = base64.b64decode(value).decode('utf8')
        secret['data'] = plaintext
    return secret
def encode(secret):
    """Base64-encode the values under the secret's 'data' key, in place
    (inverse of decode). Returns the (mutated) secret.

    Fix: b64encode returns bytes; the result is decoded to ASCII text so
    the values round-trip as plain strings — yaml.safe_dump raises a
    RepresenterError on bytes objects under Python 3, and decode()
    already expects str values.
    """
    if 'data' in secret:
        secret['data'] = {
            k: base64.b64encode(v.encode()).decode('ascii')
            for k, v in secret['data'].items()
        }
    return secret
def edit(fname):
    """Round-trip edit a Kubernetes Secret YAML file with plaintext values.

    Decodes the base64 'data' values to plaintext in the file, opens the
    file in $EDITOR, then re-encodes the edited values in place.
    """
    with open(fname, 'r') as fid:
        secret = yaml.load(fid, Loader=NoDatesSafeLoader)
    decoded = decode(secret)
    with open(fname, 'w') as fid:
        fid.write(yaml.safe_dump(decoded, default_flow_style=False))
    # block until the user closes their editor
    subprocess.check_call(EDITOR.split() + [fname])
    with open(fname, 'r') as fid:
        edited = yaml.load(fid, Loader=NoDatesSafeLoader)
    encoded = encode(edited)
    with open(fname, 'w') as fid:
        fid.write(yaml.safe_dump(encoded, default_flow_style=False))
def main():
    """Configure YAML handling then edit the secret file given in argv[1]."""
    # keep timestamp-looking values as plain strings
    NoDatesSafeLoader.remove_implicit_resolver('tag:yaml.org,2002:timestamp')
    # keep a handle to the stock str representer, then install repr_str so
    # multi-line values dump as block literals
    yaml.SafeDumper.orig_represent_str = yaml.SafeDumper.represent_str
    yaml.add_representer(str, repr_str, Dumper=yaml.SafeDumper)
    fname = sys.argv[1]
    edit(fname)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/python
"""
Basic class for communication to Parrot Bebop
usage:
./bebop.py <task> [<metalog> [<F>]]
"""
import sys
import socket
import datetime
import struct
import time
import numpy as np
import math
from navdata import *
from commands import *
from video import VideoFrames
# this will be in a new separate repository as a common library for droneika Python-powered drones
from apyros.metalog import MetaLog, disableAsserts
from apyros.manual import myKbhit, ManualControlException
HOST = "192.168.42.1"
DISCOVERY_PORT = 44444
NAVDATA_PORT = 43210 # d2c_port
COMMAND_PORT = 54321 # c2d_port
class Bebop:
def __init__( self, metalog=None, onlyIFrames=True ):
    """Set up sockets, logging and default telemetry state.

    metalog -- replay log, or None to connect to a live drone
    onlyIFrames -- restrict video processing to H.264 I-frames
    """
    if metalog is None:
        # live flight: announce ourselves to the drone first
        self._discovery()
        metalog = MetaLog()
    self.navdata = metalog.createLoggedSocket( "navdata", headerFormat="<BBBI" )
    self.navdata.bind( ('',NAVDATA_PORT) )
    if metalog.replay:
        self.commandSender = CommandSenderReplay(metalog.createLoggedSocket( "cmd", headerFormat="<BBBI" ),
                hostPortPair=(HOST, COMMAND_PORT), checkAsserts=metalog.areAssertsEnabled())
    else:
        self.commandSender = CommandSender(metalog.createLoggedSocket( "cmd", headerFormat="<BBBI" ),
                hostPortPair=(HOST, COMMAND_PORT))
    self.console = metalog.createLoggedInput( "console", myKbhit ).get
    self.metalog = metalog
    self.buf = ""
    self.videoFrameProcessor = VideoFrames( onlyIFrames=onlyIFrames, verbose=False )
    self.videoCbk = None
    self.videoCbkResults = None
    # telemetry below is populated from parsed navdata packets
    self.battery = None
    self.flyingState = None
    self.flatTrimCompleted = False
    self.manualControl = False
    self.time = None
    self.moveByEnd = None
    self.altitude = None
    self.angle = (0,0,0)
    self.position = (0,0,0)
    self.speed = (0,0,0)
    self.positionGPS = None
    self.cameraTilt = -90
    self.cameraPan = 0
    self.lastImageResult = None
    self.navigateHomeState = None
    self.config()
    self.commandSender.start()
def _discovery( self ):
    "start communication with the drone"
    filename = "tmp.bin" # TODO combination outDir + date/time
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP
    s.connect( (HOST, DISCOVERY_PORT) )
    # JSON handshake tells the drone which UDP port to send navdata to
    s.send( '{"controller_type":"computer", "controller_name":"katarina", "d2c_port":"43210"}' )
    f = open( filename, "wb" )
    while True:
        data = s.recv(10240)
        if len(data) > 0:
            # keep the raw discovery reply for later inspection
            f.write(data)
            f.flush()
            break
    f.close()
    s.close()
def _update( self, cmd ):
    "internal send command and return navdata"
    # poll the operator console; once a keypress is seen, raise so the
    # caller can take over (nested so the exception fires only once)
    if not self.manualControl:
        self.manualControl = self.console()
        if self.manualControl:
            # raise exception only once
            raise ManualControlException()
    # send even None, to sync in/out queues
    self.commandSender.send( cmd )
    # accumulate until at least one complete navdata packet is buffered
    while len(self.buf) == 0:
        data = self.navdata.recv(40960)
        self.buf += data
    data, self.buf = cutPacket( self.buf )
    return data
def _parseData( self, data ):
    # tolerate malformed packets: report the assertion instead of aborting
    try:
        parseData( data, drone=self, verbose=False )
    except AssertionError, e:
        print "AssertionError", e
def update( self, cmd=None, ackRequest=False ):
    "send command and return navdata"
    if cmd is None:
        data = self._update( None )
    else:
        data = self._update( packData(cmd, ackRequest=ackRequest) )
    # keep servicing protocol-level requests (acks, pings, video frames)
    # until a plain navdata packet arrives
    while True:
        if ackRequired(data):
            self._parseData( data )
            data = self._update( createAckPacket(data) )
        elif pongRequired(data):
            self._parseData( data ) # update self.time
            data = self._update( createPongPacket(data) )
        elif videoAckRequired(data):
            if self.videoCbk:
                self.videoFrameProcessor.append( data )
                frame = self.videoFrameProcessor.getFrameEx()
                if frame:
                    self.videoCbk( frame, debug=self.metalog.replay )
                if self.videoCbkResults:
                    ret = self.videoCbkResults()
                    if ret is not None:
                        print ret
                        self.lastImageResult = ret
            # video packets are acked even when no callback is installed
            data = self._update( createVideoAckPacket(data) )
        else:
            break
    self._parseData( data )
    return data
def setVideoCallback( self, cbk, cbkResult=None ):
    "set cbk for collected H.264 encoded video frames & access to results queue"
    self.videoCbk = cbk
    if cbkResult is None:
        self.videoCbkResults = None
    else:
        # wrap the result getter so replies are logged for replay
        self.videoCbkResults = self.metalog.createLoggedInput( "cv2", cbkResult ).get
def config( self ):
    """Send initial configuration: date/time, speed limits, state/settings
    requests, camera pose and autorecording off."""
    # initial cfg
    dt = self.metalog.now()
    if dt: # for compatibility with older log files
        self.update( cmd=setDateCmd( date=dt.date() ) )
        self.update( cmd=setTimeCmd( time=dt.time() ) )
    for cmd in setSpeedSettingsCmdList( maxVerticalSpeed=1.0, maxRotationSpeed=90.0,
            hullProtection=True, outdoor=True ):
        self.update( cmd=cmd )
    self.update( cmd=requestAllStatesCmd() )
    self.update( cmd=requestAllSettingsCmd() )
    self.moveCamera( tilt=self.cameraTilt, pan=self.cameraPan )
    self.update( videoAutorecordingCmd( enabled=False ) )
def takeoff( self ):
    """Start video recording, issue takeoff and wait until the takeoff
    transition completes (flyingState leaves 1 after having been 1)."""
    print "Taking off ...",
    self.update( videoRecordingCmd( on=True ) )
    self.update( cmd=takeoffCmd() )
    prevState = None
    for i in xrange(100):
        self.update( cmd=None )
        # detect the 1 -> other transition of the flying state
        if self.flyingState != 1 and prevState == 1:
            break
        prevState = self.flyingState
    print "FLYING"
def land2( self ):
    """Alternative landing: descend manually, then cut motors via the
    emergency command once near the ground."""
    print "Landing ..."
    landing_speed = 75
    self.update( videoRecordingCmd( on=False ) )
    # descend under manual PCMD control until close to the ground
    while self.altitude > 0.1 :
        self.update( movePCMDCmd( True, 0, 0, 0, -landing_speed ) )
    self.update( cmd=emergencyCmd() )
    if(self.flyingState == 0):
        print "LANDED"
def land( self ):
    """Descend manually to near the ground, then send land commands until
    the drone reports the landed state (flyingState == 0)."""
    print "Landing ..."
    landing_speed = 75
    self.update( videoRecordingCmd( on=False ) )
    # descend under manual PCMD control first
    while self.altitude > 0.1 :
        self.update( movePCMDCmd( True, 0, 0, 0, -landing_speed ) )
    while(self.flyingState != 0):
        self.update( cmd=landCmd() )
    print "LANDED"
def hover( self, timeout ):
    """Hold position by sending zero PCMD commands for `timeout` seconds
    (measured with the drone-reported clock)."""
    startTime = self.time
    count = 0
    while(self.time-startTime<timeout):
        self.update( cmd=movePCMDCmd( active=True, roll=0, pitch=0, yaw=0, gaz=0 ) )
        count += 1
    # debug: number of update cycles completed while hovering
    print count
def emergency( self ):
    # NOTE(review): presumably an immediate motor cut-off — confirm the
    # semantics of emergencyCmd in commands.py before relying on it
    self.update( cmd=emergencyCmd() )
def trim( self ):
    """Run flat trim calibration: settle, send the trim command, then wait
    until the drone confirms completion (flatTrimCompleted set via navdata)."""
    print "Trim:",
    self.flatTrimCompleted = False
    # let a few navdata cycles pass before issuing the trim command
    for i in xrange(10):
        print i,
        self.update( cmd=None )
    print
    self.update( cmd=trimCmd() )
    for i in xrange(10):
        print i,
        self.update( cmd=None )
        if self.flatTrimCompleted:
            break
def takePicture( self ):
    # trigger the onboard still camera
    self.update( cmd=takePictureCmd() )
    print 'picture taken at time ', self.time
def videoEnable( self ):
    "enable video stream"
    self.update( cmd=videoStreamingCmd( enable=True ), ackRequest=True )
def videoDisable( self ):
    "disable video stream"
    self.update( cmd=videoStreamingCmd( enable=False ), ackRequest=True )
def moveCamera( self, tilt, pan ):
    "Tilt/Pan camera consign for the drone (in degrees)"
    self.update( cmd=moveCameraCmd( tilt=tilt, pan=pan) )
    self.cameraTilt, self.cameraPan = tilt, pan # maybe move this to parse data, drone should confirm that
def resetHome( self ):
    # NOTE(review): presumably resets the drone's stored home position —
    # verify against resetHomeCmd in commands.py
    self.update( cmd=resetHomeCmd() )
def stop( self, timeout=3.0 ):
    """Actively brake: command PCMD opposite to the current velocity until
    the squared speed drops below 0.3 or `timeout` seconds elapse."""
    print 'stopping the drone'
    startTime = self.time
    droneSpeed = self.speed[0]**2+self.speed[1]**2+self.speed[2]**2
    while(self.time-startTime<timeout and droneSpeed>0.3):
        # counter-steer proportionally to the measured speed components
        self.update( movePCMDCmd( True, self.speed[1]*50, self.speed[0]*50, 0, -self.speed[2]*50 ) )
        droneSpeed = self.speed[0]**2+self.speed[1]**2+self.speed[2]**2
    print 'stopping position ', -self.position[1], -self.position[0], -self.position[2]
    print 'droneSpeed ',droneSpeed
    # finish with a neutral command
    self.update( movePCMDCmd( True, 0, 0, 0, 0 ) )
def moveX( self, dX, speed, timeout=3.0 ):
    """Move approximately dX along the drone X axis at the given PCMD
    speed (sign follows dX), bounded by `timeout` seconds."""
    print 'moveX', dX
    if(dX < 0):
        speed = -speed
    assert self.time is not None
    startTime = self.time
    startPos = -self.position[0]
    while abs(self.position[0]+startPos) < abs(dX) and self.time-startTime < timeout:
        self.update( movePCMDCmd( True, speed, 0, 0, 0 ) )
        print 'position ', self.position[0], self.position[1], self.position[2]
        print 'current speed x',-self.speed[0]
    # neutral command to stop the motion
    self.update( movePCMDCmd( True, 0, 0, 0, 0 ) )
def moveY( self, dY, speed, timeout=3.0 ):
    """Move approximately dY along the drone Y axis at the given PCMD
    speed (sign follows dY), bounded by `timeout` seconds."""
    print 'moveY', dY
    if(dY < 0):
        speed = -speed
    assert self.time is not None
    startTime = self.time
    startY = self.position[1]
    while abs(self.position[1]-startY) < abs(dY) and self.time-startTime < timeout:
        self.update( movePCMDCmd( True, 0, speed, 0, 0 ) )
        currentY = self.position[1]
        print 'current position y axis',currentY
        print 'current speed y',self.speed[1]
    print 'end position ', -self.position[0], self.position[1], -self.position[2]
    # neutral command to stop the motion
    self.update( movePCMDCmd( True, 0, 0, 0, 0 ) )
def moveZ( self, altitude, timeout=5.0 ):
    """Climb or descend to the given (positive) target altitude, bounded
    by `timeout` seconds."""
    speed = 50 #in percentage
    assert self.time is not None
    assert self.altitude is not None
    startTime = self.time
    if self.altitude < altitude:#going up
        while self.altitude < altitude and self.time-startTime < timeout and altitude>0:
            self.update( movePCMDCmd( True, 0, 0, 0, speed ) )
            # print 'going up ', self.altitude, self.time-startTime
        self.update( movePCMDCmd( True, 0, 0, 0, 0 ) )
        return
    else:
        while self.altitude > altitude and self.time-startTime < timeout and altitude>0:
            self.update( movePCMDCmd( True, 0, 0, 0, -speed ) )
            #print 'going down ', self.altitude, self.time-startTime
        self.update( movePCMDCmd( True, 0, 0, 0, 0 ) )
        return
    # unreachable: both branches above return
    self.update( movePCMDCmd( True, 0, 0, 0, 0 ) )
def moveBy( self, dX, dY, timeout=5.0):
    """Fly a relative (dX, dY) displacement by steering towards the target
    with unit-direction PCMD commands. Stops on arrival (< 0.2), on path
    deviation (> initial distance + 2), or after `timeout` seconds."""
    # outdated function.
    # TODO: modify targetSpeed so it doesn't use updated values.
    print 'move by ', dX, dY
    startTime = self.time
    # note the sign/axis swap between self.position and the local frame
    startPosition = [0]*2
    startPosition[0] = -self.position[1]
    startPosition[1] = -self.position[0]
    print 'starting position ',startPosition
    targetPosition = [0]*2
    currentSpeed = [0]*2
    targetSpeed = [0]*2
    inputSpeed = [0]*2
    targetPosition[0] = startPosition[0]+dX
    targetPosition[1] = startPosition[1]+dY
    top_speed = 40
    initial_distance = np.sqrt(abs(targetPosition[1]-startPosition[0])**2+abs(targetPosition[0]-startPosition[1])**2)
    print 'tartgetPos x ', targetPosition[0], ' y ', targetPosition[1]
    while(self.time-startTime<timeout):
        distance = np.sqrt(abs(targetPosition[1]+self.position[0])**2+abs(targetPosition[0]+self.position[1])**2)
        # print 'distance ',distance
        if(distance<0.2):
            print 'arrived', distance
            break
        if(distance>initial_distance+2):
            print 'drone out of path', distance
            break
        # unit vector from current position towards the target
        targetSpeed[0] = targetPosition[0]+self.position[1]
        targetSpeed[1] = targetPosition[1]+self.position[0]
        targetSpeed[0] = targetSpeed[0]/np.sqrt(targetSpeed[0]**2+targetSpeed[1]**2)
        targetSpeed[1] = targetSpeed[1]/np.sqrt(targetSpeed[0]**2+targetSpeed[1]**2)
        #print 'targetSpeed x ',targetSpeed[0],' y ',targetSpeed[1]
        # unit vector of the current velocity
        currentSpeed[0] = -self.speed[1]/np.sqrt(self.speed[0]**2+self.speed[1]**2)
        currentSpeed[1] = -self.speed[0]/np.sqrt(self.speed[0]**2+self.speed[1]**2)
        #print 'currentSpeed x ',currentSpeed[0], ' y ',currentSpeed[1]
        # steer by the difference between desired and actual direction
        inputSpeed[0] = targetSpeed[0]-currentSpeed[0]
        inputSpeed[1] = targetSpeed[1]-currentSpeed[1]
        #print 'inputSpeed x ',inputSpeed[0],' y ',inputSpeed[1]
        self.update( movePCMDCmd( True, inputSpeed[0]*top_speed, inputSpeed[1]*top_speed, 0, 0 ) )
    self.update( cmd=movePCMDCmd( True, 0, 0, 0, 0 ) )
    endPosition = self.position
    print 'end position x ',-endPosition[1],' y ',-endPosition[0]
def calibrate( self, dX, dY, timeout=3.0 ):
    """Yaw the drone toward the direction of the vector (dX, dY).

    Computes the desired absolute heading with arctan2, wraps the sum with
    the current heading into [-pi, pi], then rotates at a fixed rate until
    the heading is within 0.1 rad of the target or `timeout` expires.
    """
    startTime = self.time
    rotation_speed = 75
    print 'start angle= ',self.angle[2]
    rotation = np.arctan2(dX,dY)
    print 'rotation= ',rotation
    # Wrap the combined angle back into the [-pi, pi] range.
    if(rotation+self.angle[2]>math.pi):
        rotateAngle = -2*math.pi+rotation+self.angle[2]
    elif(rotation+self.angle[2]<-math.pi):
        rotateAngle = 2*math.pi+rotation+self.angle[2]
    else:
        rotateAngle = self.angle[2]+rotation
    # Direction is chosen from the sign of `rotation`, not the shortest arc.
    if(rotation < 0):
        print 'counterclockwise', rotateAngle
        while abs(self.angle[2]-rotateAngle) > 0.1 and self.time-startTime < timeout:
            self.update( movePCMDCmd( True, 0, 0, -rotation_speed, 0 ) )
        self.update( movePCMDCmd( True, 0, 0, 0, 0 ) )
        print 'end angle= ',self.angle[2]
        return
    else:
        print 'clockwise',rotateAngle
        while abs(self.angle[2]-rotateAngle) > 0.1 and self.time-startTime < timeout:
            self.update( movePCMDCmd( True, 0, 0, rotation_speed, 0 ) )
        self.update( movePCMDCmd( True, 0, 0, 0, 0 ) )
        print 'end angle= ',self.angle[2]
        return
def resetPosition( self, startAngle, timeout=3.0 ):
    """Rotate back to the heading `startAngle`, or give up after `timeout`.

    If the current heading is already within 0.1 of the target, just sends
    a single stop command.  NOTE(review): the rotation direction is chosen
    from the sign of the *current* heading, not from the shortest path to
    startAngle -- confirm this is intended.
    """
    print 'reset angle...'
    # self.moveZ(altitude)
    rotation_speed = 75
    assert self.time is not None
    startTime = self.time
    if abs(startAngle-self.angle[2])<0.1:
        print 'already calibrated'
        self.update( movePCMDCmd( True, 0, 0, 0, 0 ) )
    else:
        if self.angle[2] < 0:
            print 'calibrate clockwise'
            while abs(self.angle[2]-startAngle) > 0.1 and self.time-startTime < timeout:
                self.update( movePCMDCmd( True, 0, 0, rotation_speed, 0 ) )
            self.update( movePCMDCmd( True, 0, 0, 0, 0 ) )
            print 'end angle= ',self.angle[2]
            return
        else:
            print 'calibrate counterclockwise'
            while abs(self.angle[2]-startAngle) > 0.1 and self.time-startTime < timeout:
                self.update( movePCMDCmd( True, 0, 0, -rotation_speed, 0 ) )
            self.update( movePCMDCmd( True, 0, 0, 0, 0 ) )
            print 'end angle= ',self.angle[2]
            return
def moveTo( self, X, Y, Z, timeout=8.0 ):
    """Fly to absolute coordinates (X, Y, Z) using proportional steering.

    Each iteration steers along (target direction - current velocity
    direction), both unit-normalised, scaled by a fixed top speed.  Stops
    when within 0.1 of the target, when the drone strays more than 1 past
    the initial distance, or when `timeout` expires, then sends a zero
    command.  Note the sign conventions: self.position axes are negated /
    mixed when mapped to the X/Y/Z frame used here.
    """
    print 'move to ', X, Y, Z
    update_count = 0
    startTime = self.time
    startPosition = [0]*3
    currentSpeed_norm = 0
    targetSpeed_norm = 0
    startPosition[0] = -self.position[0]
    startPosition[1] = self.position[1]
    startPosition[2] = -self.position[2]
    print 'starting position x ', startPosition[0], ' y ', startPosition[1], ' z ', startPosition[2]
    targetPosition = [0]*3
    currentSpeed = [0]*3
    targetSpeed = [0]*3
    inputSpeed = [0]*3
    tempSpeed = [0]*3
    targetPosition[0] = X
    targetPosition[1] = Y
    targetPosition[2] = Z
    top_speed = 40
    initial_distance = np.sqrt(abs(targetPosition[0]+startPosition[0])**2+ \
        abs(targetPosition[1]-startPosition[1])**2+ \
        abs(targetPosition[2]+startPosition[2])**2)
    print 'initial distance', initial_distance
    print 'tartgetPos x ', targetPosition[0], ' y ', targetPosition[1], ' z ', targetPosition[2]
    while(self.time-startTime<timeout):
        print 'current position x ',-self.position[0],' y ',self.position[1],' z ',-self.position[2]
        # update_count = update_count+1
        distance = np.sqrt(abs(targetPosition[0]+self.position[0])**2+ \
            abs(targetPosition[1]-self.position[1])**2+ \
            abs(targetPosition[2]+self.position[2])**2)
        # print 'flight distance ',distance
        # print 'time ',self.time
        if(distance<0.1):
            # self.moveCamera( tilt=-90, pan=0 )
            # self.takePicture();
            print 'arrived', distance
            break
        if(distance>initial_distance+1):
            print 'drone out of path', distance
            break
        # Unit vector pointing from current position toward the target.
        targetSpeed_X = targetPosition[0]+self.position[0]
        targetSpeed_Y = targetPosition[1]-self.position[1]
        targetSpeed_Z = targetPosition[2]+self.position[2]
        targetSpeed_norm = np.sqrt(targetSpeed_X**2+targetSpeed_Y**2+targetSpeed_Z**2)
        targetSpeed[0] = targetSpeed_X/targetSpeed_norm
        targetSpeed[1] = targetSpeed_Y/targetSpeed_norm
        targetSpeed[2] = targetSpeed_Z/targetSpeed_norm
        print 'targetSpeed x ',targetSpeed[0],' y ',targetSpeed[1], ' z ', targetSpeed[2]
        # Unit vector of the current velocity (zero speed divides by zero).
        currentSpeed_norm = np.sqrt(self.speed[0]**2+self.speed[1]**2+self.speed[2]**2)
        # if currentSpeed_norm == 0:
        #     currentSpeed_norm = 1
        # print 'currentspeednorm', currentSpeed_norm
        currentSpeed[0] = -self.speed[1]/currentSpeed_norm
        currentSpeed[1] = self.speed[0]/currentSpeed_norm
        currentSpeed[2] = -self.speed[2]/currentSpeed_norm
        # print 'currentSpeed x ',currentSpeed[0], ' y ',currentSpeed[1], ' z ', currentSpeed[2]
        tempSpeed[0] = (targetSpeed[0]-currentSpeed[0])
        tempSpeed[1] = (targetSpeed[1]-currentSpeed[1])
        tempSpeed[2] = (targetSpeed[2]-currentSpeed[2])
        # print 'tempSpeed x ',tempSpeed[0],' y ',tempSpeed[1], ' z ', tempSpeed[2]
        inputSpeed_norm = np.sqrt(tempSpeed[0]**2+tempSpeed[1]**2+tempSpeed[2]**2)
        inputSpeed[0] = tempSpeed[0]/inputSpeed_norm
        inputSpeed[1] = tempSpeed[1]/inputSpeed_norm
        inputSpeed[2] = tempSpeed[2]/inputSpeed_norm
        # print 'inputSpeed x ',inputSpeed[0]*top_speed,' y ',inputSpeed[1]*top_speed, ' z ', inputSpeed[2]*top_speed
        self.update( movePCMDCmd( True, inputSpeed[0]*top_speed, inputSpeed[1]*top_speed, 0, inputSpeed[2]*top_speed ) )
    self.update( cmd=movePCMDCmd( True, 0, 0, 0, 0 ) )
    endPosition = self.position
    # print 'update count ', update_count
    print 'end position x ',-endPosition[0],' y ',endPosition[1],' z ',-endPosition[2]
def moveTo2( self, X, Y, Z, timeout=5.0 ):
    """Alternative moveTo: steer straight along the target direction vector.

    Unlike moveTo(), there is no velocity feedback term -- the commanded
    speed is simply the unit vector toward the target scaled by top_speed.
    Stops when within 0.2 of the target, when the drone strays more than 2
    past the initial distance, or when `timeout` expires.
    """
    print 'move to2 ', X, Y, Z
    startTime = self.time
    startPosition = [0]*3
    currentSpeed_norm = 0
    targetSpeed_norm = 0
    startPosition[0] = -self.position[1]
    startPosition[1] = -self.position[0]
    startPosition[2] = -self.position[2]
    print 'starting position x ', startPosition[0], ' y ', startPosition[1], ' z ', startPosition[2]
    targetPosition = [0]*3
    targetSpeed = [0]*3
    inputSpeed = [0]*3
    tempSpeed = [0]*3
    targetPosition[0] = X
    targetPosition[1] = Y
    targetPosition[2] = Z
    top_speed = 50
    initial_distance = np.sqrt(abs(targetPosition[1]-startPosition[0])**2+ \
        abs(targetPosition[0]-startPosition[1])**2+ \
        abs(targetPosition[2]-startPosition[2])**2)
    print 'tartgetPos x ', targetPosition[0], ' y ', targetPosition[1], ' z ', targetPosition[2]
    while(self.time-startTime<timeout):
        distance = np.sqrt(abs(targetPosition[1]+self.position[0])**2+ \
            abs(targetPosition[0]+self.position[1])**2+ \
            abs(targetPosition[2]+self.position[2])**2)
        print 'flight distance ',distance
        # print 'time ',self.time
        if(distance<0.2):
            # self.moveCamera( tilt=-90, pan=0 )
            # self.takePicture();
            print 'arrived', distance
            break
        if(distance>initial_distance+2):
            print 'drone out of path', distance
            break
        # Unit vector toward the target (axes negated per telemetry frame).
        targetSpeed_X = targetPosition[0]+self.position[1]
        targetSpeed_Y = targetPosition[1]+self.position[0]
        targetSpeed_Z = targetPosition[2]+self.position[2]
        targetSpeed_norm = np.sqrt(targetSpeed_X**2+targetSpeed_Y**2+targetSpeed_Z**2)
        targetSpeed[0] = targetSpeed_X/targetSpeed_norm
        targetSpeed[1] = targetSpeed_Y/targetSpeed_norm
        targetSpeed[2] = targetSpeed_Z/targetSpeed_norm
        # print 'targetSpeed x ',targetSpeed[0],' y ',targetSpeed[1], ' z ', targetSpeed[2]
        self.update( movePCMDCmd( True, targetSpeed[0]*top_speed, targetSpeed[1]*top_speed, 0, targetSpeed[2]*top_speed ) )
    self.update( cmd=movePCMDCmd( True, 0, 0, 0, 0 ) )
    endPosition = self.position
    print 'end position x ',-endPosition[1],' y ',-endPosition[0],' z ',-endPosition[2]
def moveToCancel( self ):
    # Abort any in-progress move by sending the cancel-moveTo command.
    self.update( cmd=cancelMoveToCmd() )
def wait( self, duration ):
print "Wait", duration
assert self.time is not None
startTime = self.time
while self.time-startTime < duration:
self.update()
if __name__ == "__main__":
    # CLI: at least one argument is required (it triggers usage if missing;
    # NOTE(review): sys.argv[1] itself is not read in this block -- presumably
    # consumed elsewhere, confirm).  Optional argv[2] is a metalog replay file;
    # argv[3] == 'F' disables asserts.
    if len(sys.argv) < 2:
        print __doc__
        sys.exit(2)
    metalog=None
    if len(sys.argv) > 2:
        metalog = MetaLog( filename=sys.argv[2] )
    if len(sys.argv) > 3 and sys.argv[3] == 'F':
        disableAsserts()
    drone = Bebop( metalog=metalog )
    print "Battery:", drone.battery
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A template task with a ball that should touch left or right wall."""
import numpy as np
import phyre.creator as creator_lib
@creator_lib.define_task_template(
    ball_x=np.linspace(0.1, 0.9, 32),
    ball_y=np.linspace(0, 40, 8),
    ball_r=np.linspace(0.05, 0.12, 5),
    left=[True, False],
    version='6',
)
def build_task(C, ball_x, ball_y, ball_r, left):
    """A ball sitting on a lipped shelf must touch the left or right wall."""
    # Target wall: a full-height vertical bar on the chosen side.
    wall = C.add('static bar', 1.0, left=0, angle=90, bottom=0)
    if not left:
        wall.set_right(C.scene.width)
    # Horizontal shelf centered in the scene, with two angled lips at its
    # ends so the ball cannot simply roll off.
    shelf = C.add('static bar', 0.99 - ball_r * 2,
                  center_x=C.scene.width / 2, top=20)
    C.add('static bar', 0.2, angle=65, right=shelf.left + 5, top=shelf.top)
    C.add('static bar', 0.2, angle=-65, left=shelf.right - 5, top=shelf.top)
    ball = C.add(
        'dynamic ball',
        ball_r,
        left=ball_x * C.scene.width,
        bottom=ball_y + shelf.top)
    # Discard parameterizations where the ball is not above the shelf, or
    # starts too far from the target wall to be solvable.
    if not (shelf.left < ball.center_x < shelf.right):
        raise creator_lib.SkipTemplateParams
    if abs(ball.center_x - wall.center_x) > C.scene.width * .7:
        raise creator_lib.SkipTemplateParams
    C.update_task(
        body1=ball,
        body2=wall,
        relationships=[C.SpatialRelationship.TOUCHING])
    C.set_meta(C.SolutionTier.BALL)
|
nilq/baby-python
|
python
|
import numpy as np
import csv
import json
import argparse
import pickle
# Command-line interface: a pickled model ensemble plus either one inline
# sample (-r) or a whole CSV dataset (-f).
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--method", help="vote Method to learn from")
ap.add_argument("-r", "--row", help="data row joined by comma")
ap.add_argument("-f", "--filename", help="filename of dataset")
args = vars(ap.parse_args())
# Load the pickled model list.  Fix: the old code leaked the file handle and
# read the pickle in text mode, which fails on Python 3 (pickle data is
# bytes); open in binary mode via a context manager instead.
with open(args["method"] + ".pkl", "rb") as model_file:
    models = pickle.load(model_file)
def read_csv(filename):
    """Read a CSV file of numbers into a list of rows of floats.

    Fix: the file is now opened in text mode with ``newline=''`` as the csv
    module requires on Python 3; the previous ``'rb'`` mode (a Python 2
    idiom) makes ``csv.reader`` raise there.
    """
    dataset = []
    with open(filename, 'r', newline='') as f:
        for row in csv.reader(f):
            dataset.append([float(el) for el in row])
    return dataset
if args["row"] is not None:
    # One sample supplied inline: threshold each model's prediction at 0.5
    # and report the fraction of models voting 1.
    sample = [float(el) for el in args["row"].split(',')]
    votes = [1 if float(m.predict(sample)) > 0.5 else 0 for m in models]
    print (sum(votes)/float(len(votes)))
elif args["filename"] is not None:
    # Whole dataset: every model predicts all rows; for each row, emit the
    # fraction of models voting 1, serialised as a JSON list.
    dataset = read_csv(args["filename"])
    all_predictions = [m.predict(dataset) for m in models]
    final_predictions = []
    for prediction_row in np.array(all_predictions).transpose():
        votes = [1 if el > 0.5 else 0 for el in prediction_row]
        final_predictions.append(sum(votes)/float(len(votes)))
    print(json.dumps(final_predictions))
|
nilq/baby-python
|
python
|
# Copyright (C) Izumi Kawashima
#
# json2oscimv4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# json2oscimv4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with json2oscimv4. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
import json
import TileData_v4_pb2
from pyproj import Transformer,Proj
# Web-Mercator (EPSG:3857) <-> WGS84 (EPSG:4326) transformers shared by all
# tile conversions.
EPSG3857 = Proj('+init=EPSG:3857')
EPSG4326 = Proj('+init=EPSG:4326')
transformer3857to4326 = Transformer.from_proj(EPSG3857,EPSG4326)
transformer4326to3857 = Transformer.from_proj(EPSG4326,EPSG3857)
SIZE = 256  # tile edge length in pixels
SCALE_FACTOR = 20037508.342789244  # half the EPSG:3857 world extent
HEIGHT_PER_METER = 100.0  # multiplier applied to parsed height values
# Property values treated as boolean "true" (oneway/area/tunnel/bridge).
YES_VALUES = frozenset(['yes','1','true'])
# Property keys copied through as localized name tags.
NAME_KEYS = frozenset(['name','name:ja','name:en'])
# Predefined OpenScienceMap v4 tag keys.  The list index is a key's wire id;
# keys not in this list get ids offset by 256 (see the tag encoding in
# convert()), so ORDER MUST NOT CHANGE.
TAG_PREDEFINED_KEYS = [
    "access", "addr:housename", "addr:housenumber", "addr:interpolation",
    "admin_level", "aerialway", "aeroway", "amenity", "area", "barrier",
    "bicycle", "brand", "bridge", "boundary", "building", "construction",
    "covered", "culvert", "cutting", "denomination", "disused", "embankment",
    "foot", "generator:source", "harbour", "highway", "historic", "horse",
    "intermittent", "junction", "landuse", "layer", "leisure", "lock",
    "man_made", "military", "motorcar", "name", "natural", "oneway",
    "operator", "population", "power", "power_source", "place", "railway",
    "ref", "religion", "route", "service", "shop", "sport", "surface",
    "toll", "tourism", "tower:type", "tracktype", "tunnel", "water",
    "waterway", "wetland", "width", "wood", "height", "min_height",
    "roof:shape", "roof:height", "rank",
]
TAG_PREDEFINED_VALUES = [
"yes",
"residential",
"service",
"unclassified",
"stream",
"track",
"water",
"footway",
"tertiary",
"private",
"tree",
"path",
"forest",
"secondary",
"house",
"no",
"asphalt",
"wood",
"grass",
"paved",
"primary",
"unpaved",
"bus_stop",
"parking",
"parking_aisle",
"rail",
"driveway",
"8",
"administrative",
"locality",
"turning_circle",
"crossing",
"village",
"fence",
"grade2",
"coastline",
"grade3",
"farmland",
"hamlet",
"hut",
"meadow",
"wetland",
"cycleway",
"river",
"school",
"trunk",
"gravel",
"place_of_worship",
"farm",
"grade1",
"traffic_signals",
"wall",
"garage",
"gate",
"motorway",
"living_street",
"pitch",
"grade4",
"industrial",
"road",
"ground",
"scrub",
"motorway_link",
"steps",
"ditch",
"swimming_pool",
"grade5",
"park",
"apartments",
"restaurant",
"designated",
"bench",
"survey_point",
"pedestrian",
"hedge",
"reservoir",
"riverbank",
"alley",
"farmyard",
"peak",
"level_crossing",
"roof",
"dirt",
"drain",
"garages",
"entrance",
"street_lamp",
"deciduous",
"fuel",
"trunk_link",
"information",
"playground",
"supermarket",
"primary_link",
"concrete",
"mixed",
"permissive",
"orchard",
"grave_yard",
"canal",
"garden",
"spur",
"paving_stones",
"rock",
"bollard",
"convenience",
"cemetery",
"post_box",
"commercial",
"pier",
"bank",
"hotel",
"cliff",
"retail",
"construction",
"-1",
"fast_food",
"coniferous",
"cafe",
"6",
"kindergarten",
"tower",
"hospital",
"yard",
"sand",
"public_building",
"cobblestone",
"destination",
"island",
"abandoned",
"vineyard",
"recycling",
"agricultural",
"isolated_dwelling",
"pharmacy",
"post_office",
"motorway_junction",
"pub",
"allotments",
"dam",
"secondary_link",
"lift_gate",
"siding",
"stop",
"main",
"farm_auxiliary",
"quarry",
"10",
"station",
"platform",
"taxiway",
"limited",
"sports_centre",
"cutline",
"detached",
"storage_tank",
"basin",
"bicycle_parking",
"telephone",
"terrace",
"town",
"suburb",
"bus",
"compacted",
"toilets",
"heath",
"works",
"tram",
"beach",
"culvert",
"fire_station",
"recreation_ground",
"bakery",
"police",
"atm",
"clothes",
"tertiary_link",
"waste_basket",
"attraction",
"viewpoint",
"bicycle",
"church",
"shelter",
"drinking_water",
"marsh",
"picnic_site",
"hairdresser",
"bridleway",
"retaining_wall",
"buffer_stop",
"nature_reserve",
"village_green",
"university",
"1",
"bar",
"townhall",
"mini_roundabout",
"camp_site",
"aerodrome",
"stile",
"9",
"car_repair",
"parking_space",
"library",
"pipeline",
"true",
"cycle_barrier",
"4",
"museum",
"spring",
"hunting_stand",
"disused",
"car",
"tram_stop",
"land",
"fountain",
"hiking",
"manufacture",
"vending_machine",
"kiosk",
"swamp",
"unknown",
"7",
"islet",
"shed",
"switch",
"rapids",
"office",
"bay",
"proposed",
"common",
"weir",
"grassland",
"customers",
"social_facility",
"hangar",
"doctors",
"stadium",
"give_way",
"greenhouse",
"guest_house",
"viaduct",
"doityourself",
"runway",
"bus_station",
"water_tower",
"golf_course",
"conservation",
"block",
"college",
"wastewater_plant",
"subway",
"halt",
"forestry",
"florist",
"butcher"
]
def heightstr2float(_height):
    """Parse an OSM height value into a float.

    Accepts numbers or strings such as "12", "12m" or "12 m" (a trailing
    metre suffix and surrounding whitespace are stripped).

    Fix: the old version returned the stripped *string* unchanged, so the
    caller that multiplies the result by HEIGHT_PER_METER without an extra
    float() (the min_height path) raised TypeError; the value is now always
    converted to float.  Using endswith() also avoids an IndexError on an
    empty string.
    """
    if type(_height) is str:
        if _height.endswith('m'):
            _height = _height[:-1]
        _height = _height.strip()
    return float(_height)
# Reverse lookup tables: predefined key/value string -> its wire id (the
# position in the predefined list).  Duplicates, if any, keep the last index,
# matching the original loop's behaviour.
predefined_key_idx = {key: idx for idx, key in enumerate(TAG_PREDEFINED_KEYS)}
predefined_value_idx = {value: idx for idx, value in enumerate(TAG_PREDEFINED_VALUES)}
def convert(tile_z,tile_x,tile_y,buffer_pixels,fr):
    """Convert one GeoJSON tile (JSON string `fr`) into an OpenScienceMap v4
    binary tile (bytes).

    Steps: compute the tile's EPSG:3857 bounds (padded by `buffer_pixels`),
    normalize each feature's properties into oscim-style tags, delta-encode
    the geometry into 0..4096 tile coordinates, and serialize everything
    with TileData_v4_pb2.
    """
    # Metres per pixel at this zoom, used to pad the bounds by one pixel.
    paz = 20037508.342789244 / 256 / (2 ** tile_z)
    tile_x = tile_x*SIZE
    tile_y = tile_y*SIZE
    center = (SIZE << tile_z) >> 1
    min_lat3857 = ((center - (tile_y+SIZE+paz))/center)*SCALE_FACTOR
    max_lat3857 = ((center - (tile_y-paz))/center)*SCALE_FACTOR
    min_lon3857 = (((tile_x-paz)-center)/center)*SCALE_FACTOR
    max_lon3857 = (((tile_x+SIZE+paz)-center)/center)*SCALE_FACTOR
    # 4326 bounds are computed but not used below.
    min_lon4326,min_lat4326 = transformer3857to4326.transform(min_lon3857,min_lat3857)
    max_lon4326,max_lat4326 = transformer3857to4326.transform(max_lon3857,max_lat3857)
    oscim_tile = TileData_v4_pb2.Data()
    oscim_tile.version = 4
    found_points = []
    found_polygons = []
    found_lines = []
    appending_target = None  # (unused)
    # Tag table state: (key_id, value_id) pairs flattened into serialized_tags;
    # custom (non-predefined) keys/values get ids offset by 256.
    tag2idx = {}
    serialized_tags = []
    oscim_keys = []
    key2oscim_idx = {}
    oscim_values = []
    value2oscim_idx = {}
    def ll2xy(lon,lat):
        # Project lon/lat to integer tile coordinates in 0..4096, stretching
        # by (SIZE+buffer_pixels)/SIZE around the tile center.
        lon3857,lat3857 = transformer4326to3857.transform(lon,lat)
        rx = float(lon3857-min_lon3857)/float(max_lon3857-min_lon3857)
        ry = float(lat3857-min_lat3857)/float(max_lat3857-min_lat3857)
        rx = rx-0.5
        rx = rx*float(SIZE+buffer_pixels)/float(SIZE)
        rx = rx+0.5
        x = int(rx*4096.0)
        ry = ry-0.5
        ry = -ry*float(SIZE+buffer_pixels)/float(SIZE) # NEGATE!
        ry = ry+0.5
        y = int(ry*4096.0)
        return x,y
    def lls2xy(lls):
        # Line string -> delta-encoded [dx, dy] pairs, dropping zero deltas.
        abs_xys = []
        for ll in lls:
            lon = ll[0]
            lat = ll[1]
            x,y = ll2xy(lon,lat)
            abs_xys.append([x,y])
        last_x = 0
        last_y = 0
        delta_xys = []
        for x,y in abs_xys:
            dx = x-last_x
            dy = y-last_y
            if dx != 0 or dy != 0:
                delta_xys.append([dx,dy])
            last_x = x
            last_y = y
        return delta_xys
    def llss2xy(llss):
        # Polygon rings -> delta-encoded rings.  The closing point of each
        # ring is dropped ([:-1]); deltas continue across rings.
        abs_xyss = []
        for lls in llss:
            abs_xys = []
            for ll in lls:
                lon = ll[0]
                lat = ll[1]
                x,y = ll2xy(lon,lat)
                abs_xys.append([x,y])
            abs_xys = abs_xys[:-1]
            abs_xyss.append(abs_xys)
        last_x = 0
        last_y = 0
        delta_xyss = []
        for abs_xys in abs_xyss:
            delta_xys = []
            for x,y in abs_xys:
                dx = x-last_x
                dy = y-last_y
                if dx != 0 or dy != 0:
                    delta_xys.append([dx,dy])
                last_x = x
                last_y = y
            delta_xyss.append(delta_xys)
        return delta_xyss
    j = json.loads(fr)
    layers = j['features']
    for layer in layers:
        if layer['properties']['layer'] in frozenset(['admin_lines']): continue
        features = layer['features']
        for feature in features:
            fixed_kv = {}
            names_kv = {}
            tag_idxs_in_feature = []
            properties = feature['properties']
            if 'min_zoom' in properties:
                min_zoom = int(properties['min_zoom'])
                if tile_z > min_zoom: continue
            # Collect raw properties (except a few ignored keys).
            kv = {}
            if layer['properties']['layer'] == 'land':
                fixed_kv['land'] = 'land'
            else:
                for key in properties:
                    if key in frozenset(['id','sort_rank','source','surface']): continue
                    value = properties[key]
                    kv[key] = value
            # Pass name-like keys straight through.
            for key in kv.keys():
                value = kv[key]
                if key in NAME_KEYS:
                    names_kv[key] = value
            # Normalize boolean-ish flags; tunnel/bridge are mutually
            # exclusive with area (if/elif chain).
            if 'oneway' in kv and str(kv['oneway']).lower() in YES_VALUES:
                fixed_kv['oneway'] = 'yes'
            if 'area' in kv and str(kv['area']).lower() in YES_VALUES:
                fixed_kv['area'] = 'yes'
            elif 'tunnel' in kv and str(kv['tunnel']).lower() in YES_VALUES:
                fixed_kv['tunnel'] = 'yes'
            elif 'bridge' in kv and str(kv['bridge']).lower() in YES_VALUES:
                fixed_kv['bridge'] = 'yes'
            if 'leisure' in kv:
                leisure = kv['leisure']
                fixed_kv['leisure'] = leisure
            # Remap natural/landuse values between the two namespaces.
            if 'natural' in kv:
                natural = kv['natural']
                if natural in frozenset(['village_green','meadow','wood']):
                    fixed_kv['landuse'] = natural
                elif natural == 'mountain_range': pass
                else:
                    fixed_kv['natural'] = natural
            if 'landuse' in kv:
                landuse = kv['landuse']
                if landuse in frozenset(['park','natural_reserve']):
                    fixed_kv['leisure'] = landuse
                elif landuse == 'field':
                    fixed_kv['landuse'] = 'farmland'
                elif landuse in frozenset(['grassland','scrub']):
                    fixed_kv['natural'] = landuse
                else:
                    fixed_kv['landuse'] = landuse
            for explicit_kind in frozenset(['waterway']):
                if explicit_kind in kv: fixed_kv[explicit_kind] = kv[explicit_kind]
            # The 'type' property drives most of the tag mapping.
            if 'type' in kv:
                type_ = kv['type']
                property_layer = layer['properties']['layer']
                if property_layer in frozenset(['buildings','building:part']):
                    fixed_kv['building'] = 'yes'
                    fixed_kv['type'] = 'yes'
                    if 'id' in kv:
                        fixed_kv['id'] = kv['id']
                    # Building extrusion attributes only at high zoom.
                    if tile_z > 16:
                        if 'height' in kv:
                            _height = float(heightstr2float(kv['height']))*HEIGHT_PER_METER
                            fixed_kv['height'] = str(_height)
                        elif 'building:levels' in kv:
                            fixed_kv['building:levels'] = str(kv['building:levels'])
                        if 'min_height' in kv:
                            _min_height = heightstr2float(kv['min_height'])*HEIGHT_PER_METER
                            fixed_kv['min_height'] = str(_min_height)
                        if 'colour' in kv: fixed_kv['colour'] = kv['colour']
                elif type_ in frozenset([
                    'atm',
                    'bank',
                    'bar',
                    'bench',
                    'bicycle',
                    'bicycle_rental',
                    'books',
                    'bus_station',
                    'cafe',
                    'cinema',
                    'clothes',
                    'convenience',
                    'dry_cleaning',
                    'fast_food',
                    'fountain',
                    'grave_yard',
                    'hospital',
                    'library',
                    'parking',
                    'pharmacy',
                    'place_of_worship',
                    'police',
                    'post_box',
                    'post_office',
                    'pub',
                    'recycling',
                    'restaurant',
                    'school',
                    'shelter',
                    'supermarket',
                    'university',
                    'telephone',
                    'theatre',
                    'toilets'
                ]):
                    fixed_kv['amenity'] = type_
                elif type_ == 'administrative':
                    fixed_kv['boundary'] = 'administrative'
                    admin_level = int(kv['admin_level'])
                    if admin_level == 2:
                        fixed_kv['place'] = 'country'
                    elif admin_level == 4:
                        fixed_kv['place'] = 'city'
                    elif admin_level == 7:
                        fixed_kv['place'] = 'village'
                    elif admin_level == 8:
                        fixed_kv['place'] = 'town'
                elif type_ == 'place_of_worship':
                    fixed_kv['amenity'] = type_
                elif type_ =='riverbank':
                    fixed_kv['natural'] = 'water'
                # 'class' refines the mapping (only defined when 'type' is).
                if 'class' in kv:
                    class_value = kv['class']
                    if class_value in frozenset(['earth']):
                        fixed_kv['landuse'] = 'urban'
                    # WATER
                    elif class_value == 'natural':
                        if type_ == 'lake;pond':
                            fixed_kv['water'] = 'pond'
                        if type_ == 'beach':
                            fixed_kv['natural'] = 'beach'
                        elif type_ == 'river':
                            fixed_kv['waterway'] = 'river'
                        elif type_ in frozenset(['water','riverbank','ocean']):
                            fixed_kv['natural'] = 'water'
                        elif type_ == 'wood':
                            fixed_kv['landuse'] = 'wood'
                    # LEISURE
                    elif class_value == 'leisure':
                        if type_ in frozenset(['pitch','park','playground','common','garden']):
                            fixed_kv['leisure'] = type_
                    # LANDUSE
                    elif class_value == 'landuse':
                        if type_ in frozenset(['park','natural_reserve']):
                            fixed_kv['leisure'] = type_
                        elif type_ == 'field':
                            fixed_kv['landuse'] = 'farmland'
                        elif type_ in frozenset(['grassland','scrub','tree']):
                            fixed_kv['natural'] = type_
                        else:
                            fixed_kv['landuse'] = type_
                    # ROADS
                    elif class_value == 'highway':
                        if type_ == 'minor_road':
                            fixed_kv['highway'] = 'residential'
                        elif type_ == 'highway':
                            fixed_kv['highway'] = 'motorway'
                        elif type_ == 'residential':
                            fixed_kv['highway'] = 'service'
                        elif type_ == 'pedestrian':
                            fixed_kv['highway'] = 'footway'
                        else:
                            fixed_kv['highway'] = type_
                    # RAILS
                    elif class_value == 'railway':
                        if type_ in frozenset(['rail','subway','station','platform']):
                            fixed_kv['railway'] = type_
                    # AIR
                    elif class_value == 'aeroway':
                        if type_ in frozenset(['aerodrome','apron','helipad']):
                            fixed_kv['aeroway'] = type_
                    elif class_value in frozenset(['pitch','park','playground','common','garden']):
                        fixed_kv['leisure'] = class_value
                    elif class_value == 'tourism':
                        fixed_kv['tourism'] = type_
                    elif class_value in frozenset(['viewpoint','museum','information','park','theme_park','attraction']):
                        fixed_kv['tourism'] = class_value
                    elif class_value == 'office':
                        fixed_kv['office'] = type_
            # Skip features that did not map to any renderable tag.
            if len(fixed_kv) == 0: continue
            merged_kv = {}
            for key in names_kv: merged_kv[key] = names_kv[key]
            for key in fixed_kv: merged_kv[key] = fixed_kv[key]
            # print(layer['properties']['layer'],merged_kv)
            # Intern every (key, value) pair into the tile's tag table.
            for key in merged_kv.keys():
                value = merged_kv[key]
                if key in predefined_key_idx:
                    key_idx = predefined_key_idx[key]
                else:
                    if key in key2oscim_idx:
                        key_idx = key2oscim_idx[key]
                    else:
                        key_idx = len(oscim_keys)
                        key2oscim_idx[key] = key_idx
                        oscim_keys.append(key)
                    key_idx = key_idx+256
                if value in predefined_value_idx:
                    value_idx = predefined_value_idx[value]
                else:
                    if value in value2oscim_idx:
                        value_idx = value2oscim_idx[value]
                    else:
                        value_idx = len(oscim_values)
                        value2oscim_idx[value] = value_idx
                        oscim_values.append(value)
                    value_idx = value_idx+256
                tag = (key_idx,value_idx)
                if tag in tag2idx:
                    tag_idx = tag2idx[tag]
                else:
                    tag_idx = int(len(serialized_tags)/2)
                    tag2idx[tag] = tag_idx
                    serialized_tags.append(int(key_idx))
                    serialized_tags.append(int(value_idx))
                tag_idxs_in_feature.append(int(tag_idx))
            if len(tag_idxs_in_feature) == 0: continue
            # Encode the geometry into protobuf elements.
            geometry = feature['geometry']
            geometry_type = geometry['type']
            c = geometry['coordinates']
            if geometry_type == 'Point':
                oscim_element = TileData_v4_pb2.Data.Element()
                oscim_element.num_tags = len(tag_idxs_in_feature)
                oscim_element.tags.extend(tag_idxs_in_feature)
                x,y = ll2xy(c[0],c[1])
                oscim_element.coordinates.extend([x,y])
                oscim_element.indices.extend([1])
                oscim_element.num_indices = 1
                found_points.append(oscim_element)
            elif geometry_type == 'MultiPoint':
                for cp in c:
                    oscim_element = TileData_v4_pb2.Data.Element()
                    oscim_element.num_tags = len(tag_idxs_in_feature)
                    oscim_element.tags.extend(tag_idxs_in_feature)
                    x,y = ll2xy(cp[0],cp[1])
                    oscim_element.coordinates.extend([x,y])
                    oscim_element.num_indices = 0
                    oscim_element.indices.extend([1])
                    oscim_element.num_indices = 1
                    found_points.append(oscim_element)
            elif geometry_type == 'LineString':
                oscim_element = TileData_v4_pb2.Data.Element()
                oscim_element.num_tags = len(tag_idxs_in_feature)
                oscim_element.tags.extend(tag_idxs_in_feature)
                delta_xys = lls2xy(c)
                oscim_element.num_indices = 1
                oscim_element.indices.extend([len(delta_xys)])
                flat_xys = []
                for x,y in delta_xys:
                    flat_xys.append(x)
                    flat_xys.append(y)
                oscim_element.coordinates.extend(flat_xys)
                found_lines.append(oscim_element)
            elif geometry_type == 'MultiLineString':
                for cp in c:
                    oscim_element = TileData_v4_pb2.Data.Element()
                    oscim_element.num_tags = len(tag_idxs_in_feature)
                    oscim_element.tags.extend(tag_idxs_in_feature)
                    delta_xys = lls2xy(cp)
                    oscim_element.indices.extend([len(delta_xys)])
                    oscim_element.num_indices = 1
                    flat_xys = []
                    for x,y in delta_xys:
                        flat_xys.append(x)
                        flat_xys.append(y)
                    oscim_element.coordinates.extend(flat_xys)
                    found_lines.append(oscim_element)
            elif geometry_type == 'Polygon':
                oscim_element = TileData_v4_pb2.Data.Element()
                oscim_element.num_tags = len(tag_idxs_in_feature)
                oscim_element.tags.extend(tag_idxs_in_feature)
                delta_xyss = llss2xy(c)
                indices = []
                for delta_xys in delta_xyss:
                    indices.append(len(delta_xys))
                oscim_element.indices.extend(indices)
                oscim_element.num_indices = len(indices)
                flat_xys = []
                for delta_xys in delta_xyss:
                    for x,y in delta_xys:
                        flat_xys.append(x)
                        flat_xys.append(y)
                oscim_element.coordinates.extend(flat_xys)
                found_polygons.append(oscim_element)
            elif geometry_type == 'MultiPolygon':
                for cp in c:
                    oscim_element = TileData_v4_pb2.Data.Element()
                    oscim_element.num_tags = len(tag_idxs_in_feature)
                    oscim_element.tags.extend(tag_idxs_in_feature)
                    delta_xyss = llss2xy(cp)
                    indices = []
                    for delta_xys in delta_xyss:
                        indices.append(len(delta_xys))
                    oscim_element.indices.extend(indices)
                    oscim_element.num_indices = len(indices)
                    flat_xys = []
                    for delta_xys in delta_xyss:
                        for x,y in delta_xys:
                            flat_xys.append(x)
                            flat_xys.append(y)
                    oscim_element.coordinates.extend(flat_xys)
                    found_polygons.append(oscim_element)
    # Assemble the tile message and serialize.
    if len(found_points) > 0: oscim_tile.points.extend(found_points)
    if len(found_polygons) > 0: oscim_tile.polygons.extend(found_polygons)
    if len(found_lines) > 0: oscim_tile.lines.extend(found_lines)
    oscim_tile.num_tags = int(len(serialized_tags)/2)
    oscim_tile.tags.extend(serialized_tags)
    oscim_tile.keys.extend(oscim_keys)
    oscim_tile.num_keys = len(oscim_keys)
    oscim_tile.values.extend(oscim_values)
    oscim_tile.num_vals = len(oscim_values)
    return b'0000'+oscim_tile.SerializeToString() # TODO: Header bytes to be fixed (although it is readable from vtm)
|
nilq/baby-python
|
python
|
load(
"@bazel_tools//tools/jdk:toolchain_utils.bzl",
"find_java_toolchain",
)
load(
"@rules_scala_annex//rules:providers.bzl",
_ScalaConfiguration = "ScalaConfiguration",
_ZincConfiguration = "ZincConfiguration",
_ZincInfo = "ZincInfo",
)
load(
"@rules_scala_annex//rules/common:private/utils.bzl",
_resolve_execution_reqs = "resolve_execution_reqs",
)
#
# PHASE: compile
#
# Compiles Scala sources ;)
#
def phase_zinc_compile(ctx, g):
    """Compile phase: run the Zinc compile worker over the target's sources.

    Declares the per-target Zinc analysis outputs (apis/infos/relations/
    setup/stamps, plus the mains manifest and used-deps report), builds the
    worker command line, registers the compile action, and returns a struct
    with the mains file, the used-deps file and this target's ZincInfo.
    """
    scala_configuration = ctx.attr.scala[_ScalaConfiguration]
    zinc_configuration = ctx.attr.scala[_ZincConfiguration]
    # Analysis artifacts produced by Zinc for this target.
    apis = ctx.actions.declare_file("{}/apis.gz".format(ctx.label.name))
    infos = ctx.actions.declare_file("{}/infos.gz".format(ctx.label.name))
    mains_file = ctx.actions.declare_file("{}.jar.mains.txt".format(ctx.label.name))
    relations = ctx.actions.declare_file("{}/relations.gz".format(ctx.label.name))
    setup = ctx.actions.declare_file("{}/setup.gz".format(ctx.label.name))
    stamps = ctx.actions.declare_file("{}/stamps.gz".format(ctx.label.name))
    used = ctx.actions.declare_file("{}/deps_used.txt".format(ctx.label.name))
    tmp = ctx.actions.declare_directory("{}/tmp".format(ctx.label.name))
    # Expand $(location ...) in the user's javacopts and append the Java
    # toolchain's default options.
    javacopts = [
        ctx.expand_location(option, ctx.attr.data)
        for option in ctx.attr.javacopts + java_common.default_javac_opts(
            java_toolchain = find_java_toolchain(ctx, ctx.attr._java_toolchain),
        )
    ]
    # Upstream targets that also produced Zinc analyses.
    zincs = [dep[_ZincInfo] for dep in ctx.attr.deps if _ZincInfo in dep]
    args = ctx.actions.args()
    # One --analysis group per transitive dep (see _compile_analysis).
    args.add_all(depset(transitive = [zinc.deps for zinc in zincs]), map_each = _compile_analysis)
    args.add("--compiler_bridge", zinc_configuration.compiler_bridge)
    args.add_all("--compiler_classpath", g.classpaths.compiler)
    args.add_all("--classpath", g.classpaths.compile)
    args.add_all(scala_configuration.global_scalacopts, format_each = "--compiler_option=%s")
    args.add_all(ctx.attr.scalacopts, format_each = "--compiler_option=%s")
    args.add_all(javacopts, format_each = "--java_compiler_option=%s")
    args.add(ctx.label, format = "--label=%s")
    args.add("--main_manifest", mains_file)
    args.add("--output_apis", apis)
    args.add("--output_infos", infos)
    args.add("--output_jar", g.classpaths.jar)
    args.add("--output_relations", relations)
    args.add("--output_setup", setup)
    args.add("--output_stamps", stamps)
    args.add("--output_used", used)
    args.add_all("--plugins", g.classpaths.plugin)
    args.add_all("--source_jars", g.classpaths.src_jars)
    args.add("--tmp", tmp.path)
    args.add("--log_level", zinc_configuration.log_level)
    args.add_all("--", g.classpaths.srcs)
    args.set_param_file_format("multiline")
    args.use_param_file("@%s", use_always = True)
    worker = zinc_configuration.compile_worker
    worker_inputs, _, input_manifests = ctx.resolve_command(tools = [worker])
    inputs = depset(
        [zinc_configuration.compiler_bridge] + ctx.files.data + ctx.files.srcs + worker_inputs,
        transitive = [
            g.classpaths.plugin,
            g.classpaths.compile,
            g.classpaths.compiler,
        ] + [zinc.deps_files for zinc in zincs],
    )
    outputs = [g.classpaths.jar, mains_file, apis, infos, relations, setup, stamps, used, tmp]
    # todo: different execution path for nosrc jar?
    ctx.actions.run(
        mnemonic = "ScalaCompile",
        inputs = inputs,
        outputs = outputs,
        executable = worker.files_to_run.executable,
        input_manifests = input_manifests,
        # Runs as a persistent worker with sandboxing disabled --
        # NOTE(review): presumably so the worker can keep incremental state
        # between actions; confirm.
        execution_requirements = _resolve_execution_reqs(ctx, {"no-sandbox": "1", "supports-workers": "1"}),
        use_default_shell_env = True,
        arguments = [args],
    )
    # Collect this target's class jars and ijars for downstream analyses.
    jars = []
    for jar in g.javainfo.java_info.outputs.jars:
        jars.append(jar.class_jar)
        jars.append(jar.ijar)
    zinc_info = _ZincInfo(
        apis = apis,
        deps_files = depset([apis, relations], transitive = [zinc.deps_files for zinc in zincs]),
        label = ctx.label,
        relations = relations,
        deps = depset(
            [struct(
                apis = apis,
                jars = tuple(jars),
                label = ctx.label,
                relations = relations,
            )],
            transitive = [zinc.deps for zinc in zincs],
        ),
    )
    g.out.providers.append(zinc_info)
    return struct(
        mains_file = mains_file,
        used = used,
        # todo: see about cleaning up & generalizing fields below
        zinc_info = zinc_info,
    )
def _compile_analysis(analysis):
    # Flatten one zinc analysis record into command-line arguments of the form:
    #   --analysis _<label> <apis path> <relations path> <jar paths...>
    jar_paths = [jar.path for jar in analysis.jars]
    header = [
        "--analysis",
        "_{}".format(analysis.label),
        analysis.apis.path,
        analysis.relations.path,
    ]
    return header + jar_paths
|
nilq/baby-python
|
python
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import functools
import numpy as np
import timeit
import json
from operator_benchmark import benchmark_utils
"""Performance microbenchmarks.
This module contains core functionalities for performance microbenchmark tests.
"""
# List of run modes we support.
# Each benchmark test case is associated with a run mode.
# If the value of the test case's run mode is less than the value of the
# benchmark binary's run mode, the test case will be executed, e.g. a short-mode
# test case will be executed when the binary is on either long and short
# modes; while a long-mode test case will only be executed when the binary is
# on long-mode.
# Mode name -> ordinal. A test case registered at ordinal m is executed by
# every binary whose run-mode ordinal is >= m (short tests also run in long
# mode; long tests run only in long mode) -- see add_benchmark_tester.
RUN_MODES = {'short': 0, 'long': 1}
# One registry per run mode: maps full test id -> benchmark function.
BENCHMARK_TESTER = [{} for _ in range(len(RUN_MODES))]
# Registered test-group functions, keyed by qualified name.
BENCHMARK_TEST_GROUP = {}
def add_benchmark_tester(framework, op_name, input_shapes, op_args, run_mode, func):
    """Register one benchmark test case under every run mode that executes it.

    The test id is a "__"-joined string of framework, op name, input shapes,
    op args and run mode; BenchmarkRunner later splits it on "__".
    """
    test_id = "__".join([
        framework,
        op_name,
        benchmark_utils.shape_to_string(input_shapes),
        str(op_args),
        run_mode,
    ])
    threshold = RUN_MODES[run_mode]
    # A short-mode case is registered for both short and long binaries;
    # a long-mode case is registered only for the long binary.
    for mode_value in RUN_MODES.values():
        if mode_value >= threshold:
            BENCHMARK_TESTER[mode_value][test_id] = func
def register_test(func):
    """Decorator to register a benchmark test group.

    A benchmark test group is a function that returns a list of benchmark test
    case objects to be run.

    The registry key is qualified with the decorated function's own module
    (``func.__module__``).  The previous code used this module's ``__name__``,
    so two group functions with the same name defined in different benchmark
    modules would silently overwrite each other in BENCHMARK_TEST_GROUP.
    """
    BENCHMARK_TEST_GROUP[func.__module__ + "." + func.__name__] = func
    return func
# Banner template filled by BenchmarkRunner._print_header with
# (dash line, dash line, run mode, iteration count).  The iteration count was
# already passed to .format() but had no placeholder, so it was silently
# dropped; a fourth placeholder line makes it visible.
HEADER_LINE = """
# {}
# PyTorch/Caffe2 Operator Micro-benchmarks
# {}
# Run_mode : {}
# Iterations : {}
"""
class BenchmarkRunner(object):
    """Benchmarks all the registered benchmark test groups.

    Attributes:
        args: Parsed command-line namespace. Fields read here: run_mode
            ('short' or 'long'), operator and framework (substring filters),
            iterations, warmup_iterations, list_tests, ai_pep_format.
    """

    def __init__(self, args):
        # Execution constraints: iterations per measurement, and the
        # thresholds that decide when a measurement is considered stable.
        # TODO: consider time-bound constraints as well.
        self.args = args
        self.iters = 100
        self.has_explicit_iteration_count = False
        self.multiplier = 2
        self.min_time = 0.8
        self.max_iters = 1e6
        # Invoke every registered test-group function so the individual
        # test cases register themselves in BENCHMARK_TESTER.
        for test_group_func in BENCHMARK_TEST_GROUP.values():
            test_group_func()
        if self.args.iterations:
            self.has_explicit_iteration_count = True
            self.iters = self.args.iterations

    def _print_header(self, run_mode):
        """Print the run banner and the list of operators to be benchmarked."""
        DASH_LINE = '-' * 40
        print(HEADER_LINE.format(DASH_LINE, DASH_LINE, self.args.run_mode, self.iters))
        print("# List of Operators to run:")
        if self.args.operator is None:
            printed_ops = set()
            for full_test_id in BENCHMARK_TESTER[run_mode]:
                # Test ids are "framework__op__shapes__args__mode".
                op_name = full_test_id.split("__")[1]
                if op_name not in printed_ops:
                    print("# {}".format(op_name))
                    printed_ops.add(op_name)
        else:
            print("# {}".format(self.args.operator))
        print("\n")

    def _print_perf_result(self, full_test_id, input_shapes, args, reported_run_time):
        """Report one measurement, either in AI-PEP JSON or human-readable form."""
        if self.args.ai_pep_format:
            # Output for AI-PEP
            print("Caffe2Observer " + json.dumps(
                {
                    "type": "NET",
                    "metric": full_test_id,
                    "unit": "ms",
                    "value": str(reported_run_time),
                }
            ))
        else:
            print("# Input Shape: {}\n"
                  "Execution Time (us) : {:.3f} \n"
                  .format(input_shapes, reported_run_time))

    def _predict_num_iter_needed(self, i):
        """Return the next, larger iteration count to try."""
        return (i * self.multiplier)

    def _report_iteration_result(self, iters, run_time):
        """True when the measurement is long enough (or iters capped) to be stable."""
        return (iters > self.max_iters or
                run_time > 5 * self.min_time)

    def run(self):
        """Execute every registered test case that matches the CLI filters."""
        run_mode = RUN_MODES[self.args.run_mode]
        self._print_header(run_mode)
        if self.args.list_tests:
            return
        for full_test_id, benchmark_func in BENCHMARK_TESTER[run_mode].items():
            framework, op_name, input_shapes, args, _ = full_test_id.split("__")
            # TODO: consider regex matching for test filtering.
            # Currently, this is a sub-string matching.
            if self.args.operator and (self.args.operator not in full_test_id):
                continue
            if self.args.framework and (self.args.framework not in full_test_id):
                continue
            # To reduce variance, fix a numpy randseed per test case so the
            # randomly generated input tensors remain the same each run.
            # The seed is limited to 32 bits by numpy's requirement.
            np.random.seed(seed=hash(full_test_id) & ((1 << 32) - 1))
            print("# Benchmarking {} {}".format(framework, op_name))
            # Warmup. BUG FIX: the original code built
            # functools.partial(benchmark_func, warmup_iterations) but never
            # called it, so no warmup actually ran before measurement.
            benchmark_func(self.args.warmup_iterations)
            # Actual Execution
            run_time = 0
            iters = self.iters
            while True:
                # timeit measures wall time in seconds. Each experiment runs
                # benchmark_func once with `iters` internal repetitions (the
                # benchmark loop is pushed to C++ to minimize Python overhead)
                # because a single call is too short to measure accurately.
                # timeit's doc recommends taking the minimum over repeats.
                run_time = min(timeit.repeat(
                    functools.partial(benchmark_func, iters),
                    repeat=1, number=1))
                # Stop once the measurement is long enough to be significant,
                # or when the caller pinned the iteration count explicitly.
                results_are_significant = (
                    self.has_explicit_iteration_count or
                    self._report_iteration_result(iters, run_time))
                if results_are_significant:
                    break
                # Re-estimate the hopefully-sufficient iteration count and
                # run the benchmark again.
                iters = self._predict_num_iter_needed(iters)
            # Report mean time per iteration, in microseconds.
            reported_run_time = (1e6 * run_time / iters)
            self._print_perf_result(full_test_id, input_shapes, args, reported_run_time)
|
nilq/baby-python
|
python
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tink.python.tink.jwt._verified_jwt."""
import datetime
from absl.testing import absltest
from tink import jwt
# Fixed, timezone-aware (UTC) timestamps shared by the test cases below.
ISSUED_AT = datetime.datetime.fromtimestamp(1582230020, datetime.timezone.utc)
NOT_BEFORE = datetime.datetime.fromtimestamp(1893553445, datetime.timezone.utc)
EXPIRATION = datetime.datetime.fromtimestamp(2218027244, datetime.timezone.utc)
class VerifiedJwtTest(absltest.TestCase):
  """Tests for the claim accessors of jwt.VerifiedJwt."""

  def test_empty(self):
    """Accessors raise KeyError and has_*() is False when no claims are set."""
    token = jwt.VerifiedJwt._create(jwt.new_raw_jwt())
    with self.assertRaises(KeyError):
      token.issuer()
    with self.assertRaises(KeyError):
      token.subject()
    with self.assertRaises(KeyError):
      token.jwt_id()
    with self.assertRaises(KeyError):
      token.audiences()
    with self.assertRaises(KeyError):
      token.expiration()
    with self.assertRaises(KeyError):
      token.issued_at()
    with self.assertRaises(KeyError):
      token.not_before()
    with self.assertRaises(KeyError):
      token.custom_claim('unknown')
    self.assertFalse(token.has_issuer())
    self.assertFalse(token.has_subject())
    self.assertFalse(token.has_jwt_id())
    self.assertFalse(token.has_audiences())
    self.assertFalse(token.has_expiration())
    self.assertFalse(token.has_issued_at())
    self.assertFalse(token.has_not_before())

  def test_full(self):
    """All registered claims round-trip through the verified token."""
    token = jwt.VerifiedJwt._create(
        jwt.new_raw_jwt(
            issuer='Issuer',
            subject='Subject',
            jwt_id='JWT ID',
            audiences=['bob', 'eve'],
            expiration=EXPIRATION,
            issued_at=ISSUED_AT,
            not_before=NOT_BEFORE))
    self.assertTrue(token.has_issuer())
    self.assertEqual(token.issuer(), 'Issuer')
    self.assertTrue(token.has_subject())
    self.assertEqual(token.subject(), 'Subject')
    self.assertTrue(token.has_jwt_id())
    self.assertEqual(token.jwt_id(), 'JWT ID')
    self.assertTrue(token.has_audiences())
    self.assertEqual(token.audiences(), ['bob', 'eve'])
    self.assertTrue(token.has_expiration())
    self.assertEqual(token.expiration(), EXPIRATION)
    self.assertTrue(token.has_issued_at())
    self.assertEqual(token.issued_at(), ISSUED_AT)
    self.assertTrue(token.has_not_before())
    self.assertEqual(token.not_before(), NOT_BEFORE)

  def test_custom_claims(self):
    """Custom claims of every JSON type are preserved and enumerable."""
    custom_claims = {'string': 'value',
                     'boolean': True,
                     'number': 123.456,
                     'integer': 123,
                     'null': None,
                     'array': [1, None, 'Bob', 2.2, {'foo': 'bar'}],
                     'object': {'one': {'two': 3}}}
    # Fixed a duplicated assignment typo here ("token = token = ...").
    token = jwt.VerifiedJwt._create(
        jwt.new_raw_jwt(custom_claims=custom_claims))
    self.assertCountEqual(
        token.custom_claim_names(),
        {'string', 'boolean', 'number', 'integer', 'null', 'array', 'object'})
    self.assertEqual(token.custom_claim('string'), 'value')
    self.assertEqual(token.custom_claim('boolean'), True)
    self.assertEqual(token.custom_claim('number'), 123.456)
    self.assertEqual(token.custom_claim('integer'), 123)
    self.assertIsNone(token.custom_claim('null'))
    self.assertEqual(
        token.custom_claim('array'),
        [1, None, 'Bob', 2.2, {'foo': 'bar'}])
    self.assertEqual(token.custom_claim('object'), {'one': {'two': 3}})
# Allow running this test module directly (python verified_jwt_test.py).
if __name__ == '__main__':
  absltest.main()
|
nilq/baby-python
|
python
|
import abc
import numpy as np
class ErrFunc(abc.ABC):
    """Abstract base for classification-model error (nonconformity) functions."""

    @abc.abstractmethod
    def apply(self, prediction, y):
        """Compute a nonconformity score for each sample.

        Parameters
        ----------
        prediction : numpy array of shape [n_samples, n_classes]
            Class probability estimates for each sample.
        y : numpy array of shape [n_samples]
            True output labels of each sample.

        Returns
        -------
        numpy array of shape [n_samples]
            Nonconformity scores of the samples.
        """
        ...
class BaseModelNC:
    """Nonconformity scorer backed by a pretrained underlying model.

    Parameters
    ----------
    model : Underlying pretrained model
    err_func : ClassificationErrFunc or RegressionErrFunc
        Error function object.
    normalizer : Normalizer
        Normalization model.
    beta : float
        Normalization smoothing parameter. Larger beta values push the
        normalized nonconformity function toward its non-normalized
        equivalent.
    """

    def __init__(self, model, err_func, normalizer=None, beta=0):
        super().__init__()
        self.model = model
        self.err_func = err_func
        self.normalizer = normalizer
        self.beta = beta
        # sklearn.base.clone (e.g. during cross-validation) can scramble
        # object references, so re-point the normalizer at this model
        # adapter when it carries one.
        if self.normalizer is not None and hasattr(self.normalizer, "base_model"):
            self.normalizer.base_model = self.model
        self.clean = False

    def fit(self, x, y):
        """Fit the scorer's auxiliary models (the normalizer, if present).

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of examples for fitting.
        y : numpy array of shape [n_samples]
            Outputs of examples for fitting.

        Returns
        -------
        None
        """
        normalizer = self.normalizer
        if normalizer is not None and not normalizer.is_fitted:
            normalizer.fit(x, y)
        self.clean = False

    def score(self, x, y=None):
        """Return (optionally normalized) nonconformity scores for samples.

        Parameters
        ----------
        x : numpy array of shape [n_samples, n_features]
            Inputs of examples to score.
        y : numpy array of shape [n_samples]
            Outputs of examples to score.

        Returns
        -------
        numpy array of shape [n_samples]
            Nonconformity scores of the samples.
        """
        predictions = self.model.predict(x)
        raw_scores = self.err_func.apply(predictions, y)
        if self.normalizer is None:
            denominator = np.ones(x.shape[0])
        else:
            denominator = self.normalizer.score(x) + self.beta
        return raw_scores / denominator
|
nilq/baby-python
|
python
|
import bpy
from math import pi
# ----------------------------------------------------------------------------------------
# Start - copied functions
# ----------------------------------------------------------------------------------------
def sun_light(
    location, rotation, power=2.5, angle=135, name="Light_sun",
):
    """Create a SUN light and link it into the active collection.

    Args:
        location (3-tuple of floats): World-space location of the light.
        rotation (3-tuple of floats): Euler rotation, in radians.
        power (float, optional): Light energy. Defaults to 2.5.
        angle (float, optional): Angular diameter of the sun, in degrees
            (converted to radians before assignment). Defaults to 135.
        name (str, optional): Name for both the light data-block and the
            object. Defaults to "Light_sun".

    Returns:
        bpy.types.Object: The newly created light object.
    """
    light_data = bpy.data.lights.new(name, type="SUN")
    light = bpy.data.objects.new(name, light_data)
    # Link the object into the current collection so it is part of the scene.
    bpy.context.collection.objects.link(light)
    light.location = location
    light.rotation_euler = rotation
    light.data.energy = power
    light.data.specular_factor = 0.4
    # Blender expects the sun angle in radians.
    light.data.angle = angle * pi / 180.0
    return light
def make_material_Principled_BSDF(name, color_RGB):
    """Create a Principled BSDF material with fixed metallic/roughness values.

    Args:
        name (str): Name to give the new material.
        color_RGB (3-element tuple or list of floats): RGB base color, each
            element in the range 0.0 to 1.0 (alpha is fixed at 1.0).

    Returns:
        bpy.types.Material: The newly created material.
    """
    mat = bpy.data.materials.new(name=name)
    mat.use_nodes = True
    mat_nodes = mat.node_tree.nodes
    # Set Principled BSDF values
    mat_nodes["Principled BSDF"].inputs["Metallic"].default_value = 0.0
    mat_nodes["Principled BSDF"].inputs["Roughness"].default_value = 0.4
    mat_nodes["Principled BSDF"].inputs["Base Color"].default_value = (
        *color_RGB,
        1.0,
    )
    # Alpha-blend the material, hide back faces of transparent geometry,
    # and cast no shadows.
    mat.blend_method = "BLEND"
    mat.show_transparent_back = False
    mat.shadow_method = "NONE"
    return mat
def make_cube(name, size, position):
    """Make a cube with the given size whose *bottom* sits at position[2].

    Args:
        name (str): Name for the new cube object.
        size (3-element tuple or list of floats): Desired x, y, z size.
        position (3-element tuple or list of floats): Location of the
            center of the cube's bottom face.

    Returns:
        bpy.types.Object: The newly created cube object.
    """
    z_layer_size = size[2]
    z_position = position[2]
    # Create a unit cube and scale it; raise z by half the height so the
    # bottom face (not the center) lands at z_position.
    bpy.ops.mesh.primitive_cube_add(size=1)
    layer = bpy.context.object
    layer.scale = size
    layer.location = (position[0], position[1], z_layer_size / 2 + z_position)
    layer.name = name
    return layer
# ----------------------------------------------------------------------------------------
# New functions
# ----------------------------------------------------------------------------------------
def attach_to_parent(child_obj, parent_obj):
    """Make parent_obj the parent of child_obj without moving the child.

    Args:
        child_obj (Blender object): Object to become the child.
        parent_obj (Blender object): Object to become the parent.
    """
    child_obj.parent = parent_obj
    # Prevent the parent's current transform from being applied to the
    # child object (keeps the child where it already is).
    child_obj.matrix_parent_inverse = parent_obj.matrix_world.inverted()
def duplicate_object(obj, invert_y_position=True, parent_relationship=True):
    """Duplicate an object, optionally mirror the copy across y, and
    optionally make the original the parent of the duplicate.

    Args:
        obj (Blender object): Object to duplicate. bpy.ops.object.duplicate
            acts on the current selection, so obj is assumed to be the
            selected object -- TODO confirm at call sites.
        invert_y_position (bool, optional): Invert the y-position of the
            duplicated object relative to the original. Defaults to True.
        parent_relationship (bool, optional): Make the original object the
            parent of the duplicate. Defaults to True.
            (The docstring previously documented a nonexistent ``parent``
            parameter.)

    Returns:
        Blender object: The duplicated object. The original implementation
        created the duplicate but never returned it, so callers had no way
        to reference the copy.
    """
    bpy.ops.object.duplicate()  # linked=True)
    obj_dupl = bpy.context.selected_objects[0]
    if invert_y_position:
        position = obj.location
        obj_dupl.location = (position[0], -position[1], position[2])
    if parent_relationship:
        attach_to_parent(obj_dupl, obj)
    return obj_dupl
def make_bulk_layer(name, layer_size, color_RGB, z_position=0.0, parent=None):
    """Create a 3D print bulk (solid, full-area) layer.

    Args:
        name (str): Name to give the object.
        layer_size (3-element tuple or list of floats or ints): x, y, z size.
        color_RGB (3-element tuple or list of floats): RGB color with each
            value in range 0.0-1.0.
        z_position (float, optional): Where to place the object in the z
            direction. Defaults to 0.0.
        parent (Blender object, optional): Use this object as parent for the
            new layer. Defaults to None.

    Returns:
        Blender object: Newly created layer.
    """
    position = (0.0, 0.0, z_position)
    layer = make_cube(name, layer_size, position)
    # Give the layer its own material so colors can differ per layer.
    mat = make_material_Principled_BSDF(f"{name}_mat", color_RGB)
    layer.data.materials.append(mat)
    if parent:
        attach_to_parent(layer, parent)
    return layer
def make_channel_layer(
    name, layer_size, channel_width, color_RGB, z_position=0.0, parent=None
):
    """Create a 3D print channel layer (two halves with a gap between them).

    Args:
        name (str): Name to give the object.
        layer_size (3-element tuple or list of floats or ints): x, y, z size.
        channel_width (float or int): Width of the channel gap.
        color_RGB (3-element tuple or list of floats): RGB color with each
            value in range 0.0-1.0.
        z_position (float, optional): Where to place the object in the z
            direction. Defaults to 0.0.
        parent (Blender object, optional): Use this object as parent for the
            new layer. Defaults to None.

    Returns:
        Blender object: Newly created layer (the first of the two halves;
        the mirrored half is parented to it by duplicate_object).
    """
    lx, ly, lz = layer_size
    c = channel_width
    # Each half spans from the channel edge (c/2) out to the layer edge.
    size = (lx, ly / 2.0 - c / 2.0, lz)
    # Center that half between the channel edge and the layer edge, on -y.
    position = (0.0, -((ly / 2.0 - c / 2.0) / 2.0 + c / 2.0), z_position)
    layer_chan = make_cube(name, size, position)
    mat = make_material_Principled_BSDF(f"{name}_mat", color_RGB)
    layer_chan.data.materials.append(mat)
    # Mirror across y to create the matching half on the other side.
    duplicate_object(layer_chan)
    if parent:
        attach_to_parent(layer_chan, parent)
    return layer_chan
def make_channelfill_layer(
    name, layer_size, channel_width, color_RGB, z_position=0.0, parent=None
):
    """Create a 3D print layer consisting only of the filled channel region.

    Args:
        name (str): Name to give the object.
        layer_size (3-element tuple or list of floats or ints): x, y, z size.
        channel_width (float or int): Width of the channel.
        color_RGB (3-element tuple or list of floats): RGB color with each
            value in range 0.0-1.0.
        z_position (float, optional): Where to place the object in the z
            direction. Defaults to 0.0.
        parent (Blender object, optional): Use this object as parent for the
            new layer. Defaults to None.

    Returns:
        Blender object: Newly created layer.
    """
    lx, ly, lz = layer_size
    c = channel_width
    # A single strip exactly as wide as the channel, centered at y = 0.
    size = (lx, c, lz)
    position = (0.0, 0.0, z_position)
    layer_chan = make_cube(name, size, position)
    mat = make_material_Principled_BSDF(f"{name}_mat", color_RGB)
    layer_chan.data.materials.append(mat)
    if parent:
        attach_to_parent(layer_chan, parent)
    return layer_chan
def make_channel_eroded_layer(
    name, layer_size, channel_width, edge_width, color_RGB, z_position=0.0, parent=None
):
    """Create a 3D print eroded channel layer.

    An eroded layer is simply a channel layer whose opening is widened by one
    edge width on each side of the channel.

    Args:
        name (str): Name to give the object.
        layer_size (3-element tuple or list of floats or ints): x, y, z size.
        channel_width (float or int): Width of the channel.
        edge_width (float or int): Width of the eroded edge on each side.
        color_RGB (3-element tuple or list of floats): RGB color with each
            value in range 0.0-1.0.
        z_position (float, optional): Where to place the object in the z
            direction. Defaults to 0.0.
        parent (Blender object, optional): Use this object as parent for the
            new layer. Defaults to None.

    Returns:
        Blender object: Newly created layer.
    """
    widened_channel = channel_width + 2 * edge_width
    return make_channel_layer(
        name,
        layer_size,
        widened_channel,
        color_RGB,
        z_position=z_position,
        parent=parent,
    )
def make_channel_edge_layer(
    name, layer_size, channel_width, edge_width, color_RGB, z_position=0.0, parent=None
):
    """Create a 3D print layer with one edge object on each side of a channel.

    Args:
        name (str): Name to give the object.
        layer_size (3-element tuple or list of floats or ints): x, y, z size.
        channel_width (float or int): Width of the channel.
        edge_width (float or int): Width of the edge on each side of the
            channel.
        color_RGB (3-element tuple or list of floats): RGB color with each
            value in range 0.0-1.0.
        z_position (float, optional): Where to place the object in the z
            direction. Defaults to 0.0.
        parent (Blender object, optional): Use this object as parent for the
            new layer. Defaults to None.

    Returns:
        Blender object: Newly created edge object (the mirrored twin is
        parented to it by duplicate_object).
    """
    lx, ly, lz = layer_size
    c = channel_width
    e = edge_width
    # Make edges: a strip of width e placed just outside the channel on -y.
    size = (lx, e, lz)
    position = (0, -(c / 2.0 + e / 2.0), z_position)
    edge_name = f"{name}_edge"
    layer_edge = make_cube(edge_name, size, position)
    mat = make_material_Principled_BSDF(f"{edge_name}_mat", color_RGB)
    layer_edge.data.materials.append(mat)
    # Mirror across y for the edge on the other side of the channel.
    duplicate_object(layer_edge)
    if parent:
        attach_to_parent(layer_edge, parent)
    return layer_edge
# ----------------------------------------------------------------------------------------
# Main code
# ----------------------------------------------------------------------------------------
# Lights
light_location = (8.1524, 2.0110, 11.808)
light_rotation = [pi * 37.3 / 180, pi * 3.16 / 180, pi * 107 / 180]
light_sun = sun_light(location=light_location, rotation=light_rotation)
# Scene parameters: layer footprint/thickness, channel geometry, and one
# RGB color per layer type.
xy_layer_size = 10
z_layer_size = 0.5
channel_width = 3
edge_width = 1
color_RGB_bulk = (1, 0.71, 0.2)
color_RGB_channel = (0.1, 0.4, 0.7)
color_RGB_edge = (0.7, 0.1, 0.4)
color_RGB_eroded = (0.4, 0.7, 0.1)
color_RGB_channelfill = (0.4, 0.1, 0.2)
size = (xy_layer_size, xy_layer_size, z_layer_size)
position = (0, 0, 0)
# Bulk layer (bottom); also serves as the parent of all later layers.
z = 0 * z_layer_size
layer = make_bulk_layer("First Layer", size, color_RGB_bulk, z)
# Channel layer
z = 1 * z_layer_size
layer_chan = make_channel_layer(
    "Chan01", size, channel_width, color_RGB_channel, z, parent=layer
)
# Channel with edge dose: edge strips plus an eroded (widened) channel.
z = 2 * z_layer_size
layer_edge = make_channel_edge_layer(
    "Edge01", size, channel_width, edge_width, color_RGB_edge, z, parent=layer
)
layer_eroded = make_channel_eroded_layer(
    "Eroded01", size, channel_width, edge_width, color_RGB_eroded, z, parent=layer
)
# Roof layer: filled channel plus the surrounding channel halves.
z = 3 * z_layer_size
layer_chanfill = make_channelfill_layer(
    "ChanFill01", size, channel_width, color_RGB_channelfill, z, parent=layer,
)
layer_chan = make_channel_layer(
    "Chan02", size, channel_width, color_RGB_channel, z, parent=layer
)
# Bulk layer (top)
z = 4 * z_layer_size
layer_top = make_bulk_layer("Top Layer", size, color_RGB_bulk, z, parent=layer)
|
nilq/baby-python
|
python
|
#!/bin/python
#python
import sys
import os
import shutil
import numpy
import time
import math
import threading
from scipy import ndimage
#appion
from appionlib import appionScript
from appionlib import apStack
from appionlib import apDisplay
from appionlib import appiondata
from appionlib import apEMAN
from appionlib import apFile
from appionlib import apRecon
from appionlib import apChimera
from appionlib import apProject
from appionlib import spyder
from appionlib.apTilt import apTiltPair
from appionlib.apSpider import operations, backproject
from pyami import mem, mrc
class rctVolumeScript(appionScript.AppionScript):
#=====================
    def onInit(self):
        """Initialize per-run state before option parsing."""
        # Cache of (inplane rotation, mirror) keyed by tilted-stack particle dbid.
        self.rotmirrorcache = {}
        # Resolution estimates, filled in later by FSC / Rmeasure steps.
        self.fscresolution = None
        self.rmeasureresolution = None
#=====================
    def setupParserOptions(self):
        """Declare command-line options for the RCT volume script."""
        self.parser.set_usage("Usage: %prog --cluster-id=ID --tilt-stack=# --classnums=#,#,# [options]")
        ### strings
        self.parser.add_option("--classnums", dest="classnums", type="str",
            help="Class numbers to use for rct volume, e.g. 0,1,2", metavar="#")
        ### integers
        self.parser.add_option("--tilt-stack", dest="tiltstackid", type="int",
            help="Tilted Stack ID", metavar="#")
        self.parser.add_option("--cluster-id", dest="clusterid", type="int",
            help="clustering stack id", metavar="ID")
        self.parser.add_option("--align-id", dest="alignid", type="int",
            help="alignment stack id", metavar="ID")
        self.parser.add_option("--num-iters", dest="numiters", type="int", default=4,
            help="Number of tilted image shift refinement iterations", metavar="#")
        self.parser.add_option("--mask-rad", dest="radius", type="int",
            help="Particle mask radius (in pixels)", metavar="ID")
        self.parser.add_option("--tilt-bin", dest="tiltbin", type="int", default=1,
            help="Binning of the tilted image", metavar="ID")
        self.parser.add_option("--num-part", dest="numpart", type="int",
            help="Limit number of particles, for debugging", metavar="#")
        self.parser.add_option("--median", dest="median", type="int", default=3,
            help="Median filter", metavar="#")
        ### floats
        self.parser.add_option("--lowpassvol", dest="lowpassvol", type="float", default=10.0,
            help="Low pass volume filter (in Angstroms)", metavar="#")
        self.parser.add_option("--highpasspart", dest="highpasspart", type="float", default=600.0,
            help="High pass particle filter (in Angstroms)", metavar="#")
        self.parser.add_option("--lowpasspart", dest="lowpasspart", type="float",
            help="Low pass particle filter (in Angstroms)", metavar="#")
        self.parser.add_option("--min-score", "--min-spread", dest="minscore", type="float",
            help="Minimum score/spread/cross-correlation for particles", metavar="#")
        self.parser.add_option("--contour", dest="contour", type="float", default=3.0,
            help="Chimera snapshot contour", metavar="#")
        self.parser.add_option("--zoom", dest="zoom", type="float", default=1.1,
            help="Chimera snapshot zoom", metavar="#")
        self.parser.add_option("--mass", dest="mass", type="float",
            help="Use mass in kDa to set Chimera snapshot contour", metavar="#")
        ### true/false
        # Note: --no-eotest and --eotest share dest="eotest" (default True).
        self.parser.add_option("--no-eotest", dest="eotest", default=True,
            action="store_false", help="Do not perform eotest for resolution")
        self.parser.add_option("--eotest", dest="eotest", default=True,
            action="store_true", help="Perform eotest for resolution")
        self.parser.add_option("--skip-chimera", dest="skipchimera", default=False,
            action="store_true", help="Skip chimera imaging")
        ### choices
        self.mirrormodes = ( "all", "yes", "no" )
        self.parser.add_option("--mirror", dest="mirror",
            help="Mirror mode", metavar="MODE",
            type="choice", choices=self.mirrormodes, default="all" )
#=====================
    def checkConflicts(self):
        """Validate parsed options and resolve dependent IDs.

        Fills in self.classlist, self.alignstackdata and
        self.params['notstackid']; aborts via apDisplay.printError on any
        missing or inconsistent option.
        """
        ### parse class list
        if self.params['classnums'] is None:
            apDisplay.printError("class number was not defined")
        rawclasslist = self.params['classnums'].split(",")
        self.classlist = []
        for cnum in rawclasslist:
            try:
                self.classlist.append(int(cnum))
            except:
                # NOTE(review): bare except; ValueError is the expected
                # failure for a non-numeric class number.
                apDisplay.printError("could not parse: "+cnum)
        ### scale-by-mass mode overrides the contour value
        if self.params['mass'] is not None:
            self.params['contour'] = 1.0
            apDisplay.printMsg("Using scale by mass method")
        ### check for missing and duplicate entries
        if self.params['alignid'] is None and self.params['clusterid'] is None:
            apDisplay.printError("Please provide either --cluster-id or --align-id")
        if self.params['alignid'] is not None and self.params['clusterid'] is not None:
            apDisplay.printError("Please provide only one of either --cluster-id or --align-id")
        ### get the untilted stack ID from the alignment or clustering stack
        if self.params['alignid'] is not None:
            self.alignstackdata = appiondata.ApAlignStackData.direct_query(self.params['alignid'])
            self.params['notstackid'] = self.alignstackdata['stack'].dbid
        elif self.params['clusterid'] is not None:
            self.clusterstackdata = appiondata.ApClusteringStackData.direct_query(self.params['clusterid'])
            self.alignstackdata = self.clusterstackdata['clusterrun']['alignstack']
            self.params['notstackid'] = self.alignstackdata['stack'].dbid
        ### check and make sure we got the stack id
        if self.params['notstackid'] is None:
            apDisplay.printError("untilted stackid was not found")
        if self.params['tiltstackid'] is None:
            apDisplay.printError("tilt stack ID was not defined")
        if self.params['radius'] is None:
            apDisplay.printError("particle mask radius was not defined")
        if self.params['description'] is None:
            apDisplay.printError("enter a description")
        # the mask must fit inside the (possibly binned) box
        boxsize = self.getBoxSize()
        if self.params['radius']*2 > boxsize-2:
            apDisplay.printError("particle radius is too big for stack boxsize")
#=====================
def setRunDir(self):
stackdata = apStack.getOnlyStackData(self.params['tiltstackid'], msg=False)
path = stackdata['path']['path']
uppath = os.path.dirname(os.path.dirname(os.path.abspath(path)))
classliststr = operations.intListToString(self.classlist)
self.params['rundir'] = os.path.join(uppath, "rctvolume",
self.params['runname'] )
#=====================
    def getParticleInPlaneRotation(self, tiltstackpartdata):
        """Return (inplane rotation, mirror) for a tilted-stack particle.

        Looks up the particle's untilted tilt-pair mate and that mate's
        alignment record; results are memoized in self.rotmirrorcache keyed
        by the tilted particle's dbid.
        """
        partid = tiltstackpartdata.dbid
        if partid in self.rotmirrorcache:
            ### use cached value
            return self.rotmirrorcache[partid]
        partnum = tiltstackpartdata['particleNumber']
        notstackpartdata = apTiltPair.getStackParticleTiltPair(self.params['tiltstackid'],
            partnum, self.params['notstackid'])
        alignpartq = appiondata.ApAlignParticleData()
        alignpartq['stackpart'] = notstackpartdata
        alignpartq['alignstack'] = self.alignstackdata
        alignpartdatas = alignpartq.query()
        # exactly one alignment record is expected for the untilted mate
        if not alignpartdatas or len(alignpartdatas) != 1:
            apDisplay.printError("could not get inplane rotation for particle %d"%(tiltstackpartdata['particleNumber']))
        inplane = alignpartdatas[0]['rotation']
        mirror = alignpartdatas[0]['mirror']
        if alignpartdatas[0]['alignstack']['alignrun']['maxlikerun'] is not None:
            # maxlike does mirror then rotation, and its rotation is negative relative to spider
            # April 6, 2009: rotation is negative independent of mirror
            inplane = -1.0*inplane
        self.rotmirrorcache[partid] = (inplane, mirror)
        return inplane, mirror
#=====================
def getBoxSize(self):
boxsize = apStack.getStackBoxsize(self.params['tiltstackid'])
if self.params['tiltbin'] == 1:
return boxsize
newbox = int( math.floor( boxsize / float(self.params['tiltbin']) / 2.0)* 2.0 )
return newbox
#=====================
    def convertStackToSpider(self, emanstackfile):
        """
        Take an EMAN/IMAGIC stack file, pre-filter and bin the particles,
        and create a SPIDER stack file ready for processing.

        Returns the path to the new SPIDER stack. The intermediate filtered
        stack and the input stack are removed on success.
        """
        if not os.path.isfile(emanstackfile):
            apDisplay.printError("stackfile does not exist: "+emanstackfile)
        tempstack = os.path.join(self.params['rundir'], "filter"+self.timestamp+".hed")
        ### first high pass filter particles
        apDisplay.printMsg("pre-filtering particles")
        apix = apStack.getStackPixelSizeFromStackId(self.params['tiltstackid'])
        boxsize = self.getBoxSize()
        emancmd = ("proc2d "+emanstackfile+" "+tempstack
            +" apix="+str(apix)+" ")
        # optional high/low pass filters (in Angstroms)
        if self.params['highpasspart'] is not None and self.params['highpasspart'] > 0:
            emancmd += "hp="+str(self.params['highpasspart'])+" "
        if self.params['lowpasspart'] is not None and self.params['lowpasspart'] > 0:
            emancmd += "lp="+str(self.params['lowpasspart'])+" "
        # optional binning: shrink by tiltbin after clipping to a multiple
        # of the binned box size
        if self.params['tiltbin'] > 1:
            clipsize = boxsize*self.params['tiltbin']
            emancmd += " shrink=%d clip=%d,%d "%(self.params['tiltbin'], clipsize, clipsize)
        apEMAN.executeEmanCmd(emancmd, verbose=True)
        ### convert imagic stack to spider
        emancmd = "proc2d "
        emancmd += tempstack+" "
        spiderstack = os.path.join(self.params['rundir'], "rctstack"+self.timestamp+".spi")
        apFile.removeFile(spiderstack, warn=True)
        emancmd += spiderstack+" "
        emancmd += "spiderswap edgenorm"
        starttime = time.time()
        apDisplay.printColor("Running spider stack conversion this can take a while", "cyan")
        apEMAN.executeEmanCmd(emancmd, verbose=True)
        time.sleep(1) # wait a sec, for things to finish
        apDisplay.printColor("finished eman in "+apDisplay.timeString(time.time()-starttime), "cyan")
        # clean up the intermediate and input stacks
        apFile.removeStack(tempstack, warn=False)
        apFile.removeStack(emanstackfile, warn=False)
        if not os.path.isfile(spiderstack):
            apDisplay.printError("Failed to create a spider stack")
        return spiderstack
#=====================
def sortTiltParticlesData(self, a, b):
if a['particleNumber'] > b['particleNumber']:
return 1
return -1
#=====================
    def insertRctRun(self, volfile):
        """Insert the RCT run and its 3D density volume into the database.

        Records are only committed when --commit was given; otherwise the
        query objects are built but never inserted.
        """
        ### setup resolutions
        fscresq = appiondata.ApResolutionData()
        fscresq['type'] = "fsc"
        fscresq['half'] = self.fscresolution
        fscresq['fscfile'] = "fscdata"+self.timestamp+".fsc"
        rmeasureq = appiondata.ApResolutionData()
        rmeasureq['type'] = "rmeasure"
        rmeasureq['half'] = self.rmeasureresolution
        rmeasureq['fscfile'] = None
        ### insert rct run data
        rctrunq = appiondata.ApRctRunData()
        rctrunq['runname'] = self.params['runname']
        classliststr = operations.intListToString(self.classlist)
        rctrunq['classnums'] = classliststr
        rctrunq['numiter'] = self.params['numiters']
        rctrunq['maskrad'] = self.params['radius']
        rctrunq['lowpassvol'] = self.params['lowpassvol']
        rctrunq['highpasspart'] = self.params['highpasspart']
        rctrunq['lowpasspart'] = self.params['lowpasspart']
        rctrunq['median'] = self.params['median']
        rctrunq['description'] = self.params['description']
        rctrunq['path'] = appiondata.ApPathData(path=os.path.abspath(self.params['rundir']))
        rctrunq['alignstack'] = self.alignstackdata
        rctrunq['tiltstack'] = apStack.getOnlyStackData(self.params['tiltstackid'])
        rctrunq['numpart'] = self.numpart
        rctrunq['fsc_resolution'] = fscresq
        rctrunq['rmeasure_resolution'] = rmeasureq
        if self.params['commit'] is True:
            rctrunq.insert()
        ### insert 3d volume density
        densq = appiondata.Ap3dDensityData()
        densq['rctrun'] = rctrunq
        densq['path'] = appiondata.ApPathData(path=os.path.dirname(os.path.abspath(volfile)))
        densq['name'] = os.path.basename(volfile)
        densq['hidden'] = False
        densq['norm'] = True
        # NOTE(review): symmetry id 25 is hard-coded -- presumably C1;
        # confirm against the ApSymmetryData table.
        densq['symmetry'] = appiondata.ApSymmetryData.direct_query(25)
        # pixel size reflects the binning applied to the tilted stack
        densq['pixelsize'] = apStack.getStackPixelSizeFromStackId(self.params['tiltstackid'])*self.params['tiltbin']
        densq['boxsize'] = self.getBoxSize()
        densq['lowpass'] = self.params['lowpassvol']
        densq['highpass'] = self.params['highpasspart']
        densq['mask'] = self.params['radius']
        #densq['iterid'] = self.params['numiters']
        densq['description'] = self.params['description']
        densq['resolution'] = self.fscresolution
        densq['rmeasure'] = self.rmeasureresolution
        densq['session'] = apStack.getSessionDataFromStackId(self.params['tiltstackid'])
        densq['md5sum'] = apFile.md5sumfile(volfile)
        if self.params['commit'] is True:
            densq.insert()
        return
#=====================
def processVolume(self, spivolfile, iternum=0):
    """Post-process one iteration's reconstructed SPIDER volume.

    Keeps a copy of the raw volume, converts it to MRC, applies a
    median filter, then low-pass filters, centers, sets the origin and
    masks it with EMAN proc3d, and finally converts the result back
    over spivolfile in SPIDER format.  Optionally renders Chimera
    snapshots of the MRC volume.

    Returns the path of the processed MRC volume.
    """
    ### set values
    apix = apStack.getStackPixelSizeFromStackId(self.params['tiltstackid'])*self.params['tiltbin']
    # NOTE(review): boxsize is computed but not used in this method
    boxsize = self.getBoxSize()
    rawspifile = os.path.join(self.params['rundir'], "rawvolume%s-%03d.spi"%(self.timestamp, iternum))
    mrcvolfile = os.path.join(self.params['rundir'], "volume%s-%03d.mrc"%(self.timestamp, iternum))
    lowpass = self.params['lowpassvol']
    ### copy original to raw file
    shutil.copy(spivolfile, rawspifile)
    ### convert to mrc
    emancmd = ("proc3d "+spivolfile+" "+mrcvolfile+" norm=0,1 apix="+str(apix))
    apEMAN.executeEmanCmd(emancmd, verbose=False)
    ### median filter
    rawvol = mrc.read(mrcvolfile)
    medvol = ndimage.median_filter(rawvol, size=self.params['median'])
    mrc.write(medvol, mrcvolfile)
    ### low pass filter
    emancmd = ("proc3d "+mrcvolfile+" "+mrcvolfile+" center norm=0,1 apix="
        +str(apix)+" lp="+str(lowpass))
    apEMAN.executeEmanCmd(emancmd, verbose=False)
    ### set origin
    emancmd = "proc3d "+mrcvolfile+" "+mrcvolfile+" origin=0,0,0 "
    apEMAN.executeEmanCmd(emancmd, verbose=False)
    ### mask volume
    emancmd = "proc3d "+mrcvolfile+" "+mrcvolfile+" mask="+str(self.params['radius'])
    apEMAN.executeEmanCmd(emancmd, verbose=False)
    ### convert to spider
    apFile.removeFile(spivolfile)
    emancmd = "proc3d "+mrcvolfile+" "+spivolfile+" spidersingle"
    apEMAN.executeEmanCmd(emancmd, verbose=False)
    ### image with chimera
    if self.params['skipchimera'] is False:
        if self.params['mass'] is not None:
            apDisplay.printMsg("Using scale by mass method")
            apChimera.setVolumeMass(mrcvolfile, apix=apix, mass=self.params['mass'])
        apChimera.renderSnapshots(mrcvolfile, self.params['contour'], self.params['zoom'], 'c1')
    return mrcvolfile
#=====================
def makeEulerDoc(self, tiltParticlesData):
    """Write a SPIDER Euler-angle doc file for the tilted particles and
    return its path.

    One line per particle: (tiltrot, tiltangle, totrot).  Mirrored
    particles have their angles flipped into the equivalent
    non-mirrored orientation before being written.
    """
    count = 0
    eulerfile = os.path.join(self.params['rundir'], "eulersdoc"+self.timestamp+".spi")
    eulerf = open(eulerfile, "w")
    apDisplay.printMsg("Creating Euler angles doc file")
    starttime = time.time()
    # order particles by their tilt-stack particle number (py2 cmp sort)
    tiltParticlesData.sort(self.sortTiltParticlesData)
    startmem = mem.active()
    for stackpartdata in tiltParticlesData:
        count += 1
        if count%50 == 0:
            # progress dot + flush, and warn on runaway memory growth
            sys.stderr.write(".")
            eulerf.flush()
            memdiff = (mem.active()-startmem)/count/1024.0
            if memdiff > 3:
                apDisplay.printColor("Memory increase: %d MB/part"%(memdiff), "red")
        tiltrot, theta, notrot, tiltangle = apTiltPair.getParticleTiltRotationAngles(stackpartdata)
        if tiltrot is None:
            apDisplay.printError("BAD particle "+str(stackpartdata))
        inplane, mirror = self.getParticleInPlaneRotation(stackpartdata)
        totrot = -1.0*(notrot + inplane)
        if mirror is True:
            #theta flips to the back
            tiltangle = -1.0 * tiltangle + 180 #tiltangle = tiltangle + 180.0 #theta
            totrot = -1.0 * totrot - 180.0 #phi
            tiltrot = tiltrot + 180 #tiltrot = -1.0 * tiltrot + 180.0 #psi
        # normalize phi into [0, 360)
        while totrot < 0:
            totrot += 360.0
        ### this is the original eman part num; count is new part num
        # NOTE(review): partnum is computed but the doc line is keyed by
        # count (the renumbered particle) — confirm this is intended
        partnum = stackpartdata['particleNumber']-1
        line = operations.spiderOutLine(count, [tiltrot, tiltangle, totrot])
        eulerf.write(line)
    eulerf.close()
    apDisplay.printColor("\nFinished Euler angle doc file in "+apDisplay.timeString(time.time()-starttime), "cyan")
    memdiff = (mem.active()-startmem)/count/1024.0
    if memdiff > 0.1:
        apDisplay.printColor("Memory increase: %.2f MB/part"%(memdiff), "red")
    return eulerfile
#=====================
def getGoodAlignParticles(self):
    """Select tilted particles belonging to the requested classes.

    Class membership comes either from clustering data (--clusterid
    given) or directly from alignment data.  Particles are dropped when
    their score/spread is below --minscore, when they have no tilt
    pair, or when their mirror state does not match --mirror; the
    --numpart cap stops collection early.

    Returns (includeParticle, tiltParticlesData):
      includeParticle -- sorted list of 0-based EMAN particle numbers
                         in the tilt stack
      tiltParticlesData -- the matching tilt-pair particle records
                           (in collection order)
    """
    includeParticle = []
    tiltParticlesData = []
    nopairParticle = 0
    excludeParticle = 0
    badmirror = 0
    badscore = 0
    apDisplay.printMsg("Sorting particles from classes at "+time.asctime())
    count = 0
    startmem = mem.active()
    t0 = time.time()
    if self.params['clusterid'] is not None:
        ### method 1: get particles from clustering data
        clusterpartq = appiondata.ApClusteringParticleData()
        clusterpartq['clusterstack'] = appiondata.ApClusteringStackData.direct_query(self.params['clusterid'])
        clusterpartdatas = clusterpartq.query()
        apDisplay.printMsg("Sorting "+str(len(clusterpartdatas))+" clustered particles")
        for clustpart in clusterpartdatas:
            count += 1
            if count%50 == 0:
                sys.stderr.write(".")
                memdiff = (mem.active()-startmem)/count/1024.0
                if memdiff > 3:
                    apDisplay.printColor("Memory increase: %d MB/part"%(memdiff), "red")
            #write to text file
            clustnum = clustpart['refnum']-1
            # score filter: reject on low score, or low spread when no score
            if self.params['minscore'] is not None:
                if ( clustpart['alignparticle']['score'] is not None
                        and clustpart['alignparticle']['score'] < self.params['minscore'] ):
                    badscore += 1
                    continue
                elif ( clustpart['alignparticle']['spread'] is not None
                        and clustpart['alignparticle']['spread'] < self.params['minscore'] ):
                    badscore += 1
                    continue
            if clustnum in self.classlist:
                notstackpartnum = clustpart['alignparticle']['stackpart']['particleNumber']
                tiltstackpartdata = apTiltPair.getStackParticleTiltPair(self.params['notstackid'],
                    notstackpartnum, self.params['tiltstackid'])
                if tiltstackpartdata is None:
                    nopairParticle += 1
                    continue
                tiltrot, theta, notrot, tiltangle = apTiltPair.getParticleTiltRotationAngles(tiltstackpartdata)
                if tiltrot is None:
                    apDisplay.printWarning("BAD particle "+str(tiltstackpartdata))
                    nopairParticle += 1
                    continue
                else:
                    inplane, mirror = self.getParticleInPlaneRotation(tiltstackpartdata)
                    if ( self.params['mirror'] == "all"
                            or (self.params['mirror'] == "no" and mirror is False)
                            or (self.params['mirror'] == "yes" and mirror is True) ):
                        # 0-based particle number as EMAN counts them
                        emantiltstackpartnum = tiltstackpartdata['particleNumber']-1
                        includeParticle.append(emantiltstackpartnum)
                        tiltParticlesData.append(tiltstackpartdata)
                        if self.params['numpart'] is not None and len(includeParticle) > self.params['numpart']:
                            break
                    else:
                        badmirror += 1
            else:
                excludeParticle += 1
    else:
        ### method 2: get particles from alignment data
        alignpartq = appiondata.ApAlignParticleData()
        alignpartq['alignstack'] = self.alignstackdata
        alignpartdatas = alignpartq.query()
        apDisplay.printMsg("Sorting "+str(len(alignpartdatas))+" aligned particles")
        for alignpart in alignpartdatas:
            count += 1
            if count%50 == 0:
                sys.stderr.write(".")
                memdiff = (mem.active()-startmem)/count/1024.0
                if memdiff > 3:
                    apDisplay.printColor("Memory increase: %d MB/part"%(memdiff), "red")
            #write to text file
            alignnum = alignpart['ref']['refnum']-1
            if ( self.params['minscore'] is not None
                    and alignpart['score'] is not None
                    and alignpart['score'] < self.params['minscore'] ):
                badscore += 1
                continue
            if alignnum in self.classlist:
                notstackpartnum = alignpart['stackpart']['particleNumber']
                tiltstackpartdata = apTiltPair.getStackParticleTiltPair(self.params['notstackid'],
                    notstackpartnum, self.params['tiltstackid'])
                if tiltstackpartdata is None:
                    nopairParticle += 1
                else:
                    inplane, mirror = self.getParticleInPlaneRotation(tiltstackpartdata)
                    if ( self.params['mirror'] == "all"
                            or (self.params['mirror'] == "no" and mirror is False)
                            or (self.params['mirror'] == "yes" and mirror is True) ):
                        emantiltstackpartnum = tiltstackpartdata['particleNumber']-1
                        includeParticle.append(emantiltstackpartnum)
                        tiltParticlesData.append(tiltstackpartdata)
                        if self.params['numpart'] is not None and len(includeParticle) > self.params['numpart']:
                            break
                    else:
                        badmirror += 1
            else:
                excludeParticle += 1
    ### end methods
    includeParticle.sort()
    ### messages
    if time.time()-t0 > 1.0:
        apDisplay.printMsg("\nSorting time: "+apDisplay.timeString(time.time()-t0))
    apDisplay.printMsg("Keeping "+str(len(includeParticle))+" and excluding \n\t"
        +str(excludeParticle)+" particles with "+str(nopairParticle)+" unpaired particles")
    if badmirror > 0:
        apDisplay.printMsg("Particles with bad mirrors: %d"%(badmirror))
    if badscore > 0:
        apDisplay.printColor("Particles with bad scores: %d"%(badscore), "cyan")
    if len(includeParticle) < 1:
        apDisplay.printError("No particles were kept")
    memdiff = (mem.active()-startmem)/count/1024.0
    if memdiff > 0.1:
        apDisplay.printColor("Memory increase: %.2f MB/part"%(memdiff), "red")
    return includeParticle, tiltParticlesData
#=====================
def mirrorParticles(self, partdatas, spiderstack):
    """Mirror, in place, each particle image of the SPIDER stack whose
    alignment says it was matched mirrored.

    Progress characters on stderr: 'm' = mirrored, '.' = left alone.
    """
    partnum = 0
    mySpider = spyder.SpiderSession(dataext=".spi", logo=False, log=False)
    for stackpartdata in partdatas:
        partnum += 1
        inplane, mirror = self.getParticleInPlaneRotation(stackpartdata)
        if mirror is True:
            sys.stderr.write("m")
            # SPIDER 'MR' mirrors the particle about Y into scratch
            # register _9, then 'CP' copies it back over the original
            mySpider.toSpiderQuiet("MR",
                spyder.fileFilter(spiderstack)+("@%05d"%(partnum)),
                "_9",
                "Y",
            )
            mySpider.toSpiderQuiet("CP",
                "_9",
                spyder.fileFilter(spiderstack)+("@%05d"%(partnum)),
            )
        else:
            sys.stderr.write(".")
    sys.stderr.write("\n")
    mySpider.close()
#=====================
def runEoTest(self, alignstack, eulerfile):
    """Even/odd split resolution test.

    Splits the aligned particles into even- and odd-numbered halves,
    reconstructs a volume from each half, computes the FSC between the
    two half-volumes with EMAN proc3d, and stores the resulting
    resolution in self.fscresolution.  All temporary files are removed
    afterwards.
    """
    evenvolfile = os.path.join(self.params['rundir'], "evenvolume%s.spi"%(self.timestamp))
    oddvolfile = os.path.join(self.params['rundir'], "oddvolume%s.spi"%(self.timestamp))
    eveneulerfile = os.path.join(self.params['rundir'], "eveneulers%s.spi"%(self.timestamp))
    oddeulerfile = os.path.join(self.params['rundir'], "oddeulers%s.spi"%(self.timestamp))
    evenpartlist = os.path.join(self.params['rundir'], "evenparts%s.lst"%(self.timestamp))
    oddpartlist = os.path.join(self.params['rundir'], "oddparts%s.lst"%(self.timestamp))
    ### Create New Doc Files
    of = open(oddeulerfile, "w")
    ef = open(eveneulerfile, "w")
    op = open(oddpartlist, "w")
    ep = open(evenpartlist, "w")
    inf = open(eulerfile, "r")
    evenpart = 0
    oddpart = 0
    # split the Euler doc into even/odd halves, renumbering each half
    for line in inf:
        spidict = operations.spiderInLine(line)
        if spidict:
            partnum = spidict['row']
            if partnum % 2 == 0:
                ep.write("%d\n"%(partnum-1))
                evenpart += 1
                outline = operations.spiderOutLine(evenpart, spidict['floatlist'])
                ef.write(outline)
            elif partnum % 2 == 1:
                op.write("%d\n"%(partnum-1))
                oddpart += 1
                outline = operations.spiderOutLine(oddpart, spidict['floatlist'])
                of.write(outline)
    inf.close()
    of.close()
    ef.close()
    op.close()
    ep.close()
    ### Create stacks
    evenstack = os.path.join(self.params['rundir'], "evenstack%s.spi"%(self.timestamp))
    emancmd = "proc2d %s %s list=%s spiderswap"%(alignstack,evenstack,evenpartlist)
    apEMAN.executeEmanCmd(emancmd, verbose=True, showcmd=True)
    oddstack = os.path.join(self.params['rundir'], "oddstack%s.spi"%(self.timestamp))
    emancmd = "proc2d %s %s list=%s spiderswap"%(alignstack,oddstack,oddpartlist)
    apEMAN.executeEmanCmd(emancmd, verbose=True, showcmd=True)
    ### Create Volumes
    backproject.backproject3F(evenstack, eveneulerfile, evenvolfile, evenpart)
    backproject.backproject3F(oddstack, oddeulerfile, oddvolfile, oddpart)
    if not os.path.isfile(evenvolfile) or not os.path.isfile(oddvolfile):
        apDisplay.printError("Even-Odd volume creation failed")
    ### Calculate FSC
    apix = apStack.getStackPixelSizeFromStackId(self.params['tiltstackid'])*self.params['tiltbin']
    emancmd = "proc3d %s %s"%(evenvolfile, evenvolfile+".mrc")
    apEMAN.executeEmanCmd(emancmd, verbose=True, showcmd=True)
    emancmd = "proc3d %s %s"%(oddvolfile, oddvolfile+".mrc")
    apEMAN.executeEmanCmd(emancmd, verbose=True, showcmd=True)
    fscfile = os.path.join(self.params['rundir'], "fscdata%s.fsc"%(self.timestamp))
    emancmd = "proc3d %s %s fsc=%s"%(evenvolfile+".mrc", oddvolfile+".mrc", fscfile)
    apEMAN.executeEmanCmd(emancmd, verbose=True, showcmd=True)
    if not os.path.isfile(fscfile):
        apDisplay.printError("Even-Odd fsc calculation failed")
    boxsize = self.getBoxSize()
    self.fscresolution = apRecon.getResolutionFromFSCFile(fscfile, boxsize, apix, msg=True)
    apDisplay.printColor( ("Final FSC resolution: %.5f" % (self.fscresolution)), "cyan")
    # clean up all intermediate even/odd files
    for fname in (evenvolfile, oddvolfile, evenstack, oddstack, eveneulerfile, oddeulerfile, evenpartlist, oddpartlist):
        apFile.removeFile(fname)
#=====================
def runRmeasure(self):
    """Estimate the resolution of the final raw volume with Rmeasure
    and store it in self.rmeasureresolution.

    Converts the last iteration's raw SPIDER volume to a temporary
    MRC file, runs Rmeasure on it, then deletes the temporary file.
    """
    finalrawvolfile = os.path.join(self.params['rundir'], "rawvolume%s-%03d.spi"%(self.timestamp, self.params['numiters']))
    emancmd = "proc3d %s %s"%(finalrawvolfile, "rmeasure.mrc")
    apEMAN.executeEmanCmd(emancmd, verbose=True, showcmd=True)
    apix = apStack.getStackPixelSizeFromStackId(self.params['tiltstackid'])*self.params['tiltbin']
    self.rmeasureresolution = apRecon.runRMeasure(apix, "rmeasure.mrc")
    #apDisplay.printColor("Final Rmeasure resolution: "+str(self.rmeasureresolution), "cyan")
    apFile.removeFile("rmeasure.mrc")
#=====================
def start(self):
    """Main RCT pipeline.

    Selects good particles, writes their Euler angles and keep-list,
    builds a new tilted-particle stack, back-projects an initial
    volume, iteratively refines it (shift + back-project + filter),
    then optionally runs the even/odd FSC test and Rmeasure, and
    finally records the run in the database.
    """
    ### get stack data
    # NOTE(review): notstackdata is fetched but not used below — confirm
    notstackdata = apStack.getOnlyStackData(self.params['notstackid'])
    tiltstackdata = apStack.getOnlyStackData(self.params['tiltstackid'])
    ### get good particle numbers
    includeParticle, tiltParticlesData = self.getGoodAlignParticles()
    self.numpart = len(includeParticle)
    ### make doc file of Euler angles
    eulerfile = self.makeEulerDoc(tiltParticlesData)
    ### write kept particles to file
    self.params['keepfile'] = os.path.join(self.params['rundir'], "keepfile"+self.timestamp+".lst")
    apDisplay.printMsg("writing to keepfile "+self.params['keepfile'])
    kf = open(self.params['keepfile'], "w")
    for partnum in includeParticle:
        kf.write(str(partnum)+"\n")
    kf.close()
    ### make new stack of tilted particle from that run
    tiltstackfile = os.path.join(tiltstackdata['path']['path'], tiltstackdata['name'])
    rctstackfile = os.path.join(self.params['rundir'], "rctstack"+self.timestamp+".hed")
    apFile.removeStack(rctstackfile, warn=False)
    apStack.makeNewStack(tiltstackfile, rctstackfile, self.params['keepfile'])
    spiderstack = self.convertStackToSpider(rctstackfile)
    #self.mirrorParticles(tiltParticlesData, spiderstack)
    ### iterations over volume creation
    ### back project particles into filter volume
    volfile = os.path.join(self.params['rundir'], "volume%s-%03d.spi"%(self.timestamp, 0))
    backproject.backprojectCG(spiderstack, eulerfile, volfile,
        numpart=self.numpart, pixrad=self.params['radius'])
    alignstack = spiderstack
    ### center/convert the volume file
    mrcvolfile = self.processVolume(volfile, 0)
    for i in range(self.params['numiters']):
        looptime = time.time()
        iternum = i+1
        apDisplay.printMsg("running backprojection iteration "+str(iternum))
        ### xy-shift particles to volume projections
        alignstack = backproject.rctParticleShift(volfile, alignstack, eulerfile, iternum,
            numpart=self.numpart, pixrad=self.params['radius'], timestamp=self.timestamp)
        apFile.removeFile(volfile)
        ### back project particles into better volume
        volfile = os.path.join(self.params['rundir'], "volume%s-%03d.spi"%(self.timestamp, iternum))
        backproject.backproject3F(alignstack, eulerfile, volfile,
            numpart=self.numpart)
        ### center/convert the volume file
        mrcvolfile = self.processVolume(volfile, iternum)
        apDisplay.printColor("finished volume refinement loop in "
            +apDisplay.timeString(time.time()-looptime), "cyan")
    ### optimize Euler angles
    #NOT IMPLEMENTED YET
    ### perform eotest
    if self.params['eotest'] is True:
        self.runEoTest(alignstack, eulerfile)
    self.runRmeasure()
    ### insert volumes into DB
    self.insertRctRun(mrcvolfile)
    #apDisplay.printMsg("waiting for Chimera to finish")
    #time.sleep(60)
#=====================
if __name__ == "__main__":
    # rctVolumeScript is defined earlier in this module; run the full
    # pipeline and release its resources.
    rctVolume = rctVolumeScript()
    rctVolume.start()
    rctVolume.close()
|
nilq/baby-python
|
python
|
# terrascript/provider/josenk/esxi.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:15:57 UTC)
import terrascript
class esxi(terrascript.Provider):
    """Terraform-provider-esxi plugin"""

    # Provider registry metadata.  This file is auto-generated by
    # tools/makecode.py — do not edit the values by hand.
    __description__ = "Terraform-provider-esxi plugin"
    __namespace__ = "josenk"
    __name__ = "esxi"
    __source__ = "https://github.com/josenk/terraform-provider-esxi"
    __version__ = "1.8.3"
    __published__ = "2021-08-29T02:45:18Z"
    __tier__ = "community"


# Public API of this generated module.
__all__ = ["esxi"]
|
nilq/baby-python
|
python
|
from .db import session_scope, Server
from datetime import datetime, timedelta
class Gateway:
    """Thin data-access layer over the Server table.

    Serializes ORM rows into plain dicts so callers never handle
    SQLAlchemy objects outside an active session.
    """

    @staticmethod
    def _rows_to_dicts(rows):
        """Convert ORM rows to plain dicts, dropping SQLAlchemy's
        internal '_sa_instance_state' entry.

        Unlike the previous inline code, this does not mutate each
        row's __dict__, and it removes the duplication between
        get_all_servers() and get_analytics().
        """
        return [
            {key: value for key, value in row.__dict__.items()
             if key != '_sa_instance_state'}
            for row in rows
        ]

    def get_all_servers(self):
        """Return every Server row as a list of plain dicts."""
        with session_scope() as session:
            # (The old `if raw_servers:` guard was dead code — a Query
            # object is always truthy; an empty result simply yields [].)
            return self._rows_to_dicts(session.query(Server))

    def fill_db(self, servers):
        """Upsert server counts.

        servers: mapping of server name -> number.  Existing rows are
        updated in place; new names are inserted with the current
        timestamp in 'added'.
        """
        with session_scope() as session:
            for name in servers.keys():
                server = session.query(Server).filter(Server.name == name).first()
                if server:
                    server.number = servers[name]
                else:
                    session.add(Server(name=name, number=servers[name], added=datetime.now()))

    def get_analytics(self, hour=False, day=False, month=False):
        """Return rows added within the last hour, day, or 30 days.

        The first truthy flag wins (hour, then day); with no flags set
        the 30-day window is used.  Note `month` itself is never read —
        it is the implicit default, kept for interface compatibility.
        """
        if hour:
            cutoff = datetime.now() - timedelta(hours=1)
        elif day:
            cutoff = datetime.now() - timedelta(days=1)
        else:
            cutoff = datetime.now() - timedelta(days=30)
        with session_scope() as session:
            rows = session.query(Server).filter(Server.added >= cutoff).all()
            return self._rows_to_dicts(rows)
|
nilq/baby-python
|
python
|
"""Utility functions."""
from typing import typevar, List, Any
T = typevar('T')
def short_type(obj: object) -> str:
    """Return the last component of the type name of an object.

    If obj is None, return 'nil'. For example, if obj is 1, return 'int'.
    """
    if obj is None:
        return 'nil'
    # type(obj).__name__ is already the bare class name with no module
    # prefix.  The previous str(type(obj)).split('.')[-1].rstrip("'>")
    # parsing only worked for dotted names; for builtins it returned
    # strings like "<class 'int", contradicting the docstring.
    return type(obj).__name__
def indent(s: str, n: int) -> str:
    """Indent all the lines in s (separated by newlines) by n spaces."""
    pad = ' ' * n
    # Prefix the first line, then every line following a newline.
    return pad + s.replace('\n', '\n' + pad)
def array_repr(a: List[T]) -> List[str]:
    """Return the items of an array converted to strings using repr()."""
    # Comprehension replaces the manual append loop; same result.
    return [repr(x) for x in a]
def dump_tagged(nodes: List[Any], tag: str) -> str:
    """Convert an array into a pretty-printed multiline string representation.

    The format is
      tag(
        item1..
        itemN)
    Individual items are formatted like this:
      - arrays are flattened
      - pairs (str : array) are converted recursively, so that str is the tag
      - other items are converted to strings and indented
    """
    lines = []  # type: List[str]
    if tag:
        lines.append(tag + '(')
    for node in nodes:
        if isinstance(node, list):
            # Flatten nested arrays; empty ones contribute nothing.
            if node:
                lines.append(dump_tagged(node, None))
        elif isinstance(node, tuple):
            # (tag, items) pair: recurse with the given tag, then indent.
            lines.append(indent(dump_tagged(node[1], node[0]), 2))
        elif node:
            lines.append(indent(str(node), 2))
    if tag:
        lines[-1] += ')'
    return '\n'.join(lines)
|
nilq/baby-python
|
python
|
from abc import ABC
from abc import abstractmethod
class DatabaseQuery(ABC):
    """Abstract interface for a database access layer.

    Concrete subclasses provide connection management and the query
    primitives declared below.  `query_invoke` wraps a query method so
    that a connection is opened before the call and closed afterwards,
    even when the query raises.
    """

    @staticmethod
    def query_invoke(func):
        """Decorator: open a connection, run the wrapped method, and
        always close the connection again (success or failure)."""
        def wrapper_method(self_instance, *args, **kwargs):
            try:
                self_instance.create_connection()
                result = func(self_instance, *args, **kwargs)
                return result
            finally:
                # close even if create_connection/func raised
                self_instance.close_connection()
        return wrapper_method

    @abstractmethod
    def create_connection(self):
        """Open a connection for the coming queries."""
        pass

    @abstractmethod
    def close_connection(self):
        """Close the per-query connection opened by create_connection."""
        pass

    @abstractmethod
    def get_object_by_id(self, collection, object_id):
        """Fetch a single object from `collection` by its id."""
        pass

    @abstractmethod
    def get_object(self, collection, key, value):
        """Fetch a single object from `collection` where key == value."""
        pass

    @abstractmethod
    def get_collection(self, collection):
        """Return a handle to the named collection."""
        pass

    @abstractmethod
    def get_all_item_in_collection(self, collection):
        """Return all items stored in `collection`."""
        pass

    @abstractmethod
    def get_item_in_collection(self, collection, filter_query):
        """Return one item from `collection` matching `filter_query`."""
        pass

    @abstractmethod
    def get_list_item_in_collection(self, collection, filter_query):
        """Return all items from `collection` matching `filter_query`."""
        pass

    @abstractmethod
    def count_item_in_query(self, collection, filter_query):
        """Return the number of items matching `filter_query`."""
        pass

    @abstractmethod
    def close_client_connection(self):
        """Close the client-level connection."""
        pass

    @abstractmethod
    def close_server(self):
        """Shut down the server-side handle."""
        pass
|
nilq/baby-python
|
python
|
# coding: utf-8
import argparse
from argparse import RawDescriptionHelpFormatter
import sys
import os
sys.path.append('.\src')
sys.path.append('..\src')
from src.main_functions import *
### single-letter CLI modes accepted as the positional "mode" argument
### (meanings per the argparse help text in parser() below)
updateInput='u'  # update novels
fullupdateInput='fu'  # full update (optionally forced with -f)
downloadInput='d'  # download the input.txt list
statusInput='s'  # update status.csv
compressInput='c'  # compress novels in zip
def check_env():
    """Ensure the `novel_list` working directory exists in the cwd.

    The rest of the tool assumes this directory is present.
    """
    # makedirs(exist_ok=True) replaces the old listdir/except/mkdir
    # dance: it is race-free and does not rely on exception flow.
    os.makedirs('novel_list', exist_ok=True)
def parser():
    """Parse command-line arguments and dispatch to the right action.

    Modes: c = compress, d = download, s = status, u = update,
    fu = full update.  Optional flags: -r (regex filter), -o (output
    dir, compression only), -f (force, full update only), -md (keep
    text formatting).
    """
    parser = argparse.ArgumentParser(description=""" c to compress novels in zip
    d to download input.txt list
    s to update status.csv
    u to update novels""",
        formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument("mode",
        help="put the letter of argument c/d/s/u",
        type=str,default=argparse.SUPPRESS)
    parser.add_argument("-r", help="regex of entree for compression selection (select * containing regex)",
        type=str,default=argparse.SUPPRESS)
    parser.add_argument("-o", help="output directory (only works for compression)",
        type=str,default=argparse.SUPPRESS)
    parser.add_argument("-f", help="force",action='store_true'
        ,default=argparse.SUPPRESS)
    parser.add_argument("-md", help="format",action='store_true'
        ,default=argparse.SUPPRESS)
    args = parser.parse_args()
    print(args)
    regex=''
    keep_text_format=False
    # SUPPRESS means absent flags create no attribute at all, hence the
    # hasattr() checks below instead of truthiness tests.
    if args.mode:
        if hasattr(args, 'md'):
            keep_text_format=True
        if hasattr(args, 'r'):
            regex=args.r
        if(args.mode==downloadInput):
            print("downloading")
            download(keep_text_format)
        elif(args.mode==updateInput):
            archiveUpdate(findNovel(regex),keep_text_format)
        elif(args.mode==statusInput):
            getFolderStatus()
        elif(args.mode==fullupdateInput):
            if hasattr(args, 'f'):
                archiveFullUpdate(findNovel(regex),True)
            else:
                archiveFullUpdate(findNovel(regex))
        elif(args.mode==compressInput):
            print('compression')
            print(args)
            out=''
            if hasattr(args, 'o'):
                out=args.o
            compressAll(regex,out)
if __name__ == '__main__':
    # make sure the working directory exists, then dispatch on CLI args
    check_env()
    parser()
|
nilq/baby-python
|
python
|
import json
from collections import defaultdict
from typing import List, Tuple, Dict
from commands.snapshot import snapshot
from drive.api import DriveFile, Snapshot, resolve_paths, ResourcePath, Resource
from drive.http import ErrorHandlingRunner, GAPIBatchRunner
from drive.misc import eprint
from drive.serializers import load_snapshot
def dedup_list(service, args):
    """List duplicate Drive files grouped by MD5, as text or JSON."""
    eprint('Computing duplicates and resolving resource paths.')
    duplicates, paths = compute_duplicates(service, get_snapshot(service, args))
    # md5 -> [{'id': ..., 'path': ...}, ...], used by both output formats
    summary = {
        md5: [
            {
                'id': duplicate.id,
                'path': str(paths[duplicate])
            }
            for duplicate in entries
        ] for md5, entries in duplicates.items()
    }
    if len(duplicates) == 0:
        eprint('Hooray! There are no duplicates in the snapshot.')
    else:
        eprint('Duplicates were found.')
        if args.json:
            print(json.dumps(summary, indent=3))
        else:
            for md5, entries in summary.items():
                eprint('------ %s -------' % md5)
                for entry in entries:
                    eprint('%s (%s)' % (entry['path'], entry['id']))
                eprint('')
def dedup_apply(service, args):
    """Delete duplicate files, keeping the copy under the most preferred
    prefix (see rank()); honors --dry-run."""
    eprint('Now computing and removing duplicates. Prefix order is %s.' % args.prefixes)
    duplicates, paths = compute_duplicates(
        service, get_snapshot(service, args)
    )
    # Strips white spaces.
    prefixes = [prefix.strip() for prefix in args.prefixes.split(',')]
    # Adds trailing backlash to make prefixes unique.
    prefixes = [prefix + ('/' if not prefix.endswith('/') else '') for prefix in prefixes]
    runner = ErrorHandlingRunner(service, delegate=GAPIBatchRunner)
    for md5, entries in duplicates.items():
        # pair each duplicate with its prefix rank; keep the best-ranked
        # (first after sorting) and queue deletion for the rest
        preferences = list(zip(entries, list(rank(prefixes, [paths[entry] for entry in entries]))))
        for duplicate, _ in sorted(preferences, key=lambda x: x[1])[1:]:
            rid = '%s (%s)' % (paths[duplicate], duplicate.id)
            eprint('Queue request for deleting duplicate %s' % rid)
            runner.add(request_id=rid, request=duplicate.delete())
    if not args.dry_run:
        eprint('\n --- Now running %d deletion requests in batch.' % len(runner.requests))
        for rid, result, _ in runner.execute():
            eprint('Successfully deleted %s' % rid)
    else:
        eprint('Dry run. No changes applied.')
def rank(prefix_preferences, paths: List[ResourcePath]):
    """Yield, for each path, the index of the first preferred prefix it
    falls under.  Lower indices mean more preferred locations.

    Raises if some path matches none of the prefixes — better to abort
    than to make an arbitrary keep-or-delete decision for it.
    """
    for current in paths:
        matched = next(
            (position
             for position, candidate in enumerate(prefix_preferences)
             if current.startswith(candidate)),
            None,
        )
        if matched is None:
            # If the path does not match any of the prefixes, we throw an error. This can be painful to the
            # user but it's better than taking an arbitrary decision for either deleting or leaving the duplicate
            # behind
            raise Exception('Path %s is not covered by any of the specified prefixes. Aborting.' % str(current))
        yield matched
def compute_duplicates(service, snapshot: Snapshot) -> Tuple[Dict[str, List[Resource]], Dict[Resource, ResourcePath]]:
    """Group snapshot files by MD5 and resolve paths for duplicated ones.

    Returns (duplicates, paths): duplicates maps an md5 to the 2+ files
    sharing it; paths maps each such file to its resolved ResourcePath.
    """
    by_md5 = defaultdict(list)
    for entry in snapshot.entries:
        # only DriveFile entries carry an md5; other resources are skipped
        if not isinstance(entry, DriveFile):
            continue
        by_md5[entry.md5Checksum].append(entry)
    duplicates = {k: v for k, v in by_md5.items() if len(v) > 1}
    # resolve paths only for the files that actually have duplicates
    paths = dict(resolve_paths(service, [
        element for elements in duplicates.values() for element in elements
    ]))
    return duplicates, paths
def get_snapshot(service, args):
    """Return the snapshot to operate on.

    Loads a previously saved snapshot when --snapshot was given,
    otherwise takes a fresh snapshot of the --folder argument.
    """
    if args.snapshot:
        return load_snapshot(service, args.snapshot)
    return snapshot(service, [args.folder])
def register_parser(subparsers):
    """Register the `dedup` command (and its `list`/`apply` subcommands)
    on the given argparse subparsers object."""
    parser = subparsers.add_parser(
        'dedup', help='Hunt down and remove duplicate files from Google Drive.')
    # exactly one input source: a saved snapshot or a live folder
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--snapshot', help='causes _dedup_ to look for duplicates inside of a pre-existing snapshot,'
                                          'created with the _snapshot_ command')
    group.add_argument('--folder', help='causes _dedup_ to look for duplicates in an existing folder '
                                        'in Google Drive (e.g. "/Pictures/Old/")')
    subsubparsers = parser.add_subparsers()
    subsubparsers.required = True
    subsubparsers.dest = 'command'
    compute = subsubparsers.add_parser('list', help='lists duplicates, grouped by MD5')
    compute.add_argument('--json', help='Outputs listing in JSON format.', action='store_true')
    compute.set_defaults(func=dedup_list)
    apply = subsubparsers.add_parser('apply', help='removes duplicates')
    apply.add_argument('--prefixes', help='comma-separated list of preferred prefixes, most '
                                          'preferred come first.'
                                          'Duplicates will be dropped from least-preferred '
                                          'prefixes, or prefixes not present in this list. Use '
                                          'the single prefix "/" to delete duplicates at random. If no '
                                          'prefix can be matched, deduplication will abort with an error.',
                       required=True)
    apply.add_argument('--dry-run', help='Prints actions but do not actually change the contents of Google Drive.',
                       action='store_true')
    apply.set_defaults(func=dedup_apply)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: UTF-8
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
import bs4
import requests
import re
from urllib.request import urlretrieve
import os.path
import csv
if __name__ == "__main__":
base__url = "http://pesquisa.memoriasreveladas.gov.br/mrex/consulta/"
first_url = "http://pesquisa.memoriasreveladas.gov.br/mrex/consulta/resultado_pesquisa_new.asp?v_pesquisa=&v_fundo_colecao=1278707&Pages={}"
cookies = dict(ASPSESSIONIDCCBQCQSR='EEPJMIIDAOLDBMOABLJODJAJ',ASPSESSIONIDACCRCQSQ='BPKADPKDIEABCDGGLBAAHEAK',ASPSESSIONIDCACTCQSQ='EEGDOMLCJKKNGPFIPJLCMFBI',TS01dc25d1='01770c3a9841dbb80f087c5796c0c8a70c918eb8b9a5fb7229d46b84e736c80391564a8a27de44504d56d751444c19f2040f128a02c04b95e7951acf7b4448895e221d9514')
series = ['BR RJANRIO CNV.0.ERE','BR RJANRIO CNV.0.OCO','BR RJANRIO CNV.0.PEI','BR RJANRIO CNV.0.EST','BR RJANRIO CNV.0.GRG','BR RJANRIO CNV.0.RCE']
arquivos = []
for serie in series:
payload = {'input_pesqfundocolecao': '1278707', 'input_pesqnotacao': serie,'v_fundo_colecao': '1278707', 'v_ordem': 'CodigoReferencia' }
if not os.path.exists(serie):
os.makedirs(serie)
with open('{}/{}.csv'.format(serie, serie), 'w', newline='') as f:
writer = csv.writer(f)
num_pgs = 2
pagina = 1
while pagina <= num_pgs:
url = first_url.format(pagina)
print('Serie', serie, 'lendo pagina', pagina, 'de', num_pgs)
r = requests.post(url, data=payload, cookies=cookies)
r.encoding = 'utf-8'
s1 = bs4.BeautifulSoup(r.text, "lxml")
ulres = s1.find('ul', id='resultado')
ptp = re.compile(r'var TotalPag = (\d+)')
match = ptp.search(r.text)
if match:
num_pgs = int(match.group(1))
# cnt = 0
for li in ulres.find_all('li'):
l = li.find("a", title="Fazer download do arquivo")
if l and l.find_parent('li') == li:
link = l["onclick"]
parts = link.split('\',\'')
arq = parts[0][30:]
arq_name = parts[1]
link = "{}download.asp?NomeArquivo={}&arquivo={}&apresentacao=2".format(base__url, arq_name, arq)
arquivos.append((link, arq_name, serie))
else:
arq_name = 'Sem arquivo'
link = '--'
description = li.span.text
writer.writerow((description, arq_name, link))
# cnt += 1
# print('Dados:', cnt, serie, description[1:10], arq_name)
pagina += 1
for arq_tpl in arquivos:
filename = '{}/{}'.format(arq_tpl[2], arq_tpl[1])
if not os.path.isfile(filename):
urlretrieve(arq_tpl[0], filename)
print('Carregou arquivo:', filename)
|
nilq/baby-python
|
python
|
# 20412 - [Job Adv] (Lv.100) Mihile 4rd job adv
# NPC-driven job-advancement dialogue script for Mihile.
sm.setSpeakerID(1101002)  # speak as NPC 1101002
if sm.sendAskYesNo("Are you ready, are you okay to leave?"):
    # On "yes": warp to map 913070100 and start an instance timer.
    # NOTE(review): argument meanings (spawn point 0, 300 s duration,
    # return map 130000000) inferred from call shape — confirm against
    # the script-manager API.
    sm.warp(913070100, 0)
    sm.setInstanceTime(300, 130000000)
|
nilq/baby-python
|
python
|
##############################################################################
# Written by: Cachen Chen <cachen@novell.com>
# Date: 09/25/2009
# Application wrapper for Moonlight combobox
# Used by the combobox-*.py tests
##############################################################################
'Application wrapper for Moonlight combobox'
from strongwind import *
from os.path import exists, dirname
from sys import path
# directory this module was launched from; the UIA test-suite root is
# two levels above it
init_dir = path[0]
uiaqa_path = dirname(dirname(init_dir))
# Variable the path of Firefox to run the application, Please install
# Firefox3.5.1 first which is accessible by accerciser
firefox_path = '/usr/bin/firefox'
def launchComboBox(exe=None):
    '''Launch Moonlight combobox with accessibility enabled and return a
    combobox object. Log an error and return None if something goes wrong

    exe: path to the sample HTML page; defaults to the ComboBoxSample
    page under the test-suite samples directory.
    '''
    if exe is None:
        # make sure we can find the sample applications
        exe = '%s/samples/moonlight/ComboBox/ComboBoxSample.html' % uiaqa_path
    if not exists(exe):
        raise IOError, "Could not find file %s" % exe
    # open the sample page in Firefox (python-2 raise syntax above is
    # intentional; this module targets python 2 / strongwind)
    args = [firefox_path, exe]
    (app, subproc) = cache.launchApplication(args=args, name="Firefox", \
        wait=config.LONG_DELAY)
    combobox = ComboBox(app, subproc)
    cache.addApplication(combobox)
    # give the frame a back-reference to its application wrapper
    combobox.comboBoxFrame.app = combobox
    return combobox
# class to represent the application
class ComboBox(accessibles.Application):
    #checkShowing=False
    def __init__(self, accessible, subproc=None):
        'Get a reference to the ComboBox window'
        super(ComboBox, self).__init__(accessible, subproc)
        # the frame title starts with "ComboBoxSample"; logged as 'Combo Box'
        self.findFrame(re.compile('^ComboBoxSample'), logName='Combo Box')
|
nilq/baby-python
|
python
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for LocalDockerModelServerRunner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import socket
import time
from absl import logging
import docker
from docker import errors as docker_errors
from typing import Text
from tfx import types
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator.model_server_runners import base_runner
from tfx.proto import infra_validator_pb2
from tfx.utils import path_utils
_POLLING_INTERVAL_SEC = 1
def _make_docker_client(config: infra_validator_pb2.LocalDockerConfig):
  """Create a docker client, applying any optional overrides set in the
  LocalDockerConfig (timeout, base URL, API version)."""
  overrides = {
      'timeout': config.client_timeout_seconds,
      'base_url': config.client_base_url,
      'version': config.client_api_version,
  }
  # Unset proto fields are falsy; pass only the explicitly-set ones so
  # DockerClient keeps its own defaults for the rest.
  return docker.DockerClient(
      **{name: value for name, value in overrides.items() if value})
def _find_available_port():
"""Find available port in the host machine."""
with contextlib.closing(
socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('localhost', 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
_, port = sock.getsockname()
return port
def _parse_model_path(model_path: Text):
"""Parse model path into a base path, model name, and a version.
Args:
model_path: Path to the SavedModel (or other format) in the structure of
`{model_base_path}/{model_name}/{version}`, where version is an integer.
Raises:
ValueError: if the model_path does not conform to the expected directory
structure.
Returns:
`model_base_path`, `model_name`, and integer `version`.
"""
model_path, version = os.path.split(model_path)
if not version.isdigit():
raise ValueError(
'{} does not conform to tensorflow serving directory structure: '
'BASE_PATH/model_name/int_version.'.format(model_path))
base_path, model_name = os.path.split(model_path)
return base_path, model_name, int(version)
class LocalDockerRunner(base_runner.BaseModelServerRunner):
  """A model server runner that runs in a local docker runtime.

  You need to pre-install docker in the machine that is running InfraValidator
  component. For that reason, it is recommended to use this runner only for
  testing purpose.
  """

  def __init__(self, model: types.Artifact,
               serving_binary: serving_bins.ServingBinary,
               serving_spec: infra_validator_pb2.ServingSpec):
    """Make a local docker runner.

    Args:
      model: A model artifact to infra validate.
      serving_binary: A ServingBinary to run.
      serving_spec: A ServingSpec instance.

    Raises:
      ValueError: If `serving_spec.model_name` does not match the model name
        derived from the model artifact path.
    """
    base_path, model_name, version = _parse_model_path(
        path_utils.serving_model_path(model.uri))
    if model_name != serving_spec.model_name:
      # Fixed message: the previous version joined "from" and "the" with no
      # separating space across the implicit string concatenation.
      raise ValueError(
          'ServingSpec.model_name ({}) does not match the model name ({}) '
          'from the Model artifact.'.format(
              serving_spec.model_name, model_name))
    self._model_base_path = base_path
    self._model_name = model_name
    self._model_version = version
    self._serving_binary = serving_binary
    self._serving_spec = serving_spec
    self._docker = _make_docker_client(serving_spec.local_docker)
    # Both are populated by Start(): the running docker Container object and
    # the 'localhost:<port>' endpoint string.
    self._container = None
    self._endpoint = None

  def __repr__(self):
    return 'LocalDockerRunner(image: {image})'.format(
        image=self._serving_binary.image)

  @property
  def _model_path(self):
    # Host-side directory holding all versions of the model.
    return os.path.join(self._model_base_path, self._model_name)

  @property
  def _model_version_path(self):
    # Host-side directory of the specific model version.
    return os.path.join(self._model_base_path, self._model_name,
                        str(self._model_version))

  def GetEndpoint(self):
    """Return the 'host:port' endpoint of the started model server."""
    assert self._endpoint is not None, (
        'Endpoint is not yet created. You should call Start() first.')
    return self._endpoint

  def Start(self):
    """Pick a free host port and launch the model server container."""
    assert self._container is None, (
        'You cannot start model server multiple times.')

    host_port = _find_available_port()
    self._endpoint = 'localhost:{}'.format(host_port)

    if isinstance(self._serving_binary, serving_bins.TensorFlowServing):
      # If the model is visible on the local filesystem, mount it directly;
      # otherwise hand the (remote) base path to the server to resolve.
      is_local_model = os.path.exists(self._model_version_path)
      if is_local_model:
        run_params = self._serving_binary.MakeDockerRunParams(
            host_port=host_port,
            host_model_path=self._model_path)
      else:
        run_params = self._serving_binary.MakeDockerRunParams(
            host_port=host_port,
            model_base_path=self._model_base_path)
    else:
      raise NotImplementedError('Unsupported serving binary {}'.format(
          type(self._serving_binary).__name__))

    logging.info('Running container with parameter %s', run_params)
    self._container = self._docker.containers.run(**run_params)

  def WaitUntilRunning(self, deadline):
    """Poll the container until it is running or `deadline` (epoch secs) passes.

    Args:
      deadline: Absolute time (as given by `time.time()`) to stop waiting.

    Raises:
      error_types.JobAborted: If the container disappeared or reached a
        terminal status other than 'running'.
      error_types.DeadlineExceeded: If the deadline passed while waiting.
    """
    assert self._container is not None, 'container has not been started.'

    while time.time() < deadline:
      try:
        # Reload container attributes from server. This is the only right way
        # to retrieve the latest container status from docker engine.
        self._container.reload()
        status = self._container.status
      except docker_errors.NotFound:
        # If the job has been aborted and container has specified auto_removal
        # to True, we might get a NotFound error during container.reload().
        raise error_types.JobAborted(
            'Container not found. Possibly removed after the job has been '
            'aborted.')
      # The container is just created and not yet in the running status.
      if status == 'created':
        time.sleep(_POLLING_INTERVAL_SEC)
        continue
      # The container is running :)
      if status == 'running':
        return
      # Docker status is one of {'created', 'restarting', 'running',
      # 'removing', 'paused', 'exited', or 'dead'}. Status other than
      # 'created' and 'running' indicates the job has been aborted.
      raise error_types.JobAborted(
          'Job has been aborted (container status={})'.format(status))

    raise error_types.DeadlineExceeded(
        'Deadline exceeded while waiting for the container to be running.')

  def Stop(self):
    """Stop the container (if any) and close the docker client."""
    if self._container:
      logging.info('Stopping container.')
      self._container.stop()
    self._docker.close()
|
nilq/baby-python
|
python
|
"""
Copyright (c) Open Carbon, 2020
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
backend/tools.py
Provides range of backend tools that can be run from command line:
importlocations: Imports location data from file that is used to geolocate specific locations
generategeometries: Generates multiple geometries of boundaries for multiple zoom levels using simplification
processspecialcases: Perform additional ad-hoc processing
importdata: Imports data for specific area scale and year range (assuming BEIS data)
"""
import os
import pandas
import json
import topojson as tp
import geojson
import csv
import re
from shapely.geometry import Polygon
if __name__ == '__main__':
import sys
import django
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parent_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "carbonmap.settings")
django.setup()
from django.contrib.gis.db.models.functions import AsGeoJSON
from django.core.serializers.json import DjangoJSONEncoder
from django.contrib.gis.geos import GEOSException, GEOSGeometry, Point, fromstr
from django.db import connection, transaction
from django.contrib.gis.db.models import Extent
from backend.gis import get_degrees_per_pixel
from backend.models import Location, Geometry, Data, DATATYPES_CHOICES
# Number of zoom levels to cache geometries for
# We generate a target-resolution-dependent simplification for each geometry object to minimize download size
zoomrange = 15

# Paths to subregion geojson files, keyed by area type code.
# Each value is the list of boundary files that together cover GB at that scale
# (England / Wales / Scotland variants where applicable).
subregions = {
    'lau1': [ 'subregions/Local_Administrative_Units_Level_1_2018.json'],
    'msoa': [ "subregions/england_msoa.json",
              "subregions/wales_msoa.json",
              "subregions/scotland_ig.json"],
    'lsoa': [ "subregions/england_lsoa.json",
              "subregions/wales_lsoa.json",
              "subregions/scotland_dz.json"],
}

# Unitary-authority boundary file used to replace the Scottish LAU1
# boundaries — see processspecialcases().
subregion_scotland_correction = "subregions/Counties_and_Unitary_Authorities_GB_2018.json"

# Matches any run of characters that is not a digit or '.', used to strip
# separators and other noise from numeric CSV fields before float().
non_decimal = re.compile(r'[^\d.]+')
def getlargestpolygon(areatype):
    """
    Get largest area for particular area type

    Scans the zoom-15 geometries of the given type and returns the code of the
    one with the largest bounding-box side (in degrees, latitude or longitude).
    Ad-hoc function used to determine minimum zoom levels when MSOA/IG and
    LSOA/DZ appear.
    """
    largest_code = ''
    largest_span = 0
    rows = Geometry.objects.filter(zoom=15, type=areatype).annotate(Extent('geometry')).values('code', 'geometry__extent')
    for row in rows:
        west, south, east, north = row['geometry__extent']
        # Check longitude span first, then latitude span, tracking the maximum.
        for span in (east - west, north - south):
            if span > largest_span:
                largest_span = span
                largest_code = row['code']
    return largest_code
def get_yearsuffix_from_filepath(filepath):
    """
    Get year suffix from file path of boundary file

    Finds the first four-digit run in the path (e.g. '2018') and returns its
    last two digits ('18'), or None when the path contains no four-digit year.
    """
    match = re.search(r"(\d{4})", filepath)
    return match.group(1)[2:] if match else None
def get_feature_name_code(properties, yearsuffix):
    """
    Get name and code from GeoJSON feature

    Looks up the feature's code and name under the generic keys first, then the
    year-suffixed LAU1 and county/UA keys, preserving that precedence order.
    Returns a dict with 'name' and 'code' (either may be None when absent).
    """
    code = None
    for key in ('code', 'lau1' + yearsuffix + 'cd', 'ctyua' + yearsuffix + 'cd'):
        if key in properties:
            code = properties[key]
            break

    name = None
    for key in ('Name', 'name', 'lau1' + yearsuffix + 'nm', 'ctyua' + yearsuffix + 'nm'):
        if key in properties:
            name = properties[key]
            break

    return {'name': name, 'code': code}
def processspecialcases():
    """
    Perform additional ad-hoc processing

    - Replace Scotland LAU1s with separate local authority boundaries as BEIS data uses non-standard LAU1 naming
    """
    # Replace Scotland LAU1s with separate unitary authority boundaries as BEIS
    # data uses unitary authorities for Scotland data at large scale.
    Geometry.objects.filter(code__startswith="S", type='lau1').delete()
    print("Loading supplemental file for Scottish LAs", subregion_scotland_correction)
    with open(subregion_scotland_correction) as f:
        geojson_codes, topologysafe_codes = [], []
        yearsuffix = get_yearsuffix_from_filepath(subregion_scotland_correction)
        geometrydata = geojson.load(f)
        # Create a list of all feature codes for entire file
        for feature in geometrydata['features']:
            feature_namecode = get_feature_name_code(feature['properties'], yearsuffix)
            if feature_namecode['code']: geojson_codes.append(feature_namecode['code'])
        geometrytopology = tp.Topology(geometrydata)
        # Create a list of all feature codes that topojson processed successfully
        topologysafefeatures = json.loads(geometrytopology.to_geojson())
        for feature in topologysafefeatures['features']:
            feature_namecode = get_feature_name_code(feature['properties'], yearsuffix)
            if feature_namecode['code']: topologysafe_codes.append(feature_namecode['code'])
        # Get difference of feature codes between original file and topojson
        # successfully processed feature codes; those features are saved
        # unsimplified at every zoom level instead.
        # TODO: investigate why topojson fails on certain polygons
        code_diff = list(set(geojson_codes) - set(topologysafe_codes))
        diff_features = []
        for feature in geometrydata['features']:
            feature_namecode = get_feature_name_code(feature['properties'], yearsuffix)
            if feature_namecode['code'] in code_diff:
                diff_features.append(feature)
        print("Number of polygons topojson failed on =", len(diff_features))
        for zoom in range(0, zoomrange + 1):
            # Compute the degree resolution before the fallback loop below: the
            # previous version referenced zoomepsilon (and an undefined `code`)
            # in the except handler before assignment, raising NameError.
            zoomepsilon = get_degrees_per_pixel(zoom)
            print("Creating identical polygons for all zoom levels for polygons topojson was not able to process", len(diff_features))
            for feature in diff_features:
                feature_namecode = get_feature_name_code(feature['properties'], yearsuffix)
                if feature_namecode['code'][:1] == 'S':  # Is scottish feature
                    try:
                        geometry = GEOSGeometry(str(feature['geometry']))
                        print("Saving geometry for", feature_namecode['code'], "zoom level", zoom)
                        geometryobject = Geometry(name=feature_namecode['name'], type='lau1', code=feature_namecode['code'], zoom=zoom, geometry=geometry)
                        geometryobject.save()
                    except Exception:
                        print("Failed to create geometry object - probably too small for zoom level", feature_namecode['code'], "zoom level", zoom, "degree resolution", zoomepsilon)
            print("Simplifying", subregion_scotland_correction, "for zoom level", zoom, "equivalent to degree resolution", zoomepsilon)
            simplifiedfeatures = json.loads(geometrytopology.toposimplify(
                epsilon=zoomepsilon,
                simplify_algorithm='dp',
                prevent_oversimplify=True
            ).to_geojson())
            for feature in simplifiedfeatures['features']:
                feature_namecode = get_feature_name_code(feature['properties'], yearsuffix)
                geometry = GEOSGeometry(str(feature['geometry']))
                if feature_namecode['code'][:1] == 'S':  # Is scottish feature
                    print("Saving geometry for", feature_namecode['code'], feature_namecode['name'])
                    geometryobject = Geometry(name=feature_namecode['name'], type='lau1', code=feature_namecode['code'], zoom=zoom, geometry=geometry)
                    geometryobject.save()
def generategeometries():
    """
    Generates multiple geometries of boundaries for multiple zoom levels using simplification

    For every area type in ``subregions`` the existing geometries are deleted
    and rebuilt: each boundary file is simplified with topojson once per zoom
    level and the resulting polygons saved as ``Geometry`` rows. Polygons that
    topojson fails to process are saved unsimplified at every zoom level.
    Finishes by running processspecialcases().
    """
    for areatype in subregions:
        Geometry.objects.filter(type=areatype).delete()
        for areafile in subregions[areatype]:
            print("Loading area file", areafile)
            with open(areafile) as f:
                geojson_codes, topologysafe_codes = [], []
                yearsuffix = get_yearsuffix_from_filepath(areafile)
                geometrydata = geojson.load(f)
                # Create a list of all feature codes for entire file
                for feature in geometrydata['features']:
                    feature_namecode = get_feature_name_code(feature['properties'], yearsuffix)
                    if feature_namecode['code']: geojson_codes.append(feature_namecode['code'])
                geometrytopology = tp.Topology(geometrydata)
                # Create a list of all feature codes that topojson processed successfully
                topologysafefeatures = json.loads(geometrytopology.to_geojson())
                for feature in topologysafefeatures['features']:
                    feature_namecode = get_feature_name_code(feature['properties'], yearsuffix)
                    if feature_namecode['code']: topologysafe_codes.append(feature_namecode['code'])
                # Get difference of feature codes between original file and topojson
                # successfully processed feature codes
                # TODO: investigate why topojson fails on certain polygons
                code_diff = list(set(geojson_codes) - set(topologysafe_codes))
                diff_features = []
                for feature in geometrydata['features']:
                    feature_namecode = get_feature_name_code(feature['properties'], yearsuffix)
                    if feature_namecode['code'] in code_diff:
                        diff_features.append(feature)
                print("Number of polygons topojson failed on =", len(diff_features))
                for zoom in range(0, zoomrange + 1):
                    # Compute the degree resolution before the fallback loop: the
                    # previous version referenced zoomepsilon (and an undefined
                    # `code`) inside the except handlers, raising NameError.
                    zoomepsilon = get_degrees_per_pixel(zoom)
                    print("Creating identical polygons for all zoom levels for polygons topojson was not able to process", len(diff_features))
                    for feature in diff_features:
                        feature_namecode = get_feature_name_code(feature['properties'], yearsuffix)
                        try:
                            geometry = GEOSGeometry(str(feature['geometry']))
                            print("Saving geometry for", feature_namecode['code'], "zoom level", zoom)
                            geometryobject = Geometry(name=feature_namecode['name'], type=areatype, code=feature_namecode['code'], zoom=zoom, geometry=geometry)
                            geometryobject.save()
                        except Exception:
                            print("Failed to create geometry object - probably too small for zoom level", feature_namecode['code'], "zoom level", zoom, "degree resolution", zoomepsilon)
                    print("Simplifying", areafile, "for zoom level", zoom, "equivalent to degree resolution", zoomepsilon)
                    simplifiedfeatures = json.loads(geometrytopology.toposimplify(
                        epsilon=zoomepsilon,
                        simplify_algorithm='dp',
                        prevent_oversimplify=True
                    ).to_geojson())
                    for feature in simplifiedfeatures['features']:
                        feature_namecode = get_feature_name_code(feature['properties'], yearsuffix)
                        try:
                            geometry = GEOSGeometry(str(feature['geometry']))
                            print("Saving geometry for", feature_namecode['code'], "zoom level", zoom, "degree resolution", zoomepsilon)
                            geometryobject = Geometry(name=feature_namecode['name'], type=areatype, code=feature_namecode['code'], zoom=zoom, geometry=geometry)
                            geometryobject.save()
                        except Exception:
                            print("Failed to create geometry object - probably too small for zoom level", feature_namecode['code'], "zoom level", zoom, "degree resolution", zoomepsilon)
    processspecialcases()
def importdatabygeometrytype(geometrytype, year, datatype):
    """
    Import data for a specific geometry type and year

    Reads the BEIS consumption CSV at ``BEIS/{PREFIX}_{ELEC|GAS}_{year}.csv``,
    deletes any existing Data rows for this (geometrytype, year, datatype)
    combination, and saves one Data row per CSV record.
    """
    # datatype 1 selects gas; any other value selects electricity —
    # presumably matching DATATYPES_CHOICES ordering; TODO confirm.
    datatypecode = 'ELEC'
    if datatype == 1: datatypecode = 'GAS'
    geometry_prefix = 'LSOA'
    geometrycode_row = 'LSOACode'
    multiplier_meter = 1
    multiplier_value = 1
    if geometrytype == 'msoa':
        geometry_prefix = 'MSOA'
        geometrycode_row = 'MSOACode'
    if geometrytype == 'lau1':
        geometry_prefix = 'LAU1'
        geometrycode_row = 'LA Code'
        # LAU1 files appear to report meters in thousands and consumption in
        # GWh; scaled here to raw meter counts / kWh — TODO confirm units.
        multiplier_meter = 1000
        multiplier_value = 1000000
    filepath = 'BEIS/' + geometry_prefix + '_' + datatypecode + '_' + str(year) + '.csv'
    # Make the import idempotent: wipe any previous rows for this combination.
    Data.objects.filter(geometrytype=geometrytype, year=year, type=datatype).delete()
    count = 0
    if os.path.isfile(filepath):
        with open(filepath, 'r' ) as fileobj:
            reader = csv.DictReader(fileobj)
            for row in reader:
                count += 1
                print("Importing line", count, filepath)
                geometrycode = row[geometrycode_row].strip()
                if geometrytype == 'lau1':
                    # '..' and ' - ' mark unavailable values in the source data.
                    if row['Total consumption'] == '..': continue
                    if row['Total consumption'] == ' - ': continue
                    # Strip separators/noise from the numeric field before parsing.
                    value = float(non_decimal.sub("", row['Total consumption']))
                    meters = float(row['Total number of meters'])
                else:
                    value = float(row['KWH'])
                    meters = float(row['METERS'])
                meters = meters * multiplier_meter
                value = value * multiplier_value
                data = Data(
                    type=datatype,
                    year=str(year),
                    value=value,
                    meters=meters,
                    geometrycode=geometrycode,
                    geometrytype=geometrytype)
                data.save()
        print("Imported " + geometry_prefix + " for type " + str(datatype) + " for " + str(year))
    else:
        print(filepath, "not found")
def importdata(geometrytype, yearstart, yearend):
    """
    Import data for specify geometry type and year range

    Runs importdatabygeometrytype for every year in the inclusive range
    [yearstart, yearend] and for every configured data type.
    """
    first, last = int(yearstart), int(yearend)
    for year in range(first, last + 1):
        print("Importing data for year", year)
        for datatype_choice in DATATYPES_CHOICES:
            importdatabygeometrytype(geometrytype, year, datatype_choice[0])
def checkgeometries():
    """
    Check to see if any geometries corrupted

    Iterates every stored geometry (with its GeoJSON rendering) and runs it
    through JSON serialization; a corrupted row is expected to raise here.
    """
    allgeometries = Geometry.objects.all().annotate(json=AsGeoJSON('geometry')).values('name', 'code', 'zoom', 'type', 'json').order_by('code', 'type', 'zoom')
    for geometry in allgeometries:
        print("Checking geometry", geometry['code'], geometry['type'], geometry['zoom'])
        # NOTE(review): list(geometry) yields only the dict *keys*, so the
        # geometry JSON value itself never passes through dumps — confirm
        # whether json.dumps(geometry, ...) was intended.
        json_data = json.dumps(list(geometry), cls=DjangoJSONEncoder)
def renameduplicateshortcodes():
    """
    Runs custom piece of SQL to rename duplicate shortcodes in location table

    Any shortcode shared by more than one Location row gets the lowercased,
    space-stripped county name appended to disambiguate it.
    """
    cursor = connection.cursor()
    cursor.execute("""
    UPDATE backend_location
    SET shortcode = CONCAT(shortcode, REPLACE(LOWER(county), ' ', ''))
    WHERE shortcode IN
    (
        SELECT s.shortcode
        FROM
        (
            SELECT shortcode,COUNT(*) AS num
            FROM backend_location GROUP BY shortcode
        ) AS s
        WHERE s.num > 1
    );
    """, [])
    # NOTE(review): transaction.commit() raises under Django's default
    # autocommit mode — confirm this is called with autocommit disabled.
    transaction.commit()
def computescale(population):
    """
    Computes appropriate scale to show locations with specific population

    Accepts an int or a numeric string (empty string counts as zero) and maps
    it onto a map scale between 15 (smallest places) and 10 (largest cities).
    """
    # Upper-bound thresholds paired with the scale returned below them.
    bands = (
        (20000, 15),
        (40000, 14.5),
        (60000, 14),
        (80000, 13.5),
        (100000, 13),
        (200000, 12.5),
        (400000, 12),
        (600000, 11.5),
        (800000, 11),
        (1000000, 10.5),
    )
    count = 0 if population == '' else int(population)
    for upper_bound, scale in bands:
        if count < upper_bound:
            return scale
    return 10
def importlocations():
    """
    Imports location data from file that is used to geolocate specific locations

    Reads 'Towns_List_Extended.csv', wipes the Location table, and creates one
    Location per row with a lowercase no-space shortcode derived from the town
    name and a map scale derived from the population. Duplicate shortcodes are
    then disambiguated via renameduplicateshortcodes().
    """
    with open('Towns_List_Extended.csv') as csvfile:
        reader = csv.DictReader(csvfile)
        count = 0
        Location.objects.all().delete()
        for row in reader:
            # Shortcode is the town name, lowercased with spaces removed.
            shortcode = row['Town'].lower()
            shortcode = re.sub("[ ]", "", shortcode)
            scale = computescale(row['Population'])
            p = Location( shortcode=shortcode,
                town=row['Town'],
                county=row['County'],
                country=row['Country'],
                population=row['Population'],
                longitude=row['Longitude'],
                latitude=row['Latitude'],
                url=row['url'],
                scale=scale)
            p.save()
            count += 1
    renameduplicateshortcodes()
    print("Import locations finished, imported: " + str(count))
# Command-line dispatch: with no arguments print usage, otherwise run the
# requested batch operation (only reachable when executed as a script, since
# sys is imported under the __main__ guard above).
if len(sys.argv) == 1:
    print("""
****** Carbon Map Batch Processing *******
Possible arguments are:
checkgeometries
    Check whether any geometries are corrupted
importlocations
    Imports location data from file that is used to geolocate specific locations
generategeometries
    Generates multiple geometries of boundaries for multiple zoom levels using simplification
processspecialcases
    Perform additional ad-hoc processing
importdata [lsoa/msoa/lau1] [yearstart] [yearend]
    Imports data for specific area scale and year range (assuming BEIS data)
    Leaving off [yearend] will only import for [yearstart]
""")
else:
    primaryargument = sys.argv[1]
    if primaryargument == "checkgeometries":
        checkgeometries()
    if primaryargument == "importlocations":
        importlocations()
    if primaryargument == "generategeometries":
        generategeometries()
    if primaryargument == "processspecialcases":
        processspecialcases()
    if primaryargument == "importdata":
        # importdata needs at least a geometry type and a start year;
        # the end year defaults to the start year when omitted.
        if len(sys.argv) >= 4:
            yearstart = sys.argv[3]
            yearend = yearstart
            if len(sys.argv) == 5: yearend = sys.argv[4]
            geometrytype = sys.argv[2]
            importdata(geometrytype, yearstart, yearend)
        else:
            print("Not enough arguments provided for importdata. Format is importdata lsoa/msoa/lau1 yearstart yearend")
|
nilq/baby-python
|
python
|
"""This module contains definitions and data structures for 2-, 4-, and 8-valued logic operations.
8 logic values are defined as integer constants.
* For 2-valued logic: ``ZERO`` and ``ONE``
* 4-valued logic adds: ``UNASSIGNED`` and ``UNKNOWN``
* 8-valued logic adds: ``RISE``, ``FALL``, ``PPULSE``, and ``NPULSE``.
The bits in these constants have the following meaning:
* bit 0: Final/settled binary value of a signal
* bit 1: Initial binary value of a signal
* bit 2: Activity or transitions are present on a signal
Special meaning is given to values where bits 0 and 1 differ, but bit 2 (activity) is 0.
These values are interpreted as ``UNKNOWN`` or ``UNASSIGNED`` in 4-valued and 8-valued logic.
In general, 2-valued logic only considers bit 0, 4-valued logic considers bits 0 and 1, and 8-valued logic
considers all 3 bits.
The only exception is constant ``ONE=0b11`` which has two bits set for all logics including 2-valued logic.
"""
import math
from collections.abc import Iterable
import numpy as np
from . import numba, hr_bytes
ZERO = 0b000
"""Integer constant ``0b000`` for logic-0. ``'0'``, ``0``, ``False``, ``'L'``, and ``'l'`` are interpreted as ``ZERO``.
"""
UNKNOWN = 0b001
"""Integer constant ``0b001`` for unknown or conflict. ``'X'``, or any other value is interpreted as ``UNKNOWN``.
"""
UNASSIGNED = 0b010
"""Integer constant ``0b010`` for unassigned or high-impedance. ``'-'``, ``None``, ``'Z'``, and ``'z'`` are
interpreted as ``UNASSIGNED``.
"""
ONE = 0b011
"""Integer constant ``0b011`` for logic-1. ``'1'``, ``1``, ``True``, ``'H'``, and ``'h'`` are interpreted as ``ONE``.
"""
PPULSE = 0b100
"""Integer constant ``0b100`` for positive pulse, meaning initial and final values are 0, but there is some activity
on a signal. ``'P'``, ``'p'``, and ``'^'`` are interpreted as ``PPULSE``.
"""
RISE = 0b101
"""Integer constant ``0b110`` for a rising transition. ``'R'``, ``'r'``, and ``'/'`` are interpreted as ``RISE``.
"""
FALL = 0b110
"""Integer constant ``0b101`` for a falling transition. ``'F'``, ``'f'``, and ``'\\'`` are interpreted as ``FALL``.
"""
NPULSE = 0b111
"""Integer constant ``0b111`` for negative pulse, meaning initial and final values are 1, but there is some activity
on a signal. ``'N'``, ``'n'``, and ``'v'`` are interpreted as ``NPULSE``.
"""
def interpret(value):
    """Converts characters, strings, and lists of them to lists of logic constants defined above.

    :param value: A character (string of length 1), Boolean, Integer, None, or Iterable.
        Iterables (such as strings) are traversed and their individual characters are interpreted.
    :return: A logic constant or a (possibly multi-dimensional) list of logic constants.
    """
    # Recurse over any iterable except single-character strings.
    if isinstance(value, Iterable) and not (isinstance(value, str) and len(value) == 1):
        return list(map(interpret, value))
    # Alias tables, checked in order; anything unmatched falls through to UNKNOWN.
    alias_table = (
        (ZERO, (0, '0', False, 'L', 'l')),
        (ONE, (1, '1', True, 'H', 'h')),
        (UNASSIGNED, (None, '-', 'Z', 'z')),
        (RISE, ('R', 'r', '/')),
        (FALL, ('F', 'f', '\\')),
        (PPULSE, ('P', 'p', '^')),
        (NPULSE, ('N', 'n', 'v')),
    )
    for constant, aliases in alias_table:
        if value in aliases:
            return constant
    return UNKNOWN
# MSB-first masks for the 8 bit positions within one uint8 word.
_bit_in_lut = np.array([2 ** x for x in range(7, -1, -1)], dtype='uint8')


@numba.njit
def bit_in(a, pos):
    # Test bit number `pos` in the packed uint8 bit-array `a` (8 bits per
    # byte, most significant bit first); non-zero result means the bit is set.
    return a[pos >> 3] & _bit_in_lut[pos & 7]
class MVArray:
    """An n-dimensional array of m-valued logic values.

    This class wraps a numpy.ndarray of type uint8 and adds support for encoding and
    interpreting 2-valued, 4-valued, and 8-valued logic values.
    Each logic value is stored as an uint8, manipulations of individual values are cheaper than in
    :py:class:`BPArray`.

    :param a: If a tuple is given, it is interpreted as desired shape. To make an array of ``n`` vectors
        compatible with a simulator ``sim``, use ``(len(sim.interface), n)``. If a :py:class:`BPArray` or
        :py:class:`MVArray` is given, a deep copy is made. If a string, a list of strings, a list of characters,
        or a list of lists of characters are given, the data is interpreted best-effort and the array is
        initialized accordingly.
    :param m: The arity of the logic. Can be set to 2, 4, or 8. If None is given, the arity of a given
        :py:class:`BPArray` or :py:class:`MVArray` is used, or, if the array is initialized differently, 8 is used.
    """

    def __init__(self, a, m=None):
        self.m = m or 8
        assert self.m in [2, 4, 8]

        # Try our best to interpret given a.
        if isinstance(a, MVArray):
            self.data = a.data.copy()
            """The wrapped 2-dimensional ndarray of logic values.

            * Axis 0 is PI/PO/FF position, the length of this axis is called "width".
            * Axis 1 is vector/pattern, the length of this axis is called "length".
            """
            self.m = m or a.m
        elif hasattr(a, 'data'):  # assume it is a BPArray. Can't use isinstance() because BPArray isn't declared yet.
            # Unpack the bit-planes back into one uint8 per logic value,
            # most-significant plane first.
            self.data = np.zeros((a.width, a.length), dtype=np.uint8)
            self.m = m or a.m
            for i in range(a.data.shape[-2]):
                self.data[...] <<= 1
                self.data[...] |= np.unpackbits(a.data[..., -i-1, :], axis=1)[:, :a.length]
            if a.data.shape[-2] == 1:
                # Single bit-plane (2-valued source): map 0/1 onto ZERO/ONE (0b00/0b11).
                self.data *= 3
        elif isinstance(a, int):
            self.data = np.full((a, 1), UNASSIGNED, dtype=np.uint8)
        elif isinstance(a, tuple):
            self.data = np.full(a, UNASSIGNED, dtype=np.uint8)
        else:
            if isinstance(a, str): a = [a]
            self.data = np.asarray(interpret(a), dtype=np.uint8)
            self.data = self.data[:, np.newaxis] if self.data.ndim == 1 else np.moveaxis(self.data, -2, -1)

        # Cast data to m-valued logic.
        if self.m == 2:
            self.data[...] = ((self.data & 0b001) & ((self.data >> 1) & 0b001) | (self.data == RISE)) * ONE
        elif self.m == 4:
            self.data[...] = (self.data & 0b011) & ((self.data != FALL) * ONE) | ((self.data == RISE) * ONE)
        elif self.m == 8:
            self.data[...] = self.data & 0b111

        self.length = self.data.shape[-1]
        self.width = self.data.shape[-2]

    def __repr__(self):
        return f'<MVArray length={self.length} width={self.width} m={self.m} mem={hr_bytes(self.data.nbytes)}>'

    def __str__(self):
        return str([self[idx] for idx in range(self.length)])

    def __getitem__(self, vector_idx):
        """Returns a string representing the desired vector."""
        chars = ["0", "X", "-", "1", "P", "R", "F", "N"]
        return ''.join(chars[v] for v in self.data[:, vector_idx])

    def __len__(self):
        return self.length
def mv_cast(*args, m=8):
    """Cast each argument to an :py:class:`MVArray` of arity ``m``; existing MVArrays pass through unchanged."""
    casted = []
    for a in args:
        casted.append(a if isinstance(a, MVArray) else MVArray(a, m=m))
    return casted
def mv_getm(*args):
    """Return the largest arity ``m`` among the given MVArray arguments, defaulting to 8 when none is present."""
    arities = [a.m for a in args if isinstance(a, MVArray)]
    return max(arities, default=0) or 8
def _mv_not(m, out, inp):
    """In-place m-valued NOT on raw uint8 arrays."""
    # Flipping the two value bits inverts the logic value; as a side effect it
    # also swaps UNASSIGNED <-> UNKNOWN.
    np.bitwise_xor(inp, 0b11, out=out)
    if m > 2:
        # Pin positions that were UNKNOWN back to UNKNOWN.
        np.putmask(out, inp == UNKNOWN, UNKNOWN)
def mv_not(x1, out=None):
    """A multi-valued NOT operator.

    :param x1: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
    :param out: Optionally an :py:class:`MVArray` as storage destination. If None, a new :py:class:`MVArray`
        is returned.
    :return: An :py:class:`MVArray` with the result.
    """
    arity = mv_getm(x1)
    (operand,) = mv_cast(x1, m=arity)
    out = out or MVArray(operand.data.shape, m=arity)
    _mv_not(arity, out.data, operand.data)
    return out
def _mv_or(m, out, *ins):
    # In-place m-valued OR on raw uint8 arrays.
    if m > 2:
        # Positions where any input is UNKNOWN or UNASSIGNED.
        any_unknown = (ins[0] == UNKNOWN) | (ins[0] == UNASSIGNED)
        for inp in ins[1:]: any_unknown |= (inp == UNKNOWN) | (inp == UNASSIGNED)
        # Positions where any input is a controlling ONE.
        any_one = (ins[0] == ONE)
        for inp in ins[1:]: any_one |= (inp == ONE)

        out[...] = ZERO
        # A ONE input dominates the OR regardless of the other inputs.
        np.putmask(out, any_one, ONE)
        for inp in ins:
            np.bitwise_or(out, inp, out=out, where=~any_one)
        # Unknown/unassigned inputs force UNKNOWN unless a ONE already decided the result.
        np.putmask(out, (any_unknown & ~any_one), UNKNOWN)
    else:
        # 2-valued logic: plain bitwise OR.
        out[...] = ZERO
        for inp in ins: np.bitwise_or(out, inp, out=out)
def mv_or(x1, x2, out=None):
    """A multi-valued OR operator.

    :param x1: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
    :param x2: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
    :param out: Optionally an :py:class:`MVArray` as storage destination. If None, a new :py:class:`MVArray`
        is returned.
    :return: An :py:class:`MVArray` with the result.
    """
    arity = mv_getm(x1, x2)
    a, b = mv_cast(x1, x2, m=arity)
    out = out or MVArray(np.broadcast(a.data, b.data).shape, m=arity)
    _mv_or(arity, out.data, a.data, b.data)
    return out
def _mv_and(m, out, *ins):
    # In-place m-valued AND on raw uint8 arrays.
    if m > 2:
        # Positions where any input is UNKNOWN or UNASSIGNED.
        any_unknown = (ins[0] == UNKNOWN) | (ins[0] == UNASSIGNED)
        for inp in ins[1:]: any_unknown |= (inp == UNKNOWN) | (inp == UNASSIGNED)
        # Positions where any input is a controlling ZERO.
        any_zero = (ins[0] == ZERO)
        for inp in ins[1:]: any_zero |= (inp == ZERO)

        out[...] = ONE
        # A ZERO input dominates the AND regardless of the other inputs.
        np.putmask(out, any_zero, ZERO)
        for inp in ins:
            # OR-ing 0b100 into the operand keeps the value bits while ignoring
            # the operand's activity bit during the AND of the value bits.
            np.bitwise_and(out, inp | 0b100, out=out, where=~any_zero)
            # 8-valued: accumulate the activity bit separately.
            if m > 4: np.bitwise_or(out, inp & 0b100, out=out, where=~any_zero)
        # Unknown/unassigned inputs force UNKNOWN unless a ZERO already decided the result.
        np.putmask(out, (any_unknown & ~any_zero), UNKNOWN)
    else:
        # 2-valued logic: plain bitwise AND.
        out[...] = ONE
        for inp in ins: np.bitwise_and(out, inp, out=out)
def mv_and(x1, x2, out=None):
    """A multi-valued AND operator.

    :param x1: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
    :param x2: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
    :param out: Optionally an :py:class:`MVArray` as storage destination. If None, a new :py:class:`MVArray`
        is returned.
    :return: An :py:class:`MVArray` with the result.
    """
    arity = mv_getm(x1, x2)
    a, b = mv_cast(x1, x2, m=arity)
    out = out or MVArray(np.broadcast(a.data, b.data).shape, m=arity)
    _mv_and(arity, out.data, a.data, b.data)
    return out
def _mv_xor(m, out, *ins):
    # In-place m-valued XOR on raw uint8 arrays.
    if m > 2:
        # Positions where any input is UNKNOWN or UNASSIGNED.
        any_unknown = (ins[0] == UNKNOWN) | (ins[0] == UNASSIGNED)
        for inp in ins[1:]: any_unknown |= (inp == UNKNOWN) | (inp == UNASSIGNED)

        out[...] = ZERO
        for inp in ins:
            # XOR the two value bits...
            np.bitwise_xor(out, inp & 0b011, out=out)
            # ...and, for 8-valued logic, accumulate the activity bit.
            if m > 4: np.bitwise_or(out, inp & 0b100, out=out)
        # Any unknown/unassigned input makes the XOR result UNKNOWN.
        np.putmask(out, any_unknown, UNKNOWN)
    else:
        # 2-valued logic: plain bitwise XOR.
        out[...] = ZERO
        for inp in ins: np.bitwise_xor(out, inp, out=out)
def mv_xor(x1, x2, out=None):
    """A multi-valued XOR operator.

    :param x1: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
    :param x2: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
    :param out: Optionally an :py:class:`MVArray` as storage destination. If None, a new :py:class:`MVArray`
        is returned.
    :return: An :py:class:`MVArray` with the result.
    """
    arity = mv_getm(x1, x2)
    a, b = mv_cast(x1, x2, m=arity)
    out = out or MVArray(np.broadcast(a.data, b.data).shape, m=arity)
    _mv_xor(arity, out.data, a.data, b.data)
    return out
def mv_transition(init, final, out=None):
    """Computes the logic transitions from the initial values of ``init`` to the final values of ``final``.

    Pulses in the input data are ignored. If any of the inputs are ``UNKNOWN``, the result is ``UNKNOWN``.
    If both inputs are ``UNASSIGNED``, the result is ``UNASSIGNED``.

    :param init: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
    :param final: An :py:class:`MVArray` or data the :py:class:`MVArray` constructor accepts.
    :param out: Optionally an :py:class:`MVArray` as storage destination. If None, a new :py:class:`MVArray`
        is returned.
    :return: An :py:class:`MVArray` with the result.
    """
    m = mv_getm(init, final)
    init, final = mv_cast(init, final, m=m)
    init = init.data
    final = final.data
    # Result is always 8-valued: it must be able to encode RISE/FALL.
    out = out or MVArray(np.broadcast(init, final).shape, m=8)
    # bit 1 <- initial value, bit 0 <- final value.
    out.data[...] = (init & 0b010) | (final & 0b001)
    # Set the activity bit (bit 2) where initial and final values differ.
    out.data[...] |= ((out.data << 1) ^ (out.data << 2)) & 0b100
    unknown = (init == UNKNOWN) | (init == UNASSIGNED) | (final == UNKNOWN) | (final == UNASSIGNED)
    unassigned = (init == UNASSIGNED) & (final == UNASSIGNED)
    np.putmask(out.data, unknown, UNKNOWN)
    np.putmask(out.data, unassigned, UNASSIGNED)
    return out
class BPArray:
    """An n-dimensional array of m-valued logic values that uses bit-parallel storage.

    The primary use of this format is in aiding efficient bit-parallel logic simulation.
    The secondary benefit over :py:class:`MVArray` is its memory efficiency.
    Accessing individual values is more expensive than with :py:class:`MVArray`.
    Therefore it may be more efficient to unpack the data into an :py:class:`MVArray` and pack it again into a
    :py:class:`BPArray` for simulation.

    See :py:class:`MVArray` for constructor parameters.
    """

    def __init__(self, a, m=None):
        # Anything that is not already an MVArray/BPArray is interpreted first.
        if not isinstance(a, MVArray) and not isinstance(a, BPArray):
            a = MVArray(a, m)
            self.m = a.m
        if isinstance(a, MVArray):
            if m is not None and m != a.m:
                a = MVArray(a, m)  # cast data
            self.m = a.m
            assert self.m in [2, 4, 8]
            # One bit-plane per bit of the logic value: 1 for m=2, 2 for m=4, 3 for m=8.
            nwords = math.ceil(math.log2(self.m))
            nbytes = (a.data.shape[-1] - 1) // 8 + 1
            self.data = np.zeros(a.data.shape[:-1] + (nwords, nbytes), dtype=np.uint8)
            """The wrapped 3-dimensional ndarray.

            * Axis 0 is PI/PO/FF position, the length of this axis is called "width".
            * Axis 1 has length ``ceil(log2(m))`` for storing all bits.
            * Axis 2 are the vectors/patterns packed into uint8 words.
            """
            # Pack bit i of every value into bit-plane i.
            for i in range(self.data.shape[-2]):
                self.data[..., i, :] = np.packbits((a.data >> i) & 1, axis=-1)
        else:  # we have a BPArray
            self.data = a.data.copy()  # TODO: support conversion to different m
            self.m = a.m
        self.length = a.length
        self.width = a.width

    def __repr__(self):
        return f'<BPArray length={self.length} width={self.width} m={self.m} mem={hr_bytes(self.data.nbytes)}>'

    def __len__(self):
        return self.length
def bp_buf(out, inp):
    """Bit-parallel m-valued buffer: copy `inp` into `out`, normalising X.

    Arrays are uint8-packed with bit-planes on axis -2. Positions whose
    plane-0/plane-1 bits disagree (and, in 8-valued mode, whose plane-2 bit
    is clear) are rewritten to the canonical UNKNOWN encoding
    (plane0=1, plane1=0).
    """
    depth = out.shape[-2]
    assert depth == inp.shape[-2]
    if depth == 1:
        # Plain 2-valued logic: straight copy.
        out[..., 0, :] = inp[..., 0, :]
        return
    lo = inp[..., 0, :]
    hi = inp[..., 1, :]
    unk = lo ^ hi
    if depth > 2:
        # UNASSIGNED positions (plane 2 set) are not treated as unknown.
        unk &= ~inp[..., 2, :]
    out[..., 0, :] = lo | unk
    out[..., 1, :] = hi & ~unk
    if depth > 2:
        out[..., 2, :] = inp[..., 2, :] & ~unk
def bp_not(out, inp):
    """Bit-parallel m-valued NOT of `inp` into `out`.

    Known values are inverted; UNKNOWN positions (plane 0 != plane 1, and in
    8-valued mode plane 2 clear) stay UNKNOWN in the canonical encoding.
    """
    depth = out.shape[-2]
    assert depth == inp.shape[-2]
    if depth == 1:
        # Plain 2-valued logic: bitwise inversion.
        out[..., 0, :] = ~inp[..., 0, :]
        return
    unk = inp[..., 0, :] ^ inp[..., 1, :]
    if depth > 2:
        # UNASSIGNED positions (plane 2 set) are not treated as unknown.
        unk &= ~inp[..., 2, :]
    out[..., 0, :] = ~inp[..., 0, :] | unk
    out[..., 1, :] = ~inp[..., 1, :] & ~unk
    if depth > 2:
        out[..., 2, :] = inp[..., 2, :] & ~unk
def bp_or(out, *ins):
    """Bit-parallel m-valued OR of the arrays in `ins`, accumulated into `out`.

    All arrays are uint8-packed with bit-planes on axis -2 (see BPArray):
    disagreeing plane-0/plane-1 bits encode UNKNOWN; in 8-valued mode a set
    plane-2 bit marks UNASSIGNED and excludes the position from X handling.
    OR semantics: a definite 1 on any input dominates any UNKNOWN input.
    """
    md = out.shape[-2]
    for inp in ins: assert md == inp.shape[-2]
    # Start from all-zeros and accumulate plane-by-plane.
    out[...] = 0
    if md == 1:
        # Plain 2-valued logic: bitwise OR.
        for inp in ins: out[..., 0, :] |= inp[..., 0, :]
    elif md == 2:
        # Position is unknown if any input has disagreeing planes...
        any_unknown = ins[0][..., 0, :] ^ ins[0][..., 1, :]
        for inp in ins[1:]: any_unknown |= inp[..., 0, :] ^ inp[..., 1, :]
        # ...but a definite 1 (both planes set) on any input forces the result to 1.
        any_one = ins[0][..., 0, :] & ins[0][..., 1, :]
        for inp in ins[1:]: any_one |= inp[..., 0, :] & inp[..., 1, :]
        for inp in ins:
            out[..., 0, :] |= inp[..., 0, :] | any_unknown
            out[..., 1, :] |= inp[..., 1, :] & (~any_unknown | any_one)
    else:
        # 8-valued: mask out UNASSIGNED positions (plane 2 set) when detecting X / 1.
        any_unknown = (ins[0][..., 0, :] ^ ins[0][..., 1, :]) & ~ins[0][..., 2, :]
        for inp in ins[1:]: any_unknown |= (inp[..., 0, :] ^ inp[..., 1, :]) & ~inp[..., 2, :]
        any_one = ins[0][..., 0, :] & ins[0][..., 1, :] & ~ins[0][..., 2, :]
        for inp in ins[1:]: any_one |= inp[..., 0, :] & inp[..., 1, :] & ~inp[..., 2, :]
        for inp in ins:
            out[..., 0, :] |= inp[..., 0, :] | any_unknown
            out[..., 1, :] |= inp[..., 1, :] & (~any_unknown | any_one)
            # Plane 2 survives only where the result is neither unknown nor a forced 1.
            out[..., 2, :] |= inp[..., 2, :] & (~any_unknown | any_one) & ~any_one
def bp_and(out, *ins):
    """Bit-parallel m-valued AND of the arrays in `ins`, accumulated into `out`.

    Same packed encoding as :func:`bp_or`. AND semantics: a definite 0 on any
    input dominates any UNKNOWN input.
    """
    md = out.shape[-2]
    for inp in ins: assert md == inp.shape[-2]
    # Start from all-ones and accumulate plane-by-plane.
    out[...] = 0xff
    if md == 1:
        # Plain 2-valued logic: bitwise AND.
        for inp in ins: out[..., 0, :] &= inp[..., 0, :]
    elif md == 2:
        # Position is unknown if any input has disagreeing planes...
        any_unknown = ins[0][..., 0, :] ^ ins[0][..., 1, :]
        for inp in ins[1:]: any_unknown |= inp[..., 0, :] ^ inp[..., 1, :]
        # ...but a definite 0 (both planes clear) on any input forces the result to 0.
        any_zero = ~ins[0][..., 0, :] & ~ins[0][..., 1, :]
        for inp in ins[1:]: any_zero |= ~inp[..., 0, :] & ~inp[..., 1, :]
        for inp in ins:
            out[..., 0, :] &= inp[..., 0, :] | (any_unknown & ~any_zero)
            out[..., 1, :] &= inp[..., 1, :] & ~any_unknown
    else:
        # 8-valued: mask out UNASSIGNED positions (plane 2 set) when detecting X / 0.
        any_unknown = (ins[0][..., 0, :] ^ ins[0][..., 1, :]) & ~ins[0][..., 2, :]
        for inp in ins[1:]: any_unknown |= (inp[..., 0, :] ^ inp[..., 1, :]) & ~inp[..., 2, :]
        any_zero = ~ins[0][..., 0, :] & ~ins[0][..., 1, :] & ~ins[0][..., 2, :]
        for inp in ins[1:]: any_zero |= ~inp[..., 0, :] & ~inp[..., 1, :] & ~inp[..., 2, :]
        # Plane 2 starts cleared because out was initialised to 0xff above.
        out[..., 2, :] = 0
        for inp in ins:
            out[..., 0, :] &= inp[..., 0, :] | (any_unknown & ~any_zero)
            out[..., 1, :] &= inp[..., 1, :] & ~any_unknown
            # Plane 2 survives only where the result is neither unknown nor a forced 0.
            out[..., 2, :] |= inp[..., 2, :] & (~any_unknown | any_zero) & ~any_zero
def bp_xor(out, *ins):
    """Bit-parallel m-valued XOR of the arrays in `ins`, accumulated into `out`.

    Same packed encoding as :func:`bp_or`. XOR has no controlling value, so
    any UNKNOWN input makes the result UNKNOWN.
    """
    md = out.shape[-2]
    for inp in ins: assert md == inp.shape[-2]
    # Start from all-zeros and accumulate plane-by-plane.
    out[...] = 0
    if md == 1:
        # Plain 2-valued logic: bitwise XOR.
        for inp in ins: out[..., 0, :] ^= inp[..., 0, :]
    elif md == 2:
        # Position is unknown if any input has disagreeing planes.
        any_unknown = ins[0][..., 0, :] ^ ins[0][..., 1, :]
        for inp in ins[1:]: any_unknown |= inp[..., 0, :] ^ inp[..., 1, :]
        for inp in ins: out[...] ^= inp
        # Force unknown positions to the canonical X encoding (plane0=1, plane1=0).
        out[..., 0, :] |= any_unknown
        out[..., 1, :] &= ~any_unknown
    else:
        # 8-valued: mask out UNASSIGNED positions (plane 2 set) when detecting X.
        any_unknown = (ins[0][..., 0, :] ^ ins[0][..., 1, :]) & ~ins[0][..., 2, :]
        for inp in ins[1:]: any_unknown |= (inp[..., 0, :] ^ inp[..., 1, :]) & ~inp[..., 2, :]
        for inp in ins:
            out[..., 0, :] ^= inp[..., 0, :]
            out[..., 1, :] ^= inp[..., 1, :]
            # Any UNASSIGNED input marks the position's plane 2.
            out[..., 2, :] |= inp[..., 2, :]
        # Unknown dominates: force X encoding and clear the UNASSIGNED marker.
        out[..., 0, :] |= any_unknown
        out[..., 1, :] &= ~any_unknown
        out[..., 2, :] &= ~any_unknown
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 14:46:51 2020
@author: gabriel
"""
#%% MATLAB Code to reproduce
# function thresh_calc(nbeg,nend,n_noise,sig_fact,necdf_flag,nlbound)
# % Calculate the threshold using one of two methods
# % P=mean|W| + c sigma
# % P is computed from the empirical cdf of the noise signal at some
# % desired confidence level
# %
# % Experimental version for bc v1.1, March 6, 2019
# %
# global Wx as Wx_old Wx_new as_old as_new t na n clim_orig
# global M S P
# if necdf_flag == 1
# % Compute empirical cdf statistics and noise threshold
# [nrow,ncol]=size(Wx_new)
# conf=1.0 - nlbound.*0.01;
# % For each row in the matrix
# for k=1:nrow
# W(1:n_noise)=abs(Wx_new(k,nbeg:nend))';
# [f,x]=ecdf(W);
# % % plot every 10th cdf
# % kmod=floor(k/10);
# % if k == 1 || k == kmod.*10
# % scale=as_new(k);
# % str_scale=num2str(scale);
# % figure;
# % plot(x,f);
# % tdum=strcat('ECDF for k=',num2str(k),' scale=',str_scale);
# % title(tdum);
# % xlabel('Data Value');
# % ylabel('Probability');
# % end
# P(k)=interp1(f,x,conf);
# end
# M=mean(abs(Wx_new(:,nbeg:nend)'));
# % P=P';
# % plot the results in a figure
# figure('Name','ECDF Threshold');
# hold on
# aslg=log10(as_new);
# % length(M)
# % length(P)
# % length(aslg)
# plot(aslg,M,'-k');
# plot(aslg,P,'-r');
# hold off
# xlabel('log10 Scale (s)');
# ylabel('Coefficient Amplitude');
# legend('mean','threshold');
# tdum=strcat(num2str(conf.*100),'% Confidence Level');
# title(tdum);
# else
# % Compute Gaussian noise statistics and noise threshold
# M=mean(abs(Wx_new(:,nbeg:nend)'));
# S=std(abs(Wx_new(:,nbeg:nend)'));
# P=M + sig_fact.*S;
# Ekur=sqrt(.9).*(kurtosis(abs(Wx_new(:,nbeg:nend)'))-3.0)./sqrt(24.0./n_noise);
# % plot the results in a figure
# % changed 2/19/19 to show the Threshold defined by sig_fact
# figure('Name','Noise Mean and Threshold');
# hold on
# aslg=log10(as_new);
# plot(aslg,M,'-k');
# plot(aslg,P,'-r');
# hold off
# xlabel('log10 Scale (s)');
# ylabel('Coefficient Amplitude');
# legend('mean','threshold');
# % plot the Excess kurtosis statistic in a figure
# figure('Name','Noise Estimate Excess Kurtosis');
# aslg=log10(as_new);
# naslg=length(aslg);
# hold on
# plot(aslg,Ekur,'-k');
# plot([aslg(1) aslg(naslg)],[1.0 1.0],'-k');
# plot([aslg(1) aslg(naslg)],[-1.0 -1.0],'-k');
# hold off
# xlabel('log10 Scale (s)');
# ylabel('Non-Gaussianity');
# axis([-2.5 2.5 -50 50]);
# grid on
# end
# end
#%% Python Implementation
def threshold_calc(nbeg,nend,n_noise,sig_fact,necdf_flag,nlbound):
    """Compute a per-scale noise threshold for wavelet coefficients.

    Intended Python port of the MATLAB ``thresh_calc`` routine reproduced in
    the comment block above: with ``necdf_flag == 1`` the threshold is read
    off the empirical CDF of the noise window (columns ``nbeg:nend``) at
    confidence ``1 - nlbound/100``; otherwise it is the Gaussian estimate
    ``P = mean|W| + sig_fact * std|W|``.

    TODO: not yet implemented — currently a stub that returns None.
    """
    return
|
nilq/baby-python
|
python
|
from torch.utils import data
import yaml
from argparse import ArgumentParser
from typing import Any, List, Tuple
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.distributions as dist
from torch.utils.data import DataLoader, RandomSampler
from torchvision import transforms as transform_lib
from torchvision.datasets import MNIST, ImageFolder, SVHN
from infogan.components import InfoGANGenerator, InfoGANDiscriminator, QNetwork
from infogan.utils import TensorboardGenerativeModelImageSampler, CodeLatentDimInterpolator
class InfoGAN(pl.LightningModule):
    """
    InfoGAN implementation (Chen et al., 2016, https://arxiv.org/abs/1606.03657).

    The generator input is split into unstructured noise ``z`` and latent
    codes ``c`` (one-hot categorical and/or continuous Gaussian). A Q-network
    on top of the discriminator's features predicts ``c`` from generated
    images; the resulting mutual-information term, weighted by
    ``lambda_coeff``, is added to both the generator and discriminator
    objectives. Optimisation is manual (``automatic_optimization = False``):
    each ``training_step`` runs ``num_gen_opts`` generator updates followed
    by one joint discriminator/Q-network update.
    """

    def __init__(self,
                 lambda_coeff: float = 1,
                 betas_opt: List[float] = [0.5, 0.999],
                 feature_maps_gen: List = [64, ],
                 feature_maps_disc: List = [64, 128],
                 fc_layers_gen: List = [1024, ],
                 fc_layers_disc: List = [1024, ],
                 img_channels: int = 1,
                 noise_dim: int = 62,
                 categ_code_dims: list = [10, ],
                 cont_code_dim: int = 2,
                 conv_start_shape_gen: List = [128, 7, 7],
                 conv_end_shape_disc: List = [128, 5, 5],
                 hinge_loss: bool = False,
                 learning_rate_gen: float = 1e-3,
                 learning_rate_disc: float = 2e-4,
                 num_gen_opts: int = 1,
                 **kwargs: Any,
                 ) -> None:
        """
        Args:
            lambda_coeff - Weight of the MI term in the objective fn
            betas_opt - Beta values for Adam optimizer
            feature_maps_gen - Feature map size for each deconv layer in the generator
            feature_maps_disc - Feature map size for each conv layer in the discriminator
            fc_layers_gen - Fully connected layer dimensions prior to deconv blocks in the generator
            fc_layers_disc - Fully connected layer dimensions after the conv blocks in the discriminator
            img_channels - Number of channels in the image
            noise_dim - Dimension of random noise variables
            categ_code_dims - A list with the number of categories in each categorical distribution of the
                              latent code variable
            cont_code_dim - Number of variables in the gaussian code
            conv_start_shape_gen - Shape of the input to the deconv block in the generator (for batch size=1)
            conv_end_shape_disc - Shape of the output from the conv layers in the discriminator (for batch size=1)
            hinge_loss - Use Hinge loss instead of the standard GAN loss
            learning_rate_gen - learning rate for the generator
            learning_rate_disc - learning rate for the discriminator
            num_gen_opts - Number of generator updates per training step
        """
        super().__init__()
        self.save_hyperparameters()
        # Generator and discriminator are stepped explicitly in training_step.
        self.automatic_optimization = False
        self.generator = InfoGANGenerator(noise_dim,
                                          sum(categ_code_dims) + cont_code_dim,
                                          feature_maps_gen,
                                          img_channels,
                                          conv_start_shape=conv_start_shape_gen,
                                          fc_layers=fc_layers_gen)
        self.generator.apply(self._initialize_weights)
        self.discriminator = InfoGANDiscriminator(sum(categ_code_dims),
                                                  cont_code_dim,
                                                  feature_maps_disc,
                                                  img_channels,
                                                  conv_end_shape_disc,
                                                  fc_layers=fc_layers_disc,
                                                  sn=self.hparams.hinge_loss)
        # Q-network shares the discriminator's base features.
        self.q_network = QNetwork(self.discriminator.base_feat_dim,
                                  sum(categ_code_dims),
                                  cont_code_dim)
        self.q_network.apply(self._initialize_weights)
        if not hinge_loss:
            # Custom init only for the standard loss; with hinge loss the
            # discriminator keeps its own initialisation (sn=True path).
            self.discriminator.apply(self._initialize_weights)
        self._initialize_samplers()
        if not hinge_loss:
            self.adverserial_loss = nn.BCEWithLogitsLoss()
        if len(categ_code_dims) > 0:
            self.categorical_loss = nn.CrossEntropyLoss()
        if cont_code_dim > 0:
            self.gaussian_loss = nn.MSELoss()

    def _initialize_samplers(self) -> None:
        """Create the prior distributions the latent codes are sampled from."""
        if len(self.hparams.categ_code_dims) > 0:
            # Uniform categorical prior per code, sampled as one-hot vectors.
            self.categ_dists = [dist.OneHotCategorical(logits=torch.ones(c_dim))
                                for c_dim in self.hparams.categ_code_dims]
        if self.hparams.cont_code_dim > 0:
            # Standard normal prior for the continuous code.
            self._normal_dist = dist.MultivariateNormal(torch.zeros(self.hparams.cont_code_dim),
                                                        torch.eye(self.hparams.cont_code_dim))

    @staticmethod
    def _initialize_weights(module) -> None:
        """DCGAN-style weight initialisation for conv/linear/batchnorm layers."""
        classname = module.__class__.__name__
        if classname.find("Conv") != -1 or classname.find("Linear") != -1:
            torch.nn.init.normal_(module.weight, 0.0, 0.02)
            if module.bias is not None:
                torch.nn.init.zeros_(module.bias)
        elif classname.find("BatchNorm") != -1:
            torch.nn.init.normal_(module.weight, 1.0, 0.02)
            torch.nn.init.zeros_(module.bias)

    def configure_optimizers(self) -> Tuple[List[optim.Optimizer], List]:
        """One Adam optimizer for the generator, one shared by D and Q."""
        gen_opt = optim.Adam(self.generator.parameters(),
                             lr=self.hparams.learning_rate_gen,
                             betas=self.hparams.betas_opt)
        disc_opt = optim.Adam([*self.discriminator.parameters(), *self.q_network.parameters()],
                              lr=self.hparams.learning_rate_disc,
                              betas=self.hparams.betas_opt)
        return ([gen_opt, disc_opt], [])

    def forward(self, z: torch.Tensor, c: torch.Tensor) -> torch.Tensor:
        """
        Generates an image

        Args:
            z - Noise vector
            c - Latent code
        """
        return self.generator(z, c)

    def training_step(self,
                      batch: Tuple,
                      batch_idx: int) -> torch.Tensor:
        """Manual optimisation: num_gen_opts generator steps, then one D/Q step."""
        gen_opt, disc_opt = self.optimizers(use_pl_optimizer=True)
        real_img, _ = batch
        gen_loss = torch.zeros([], device=self.device)
        for i in range(self.hparams.num_gen_opts):
            gen_loss_iter = self._get_gen_loss(len(real_img))
            gen_opt.zero_grad()
            self.manual_backward(gen_loss_iter)
            gen_opt.step()
            gen_loss += gen_loss_iter
        gen_loss /= self.hparams.num_gen_opts
        self.log("gen_train/loss", gen_loss, on_epoch=True)
        disc_loss = self._get_disc_loss(real_img)
        disc_opt.zero_grad()
        self.manual_backward(disc_loss)
        disc_opt.step()
        self.log("disc_train/loss", disc_loss, on_epoch=True)
        return {"gen_loss": gen_loss.detach(), "disc_loss": disc_loss.detach()}

    def sample_noise(self, batch_size: int) -> torch.Tensor:
        """Sample a batch of noise vectors z ~ N(0, I)."""
        return torch.randn((batch_size, self.hparams.noise_dim), device=self.device)

    def sample_code(self, batch_size: int) -> torch.Tensor:
        """Sample a batch of latent codes: concatenated one-hot + Gaussian parts."""
        cat_code = cont_code = None
        if len(self.hparams.categ_code_dims) > 0:
            cat_codes = [categ_dist.sample([batch_size])
                         for categ_dist in self.categ_dists]
            cat_code = torch.cat(cat_codes, dim=-1)
        if self.hparams.cont_code_dim > 0:
            cont_code = self._normal_dist.sample([batch_size])
        return torch.cat([code for code in [cat_code, cont_code]
                          if code is not None], dim=-1).to(self.device)

    def _get_latents(self, batch_size: int) -> Tuple[torch.Tensor, torch.Tensor]:
        """Sample a matched batch of noise and latent codes."""
        z = self.sample_noise(batch_size)
        c = self.sample_code(batch_size)
        return z, c

    def _compute_mi_loss(self, q_pred, c: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Mutual-information terms shared by generator and discriminator losses.

        Previously this block was duplicated verbatim in _get_gen_loss and
        _get_disc_loss; it is factored out here.

        Args:
            q_pred - Q-network output dict ('categ', 'gauss_mean', 'gauss_std')
            c - The latent codes the fake batch was generated from

        Returns:
            (categorical MI loss, gaussian MI loss), each a scalar tensor
            (zero when the corresponding code type is disabled).
        """
        q_categ_loss, q_gauss_loss = (torch.zeros([], device=self.device),
                                      torch.zeros([], device=self.device))
        start_dim = 0
        if len(self.hparams.categ_code_dims) > 0:
            # One cross-entropy term per categorical code, offset against the
            # (constant) entropy of the uniform prior, averaged over codes.
            for c_dim in self.hparams.categ_code_dims:
                end_dim = start_dim + c_dim
                categ_posterior = self.categorical_loss(q_pred['categ'][:, start_dim:end_dim],
                                                        c[:, start_dim:end_dim].argmax(dim=-1))
                categ_prior = - \
                    torch.log(torch.ones_like(categ_posterior) / c_dim)
                q_categ_loss -= (categ_prior - categ_posterior)
                start_dim += c_dim
            q_categ_loss = q_categ_loss / len(self.hparams.categ_code_dims)
        # After the loop, start_dim marks where the continuous code begins.
        if self.hparams.cont_code_dim > 0:
            q_gauss = dist.Independent(dist.Normal(
                q_pred['gauss_mean'], q_pred['gauss_std']), reinterpreted_batch_ndims=1)
            q_gauss_loss = self.gaussian_loss(
                q_gauss.rsample(), c[:, start_dim:])
        return q_categ_loss, q_gauss_loss

    def _get_gen_loss(self, batch_size: int) -> torch.Tensor:
        """Generator loss: adversarial term plus weighted MI terms."""
        # Adversarial loss on a freshly generated batch.
        z, c = self._get_latents(batch_size)
        fake_img = self.generator(z, c)
        fake_pred, disc_latents = self.discriminator(fake_img)
        # No detach here: MI gradients flow back into the generator.
        q_pred = self.q_network(disc_latents)
        if self.hparams.hinge_loss:
            adv_loss = -fake_pred.mean()
        else:
            target = torch.ones_like(fake_pred)
            adv_loss = self.adverserial_loss(fake_pred, target)
        q_categ_loss, q_gauss_loss = self._compute_mi_loss(q_pred, c)
        mi_loss = q_categ_loss + q_gauss_loss
        self.log("gen_train/adv_loss", adv_loss, on_epoch=False)
        self.log("gen_train/categ_info_loss", q_categ_loss, on_epoch=False)
        self.log("gen_train/gauss_info_loss", q_gauss_loss, on_epoch=False)
        return adv_loss + self.hparams.lambda_coeff * mi_loss

    def _get_disc_loss(self, real_img: torch.Tensor) -> torch.Tensor:
        """Discriminator + Q-network loss: real/fake adversarial terms plus MI terms."""
        # Adversarial loss from real images.
        real_pred = self.discriminator(real_img, need_base_feat=False)
        if self.hparams.hinge_loss:
            real_loss = F.relu(1 - real_pred).mean()
        else:
            real_target = torch.ones_like(real_pred)
            real_loss = self.adverserial_loss(real_pred, real_target)
        # Adversarial loss from fake images.
        z, c = self._get_latents(len(real_img))
        fake_img = self.generator(z, c)
        fake_pred, disc_latents = self.discriminator(fake_img)
        # Detach so the MI term only updates D/Q parameters here.
        q_pred = self.q_network(disc_latents.detach())
        if self.hparams.hinge_loss:
            fake_loss = F.relu(1 + fake_pred).mean()
        else:
            fake_target = torch.zeros_like(fake_pred)
            fake_loss = self.adverserial_loss(fake_pred, fake_target)
        adv_loss = real_loss + fake_loss
        q_categ_loss, q_gauss_loss = self._compute_mi_loss(q_pred, c)
        mi_loss = q_categ_loss + q_gauss_loss
        self.log("disc_train/adv_loss", adv_loss, on_epoch=False)
        self.log("disc_train/categ_info_loss", q_categ_loss, on_epoch=False)
        self.log("disc_train/gauss_info_loss", q_gauss_loss, on_epoch=False)
        return adv_loss + self.hparams.lambda_coeff * mi_loss
def cli_main(args=None):
    """Command-line entry point: build dataset, model and trainer, then fit.

    Dataset-independent options are parsed first; dataset-specific
    hyperparameters are then overlaid from ``configs/<dataset>.yml``.
    """
    parser = ArgumentParser()
    parser.add_argument("--dataset", type=str, default='mnist')
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--data_dir", type=str, default="./data/")
    parser.add_argument("--gpus", type=int, default=1)
    parser.add_argument("--num_workers", type=int, default=6)
    parser.add_argument("--max_epochs", type=int, default=100)
    parser.add_argument("--seed", type=int, default=1234)
    script_args, _ = parser.parse_known_args(args)
    pl.seed_everything(script_args.seed)
    if script_args.dataset == 'mnist':
        transforms = transform_lib.Compose([
            transform_lib.Resize((28, 28)),
            transform_lib.ToTensor(),
            transform_lib.Normalize((0.5,), (0.5,)),
        ])
        dataset = MNIST(root=script_args.data_dir,
                        download=True, transform=transforms)
    elif script_args.dataset in ['celeba', 'svhn']:
        transforms = transform_lib.Compose([
            transform_lib.Resize((32, 32)),
            transform_lib.ToTensor(),
            transform_lib.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        if script_args.dataset == 'celeba':
            dataset = ImageFolder(root=script_args.data_dir + script_args.dataset,
                                  transform=transforms)
        else:  # svhn
            dataset = SVHN(root=script_args.data_dir,
                           download=True, transform=transforms)
    else:
        # BUG FIX: an unknown dataset name previously fell through and caused
        # a confusing NameError on `dataset` below; fail early instead.
        raise ValueError(f"Unsupported dataset: {script_args.dataset!r}")
    args = parser.parse_args(args)
    # Overlay dataset-specific hyperparameters from the YAML config.
    with open('configs/%s.yml' % (script_args.dataset), 'r') as cfg:
        config_args = yaml.safe_load(cfg)
    for k, v in config_args.items():
        args.__setattr__(k, v)
    num_batches = args.num_batches if args.num_batches is not None else len(
        dataset) // args.batch_size
    sampler = RandomSampler(dataset, True, num_batches * args.batch_size)
    # BUG FIX: use args.batch_size / args.num_workers here (the YAML config
    # may override them) so the sampler size and loader batch size agree.
    dataloader = DataLoader(dataset, batch_size=args.batch_size,
                            sampler=sampler, num_workers=args.num_workers)
    model = InfoGAN(**vars(args))
    callbacks = [
        TensorboardGenerativeModelImageSampler(
            num_samples=5, log_epoch_interval=5),
        CodeLatentDimInterpolator(epoch_interval=10)
    ]
    trainer = pl.Trainer.from_argparse_args(args, callbacks=callbacks)
    trainer.fit(model, dataloader)
# Script entry point.
if __name__ == "__main__":
    cli_main()
|
nilq/baby-python
|
python
|
"""
Code for defining the architecture of the Encoder and the Decoder blocks.
"""
import tensorflow as tf
class Encoder():
    """Embedding + GRU encoder for the seq2seq model.

    Maps integer token ids to embeddings and runs them through a GRU,
    returning the full output sequence and the final hidden state.
    """
    def __init__(self, vocab_size, embedding_dim, encoder_units):
        self.encoder_units = encoder_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim, mask_zero=True)
        self.gru = tf.keras.layers.GRU(self.encoder_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')

    def __call__(self, x, hidden):
        # x: (batch, max_length) token ids -> (batch, max_length, embedding_dim)
        embedded = self.embedding(x)
        seq_output, final_state = self.gru(embedded, initial_state=hidden)
        return seq_output, final_state
class Decoder():
    """Attention-equipped GRU decoder.

    At each step it attends over the encoder outputs with Bahdanau attention,
    concatenates the context vector with the embedded input token, runs the
    GRU and projects to vocabulary logits.
    """
    def __init__(self, vocab_size, embedding_dim, decoder_units):
        self.decoder_units = decoder_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim, mask_zero=True)
        self.gru = tf.keras.layers.GRU(self.decoder_units,
                                       return_sequences=True,
                                       return_state=True,
                                       recurrent_initializer='glorot_uniform')
        self.fc = tf.keras.layers.Dense(vocab_size)
        # Attention over the encoder outputs.
        self.attention = BahdanauAttention(self.decoder_units)

    def __call__(self, x, hidden, enc_output):
        # enc_output: (batch, max_length, hidden) -> context: (batch, hidden)
        context_vector, attention_weights = self.attention(hidden, enc_output)
        # x after embedding: (batch, 1, embedding_dim)
        embedded = self.embedding(x)
        # Prepend the context vector: (batch, 1, embedding_dim + hidden)
        gru_input = tf.concat([tf.expand_dims(context_vector, 1), embedded], axis=-1)
        gru_output, state = self.gru(gru_input)
        # Drop the time dimension: (batch, hidden)
        flat_output = tf.reshape(gru_output, (-1, gru_output.shape[2]))
        # Vocabulary logits: (batch, vocab)
        logits = self.fc(flat_output)
        return logits, state, attention_weights
class BahdanauAttention():
    """Additive (Bahdanau) attention: score = V(tanh(W1 q + W2 v))."""
    def __init__(self, units):
        self.W1 = tf.keras.layers.Dense(units)
        self.W2 = tf.keras.layers.Dense(units)
        self.V = tf.keras.layers.Dense(1)

    def __call__(self, query, values):
        # query: (batch, hidden) -> (batch, 1, hidden) so the addition below
        # broadcasts along the time axis of values (batch, max_len, hidden).
        expanded_query = tf.expand_dims(query, 1)
        # score: (batch, max_length, 1); last axis is 1 because of self.V.
        score = self.V(tf.nn.tanh(self.W1(expanded_query) + self.W2(values)))
        # Normalise over the time axis: (batch, max_length, 1)
        attention_weights = tf.nn.softmax(score, axis=1)
        # Weighted sum of values -> (batch, hidden)
        context_vector = tf.reduce_sum(attention_weights * values, axis=1)
        return context_vector, attention_weights
#TODO: Attribute tensorflow guys
|
nilq/baby-python
|
python
|
'''
Created on Mar 13, 2018
@author: abelit
'''
import os
import json
from utils import filepath
# Project metadata configuration.
project_settings = {
    'package': 'dev',
    'version': '3.14',
    'name': '__dbreport__.py',
    'author': 'abelit',
    'email': 'ychenid@live.com',
    'description': '',
}

# Resolve the project root once; every path below hangs off the same root
# (previously filepath.get_root_path() was recomputed for each entry).
_project_root = filepath.get_root_path(project_settings['name'])

# Standard asset/data locations, each with a trailing separator so
# consumers can append bare file names directly.
path_settings = {
    'image': _project_root + os.sep + 'images' + os.sep,
    'log': _project_root + os.sep + 'logs' + os.sep,
    'font': _project_root + os.sep + 'fonts' + os.sep,
    'resource': _project_root + os.sep + 'resource' + os.sep,
    'config': _project_root + os.sep + 'config' + os.sep,
}

# Path of the user-level configuration file.
USERCONF = path_settings['config'] + 'dbreport.json'

if __name__ == '__main__':
    print(USERCONF)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# jvparidon@gmail.com
from .timer import timer
|
nilq/baby-python
|
python
|
import base64
import json
import cv2
import numpy as np
from decouple import config
from flask import Flask, request
from api.face import FaceVerification
from db.mongo import FaceEncodings
# Flask application for the face-verification service.
app = Flask(__name__)

# Wire the verification service to its MongoDB-backed encoding store;
# DATABASE_URI is read from the environment/.env via python-decouple.
face_verification = FaceVerification(
    FaceEncodings(
        config("DATABASE_URI")
    )
)
@app.route('/register', methods=["POST"])
def register_post():
    """Register a face: decode the posted base64 image and store its encoding.

    Form fields: ``username`` (optional) and ``image`` (base64-encoded bytes).
    Returns a JSON string containing the service response plus a ``code``
    field (200 on success, 400 on failure).
    """
    username = request.form.get('username')
    image = request.form['image']
    img = base64.b64decode(image)
    # np.fromstring is deprecated for binary data; frombuffer is the
    # supported (zero-copy) replacement with identical semantics here.
    img = cv2.imdecode(np.frombuffer(img, np.uint8), cv2.IMREAD_ANYCOLOR)
    registration_response = face_verification.registration(image=np.array(img), username=username)
    if registration_response['success']:
        registration_response.update({"code": 200})
    else:
        registration_response.update({"code": 400})
    return json.dumps(registration_response)
@app.route('/login', methods=["POST"])
def login_post():
    """Authenticate a face against stored encodings.

    Form fields: ``username`` (optional) and ``image`` (base64-encoded bytes).
    Returns a JSON string containing the service response plus a ``code``
    field (200 on success, 400 on failure).
    """
    # BUG FIX: request.form['username'] raised KeyError (HTTP 400/500) when
    # the field was absent; .get() yields None, matching the original
    # behaviour for an empty username.
    username = request.form.get('username')
    image = request.form['image']
    img = base64.b64decode(image)
    # np.fromstring is deprecated for binary data; use frombuffer.
    img = cv2.imdecode(np.frombuffer(img, np.uint8), cv2.IMREAD_ANYCOLOR)
    auth_response = face_verification.authenticate(image=np.array(img), username=username)
    if auth_response['success']:
        auth_response.update({"code": 200})
    else:
        auth_response.update({"code": 400})
    return json.dumps(auth_response)
@app.route('/', methods=["GET"])
def health_check():
    """Liveness probe: always reports success."""
    status = {"success": True}
    return status
|
nilq/baby-python
|
python
|
import pocketcasts as pc
import requests
import re
import configparser
from pathlib import Path
def get_valid_filename(s):
    """Return *s* stripped and with filesystem-unsafe characters removed.

    Keeps only word characters, digits, dashes, dots and spaces.
    """
    cleaned = str(s).strip()
    return re.sub(r"(?u)[^-\w. ]", "", cleaned)
def get_extension(url):
    """Return the file extension of *url*, ignoring any query string."""
    # partition() is a no-op when there is no '?', so no guard is needed.
    base = url.partition("?")[0]
    return base.rsplit(".", 1)[-1]
# Main script: download every starred Pocketcasts episode into podcasts/.
print("Reading configuration file config.ini.")
config = configparser.ConfigParser()
config.read("config.ini")

print("Connecting to Pocketcasts API.")
api = pc.Api(config["pocketcasts"]["user"], config["pocketcasts"]["password"])

print("Fetching all starred episodes.")
starred = api.starred_episodes()

print("Downloading starred episodes.")
total_size = len(starred)
# Ensure the output directory exists before downloading anything.
Path("podcasts/").mkdir(parents=True, exist_ok=True)
for index, i in enumerate(starred, 1):
    print("########## Processing :")
    # print(i)
    filename = get_valid_filename(
        f"{i._published_at.strftime('%Y%m%d')} - {i._podcast._title} - {i._title}.{get_extension(i._url)}"
    )
    # BUG FIX: the existence check previously looked for `filename` in the
    # current working directory while downloads are written to podcasts/,
    # so every episode was re-downloaded on each run. Check the real target.
    target = Path("podcasts") / filename
    if not target.is_file():
        print(f"Downloading {index}/{total_size} : (unknown)")
        r = requests.get(i._url)
        with open(target, "wb") as f:
            f.write(r.content)
    else:
        print(f"(unknown) already exists. Skipping.")
|
nilq/baby-python
|
python
|
import numpy as np
from config import handTrackConfig as htconf
from math import sin, cos, sqrt, atan2, radians
import cv2
# 8 12 16 20
# | | | |
# 7 11 15 19
# 4 | | | |
# | 6 10 14 18
# 3 | | | |
# | 5---9---13--17
# 2 \ /
# \ \ /
# 1 \ /
# \ \ /
# ------0-
# Per-finger decision boundaries on the mean joint-angle cosine produced by
# cal_finger_angle(). Each tuple is scanned in order; the index of the first
# boundary the cosine exceeds becomes that finger's state (higher state =
# more bent toward open, per the trailing state lists in the comments).
threshold = [
    (-0.90, -1), # 0, 1, 2, 3
    (0, -0.85, -1), # 0, 1, 2
    (0.7, -0.85, -1), # 0, 1, 2
    (0.7, -0.85, -1), # 0, 1, 2
    (0.7, -0.85, -1) # 0, 1, 2
]
# Landmark-index triples (start, mid, end) per finger — indices follow the
# hand-landmark diagram in the comment above; the two vectors mid->start and
# mid->end define the bend angle. The thumb averages two triples.
pos = [
    [(3, 2, 1), (4, 3, 2)],
    [(8, 6, 0)],
    [(12, 10, 0)],
    [(16, 14, 0)],
    [(20, 18, 0)]
]
# Gesture label -> list of acceptable per-finger state patterns
# [thumb, index, middle, ring, pinky], matched exactly by recog_gesture().
record = {
    # '0': [[0, 0, 0, 0, 0]],
    '1': [[0, 2, 0, 0, 0]],
    '2': [[0, 2, 2, 0, 0]],
    '3': [[0, 2, 2, 2, 0]],
    '4': [[0, 2, 2, 2, 2]],
    '5': [[1, 2, 2, 2, 2]],
    'OK': [[0, 0, 2, 2, 2]],
    'GOOD': [[1, 0, 0, 0, 0]],
    '8': [[1, 2, 0, 0, 0], [1, 1, 0, 0, 0]],
}
def cal_finger_angle(points):
    """Return the mean bend-angle cosine for each of the five fingers.

    For every (start, mid, end) landmark triple in `pos`, the cosine of the
    angle between the unit vectors mid->start and mid->end is computed; the
    cosines of a finger's triples are averaged.
    """
    res = []
    for triples in pos:
        cosines = []
        for start, mid, end in triples:
            u = points[start] - points[mid]
            u /= np.linalg.norm(u)
            w = points[end] - points[mid]
            w /= np.linalg.norm(w)
            cosines.append(u.dot(w))
        res.append(sum(cosines) / len(cosines))
    return res
def recog_gesture(points):
    """Classify a hand-landmark array into a gesture label, or None.

    A pinched thumb/index tip (distance < 30 px) short-circuits to "CATCH";
    otherwise each finger's bend cosine is quantised against `threshold`
    and the resulting state pattern is looked up in `record`.
    """
    if get_distance(points[4], points[8]) < 30:
        return "CATCH"
    angles = cal_finger_angle(points)
    states = []
    for finger_idx, cos_val in enumerate(angles):
        for state, limit in enumerate(threshold[finger_idx]):
            if cos_val > limit:
                states.append(state)
                break
    for name, patterns in record.items():
        if states in patterns:
            return name
    return None
def get_vis_gesture_map(map, points, vis_window_shape):
    """Draw the hand skeleton onto `map`, scaled to the visualisation window.

    NOTE(review): `points` is normalised and rescaled IN PLACE — callers that
    reuse the landmark array afterwards should pass a copy.
    """
    # Normalise landmarks to [0, 1] within their own bounding box.
    points[:, 0] -= points[:, 0].min()
    points[:, 1] -= points[:, 1].min()
    points[:, 0] /= points[:, 0].max()
    points[:, 1] /= points[:, 1].max()
    # Center the skeleton with a 10% margin and scale to the window.
    points += 0.1
    points *= 0.8 * vis_window_shape[0]
    # Draw landmark dots, then the connecting bones from the config.
    for i, point in enumerate(points):
        x, y = point
        cv2.circle(map, (int(x), int(y)), htconf.THICKNESS, htconf.POINT_COLOR, htconf.THICKNESS)
    for connection in htconf.connections:
        x0, y0 = points[connection[0]]
        x1, y1 = points[connection[1]]
        cv2.line(map, (int(x0), int(y0)), (int(x1), int(y1)), htconf.CONNECTION_COLOR, htconf.THICKNESS)
    return map
def get_distance(point1, point2):
    """Euclidean distance between two 2-D points (any indexable pair)."""
    dx = point2[0] - point1[0]
    dy = point2[1] - point1[1]
    return (dx * dx + dy * dy) ** 0.5
|
nilq/baby-python
|
python
|
"""
This module provides various utility functions.
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
from typing import Tuple
import numpy as np
# -----------------------------------------------------------------------------
# FUNCTION DEFINITIONS
# -----------------------------------------------------------------------------
def crop_center(array: np.ndarray,
                size: Tuple[int, ...]) -> np.ndarray:
    """
    Crop an n-dimensional array to the given size around its center.

    Args:
        array: The numpy array to be cropped.
        size: A tuple containing the size of the cropped array. To not
            crop along a specific axis, you can specify the size of
            that axis as -1.

    Returns:
        The input array, cropped to the desired size around its center.
    """

    # The size tuple must provide one entry per array dimension
    assert array.ndim == len(size), \
        'Length of size must match number of dimensions of array!'

    # Build one slice per axis: a full slice for -1, otherwise a
    # centered window of the requested length
    slices = []
    for old_len, new_len in zip(array.shape, size):
        if new_len == -1:
            slices.append(slice(None, None))
        else:
            start = old_len // 2 - new_len // 2
            slices.append(slice(start, start + new_len))

    return array[tuple(slices)]
def get_subaperture_centers(
    grid_size: int,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Compute the positions of the centers of the subapertures of the
    sensor. This assumes a simple geometry, where the sensor is taken
    to be the largest square that can fit inside the unit circle, and
    consists of a grid of `grid_size` x `grid_size` subapertures.

    Args:
        grid_size: An integer specifying the size of the (quadratic)
            grid of subapertures in the HSWFS sensor.

    Returns:
        A mesh grid, consisting of two numpy arrays which specify the
        `x` and `y` positions of the centers of the subapertures.
    """

    # Subaperture centers span [-(1 - 1/n), 1 - 1/n] along each axis;
    # the 1/sqrt(2) factor scales the square to fit inside the unit circle.
    lo = 1 / grid_size - 1
    hi = 1 - 1 / grid_size
    scale = 1 / np.sqrt(2)

    # x varies along columns (rows are copies); y varies along rows
    # (columns are copies), with y decreasing from top to bottom.
    x = scale * np.repeat(np.linspace(lo, hi, grid_size).reshape(1, -1),
                          grid_size, axis=0)
    y = scale * np.repeat(np.linspace(hi, lo, grid_size).reshape(-1, 1),
                          grid_size, axis=1)

    return x, y
def get_unit_disk_meshgrid(
    resolution: int,
) -> Tuple[np.array, np.array]:
    """
    Get a (Cartesian) mesh grid of positions on the unit disk, that is,
    all positions with a Euclidean distance <= 1 from (0, 0).

    Args:
        resolution: An integer specifying the size of the mesh grid,
            that is, the number of points in each dimension.

    Returns:
        A mesh grid consisting of the tuple `x_0`, `y_0`, which are each
        numpy arrays of shape `(resolution, resolution)`. For positions
        that are on the unit disk, they contain the coordinates of the
        position; otherwise, they contain `np.nan`.
    """

    # Cartesian grid covering [-1, 1] x [-1, 1]
    coords = np.linspace(-1, 1, resolution)
    x_0, y_0 = np.meshgrid(coords, coords)

    # Blank out every position with radius > 1 (i.e. outside the unit disk)
    outside = np.sqrt(x_0 ** 2 + y_0 ** 2) > 1
    x_0[outside] = np.nan
    y_0[outside] = np.nan

    return x_0, y_0
|
nilq/baby-python
|
python
|
"""
Based on https://github.com/ikostrikov/pytorch-a2c-ppo-acktr
"""
import gym
import torch
import random
from environments.env_utils.vec_env import VecEnvWrapper
from environments.env_utils.vec_env.dummy_vec_env import DummyVecEnv
from environments.env_utils.vec_env.subproc_vec_env import SubprocVecEnv
from environments.env_utils.vec_env.vec_normalize import VecNormalize
from environments.wrappers import TimeLimitMask, VariBadWrapper
def make_env(env_id, seed, rank, episodes_per_task, tasks, add_done_info, **kwargs):
    """Return a thunk that builds, seeds and wraps one environment instance."""
    def _thunk():
        env = gym.make(env_id, **kwargs)
        if tasks is not None:
            # Restrict task resets to the fixed task set: each reset_task
            # call draws a random task from `tasks`.
            env.unwrapped.reset_task = lambda x: env.unwrapped.set_task(random.choice(tasks))
        if seed is not None:
            # Offset by rank so parallel workers get distinct seeds.
            env.seed(seed + rank)
        if 'TimeLimit' in str(env.__class__.__name__):
            env = TimeLimitMask(env)
        env = VariBadWrapper(env=env, episodes_per_task=episodes_per_task, add_done_info=add_done_info)
        return env
    return _thunk
def make_vec_envs(env_name, seed, num_processes, gamma,
                  device, episodes_per_task,
                  normalise_rew, ret_rms, tasks,
                  rank_offset=0,
                  add_done_info=None,
                  **kwargs):
    """Build a (possibly parallel) vectorized environment wrapped for PyTorch.

    :param ret_rms: running return and std for rewards
    """
    thunks = [make_env(env_id=env_name, seed=seed, rank=rank_offset + i,
                       episodes_per_task=episodes_per_task,
                       tasks=tasks,
                       add_done_info=add_done_info,
                       **kwargs)
              for i in range(num_processes)]
    # One subprocess per env when parallel; otherwise run in-process.
    envs = SubprocVecEnv(thunks) if len(thunks) > 1 else DummyVecEnv(thunks)
    # Normalisation is only applied to flat (1-D) observation spaces.
    if len(envs.observation_space.shape) == 1:
        if gamma is None:
            envs = VecNormalize(envs, normalise_rew=normalise_rew, ret_rms=ret_rms)
        else:
            envs = VecNormalize(envs, normalise_rew=normalise_rew, ret_rms=ret_rms,
                                gamma=gamma)
    return VecPyTorch(envs, device)
class VecPyTorch(VecEnvWrapper):
    """Wrap a vectorized env so observations and rewards come back as torch
    tensors on a given device; actions are converted to numpy on the way in."""

    def __init__(self, venv, device):
        """Store the target device for all tensors returned by this wrapper."""
        super(VecPyTorch, self).__init__(venv)
        self.device = device
        # TODO: Fix data types

    def reset_mdp(self, index=None):
        # A list result means the underlying env returns [raw, normalised] obs.
        obs = self.venv.reset_mdp(index=index)
        if isinstance(obs, list):
            obs = [torch.from_numpy(o).float().to(self.device) for o in obs]
        else:
            obs = torch.from_numpy(obs).float().to(self.device)
        return obs

    def reset(self, index=None, task=None):
        if task is not None:
            assert isinstance(task, list)
        state = self.venv.reset(index=index, task=task)
        if isinstance(state, list):
            state = [torch.from_numpy(s).float().to(self.device) for s in state]
        else:
            state = torch.from_numpy(state).float().to(self.device)
        return state

    def step_async(self, actions):
        # actions = actions.squeeze(1).cpu().numpy()
        # The wrapped env expects numpy actions.
        actions = actions.cpu().numpy()
        self.venv.step_async(actions)

    def step_wait(self):
        state, reward, done, info = self.venv.step_wait()
        if isinstance(state, list):  # raw + normalised
            state = [torch.from_numpy(s).float().to(self.device) for s in state]
        else:
            state = torch.from_numpy(state).float().to(self.device)
        if isinstance(reward, list):  # raw + normalised
            reward = [torch.from_numpy(r).unsqueeze(dim=1).float().to(self.device) for r in reward]
        else:
            reward = torch.from_numpy(reward).unsqueeze(dim=1).float().to(self.device)
        return state, reward, done, info

    def __getattr__(self, attr):
        """ If env does not have the attribute then call the attribute in the wrapped_env """
        # These attrs live on the innermost env; fetch them via the dedicated
        # accessor instead of the normal lookup chain.
        if attr in ['_max_episode_steps', 'task_dim', 'belief_dim', 'num_states']:
            return self.unwrapped.get_env_attr(attr)
        # Prefer our own attribute; fall back to the innermost env's.
        try:
            orig_attr = self.__getattribute__(attr)
        except AttributeError:
            orig_attr = self.unwrapped.__getattribute__(attr)
        if callable(orig_attr):
            # Wrap callables so the call is forwarded transparently.
            def hooked(*args, **kwargs):
                result = orig_attr(*args, **kwargs)
                return result
            return hooked
        else:
            return orig_attr
|
nilq/baby-python
|
python
|
"""
Sets permission for the API (ONLY)
"""
from rest_framework import permissions
# TODO: Add restriction to users (get token, refresh token, verify token, post request)
class IsReadOnly(permissions.DjangoModelPermissions):
    """
    Custom permission to only allow reading.
    """

    def has_object_permission(self, request, view, obj):
        """Allow safe (GET/HEAD/OPTIONS) requests only.

        The original implicitly returned ``None`` for unsafe methods;
        an explicit ``False`` makes the denial unambiguous.
        """
        # Read permissions are allowed to any request,
        # so we'll always allow GET, HEAD or OPTIONS requests.
        if request.method in permissions.SAFE_METHODS:
            return True
        # Everything else is denied (was an implicit None).
        return False
class IsOwnerOrReadOnly(permissions.DjangoModelPermissions):
    """
    Object-level permission: anyone may read, only the owner may write.
    """

    def has_object_permission(self, request, view, obj):
        # Safe methods (GET, HEAD, OPTIONS) are always permitted;
        # write access is restricted to the object's owner.
        return (request.method in permissions.SAFE_METHODS
                or obj.user == request.user)
class IsOwner(permissions.DjangoModelPermissions):
    """
    Object-level permission granting access exclusively to the owner.
    """

    def has_object_permission(self, request, view, obj):
        # Only the owner of the object may act on it.
        owner = obj.user
        return owner == request.user
|
nilq/baby-python
|
python
|
###############################################################################
##
## Copyright 2011 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys, shelve
from twisted.python import log
from twisted.internet import reactor
from autobahn.websocket import listenWS
from autobahn.wamp import exportRpc, WampServerFactory, WampServerProtocol
class KeyValue:
    """
    Simple, persistent key-value store backed by :mod:`shelve`.
    """

    def __init__(self, filename):
        ## the shelve file keeps the mapping on disk under `filename`
        self.store = shelve.open(filename)

    @exportRpc
    def set(self, key = None, value = None):
        """Set `key` to `value`; a None value deletes the key; no key clears all."""
        if key is not None:
            k = str(key)
            if value is not None:
                self.store[k] = value
            else:
                ## `has_key` was removed in Python 3; the `in` operator is
                ## equivalent and works on both Python 2 and 3.
                if k in self.store:
                    del self.store[k]
        else:
            self.store.clear()

    @exportRpc
    def get(self, key = None):
        """Return the value for `key`, all items when no key, None when absent."""
        if key is None:
            return self.store.items()
        else:
            return self.store.get(str(key), None)

    @exportRpc
    def keys(self):
        """Return all keys currently in the store."""
        return self.store.keys()
class KeyValueServerProtocol(WampServerProtocol):
    """
    Demonstrates creating a server with Autobahn WebSockets that provides
    a persistent key-value store which we can access via RPCs.
    """

    def onSessionOpen(self):
        ## register the key-value store, which resides on the factory within
        ## this connection
        self.registerForRpc(self.factory.keyvalue, "http://example.com/simple/keyvalue#")
class KeyValueServerFactory(WampServerFactory):
    """WAMP factory owning the single shared KeyValue store."""

    protocol = KeyValueServerProtocol

    def __init__(self, url):
        WampServerFactory.__init__(self, url)
        ## the key-value store resides on the factory object, since it is to
        ## be shared among all client connections
        self.keyvalue = KeyValue("keyvalue.dat")
if __name__ == '__main__':
    ## log to stdout and serve the WAMP key-value store on port 9000
    log.startLogging(sys.stdout)
    factory = KeyValueServerFactory("ws://localhost:9000")
    listenWS(factory)
    reactor.run()
|
nilq/baby-python
|
python
|
from typing import Set, List, Tuple, Dict
def fibonacci(n: int) -> int:
    """
    Return the n-th Fibonacci number.

    Raises ValueError for negative n.

    >>> fibonacci(0)
    0
    >>> fibonacci(1)
    1
    >>> fibonacci(2)
    1
    >>> fibonacci(10)
    55
    >>> fibonacci(-2)
    Traceback (most recent call last):
    ...
    ValueError: n must be more or equal than 0
    """
    if n < 0:
        raise ValueError('n must be more or equal than 0')
    # Rolling pair instead of a full table: O(1) memory.
    previous, current = 0, 1
    for _ in range(n):
        previous, current = current, previous + current
    return previous
def count_trajectories(n: int) -> int:
    """
    The Grasshopper starts at position 1 and may jump +1, +2 or +3.
    Count its possible trajectories to position n (0 when n <= 1).

    >>> count_trajectories(0)
    0
    >>> count_trajectories(1)
    0
    >>> count_trajectories(2)
    1
    >>> count_trajectories(3)
    2
    >>> count_trajectories(4)
    3
    >>> count_trajectories(7)
    20
    >>> count_trajectories(-3)
    0
    """
    if n <= 1:
        return 0
    # Seed values for cells 0..4 under this problem's counting convention.
    seeds = [0, 0, 1, 2, 3]
    if n <= 4:
        return seeds[n]
    # Tribonacci-style recurrence with three rolling values.
    third, second, first = seeds[2], seeds[3], seeds[4]
    for _ in range(5, n + 1):
        third, second, first = second, first, third + second + first
    return first
def count_trajectories_with_forbidden_cells(n: int, forbidden_cells: Set[int]) -> int:
    """
    The Grasshopper starts at position 1 and may jump +1, +2 or +3,
    but may never land on a forbidden cell. Count its trajectories to
    position n (0 when n <= 1 or when cell 1 itself is forbidden).

    >>> count_trajectories_with_forbidden_cells(0, set())
    0
    >>> count_trajectories_with_forbidden_cells(1, set())
    0
    >>> count_trajectories_with_forbidden_cells(2, set())
    1
    >>> count_trajectories_with_forbidden_cells(3, set())
    2
    >>> count_trajectories_with_forbidden_cells(4, set())
    3
    >>> count_trajectories_with_forbidden_cells(4, {2})
    2
    >>> count_trajectories_with_forbidden_cells(4, {3})
    2
    >>> count_trajectories_with_forbidden_cells(4, {4})
    0
    >>> count_trajectories_with_forbidden_cells(9, {2,6,7})
    3
    >>> count_trajectories_with_forbidden_cells(12, {3,6,7,10})
    9
    >>> count_trajectories_with_forbidden_cells(8, {5})
    13
    >>> count_trajectories_with_forbidden_cells(8, {1})
    0
    >>> count_trajectories_with_forbidden_cells(-3, set())
    0
    """
    if n <= 1 or 1 in forbidden_cells:
        return 0
    ways = [0] * max(n + 1, 5)
    # Seed cells 2..4: each allowed cell continues from the nearest
    # preceding allowed cell (cell 1 is always allowed at this point,
    # so the search below cannot fail).
    for cell in range(2, 5):
        if cell in forbidden_cells:
            continue
        nearest = next(k for k in range(cell - 1, 0, -1) if k not in forbidden_cells)
        ways[cell] = ways[nearest] + 1
    # Standard recurrence beyond the seeded prefix.
    for cell in range(5, n + 1):
        if cell not in forbidden_cells:
            ways[cell] = ways[cell - 1] + ways[cell - 2] + ways[cell - 3]
    return ways[n]
def count_min_cost(n: int, prices: Dict[int, int]) -> Tuple[int, List[int]]:
    """The Grasshopper starts at position 1 and may jump +1, +2 or +3.
    Return the cheapest total visiting cost to reach n together with the
    visited points; missing entries in `prices` cost 0. For n < 0 the
    Grasshopper is considered to have visited only point 1. Ties may be
    broken arbitrarily.

    >>> count_min_cost(11, {1:1, 2:2, 3:1, 4:3, 5:1, 6:1, 7:2, 8:3, 9:3, 10:2, 11:1})
    (7, [1, 3, 5, 8, 11])
    >>> count_min_cost(-2, {1:2, 2:1, 3:2, 4:1})
    (2, [1])
    >>> count_min_cost(6, {2:2, 3:2, 5:1})
    (0, [1, 4, 6])
    >>> count_min_cost(6, {1:3, 2:-5, 3:1, 4:-3, 5:5, 6:1})
    (-4, [1, 2, 4, 6])
    >>> count_min_cost(5, {})
    (0, [1, 2, 5])
    """
    if n <= 1:
        return prices.get(1, 0), [1]
    # dp[i] = (cheapest cost to reach i, the corresponding path);
    # index 0 is an unreachable placeholder.
    dp = [
        (0, [0]),
        (prices.get(1, 0), [1]),
        (prices.get(2, 0) + prices.get(1, 0), [1, 2]),
    ]
    best = min(dp[1], dp[2])
    dp.append((best[0] + prices.get(3, 0), best[1] + [3]))
    for point in range(4, n + 1):
        best = min(dp[point - 1], dp[point - 2], dp[point - 3])
        dp.append((best[0] + prices.get(point, 0), best[1] + [point]))
    return dp[n]
def largest_common_subsequence(sequence_1: List[int], sequence_2: List[int]) -> int:
    """
    Return the length of the largest common subsequence of two sequences
    (0 when they share none). Note: a subsequence is not a substring —
    for 1,2,3,4,5 the sequence 1,3,5 is a subsequence.

    >>> largest_common_subsequence([1,2,3,4,5],[1,2,3,4,5])
    5
    >>> largest_common_subsequence([1,2,3,4,5],[4,8,1,2,3,4,6,9])
    4
    >>> largest_common_subsequence([0,3,6,1,2,3,8,9],[1,2,3,4,5])
    3
    >>> largest_common_subsequence([1,2,0,3,4,5],[1,2,3,4,5])
    5
    >>> largest_common_subsequence([1,2,3,0,5],[1,2,3,4,5])
    4
    >>> largest_common_subsequence([1,2,3,4,5],[6,7,8,9])
    0
    >>> largest_common_subsequence([],[1,2,3,4,5])
    0
    """
    rows, cols = len(sequence_1), len(sequence_2)
    # table[i][j] = LCS length of the first i / first j elements.
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for i, left in enumerate(sequence_1, start=1):
        for j, right in enumerate(sequence_2, start=1):
            if left == right:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[rows][cols]
def largest_increasing_subsequence(sequence: List[int]) -> int:
    """
    Return the length of the longest strictly increasing subsequence.
    Note: a subsequence is not a substring — for 2,0,3,1,5 the
    sequence 2,3,5 is a subsequence (although it is not a substring).

    The previous implementation computed the LCS of the sequence with its
    sorted copy, which over-counts when the sequence contains duplicates
    (e.g. [2, 2] gave 2 instead of 1). Patience sorting with binary
    search is correct for duplicates and runs in O(n log n).

    >>> largest_increasing_subsequence([2,0,3,1,5])
    3
    >>> largest_increasing_subsequence([5,4,3,2,1])
    1
    >>> largest_increasing_subsequence([5,10,6,12,3,24,7,8])
    4
    >>> largest_increasing_subsequence([])
    0
    """
    from bisect import bisect_left  # local import keeps file-level imports untouched

    # tails[k] is the smallest possible tail of an increasing subsequence
    # of length k + 1; it is always sorted.
    tails: List[int] = []
    for value in sequence:
        position = bisect_left(tails, value)
        if position == len(tails):
            tails.append(value)
        else:
            tails[position] = value
    return len(tails)
if __name__ == "__main__":
    # Run the embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
|
nilq/baby-python
|
python
|
from functools import wraps
from . import environment as env
import wx, os
class VirtualEnvMustExistDecorator:
    """Decorator: the bound virtual environment must exist before the
    wrapped handler is allowed to run; otherwise a warning dialog is
    shown and the call is aborted."""

    def __init__(self, *args, **kwargs): ...

    def __call__(self, func, e=None):
        @wraps(func)
        def wrapper(obj, *args, **kwargs):
            # Abort with a popup when no usable Python3 env is bound.
            bound_path = env.getPython3Env()
            if bound_path.strip() == '' or not os.path.exists(bound_path):
                wx.MessageBox(f'虚拟环境未绑定,或绑定失败!', "错误警告", wx.OK | wx.ICON_INFORMATION)
                return
            event = args[0] if args else e
            return func(obj, event)
        return wrapper
class RegisterOriginOrderDecorator:
    """Decorator that registers system commands returned by the wrapped
    handler, recording each command object and its description."""

    def __init__(self, *args, **kwargs):
        # Registered command objects, in registration order.
        self.cmdCodes = []
        # Maps each command object to its human-readable description.
        self.info_cmdCodes = {}
        self.msg = kwargs.get('msg', 'UnKnown')

    def __call__(self, func, e=None):
        @wraps(func)
        def wrapper(obj, *args, **kwargs):
            event = args[0] if args else e
            outcome = func(obj, event)
            # A falsy result means the handler declined to register anything.
            if not outcome:
                return
            command, self.cmdCodes, self.info_cmdCodes = outcome
            self.cmdCodes.append(command)
            self.info_cmdCodes[command] = self.msg
        return wrapper
|
nilq/baby-python
|
python
|
import os, json, sys, shutil, distutils
from distutils import dir_util
# C-source templates used by the generators below. Literal braces in the
# generated C are escaped as {{ }} for str.format.
# One dispatch branch: maps a command name to its entry function.
if_block_template = '\tif(!strcmp(cmd, "{}")){{\n\
\t\treturn {}(argv, argc);\n\
\t}}else '
# Fallback branch appended after all command comparisons.
ending = '{\n\
\t\tstde("Not a command:");\n\
\t\tstde(argv[0]);\n\
\t}'
# Registers a driver struct pointer in the drivers[] array.
set_driver_template = "\tdrivers[{}] = &{};\n"
include_template = '#include "{}"\n'
# Bare function call statement (used for init hooks).
call_template = '\t{}();\n'
def list_programs(d='./programs/'):
    """Return paths of subdirectories of `d` whose names start with 'prg_'."""
    return [
        os.path.join(d, entry)
        for entry in os.listdir(d)
        if entry.startswith('prg_') and os.path.isdir(os.path.join(d, entry))
    ]
def list_drivers(d='./drivers/'):
    """Return paths of subdirectories of `d` whose names start with 'drv_'."""
    return [
        os.path.join(d, entry)
        for entry in os.listdir(d)
        if entry.startswith('drv_') and os.path.isdir(os.path.join(d, entry))
    ]
def setup_programs():
    """Generate ./programs/program.c from the template and each program's conf.json."""
    with open('.//programs/programs.tplt') as template_file:
        template = template_file.read()
    dispatch = ''
    includes = ''
    init_calls = ''
    for program_dir in list_programs():
        with open(os.path.join(program_dir, 'conf.json')) as conf_file:
            conf = json.load(conf_file)
        if conf.get('disabled'):
            continue  # explicitly disabled programs are skipped
        dispatch += if_block_template.format(conf['cmd'], conf['entry'])
        includes += include_template.format(os.path.basename(program_dir) + '/' + conf['include'])
        if 'init' in conf:
            init_calls += call_template.format(conf['init'])
    # Terminate the if/else-if chain with the "unknown command" branch.
    dispatch += ending
    with open('./programs/program.c', 'w') as out:
        out.write(template.format(includes, dispatch, init_calls))
def setup_drivers():
    """Generate ./drivers/drivers.c from the template and each driver's conf.json."""
    with open('./drivers/drivers.tplt') as template_file:
        template = template_file.read()
    includes = ''
    driver_set = ''
    preinits = ''
    inits = ''
    postinits = ''
    count = 0
    for driver_dir in list_drivers():
        with open(os.path.join(driver_dir, 'conf.json')) as conf_file:
            conf = json.load(conf_file)
        if conf.get('disabled'):
            continue  # explicitly disabled drivers are skipped
        includes += include_template.format(os.path.basename(driver_dir) + '/' + conf['include'])
        if 'preinit' in conf:
            preinits += call_template.format(conf['preinit'])
        if 'init' in conf:
            inits += call_template.format(conf['init'])
        if 'postinit' in conf:
            postinits += call_template.format(conf['postinit'])
        # Slot index in the drivers[] array follows registration order.
        driver_set += set_driver_template.format(count, conf['name'])
        count += 1
    with open('./drivers/drivers.c', 'w') as out:
        out.write(template.format(includes, count, driver_set, preinits, inits, postinits))
# Regenerate both generated C sources when this script is executed.
setup_programs()
setup_drivers()
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
from typing import Dict
import torch
from torch import optim
from datasets import Dataset
from models import CP, ComplEx
from regularizers import N2, N3
from optimizers import KBCOptimizer
# Command-line configuration for the knowledge-base-completion run.
big_datasets = ['FB15K', 'WN', 'WN18RR', 'FB237', 'YAGO3-10']
datasets = big_datasets
parser = argparse.ArgumentParser(
    description="Relational learning contraption"
)
parser.add_argument(
    '--dataset', choices=datasets,
    help="Dataset in {}".format(datasets)
)
models = ['CP', 'ComplEx']
parser.add_argument(
    '--model', choices=models,
    help="Model in {}".format(models)
)
regularizers = ['N3', 'N2']
parser.add_argument(
    '--regularizer', choices=regularizers, default='N3',
    help="Regularizer in {}".format(regularizers)
)
optimizers = ['Adagrad', 'Adam', 'SGD']
parser.add_argument(
    '--optimizer', choices=optimizers, default='Adagrad',
    help="Optimizer in {}".format(optimizers)
)
parser.add_argument(
    '--max_epochs', default=50, type=int,
    help="Number of epochs."
)
# NOTE(review): --valid is declared as float although it is used as a
# modulus on epoch counts below — confirm whether int was intended.
parser.add_argument(
    '--valid', default=3, type=float,
    help="Number of epochs before valid."
)
parser.add_argument(
    '--rank', default=1000, type=int,
    help="Factorization rank."
)
parser.add_argument(
    '--batch_size', default=1000, type=int,
    help="Batch size."
)
parser.add_argument(
    '--reg', default=0, type=float,
    help="Regularization weight"
)
parser.add_argument(
    '--init', default=1e-3, type=float,
    help="Initial scale"
)
parser.add_argument(
    '--learning_rate', default=1e-1, type=float,
    help="Learning rate"
)
parser.add_argument(
    '--decay1', default=0.9, type=float,
    help="decay rate for the first moment estimate in Adam"
)
parser.add_argument(
    '--decay2', default=0.999, type=float,
    help="decay rate for second moment estimate in Adam"
)
args = parser.parse_args()
# Load the dataset and build model / regularizer / optimizer from the CLI
# choices; the dict-of-lambdas defers construction to the selected entry.
dataset = Dataset(args.dataset)
examples = torch.from_numpy(dataset.get_train().astype('int64'))
print(dataset.get_shape())
model = {
    'CP': lambda: CP(dataset.get_shape(), args.rank, args.init),
    'ComplEx': lambda: ComplEx(dataset.get_shape(), args.rank, args.init),
}[args.model]()
regularizer = {
    'N2': N2(args.reg),
    'N3': N3(args.reg),
}[args.regularizer]
# Training assumes a CUDA device is available.
device = 'cuda'
model.to(device)
optim_method = {
    'Adagrad': lambda: optim.Adagrad(model.parameters(), lr=args.learning_rate),
    'Adam': lambda: optim.Adam(model.parameters(), lr=args.learning_rate, betas=(args.decay1, args.decay2)),
    'SGD': lambda: optim.SGD(model.parameters(), lr=args.learning_rate)
}[args.optimizer]()
optimizer = KBCOptimizer(model, regularizer, optim_method, args.batch_size)
def avg_both(mrrs: Dict[str, float], hits: Dict[str, torch.FloatTensor]):
    """
    Average the metrics computed for missing-lhs and missing-rhs prediction.

    :param mrrs: MRR per prediction side ('lhs' / 'rhs')
    :param hits: hits@k per prediction side
    :return: dict with the averaged 'MRR' and 'hits@[1,3,10]'
    """
    mean_mrr = (mrrs['lhs'] + mrrs['rhs']) / 2.
    mean_hits = (hits['lhs'] + hits['rhs']) / 2.
    return {'MRR': mean_mrr, 'hits@[1,3,10]': mean_hits}
# Main training loop: run epochs, and every `--valid` epochs evaluate
# train/valid/test splits (train capped at 50000 queries for speed).
cur_loss = 0
curve = {'train': [], 'valid': [], 'test': []}
for e in range(args.max_epochs):
    cur_loss = optimizer.epoch(examples)
    if (e + 1) % args.valid == 0:
        valid, test, train = [
            avg_both(*dataset.eval(model, split, -1 if split != 'train' else 50000))
            for split in ['valid', 'test', 'train']
        ]
        curve['valid'].append(valid)
        curve['test'].append(test)
        curve['train'].append(train)
        print("\t TRAIN: ", train)
        print("\t VALID : ", valid)
# Final, full evaluation on the test split.
results = dataset.eval(model, 'test', -1)
print("\n\nTEST : ", results)
|
nilq/baby-python
|
python
|
# Author : BIZZOZZERO Nicolas
# Completed on Sun, 24 Jan 2016, 23:11
#
# This program find the solution of the problem 5 of the Project Euler.
# The problem is the following :
#
# 2520 is the smallest number that can be divided by each of the
# numbers from 1 to 10 without any remainder.
# What is the smallest positive number that is evenly divisible
# by all of the numbers from 1 to 20?
#
# The answer to this problem is :
# 232792560
# The easiest solution is to print 2*2*2*2*3*3*5*7*11*13*17*19
# But here's the bruteforce method
def isEvenlyDivisibleByAllTheNumbersFrom1To20(n):
    """Return True when `n` is divisible by every integer from 1 to 20."""
    return all(n % divisor == 0 for divisor in range(1, 21))
def main():
    """Brute-force search for the smallest positive integer divisible by 1..20.

    Prints the answer once found. Bug fix: the original printed every
    candidate on every iteration (a leftover debug statement), flooding
    stdout for hundreds of millions of lines; only the answer is printed
    now.
    """
    candidate = 1
    while True:
        if isEvenlyDivisibleByAllTheNumbersFrom1To20(candidate):
            print(candidate)
            break
        candidate += 1
if __name__ == '__main__':
    # Closed-form answer: the LCM of 1..20 expressed as prime powers.
    print(2 * 2 * 2 * 2 * 3 * 3 * 5 * 7 * 11 * 13 * 17 * 19)
|
nilq/baby-python
|
python
|
from pyqtgraph.Qt import QtGui, QtCore
import pyqtgraph as pg
import numpy as np
from communication import Communication
import math
from dataBase import data_base
from PyQt5.QtWidgets import QPushButton
# Global pyqtgraph theme (dark background, light foreground).
pg.setConfigOption('background', (33, 33, 33))
pg.setConfigOption('foreground', (197, 198, 199))
# Interface variables
app = QtGui.QApplication([])
view = pg.GraphicsView()
Layout = pg.GraphicsLayout()
view.setCentralItem(Layout)
view.show()
view.setWindowTitle('Flight monitoring')
view.resize(1200, 700)
# declare object for serial Communication
ser = Communication()
# declare object for storage in CSV
# NOTE(review): this rebinding shadows the imported `data_base` class with
# its instance; later code only uses the instance, so it works.
data_base = data_base()
# Fonts for text items
font = QtGui.QFont()
font.setPixelSize(90)
# Title at top
text = """
Flight monitoring interface for cansats and OBC's <br>
developed at the Universidad Distrital FJC.
"""
Layout.addLabel(text, col=1, colspan=21)
Layout.nextRow()
# Put vertical label on left side
Layout.addLabel('LIDER - ATL research hotbed',
                angle=-90, rowspan=3)
Layout.nextRow()
# Save data buttons
# buttons style
style = "background-color:rgb(29, 185, 84);color:rgb(0,0,0);font-size:14px;"
lb = Layout.addLayout(colspan=21)
proxy = QtGui.QGraphicsProxyWidget()
save_button = QtGui.QPushButton('Start storage')
save_button.setStyleSheet(style)
save_button.clicked.connect(data_base.start)
proxy.setWidget(save_button)
lb.addItem(proxy)
lb.nextCol()
proxy2 = QtGui.QGraphicsProxyWidget()
end_save_button = QtGui.QPushButton('Stop storage')
end_save_button.setStyleSheet(style)
end_save_button.clicked.connect(data_base.stop)
proxy2.setWidget(end_save_button)
lb.addItem(proxy2)
Layout.nextRow()
# Altitude graph
l1 = Layout.addLayout(colspan=20, rowspan=2)
l11 = l1.addLayout(rowspan=1, border=(83, 83, 83))
p1 = l11.addPlot(title="Altitude (m)")
altitude_plot = p1.plot(pen=(29, 185, 84))
# 30-sample rolling window, updated by update_altitude().
altitude_data = np.linspace(0, 0, 30)
ptr1 = 0
def update_altitude(value_chain):
    """Append the newest altitude sample (field 1) and scroll the curve."""
    global altitude_plot, altitude_data, ptr1
    altitude_data = np.roll(altitude_data, -1)
    altitude_data[-1] = float(value_chain[1])
    ptr1 += 1
    altitude_plot.setData(altitude_data)
    altitude_plot.setPos(ptr1, 0)
# Speed graph
p2 = l11.addPlot(title="Speed (m/s)")
vel_plot = p2.plot(pen=(29, 185, 84))
vel_data = np.linspace(0, 0, 30)
ptr6 = 0
# Integrated velocity components and magnitude (updated by update_vel).
vx = 0
vy = 0
vz = 0
vel = 0
def update_vel(value_chain):
    """Integrate accelerations (fields 8..10) into a speed estimate and plot it.

    Bug fix: the original declared ``i = 0`` locally and used
    ``if i == 0`` to capture a baseline z-reading ``vzo`` — but a local
    resets on every call, so ``vzo`` always equalled the current reading
    and the z term contributed nothing. The baseline is now captured
    exactly once, on the first call, via a function attribute.
    """
    global vel_plot, vel_data, ptr6, vx, vy, vz, vel
    # Capture the initial z-axis reading once as the integration baseline.
    if not hasattr(update_vel, 'vzo'):
        update_vel.vzo = float(value_chain[10])
    # 500 is dt (the 500 ms update period) used by the integration scheme.
    vx += (float(value_chain[8])) * 500
    vy += (float(value_chain[9])) * 500
    vz += (float(value_chain[10]) - update_vel.vzo) * 500
    sum = math.pow(vx, 2) + math.pow(vy, 2) + math.pow(vz, 2)
    vel = math.sqrt(sum)
    vel_data[:-1] = vel_data[1:]
    vel_data[-1] = vel
    ptr6 += 1
    vel_plot.setData(vel_data)
    vel_plot.setPos(ptr6, 0)
l1.nextRow()
l12 = l1.addLayout(rowspan=1, border=(83, 83, 83))
# Acceleration graph
acc_graph = l12.addPlot(title="Accelerations (m/s²)")
# adding legend
acc_graph.addLegend()
acc_graph.hideAxis('bottom')
accX_plot = acc_graph.plot(pen=(102, 252, 241), name="X")
accY_plot = acc_graph.plot(pen=(29, 185, 84), name="Y")
accZ_plot = acc_graph.plot(pen=(203, 45, 111), name="Z")
# Rolling windows (default 50 samples), updated by update_acc().
accX_data = np.linspace(0, 0)
accY_data = np.linspace(0, 0)
accZ_data = np.linspace(0, 0)
ptr2 = 0
def update_acc(value_chain):
    """Push the three newest acceleration samples (fields 8..10) and scroll."""
    global accX_plot, accY_plot, accZ_plot, accX_data, accY_data, accZ_data, ptr2
    buffers = (accX_data, accY_data, accZ_data)
    curves = (accX_plot, accY_plot, accZ_plot)
    for buffer in buffers:
        buffer[:-1] = buffer[1:]
    for buffer, column in zip(buffers, (8, 9, 10)):
        buffer[-1] = float(value_chain[column])
    ptr2 += 1
    for curve, buffer in zip(curves, buffers):
        curve.setData(buffer)
    for curve in curves:
        curve.setPos(ptr2, 0)
# Gyro graph
gyro_graph = l12.addPlot(title="Gyro")
gyro_graph.hideAxis('bottom')
# adding legend
gyro_graph.addLegend()
pitch_plot = gyro_graph.plot(pen=(102, 252, 241), name="Pitch")
roll_plot = gyro_graph.plot(pen=(29, 185, 84), name="Roll")
yaw_plot = gyro_graph.plot(pen=(203, 45, 111), name="Yaw")
# Rolling windows, updated by update_gyro().
pitch_data = np.linspace(0, 0)
roll_data = np.linspace(0, 0)
yaw_data = np.linspace(0, 0)
ptr3 = 0
def update_gyro(value_chain):
    """Push the newest pitch/roll/yaw samples (fields 5..7) and scroll."""
    global pitch_plot, roll_plot, yaw_plot, pitch_data, roll_data, yaw_data, ptr3
    buffers = (pitch_data, roll_data, yaw_data)
    curves = (pitch_plot, roll_plot, yaw_plot)
    for buffer in buffers:
        buffer[:-1] = buffer[1:]
    for buffer, column in zip(buffers, (5, 6, 7)):
        buffer[-1] = float(value_chain[column])
    ptr3 += 1
    for curve, buffer in zip(curves, buffers):
        curve.setData(buffer)
    for curve in curves:
        curve.setPos(ptr3, 0)
# Pressure Graph
pressure_graph = l12.addPlot(title="Barometric pressure")
pressure_plot = pressure_graph.plot(pen=(102, 252, 241))
# 30-sample rolling window, updated by update_pressure().
pressure_data = np.linspace(0, 0, 30)
ptr4 = 0
def update_pressure(value_chain):
    """Append the newest barometric-pressure sample (field 4) and scroll."""
    global pressure_plot, pressure_data, ptr4
    pressure_data = np.roll(pressure_data, -1)
    pressure_data[-1] = float(value_chain[4])
    ptr4 += 1
    pressure_plot.setData(pressure_data)
    pressure_plot.setPos(ptr4, 0)
# Temperature graph
graf_temp = l12.addPlot(title="Temperature (ºc)")
temp_plot = graf_temp.plot(pen=(29, 185, 84))
# 30-sample rolling window, updated by update_temp().
temp_data = np.linspace(0, 0, 30)
ptr5 = 0
def update_temp(value_chain):
    """Append the newest temperature sample (field 3) and scroll."""
    global temp_plot, temp_data, ptr5
    temp_data = np.roll(temp_data, -1)
    temp_data[-1] = float(value_chain[3])
    ptr5 += 1
    temp_plot.setData(temp_data)
    temp_plot.setPos(ptr5, 0)
# Time, battery and free fall graphs
l2 = Layout.addLayout(border=(83, 83, 83))
# Time graph (shows a large numeric readout, not a curve)
time_graph = l2.addPlot(title="Time (min)")
time_graph.hideAxis('bottom')
time_graph.hideAxis('left')
time_text = pg.TextItem("test", anchor=(0.5, 0.5), color="w")
time_text.setFont(font)
time_graph.addItem(time_text)
def update_time(value_chain):
    """Show elapsed time in minutes (field 0 is in milliseconds)."""
    global time_text
    time_text.setText('')
    minutes = round(int(value_chain[0]) / 60000, 2)
    time_text.setText(str(minutes))
l2.nextRow()
# Battery graph
# NOTE(review): "battery satus" is a typo in the displayed title; left
# untouched here because it is a runtime string.
battery_graph = l2.addPlot(title="battery satus")
battery_graph.hideAxis('bottom')
battery_graph.hideAxis('left')
battery_text = pg.TextItem("test", anchor=(0.5, 0.5), color="w")
battery_text.setFont(font)
battery_graph.addItem(battery_text)
def update_battery(value_chain):
    # Battery telemetry is not implemented yet; the stub keeps the
    # update-callback family uniform.
    pass
l2.nextRow()
# Free-fall indicator (large Yes/No readout, not a curve)
freeFall_graph = l2.addPlot(title="Free fall")
freeFall_graph.hideAxis('bottom')
freeFall_graph.hideAxis('left')
freeFall_text = pg.TextItem("test", anchor=(0.5, 0.5), color="w")
freeFall_text.setFont(font)
freeFall_graph.addItem(freeFall_text)
def update_freeFall(value_chain):
    """Show whether free fall is reported (field 2: '0' means no)."""
    global freeFall_text
    freeFall_text.setText('')
    label = 'No' if value_chain[2] == '0' else 'Yes'
    freeFall_text.setText(label)
def update():
    """Poll the serial link once and refresh every widget; called by the
    Qt timer every 500 ms. Short/partial telemetry frames raise
    IndexError and are skipped."""
    try:
        value_chain = []
        value_chain = ser.getData()
        update_altitude(value_chain)
        update_vel(value_chain)
        update_time(value_chain)
        update_acc(value_chain)
        update_gyro(value_chain)
        update_pressure(value_chain)
        update_temp(value_chain)
        update_freeFall(value_chain)
        # persist the raw frame to CSV
        data_base.guardar(value_chain)
    except IndexError:
        print('starting, please wait a moment')
    # unsure whether this is necessary
    # QtGui.QApplication.processEvents()
# desconozco si es necesario esto
# QtGui.QApplication.processEvents()
if(ser.isOpen()) or (ser.dummyMode()):
timer = pg.QtCore.QTimer()
timer.timeout.connect(update)
timer.start(500)
else:
print("something is wrong with the update call")
# Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
nilq/baby-python
|
python
|
import sys
import click
import os
import datetime
from unittest import TestCase, main
from frigate.video import process_frames, start_or_restart_ffmpeg, capture_frames, get_frame_shape
from frigate.util import DictFrameManager, SharedMemoryFrameManager, EventsPerSecond, draw_box_with_label
from frigate.motion import MotionDetector
from frigate.edgetpu import LocalObjectDetector
from frigate.objects import ObjectTracker
import multiprocessing as mp
import numpy as np
import cv2
from frigate.object_processing import COLOR_MAP, CameraState
class ProcessClip():
    """Harness that runs frigate's capture/detection pipeline over a single
    video clip and reports whether tracked objects were found."""
    def __init__(self, clip_path, frame_shape, config):
        # Clip under test and its expected frame shape; presumably
        # (height, width, channels) given the size product in load_frames.
        self.clip_path = clip_path
        self.frame_shape = frame_shape
        self.camera_name = 'camera'
        self.frame_manager = DictFrameManager()
        # self.frame_manager = SharedMemoryFrameManager()
        self.frame_queue = mp.Queue()
        self.detected_objects_queue = mp.Queue()
        self.camera_state = CameraState(self.camera_name, config, self.frame_manager)
    def load_frames(self):
        """Decode the clip with ffmpeg and push raw RGB frames onto the queue."""
        fps = EventsPerSecond()
        skipped_fps = EventsPerSecond()
        stop_event = mp.Event()
        # far-future timestamp so no frame is considered stale
        detection_frame = mp.Value('d', datetime.datetime.now().timestamp()+100000)
        current_frame = mp.Value('d', 0.0)
        ffmpeg_cmd = f"ffmpeg -hide_banner -loglevel panic -i {self.clip_path} -f rawvideo -pix_fmt rgb24 pipe:".split(" ")
        ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, self.frame_shape[0]*self.frame_shape[1]*self.frame_shape[2])
        capture_frames(ffmpeg_process, self.camera_name, self.frame_shape, self.frame_manager, self.frame_queue, 1, fps, skipped_fps, stop_event, detection_frame, current_frame)
        ffmpeg_process.wait()
        ffmpeg_process.communicate()
    def process_frames(self, objects_to_track=['person'], object_filters={}):
        """Run motion detection, object detection and tracking over the
        queued frames, draining the queue (exit_on_empty=True)."""
        # full-frame mask (255 = everywhere active)
        mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
        mask[:] = 255
        motion_detector = MotionDetector(self.frame_shape, mask)
        object_detector = LocalObjectDetector(labels='/labelmap.txt')
        object_tracker = ObjectTracker(10)
        process_fps = mp.Value('d', 0.0)
        detection_fps = mp.Value('d', 0.0)
        current_frame = mp.Value('d', 0.0)
        stop_event = mp.Event()
        process_frames(self.camera_name, self.frame_queue, self.frame_shape, self.frame_manager, motion_detector, object_detector, object_tracker, self.detected_objects_queue,
            process_fps, detection_fps, current_frame, objects_to_track, object_filters, mask, stop_event, exit_on_empty=True)
    def objects_found(self, debug_path=None):
        """Replay the detection queue through CameraState and report whether
        any non-false-positive object was seen plus the top score."""
        obj_detected = False
        top_computed_score = 0.0
        def handle_event(name, obj):
            # runs for both 'new' and 'update' events
            nonlocal obj_detected
            nonlocal top_computed_score
            if obj['computed_score'] > top_computed_score:
                top_computed_score = obj['computed_score']
            if not obj['false_positive']:
                obj_detected = True
        self.camera_state.on('new', handle_event)
        self.camera_state.on('update', handle_event)
        while(not self.detected_objects_queue.empty()):
            camera_name, frame_time, current_tracked_objects = self.detected_objects_queue.get()
            if not debug_path is None:
                self.save_debug_frame(debug_path, frame_time, current_tracked_objects.values())
            self.camera_state.update(frame_time, current_tracked_objects)
            for obj in self.camera_state.tracked_objects.values():
                print(f"{frame_time}: {obj['id']} - {obj['computed_score']} - {obj['score_history']}")
        self.frame_manager.delete(self.camera_state.previous_frame_id)
        return {
            'object_detected': obj_detected,
            'top_score': top_computed_score
        }
    def save_debug_frame(self, debug_path, frame_time, tracked_objects):
        """Render boxes/regions for the tracked objects onto the frame and
        write it as a JPEG under `debug_path`."""
        current_frame = self.frame_manager.get(f"{self.camera_name}{frame_time}", self.frame_shape)
        # draw the bounding boxes on the frame
        for obj in tracked_objects:
            thickness = 2
            color = (0,0,175)
            # stale detections (older frame) are drawn thin
            if obj['frame_time'] != frame_time:
                thickness = 1
                color = (255,0,0)
            else:
                color = (255,255,0)
            # draw the bounding boxes on the frame
            box = obj['box']
            draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
            # draw the regions on the frame
            region = obj['region']
            draw_box_with_label(current_frame, region[0], region[1], region[2], region[3], 'region', "", thickness=1, color=(0,255,0))
        cv2.imwrite(f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg", cv2.cvtColor(current_frame, cv2.COLOR_RGB2BGR))
@click.command()
@click.option("-p", "--path", required=True, help="Path to clip or directory to test.")
@click.option("-l", "--label", default='person', help="Label name to detect.")
@click.option("-t", "--threshold", default=0.85, help="Threshold value for objects.")
@click.option("--debug-path", default=None, help="Path to output frames for debugging.")
def process(path, label, threshold, debug_path):
    """Run the detection pipeline over one clip (or every file in a
    directory) and print per-clip detection results plus a summary."""
    clips = []
    if os.path.isdir(path):
        files = os.listdir(path)
        files.sort()
        clips = [os.path.join(path, file) for file in files]
    elif os.path.isfile(path):
        clips.append(path)
    # minimal config: track only `label`, thresholding the 'person' filter
    config = {
        'snapshots': {
            'show_timestamp': False,
            'draw_zones': False
        },
        'zones': {},
        'objects': {
            'track': [label],
            'filters': {
                'person': {
                    'threshold': threshold
                }
            }
        }
    }
    results = []
    for c in clips:
        frame_shape = get_frame_shape(c)
        config['frame_shape'] = frame_shape
        process_clip = ProcessClip(c, frame_shape, config)
        process_clip.load_frames()
        process_clip.process_frames(objects_to_track=config['objects']['track'])
        results.append((c, process_clip.objects_found(debug_path)))
    for result in results:
        print(f"{result[0]}: {result[1]}")
    positive_count = sum(1 for result in results if result[1]['object_detected'])
    print(f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s).")
if __name__ == '__main__':
    # CLI entry point (click parses the options).
    process()
|
nilq/baby-python
|
python
|
"""
This module performs all basic DFA operations.
It is an interface for pyfst.
"""
# /usr/bin/python
from operator import attrgetter
import fst
from alphabet import createalphabet
EPSILON = fst.EPSILON
def TropicalWeight(param):
    """
    Wrap `param` as an fst TropicalWeight.
    Args:
        param (str): The input
    Returns:
        fst.TropicalWeight: The arc weight
    """
    return fst.TropicalWeight(param)
class FstDFA(fst.StdAcceptor):
"""
Contains extra method to consume input and produce outputs.
The underline library is pyfst, the python bindings of openFST library.
"""
    def __init__(self, alphabet = createalphabet()):
        """
        Build the acceptor and register every alphabet symbol in the
        input symbol table, numbering them from 1.
        Args:
            alphabet (list): pyfst input symbol list
        Returns:
            None
        """
        # NOTE(review): the default `createalphabet()` is evaluated once at
        # definition time, so all default-constructed instances share the
        # same alphabet list — confirm this is intended.
        isyms = None
        self.alphabet = alphabet
        fst.StdAcceptor.__init__(self, isyms)
        num = 1
        for char in self.alphabet:
            self.isyms.__setitem__(char, num)
            num = num + 1
def fixminimized(self, alphabet):
"""
After pyfst minimization,
all unused arcs are removed,
and all sink states are removed.
However this may break compatibility.
Args:
alphabet (list): The input alphabet
Returns:
None
"""
endstate = len(list(self.states))
for state in self.states:
for char in alphabet:
found = 0
for arc in state.arcs:
if self.isyms.find(arc.ilabel) == char:
found = 1
break
if found == 0:
self.add_arc(state.stateid, endstate, char)
self[endstate].final = TropicalWeight(float('inf'))
for char in alphabet:
self.add_arc(endstate, endstate, char)
def _addsink(self, alphabet):
"""
Adds a sink state
Args:
alphabet (list): The input alphabet
Returns:
None
"""
endstate = len(list(self.states))
for state in self.states:
for char in alphabet:
found = 0
for arc in state.arcs:
if self.isyms.find(arc.ilabel) == char:
found = 1
break
if found == 0:
self.add_arc(state.stateid, endstate, char)
self[endstate].final = TropicalWeight(float('inf'))
for char in alphabet:
self.add_arc(endstate, endstate, char)
def _path_to_str(self, path):
"""
Convert a path to the string representing the path
Args:
path (tuple): A tuple of arcs
Returns:
inp (str): The path concatenated as as string
"""
inp = ''
for arc in path:
i = self.isyms.find(arc.ilabel)
# Ignore \epsilon transitions both on input
if i != fst.EPSILON:
inp += i
return inp
def init_from_acceptor(self, acceptor):
"""
Adds a sink state
Args:
alphabet (list): The input alphabet
Returns:
None
"""
states = sorted(
acceptor.states,
key=attrgetter('initial'),
reverse=True)
for state in states:
for arc in state.arcs:
itext = acceptor.isyms.find(arc.ilabel)
if itext in self.alphabet:
self.add_arc(state.stateid, arc.nextstate, itext)
if state.final:
self[state.stateid].final = True
if state.initial:
self[state.stateid].initial = True
def consume_input(self, inp):
"""
Return True/False if the machine accepts/reject the input.
Args:
inp (str): input string to be consumed
Returns:
bool: A true or false value depending on if the DFA
accepts the provided input
"""
cur_state = sorted(
self.states,
key=attrgetter('initial'),
reverse=True)[0]
while len(inp) > 0:
found = False
for arc in cur_state.arcs:
if self.isyms.find(arc.ilabel) == inp[0]:
cur_state = self[arc.nextstate]
inp = inp[1:]
found = True
break
if not found:
return False
return cur_state.final != TropicalWeight(float('inf'))
def empty(self):
"""""
Return True if the DFA accepts the empty language.
"""
return len(list(self.states)) == 0
def random_strings(self, string_length=1):
"""
Generate string_length random strings that belong to the automaton.
Args:
string_length (integer): The size of the random string
Returns:
str: The generated string
"""
str_list = []
for path in self.uniform_generate(string_length):
str_list.append(self._path_to_str(path))
return str_list
def complement(self, alphabet):
"""
Generate the complement of a DFA automaton
Args:
alphabet (list): The input alphabet
Returns:
None
"""
self._addsink(alphabet)
states = sorted(self.states, key=attrgetter('initial'), reverse=True)
for state in states:
if state.final:
state.final = False
else:
state.final = True
def save(self, txt_fst_filename):
"""
Save the machine in the openFST format in the file denoted by
txt_fst_filename.
Args:
txt_fst_filename (str): The name of the file
Returns:
None
"""
txt_fst = open(txt_fst_filename, 'w+')
states = sorted(self.states, key=attrgetter('initial'), reverse=True)
for state in states:
for arc in state.arcs:
itext = self.isyms.find(arc.ilabel)
otext = self.osyms.find(arc.ilabel)
txt_fst.write(
'{}\t{}\t{}\t{}\n'.format(
state.stateid,
arc.nextstate,
itext.encode('hex'),
otext.encode('hex')))
if state.final:
txt_fst.write('{}\n'.format(state.stateid))
txt_fst.close()
def load(self, txt_fst_filename):
"""
Save the transducer in the text file format of OpenFST.
The format is specified as follows:
arc format: src dest ilabel olabel [weight]
final state format: state [weight]
lines may occur in any order except initial state must be first line
Args:
txt_fst_filename (string): The name of the file
Returns:
None
"""
with open(txt_fst_filename, 'r') as txt_fst:
for line in txt_fst:
line = line.strip()
splitted_line = line.split()
if len(splitted_line) == 1:
self[int(splitted_line[0])].final = True
else:
self.add_arc(int(splitted_line[0]), int(
splitted_line[1]), splitted_line[2].decode('hex'))
|
nilq/baby-python
|
python
|
from singlecellmultiomics.universalBamTagger.digest import DigestFlagger
from singlecellmultiomics.tagtools import tagtools
class NlaIIIFlagger(DigestFlagger):
    """Flags mate pairs produced by an NlaIII (CATG) restriction digest.

    R1 is expected to start (forward strand) or end (reverse strand) with the
    CATG recognition sequence; both mates get site, strand, fragment-size and
    oversequencing annotations.
    """
    def __init__(self, **kwargs):
        DigestFlagger.__init__(self, **kwargs)
    def addSite(self, reads, strand, restrictionChrom, restrictionPos):
        """Record a restriction site hit and tag the reads of the pair."""
        # Sample and UMI tags are required to identify the molecule.
        if not reads[0].has_tag(
                self.sampleTag) or not reads[0].has_tag(
                self.umiTag):
            return
        sample = reads[0].get_tag(self.sampleTag)
        umi = reads[0].get_tag(self.umiTag)
        allele = None if not reads[0].has_tag(
            self.alleleTag) else reads[0].get_tag(
            self.alleleTag)
        siteInfo = tuple([x for x in [strand, allele, umi] if x is not None])
        # Duplicate observations of the same site+UMI get increasing ids.
        moleculeId = self.increaseAndRecordOversequencing(
            sample, restrictionChrom, restrictionPos, siteInfo=siteInfo)
        for read in reads:
            if read is None:
                continue
            self.setSiteOversequencing(read, moleculeId)
            self.setSiteCoordinate(read, restrictionPos)
            # BUG FIX: this line ended in ", {}", building a throwaway tuple
            # (setSource(...), {}) instead of a plain call.
            self.setSource(read, 'NLA')
            if allele is not None:
                self.setAllele(read, allele)
            self.setStrand(read, '+' if strand ==
                           1 else ('-' if strand == 0 else '?'))
    def digest(self, reads):
        """Locate the CATG site for a mate pair; returns (chrom, pos) or None."""
        if len(reads) != 2:
            if len(reads) == 1:
                self.setRejectionReason(reads[0], 'unmapped mate')
            else:
                self.setRejectionReason(reads[0], 'nopair')
            return None  # Only made for mate pair
        R1, R2 = reads
        self.addAlleleInfo([read for read in reads if read is not None])
        """ Valid configs:
        CATG######## R1 ########## ^ ########## R2 ##########
        ############ R2 ########## ^ ########### R1 #####CATG reverse case
        !BWA inverts the query sequence if it maps to the negative strand!
        or R2.is_unmapped:
            if R1.is_unmapped and R2.is_unmapped:
                self.setRejectionReason(R1, 'unmapped R1;R2')
            elif R1.is_unmapped:
                self.setRejectionReason(R1, 'unmapped R1')
                self.setRejectionReason(R2, 'unmapped R1')
            else:
                self.setRejectionReason(R1, 'unmapped R2')
                self.setRejectionReason(R2, 'unmapped R2')
            return(None)
        """
        # Obtain RT hexamer:
        if R2 is not None:
            hstart, hseq = tagtools.getRandomPrimerHash(
                R2, onStart=True, primerLength=6)
            self.setRandomPrimer(R1, R2, hstart, hseq)
        if R1 is None or R1.is_unmapped:
            self.setRejectionReason(R1, 'unmapped R1')
            self.setRejectionReason(R2, 'unmapped R1')
            return None
        # Forward strand: recognition sequence at the start of R1.
        if R1.seq[:4] == 'CATG' and not R1.is_reverse:
            rpos = (R1.reference_name, R1.reference_start)
            self.addSite([R1, R2], strand=0,
                         restrictionChrom=rpos[0], restrictionPos=rpos[1])
            self.setRecognizedSequence(R1, 'CATG')
            self.setRecognizedSequence(R2, 'CATG')
            return(rpos)
        # Reverse strand: recognition sequence at the end of R1.
        elif R1.seq[-4:] == 'CATG' and R1.is_reverse:
            rpos = (R1.reference_name, R1.reference_end - 4)
            self.addSite([R1, R2], strand=1,
                         restrictionChrom=rpos[0], restrictionPos=rpos[1])
            self.setRecognizedSequence(R1, 'CATG')
            self.setRecognizedSequence(R2, 'CATG')
            return(rpos)
        # Sometimes the cycle is off
        elif R1.seq[:3] == 'ATG' and not R1.is_reverse:
            rpos = (R1.reference_name, R1.reference_start - 1)
            self.addSite([R1, R2], strand=0,
                         restrictionChrom=rpos[0], restrictionPos=rpos[1])
            self.setRecognizedSequence(R1, 'ATG')
            self.setRecognizedSequence(R2, 'ATG')
            return(rpos)
        elif R1.seq[-3:] == 'CAT' and R1.is_reverse:  # First base was trimmed or lost
            rpos = (R1.reference_name, R1.reference_end - 3)
            self.addSite([R1, R2], strand=1,
                         restrictionChrom=rpos[0], restrictionPos=rpos[1])
            self.setRecognizedSequence(R1, 'CAT')
            self.setRecognizedSequence(R2, 'CAT')
            return(rpos)
        else:
            if R1.seq[:4] == 'CATG' and R1.is_reverse:
                self.setRejectionReason(R1, 'found CATG R1 REV exp FWD')
                self.setRejectionReason(R2, 'found CATG R1 REV exp FWD')
            elif R1.seq[-4:] == 'CATG' and not R1.is_reverse:
                self.setRejectionReason(R1, 'found CATG R1 FWD exp REV')
                self.setRejectionReason(R2, 'found CATG R1 FWD exp REV')
            else:
                self.setRejectionReason(R1, 'no CATG')
                self.setRejectionReason(R2, 'no CATG')
            return None
        try:
            start, end = tagtools.getPairGenomicLocations(
                R1, R2, R1PrimerLength=4, R2PrimerLength=6)
            self.setFragmentSize(R1, end - start)
            self.setFragmentSize(R2, end - start)
            self.setFragmentTrust(R1, start, end)
            self.setFragmentTrust(R2, start, end)
        except Exception as e:
            self.setFragmentSize(R1, 'unknown')
            self.setFragmentSize(R2, 'unknown')
        """
        if R1.seq[:4]=='CATG' and R1.reference_start<=R2.reference_start: # Site on the start of R1, R2 should map behind
            self.addSite( [R1,R2], strand=0, restrictionChrom=R1.reference_name, restrictionPos=R1.reference_start )
            return(( R1.reference_name, R1.reference_start))
        if R1.seq[-4:]=='CATG' and R1.reference_start>=R2.reference_start: # Site on the end of R1, R2 should map before
            self.addSite( [R1,R2], strand=1, restrictionChrom=R1.reference_name, restrictionPos=R1.reference_end-4 )
            return( (R1.reference_name, R1.reference_end-4))
        """
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2
import sys
import time
import logging
import argparse
def main():
    """
    Tumor Map Calc Agent
    """
    parser = argparse.ArgumentParser(description=main.__doc__)
    parser.add_argument("-i", "--interval", type=int, default=0,
                        help="Minutes between calc, default calc once and exits")
    args = parser.parse_args()
    # Run forever when an interval is given, otherwise run a single pass.
    while True:
        _run_calc_once()
        if not args.interval:
            break
        logging.info("Sleeping for {} minutes...".format(args.interval))
        time.sleep(args.interval * 60)


def _run_calc_once():
    # One calc pass; failures are logged rather than raised.
    start = time.time()
    logging.info("Starting calc at {}".format(time.asctime(time.localtime(start))))
    try:
        logging.info("Do something here")
        end = time.time()
        logging.info("Finished calc at {} taking {} seconds".format(
            time.asctime(time.localtime(end)), end - start))
    except Exception as e:
        logging.error("Problems calc: {}".format(e))
if __name__ == '__main__':
    # Log everything (DEBUG and up) to stdout when run as a script.
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""SOG run processor.
Do various operations related to running the the SOG bio-physical
model of deep estuaries. Most notably, run the model.
This module provides services to the SOG command processor.
:Author: Doug Latornell <djl@douglatornell.ca>
:License: Apache License, Version 2.0
Copyright 2010-2014 Doug Latornell and The University of British Columbia
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import os
from tempfile import NamedTemporaryFile
from textwrap import TextWrapper
from time import sleep
from .infile_processor import create_infile
__all__ = ['dry_run', 'prepare', 'watch_outfile']
def prepare(args):
    """Return the command line string that will execute the requested SOG run.
    """
    # Default outfile: the infile's basename plus '.out', in the working dir.
    if not args.outfile:
        args.outfile = os.path.abspath(os.path.basename(args.infile) + '.out')
    if not os.path.exists(args.infile):
        raise IOError('infile not found: {0.infile}'.format(args))
    # Pick the infile actually fed to SOG.
    if args.legacy_infile:
        run_infile = args.infile
    elif args.dry_run:
        run_infile = NamedTemporaryFile(suffix='.infile').name
    else:
        run_infile = create_infile(args.infile, args.editfile)
    if not os.path.exists(args.SOG_exec):
        raise IOError('SOG executable not found: {0.SOG_exec}'.format(args))
    return (
        'nice -n {0.nice} {0.SOG_exec} < {infile} > {0.outfile} 2>&1'
        .format(args, infile=run_infile))
def dry_run(cmd, args):
    """Dry-run handler for `SOG run` command.
    """
    wrapper = TextWrapper()
    print(wrapper.fill('Command that would have been used to run SOG:'))
    print('  {0}'.format(cmd))
    if args.watch:
        msg = (
            'Contents of {0} would have been shown on screen while '
            'SOG run was in progress.'.format(args.outfile))
        print(wrapper.fill(msg))
def watch_outfile(proc, outfile_name):
    """Yield lines from the SOG outfile while the run is in progress, then
    yield whatever is flushed after the run finishes.
    """
    # Give the SOG process a moment to create the outfile.
    sleep(0.1)
    with open(outfile_name) as stream:
        # Tail the file while the process is alive.
        while proc.poll() is None:
            text = stream.readline()
            if text:
                yield text
            else:
                sleep(0.1)
        # Process finished: drain the remaining flushed lines.
        for text in stream:
            yield text
|
nilq/baby-python
|
python
|
import datetime
from dataclasses import asdict
from dataclasses import dataclass
from dataclasses import field
from enum import Enum
from typing import List
from typing import Union
try:
from typing import Literal
except ImportError:
from typing_extensions import Literal
from pglet import BarChart
from pglet import Checkbox
from pglet import Dialog
from pglet import Form
from pglet import SpinButton
from pglet import Stack
from pglet import Text
from pglet.barchart import Point
from pydantic import BaseModel
from pydantic import EmailStr
from pydantic import Field
try:
from replit import db
except ImportError:
db = dict()
class Content:
    """Demo pages for the pglet Form control.

    Each public method returns the control tree for one page; the method
    docstrings are rendered as page text, so they are runtime content and
    must not be edited casually.
    """
    def introduction(self):
        """
        # Easy web development for Python developers
        Web technologies are available everywhere, and they seem like a good way to create a graphical user interface
        for your Python application, either on the desktop or on the web. Unfortunately, most Python coders like
        Python, and would like to avoid using a lot of other languages (HTML, Javascript and CSS) just to get a UI
        created.
        Pglet ("pagelet") is a web server that is written in Go and uses Fluent UI React components. Actual applications
        ("clients") provide the content and react to events using a proprietary protocol.
        None of the above is visible to a Python developer, since the server comes nicely bundled in the client install.
        All you need is:
        `pip install pglet`
        and you are off to creating a web-enabled UI in pure Python (3.7+).
        As a taste of what pglet-python code looks like, this is the code for the box on the right:
        [code]
        ```
        pglet.page().add(main_view)
        ```
        As another example, this site has also been created with pglet-python, with no HTML and no CSS in the code.
        For more details, supported controls and a tutorial, see the [pglet Python docs](https://pglet.io/docs/).
        # Easy forms
        One of the most-repeated type of UI is some kind of form for entering and updating information. While creating
        forms is easy in pglet, it is nevertheless a task that provides little programming joy. The next pages
        show how to create forms using a Form control that eats annotated Python classes, for example, Python's
        [dataclasses](https://docs.python.org/3/library/dataclasses.html).
        # Easy forms with validation
        Typically you also need to somehow validate user input before you can use it for anything: you need to check
        that necessary values have been provided, check that numbers are numbers, check dates etc.
        You can avoid this repetitive code by giving the Form control a [pydantic](https://pydantic-docs.helpmanual.io/)
        object. Validation defined on the object is performed before the data is returned to you. In some cases,
        you can use the exact same data definition with your APIs (e.g. FastAPI) or when writing to a document or SQL
        data store.
        """
        from pglet import Image, Stack, Text
        main_view = Stack(
            horizontal_align="center",
            padding=20, gap=20,
            controls=[
                Image(width="50%", src="https://www.python.org/static/img/python-logo@2x.png"),
                Image(width="30%", src="https://pglet.io/img/logo_dark.svg"),
                Text(value="pydantic", bold=True, size="xxLarge"),
            ],
        )
        return main_view
    def data_first_forms(self):
        """
        Using the Form control, form definition focuses on the data you need out of it, so we define a class with
        necessary information as attributes, information types as annotations, and default values as assignements.
        Python dataclasses are convenient for this, as we do not need to spend time creating the `__init__` and other
        boilerplate.
        To create the form, all we need to do is give the class definition to the Form control:
        [code]
        You can see the resulting form on the right. Change values, and click "OK" to see the data you would get.
        Form control understands the following data types explicitly, others will be by default be represented with a
        basic text box on the form:
        - str (see later for how you can decide between single line and multiline controls for text)
        - int
        - float
        - bool
        - datetime
        - date (current DatePicker timezone restrictions prevent using it here)
        - time
        - Decimal (current SpinButton restrictions prevent using it here)
        """
        @dataclass
        class DataclassDataModel:
            name: str = "Dataclass Person"
            birthdate: datetime.datetime = "2000-01-01"
            address: str = "Some Street 1, Some Town, Some Country"
            age: int = 33
            happy_today: bool = True
            email: str = "some@email.com"
        def show_data_on_submit(event):
            event.control.page.add(Dialog(
                open=True,
                title="Submitted data",
                blocking=True,
                controls=[
                    Text(value=str(event.control.value.__dict__))
                ]
            ))
        form = Form(
            value=DataclassDataModel,
            width=500,
            on_submit=show_data_on_submit
        )
        return form
    data_first_forms.display_name = "Data-first forms"
    def selecting_values(self):
        """
        Python enums are supported for selecting from a specific set of values.
        [code]
        """
        class ContactOption(str, Enum):
            EMAIL = 'email'
            PHONE = 'phone'
        @dataclass
        class DataclassDataModel:
            name: str = "Dataclass Person"
            ok_to_contact: bool = True
            contact_option: ContactOption = ContactOption.EMAIL
        return Form(value=DataclassDataModel, width=500, on_submit=show_submitted_data)
    def more_values(self):
        """
        If there are more than 3 values, we switch to a dropdown. The threshold is configurable with the
        `threshold_for_dropdown` Form attribute.
        """
        class ContactOption(str, Enum):
            EMAIL = 'email'
            PHONE = 'phone'
            MESSAGE = 'message'
            DOVE = 'dove'
        @dataclass
        class DataclassDataModel:
            name: str = "Dataclass Person"
            ok_to_contact: bool = True
            contact_option: ContactOption = ContactOption.EMAIL
        return Form(value=DataclassDataModel, width=500, on_submit=show_submitted_data)
    def selecting_multiple_values(self):
        """
        Annotating a field with a **list** of enums allows for multiple selection.
        [code]
        """
        class ContactOption(str, Enum):
            EMAIL = "email"
            PHONE = "phone"
            PIGEON = "pigeon"
            SMOKE_SIGNALS = "smoke signals"
        @dataclass
        class DataclassDataModel:
            contact_option: List[ContactOption] = field(
                default_factory=lambda: [ContactOption.EMAIL]
            )
        return Form(value=DataclassDataModel, width=500, on_submit=show_submitted_data)
    def lists_of_fields(self):
        """
        Field with a list annotation of other type than an enum is turned into a list control.
        """
        @dataclass
        class DataclassDataModel:
            alphabet: List[str] = field(
                default_factory=lambda: ["a", "b", "c"]
            )
        return Form(value=DataclassDataModel, width=500, on_submit=show_submitted_data)
    def nested_class_definitions(self):
        """
        Often we need to reuse parts of data structures. Form control supports nested class definitions, like `Movie`
        in the example below.
        [code]
        """
        @dataclass
        class Movie:
            title: str
            year: int
        @dataclass
        class DataclassDataModel:
            name: str = "Dataclass Person"
            email: str = "some@email.com"
            favorite_movie: Movie = Movie(
                title="My Little Pony: The Movie",
                year=2017,
            )
        return Form(value=DataclassDataModel, width=500, on_submit=show_submitted_data)
    def several_nested_objects(self):
        """
        Several nested objects are supported with a List annotation. Lists rely on a sensible `str()` implementation to
        display nicely, open up in a separate panel for editing, and support adding and deleting items.
        [code]
        Note that to support adding more movies, we need to provide default values for Movie attributes.
        """
        @dataclass
        class Movie:
            title: str = ""
            year: int = 2000
            def __str__(self):
                return f"{self.title} ({self.year})"
        movies = [
            Movie(title="The Name of the Rose", year=1986),
            Movie(title="My Little Pony: The Movie", year=2017),
        ]
        @dataclass
        class DataclassDataModel:
            name: str = "Dataclass Person"
            email: str = "some@email.com"
            favorite_movies: List[Movie] = field(default_factory=lambda: movies)
        return Form(value=DataclassDataModel, width=500, on_submit=show_submitted_data)
    def styling_and_dimensions(self):
        """
        Form is a Stack control, and inherits all the
        [attributes of Stacks](https://pglet.io/docs/controls/stack#properties).
        You can toggle the switch on the left to experiment with the light and dark themes.
        Example below shows using:
        - `title` to add a form title at the top,
        - `control_style` to define an alternative general style, with underlined text boxes,
        - `toggle_for_bool` to use a toggle instead of a checkbox for boolean values, and
        - a standard Stack attribute `gap` to add extra space between the lines.
        [code]
        """
        @dataclass
        class DataclassDataModel:
            name: str = "Dataclass Person"
            birthdate: datetime.date = "2000-01-01"
            address: str = "Some Street 1, Some Town, Some Country"
            age: int = 33
            happy_today: bool = True
            email: str = "some@email.com"
        form = Form(
            value=DataclassDataModel,
            title="Your information",
            control_style="line",
            toggle_for_bool=True,
            gap=24,
            width=500,
            on_submit=show_submitted_data,
        )
        return form
    def customizing_controls(self):
        """
        There are several ways to customize controls in a `Form`:
        1. Additional parameters for a specific field, as part of the data definition
        2. Additional parameters for a specific field, in an `__init__` parameter
        3. Map a data type to a specific control, for a single form
        4. Map a data type to a specific control, globally
        Example below covers the first 3:
        1. For dataclasses, the contents of the pglet metadata dictionary is passed as parameters for the control.
           Here we turn a single-line Textbox into a multiline one.
        2. If you do not want to mix your data model with UI specifics, you can pass a parameter to the Form,
           containing extra control initialization kwargs by field name.
        3. If you want to set all fields of a type to be mapped to a specific control, provide additional type to
           control mappings to Form. Here we want all `Amount`s have two decimals in the UI.
        [code]
        For option #4, you can set a type/control mapping globally by updating the mapping in the `Form` class directly,
        like this:
        ```
        Form.default_data_to_control_mapping["Amount"] = partial(
            SpinButton, step=.01,
        )
        ```
        ... or, of course, by subclassing the Form and setting values in the subclass `__init__`.
        """
        from dataclasses import field
        from functools import partial
        from typing import NewType
        Amount = NewType('Amount', float)
        @dataclass
        class DataclassDataModel:
            item: str = field(
                default="",
                metadata={"pglet": {
                    "multiline": True  # <<< 1
                }}
            )
            description: str = ""
            amount: Amount = Amount(0)
        form = Form(
            value=DataclassDataModel,
            control_kwargs={
                'description': {'multiline': True}  # <<< 2
            },
            control_mapping={
                "Amount": partial(  # <<< 3
                    SpinButton, step=.01,
                )
            },
            width=500,
            on_submit=show_submitted_data,
        )
        return form
    def introducing_pydantic(self):
        """
        Pydantic is not a dependency of pglet nor of the Form control, so you need to install it separately with:
        ```
        pip install pydantic
        ```
        If you need validation of email addresses, you should also install:
        ```
        pip install pydantic[email]
        ```
        Once you have pydantic, your data can inherit from `pydantic.BaseModel` instead of being decorated as a `dataclass`.
        [code]
        Next pages will cover the benefits of having pydantic in place.
        """
        from pydantic import BaseModel
        class PydanticDataModel(BaseModel):
            name: str = "Pydantic Person"
            email: str = "some@email.com"
        return Form(value=PydanticDataModel(), width=500, on_submit=show_submitted_data)
    def change_the_labels(self):
        """
        Normally, the labels on each line derived from the attribute names in your data. If you need something
        different, for example punctuation, use `Field.title`.
        [code]
        See pydantic docs for the full documentation on the
        [Field function](https://pydantic-docs.helpmanual.io/usage/schema/#field-customization).
        """
        from pydantic import Field
        class PydanticDataModel(BaseModel):
            name: str = "Pydantic Person"
            happy: bool = Field(True, title="Are you happy today?")
        return Form(value=PydanticDataModel(), width=500, on_submit=show_submitted_data)
    def placeholders(self):
        """
        If an input does not have text, a placeholder is shown, defined by `Field.description`.
        """
        class PydanticDataModel(BaseModel):
            name: str = "Pydantic Person"
            email: str = Field("", description="Enter a valid email address")
        return Form(value=PydanticDataModel(), width=500, on_submit=show_submitted_data)
    def customizing_pydantic_fields(self):
        """
        Pydantic equivalent of the `dataclasses.field.metadata["pglet"]` is `Field.pglet`.
        Here we use this option for the `notes` attribute to get a multiline `Textbox`.
        [code]
        """
        class PydanticDataModel(BaseModel):
            name: str = "Pydantic Person"
            notes: str = Field("", pglet={'multiline': True})
        return Form(value=PydanticDataModel(), width=500, on_submit=show_submitted_data)
    def validation(self):
        """
        Main benefit of adding pydantic is that get your data validated with the minimum of boilerplate code.
        Experiment with this view to see how the validation works.
        [code]
        Errors are pydantic validation errors, in English.
        Check pydantic documentation on the available
        [validating field types](https://pydantic-docs.helpmanual.io/usage/types/) and their usage.
        Form control currently maps the field types below to specific controls - everything else is a Textbox.
        - ConstrainedDecimal
        - ConstrainedFloat
        - ConstrainedInt
        - EmailStr
        - FutureDate
        - NegativeFloat
        - NegativeInt
        - PastDate
        - PositiveFloat
        - PositiveInt
        - StrictBool
        - StrictFloat
        - StrictInt
        """
        from pydantic import conint
        from pydantic import EmailStr
        class PydanticDataModel(BaseModel):
            name: str = "Pydantic Person"
            birthdate: datetime.date = "2000-01-01"
            age: conint(ge=0, lt=150) = 0
            email: EmailStr = Field("", description="Enter a valid email address")
        return Form(value=PydanticDataModel(), width=500, on_submit=show_submitted_data)
    def cross_field_validation(self):
        """
        Pydantic supports defining more complex relationships between fields.
        In this example, email must be filled (and a valid email) if newsletters have been requested.
        If validator returns an error, the capitalised str value of the error is shown to the user as the error
        message under the field. Pydantic standard validation errors are in English.
        [code]
        Again, pydantic docs contain a lot more information about
        [validators](https://pydantic-docs.helpmanual.io/usage/validators/).
        """
        from pydantic import validator
        class PydanticDataModel(BaseModel):
            name: str = "Pydantic Person"
            newsletter_ok: bool = Field(
                False, title="Send me the monthly newsletter"
            )
            email: Union[EmailStr, Literal[""]] = Field("", description="Valid email needed for newsletter")
            @validator('email', pre=True, allow_reuse=True)
            def email_filled_if_needed(cls, value, values):
                if values.get("newsletter_ok") and not value:
                    raise ValueError("Need email for newsletter")
                return value
        return Form(value=PydanticDataModel(), width=500, on_submit=show_submitted_data)
    cross_field_validation.display_name = "Cross-field validation"
    def status(self):
        """
        The status of the Form control is: **early Proof of Concept for discussion and feedback**
        *Some* todo items remain.
        [no code]
        """
        todo = '''
            Ordering of lists
            Slider option for number ranges
            Proper Documentation
            Responsive layout
            Align/integrate with Grid control
            Dates with DatePicker
            Manage decimal.Decimal values
        '''
        done = '''
            Lists for basic types
            Toggle as an alternative for Checkbox
            Support custom control parameters (e.g. multiline)
            Support customising controls for types
            Multiple selection from enums
            Some tests
            Documentation in the shape of this demo
        '''
        return Stack(
            padding=20,
            width=400,
            controls=[
                Checkbox(label=label.strip(), value=False, disabled=True)
                for label in todo.strip().splitlines()
            ] + [
                Checkbox(label=label.strip(), value=True, disabled=True)
                for label in done.strip().splitlines()
            ]
        )
    def grande_finale(self):
        """
        As one last thing, let's combine the Form control, replit database utility and the pglet graph control into a
        quick poll.
        Please select the options you are interested in and click "OK".
        [code]
        The `db` object here is a replit database that is essentially a dict with per-program persistent contents.
        """
        @dataclass
        class PollData:
            pglet: bool = False
            python: bool = False
            pglet_with_python: bool = False
            pglet_with_some_other_language: bool = False
            pglet_with_forms: bool = False
            pydantic: bool = False
            pydantic_form_validation: bool = False
        chart = BarChart(
            data_mode='fraction',
            padding=20,
            width=210,
            points=[]
        )
        def update_chart():
            # Rebuild the chart points from the db tallies, largest first.
            chart.points.clear()
            values = list(reversed(sorted(
                [
                    (field, db.get(field, 0))
                    for field in PollData.__annotations__
                    if field != "answers"
                ],
                key=lambda value: value[1]
            )))
            max_value = values[0][1]
            for field, value in values:
                display_name = field.replace("_", " ").capitalize()
                chart.points.append(
                    Point(legend=display_name, x=value, y=max_value),
                )
        update_chart()
        poll = Form(
            value=PollData,
            title="I am interested in...",
            width=300,
            label_width="100%",
            control_width="fit-content",
        )
        def update_db_values(event):
            # Tally each selected option, record the answer, refresh the chart.
            form_value = event.control.value
            for key, selected in asdict(form_value).items():
                if selected:
                    # BUG FIX: this was db.get("key", 0) -- the literal string
                    # "key" made every option share a single counter.
                    db[key] = db.get(key, 0) + 1
            db["answers"] = db.get("answers", 0) + 1
            update_chart()
            poll.submit_button.disabled = True
            event.control.page.update()
        poll.on_submit = update_db_values
        stack = Stack(controls=[poll, chart])
        return stack
    grande_finale.display_name = "Grande Finale"
def show_submitted_data(event):
    """Open a blocking dialog that echoes the submitted form value."""
    submitted = event.control.value
    dialog = Dialog(
        open=True,
        title="Submitted data",
        blocking=True,
        controls=[Text(value=str(submitted.__dict__))],
    )
    event.control.page.add(dialog)
# Collect every callable defined on Content (the page renderers), in definition order.
content = [value for attribute, value in Content.__dict__.items() if callable(value)]
|
nilq/baby-python
|
python
|
from .home import bp as home
from .dashboard import bp as dashboard
from .api import bp as api
# The `.home` syntax directs the program to find the module named `home`, then imports its `bp` routes.
|
nilq/baby-python
|
python
|
"""A class to hold data parsed from PDFs."""
import dataclasses
@dataclasses.dataclass
class Datum:
    """A class to hold data parsed from PDFs."""
    # Raw text extracted from the PDF.
    text: str = ""
    # Trait dictionaries parsed out of the text.
    traits: list[dict] = dataclasses.field(default_factory=list)
    # True when this datum should be excluded from further processing.
    reject: bool = False
|
nilq/baby-python
|
python
|
import sounddevice as sd
from scipy.io import wavfile
from scipy import signal
import sys
import matplotlib.pyplot as plt
import os
import subprocess as sp
from collections import defaultdict, Counter
import pyprind
import numpy as np
import random
from .utils import readwav
# Markov-chain audio generation settings.
output_seconds = 5  # length of the generated audio, in seconds
n_gram = 2  # order of the n-gram transition model
fname = 'Night_And_Day.flac'
# readwav is a project helper; presumably returns (sample rate, PCM data,
# output file name) -- confirm against .utils.readwav.
fs, data, outname = readwav(fname)
def show_pcm(d):
    """Plot raw PCM amplitude against sample index (x label says seconds,
    but the x values are sample indices)."""
    x = np.arange(d.shape[0])
    plt.plot(x, d)
    plt.ylabel('PCM')
    plt.xlabel('Time [sec]')
    plt.show()
def show_spectrogram(d):
    """Display a spectrogram of *d* using the module-level sample rate ``fs``."""
    # from matplotlib.pyplot import specgram
    # specgram(d, NFFT=256, Fs=fs)
    f, t, Sxx = signal.spectrogram(d, fs, nperseg=256)
    plt.pcolormesh(t, f, Sxx)
    plt.ylabel('Frequency [Hz]')
    plt.xlabel('Time [sec]')
    plt.show()
def show_fft(d):
    """Plot the FFT of *d* against sample index.

    NOTE(review): np.fft.fft returns complex values; matplotlib discards the
    imaginary part with a warning -- confirm whether magnitude was intended.
    """
    fft = np.fft.fft(d)
    x = np.arange(d.shape[0])
    plt.plot(x, fft)
    plt.ylabel('FFT')
    plt.xlabel('Time [sec]')
    plt.show()
# Train on a 10-second excerpt (seconds 70-80) of the input track.
sample = data[fs * 70:fs * 80]
wavfile.write('sample.wav', fs, sample)
# sd.play(sample, fs, blocking=True)
# show_pcm(data)
# show_fft(data)
show_spectrogram(data)
if input("cont: ").lower() == 'n':
    exit()
output_frames = fs * output_seconds
print(fs)
# data = np.fft.fft(data)
def find_ngrams(input_list, n):
    """Yield successive n-length tuples (a sliding window) over input_list."""
    shifted = [input_list[offset:] for offset in range(n)]
    return zip(*shifted)
# Build a first-order Markov transition table: for each sample value,
# count the deltas to the next sample observed in the excerpt.
transitionsL = defaultdict(Counter)
# transitionsR = defaultdict(Counter)
bs = 1
# sd.play(sample, fs, blocking=True)
print("Training")
prog = pyprind.ProgBar(sample.shape[0], stream=1)
for gram in find_ngrams(sample, n_gram):
    # learn the difference between each step given a history
    transitionsL[gram[0]][gram[1] - gram[0]] += 1
    prog.update()
# transitionsR[gram[0][1]][gram[1][1]] += 1
generated = np.zeros_like(data[:output_frames])
# choose a random starting frame from the transition table
# stateL = np.random.choice(list(transitionsL.keys()))
all_states = list(transitionsL.keys())
stateL = random.choice(all_states)
prog = pyprind.ProgBar(output_frames + 1, width=64, stream=1)
print("\nGenerating")
restarts = 0
for i in range(output_frames):
    node = transitionsL[stateL]
    if len(node) == 0:
        # Dead end: no successor ever observed for this value; restart
        # the chain from a random known state.
        restarts += 1
        stateL = random.choice(all_states)
        node = transitionsL[stateL]
    counts = np.array(list(node.values()), dtype=np.float32)
    keys = list(node.keys())
    key_idxs = np.arange(len(keys))
    ps = counts / counts.sum()  # empirical transition probabilities
    col_idx = np.random.choice(key_idxs, p=ps)
    generated[i] = stateL + keys[col_idx]
    generated[i] *= bs
    stateL = stateL + keys[col_idx]
    # stateR
    prog.update()
print("Restarts={}".format(restarts / output_frames))
# generated = np.fft.ifft(generated).real
print("\nPlaying")
# Prepend the training excerpt so the output contains original + generated.
all_frames = np.concatenate((sample, generated))
sd.play(generated, fs, blocking=True)
print("Finished playing")
wavfile.write(outname, fs, all_frames)
# BUG FIX: `ffmpeg` was an undefined name (NameError at runtime); invoke the
# binary by its command-line name instead.
print(sp.check_output(['ffmpeg', '-i', outname, '-vn',
                       '-ar', '44100', '-ac', '2', '-ab', '192k', '-y', '-f', 'mp3', outname[:-4] + '.mp3']))
|
nilq/baby-python
|
python
|
#!/usr/bin/python3.7
import subprocess
import sys

# Set the ALSA "Speaker" mixer to the percentage given as the first argument.
volume = sys.argv[1]
subprocess.call(["amixer", "sset", "Speaker", volume + "%"])
|
nilq/baby-python
|
python
|
from PyQt5.QtWidgets import QWidget, QGridLayout, QComboBox, \
QLabel, QVBoxLayout, QSizePolicy, \
QCheckBox, QLineEdit, QPushButton, QHBoxLayout, \
QSpinBox, QTabWidget, QMessageBox
from PyQt5.QtCore import Qt, pyqtSlot
from PyQt5.QtGui import QFont
from utils import HSep, VSep
from constants import COMP_MODE__QUALIFYING_LAPS, \
COMP_MODE__QUALIFYING_TIME, \
COMP_MODE__QUALIFYING_LAPS_SEQ, \
COMP_MODE__QUALIFYING_TIME_SEQ, \
COMP_MODE__RACE_LAPS, \
COMP_MODE__RACE_TIME, \
DUMMY_IDS
class CompTime(QWidget):
    """Duration selector in minutes (spin box), used by time-based modes."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.vbox = QVBoxLayout(self)
        self.dtext = QLabel(self.tr('Duration in minutes:'))
        self.vbox.addWidget(self.dtext)
        # Accessed externally via .duration.value().
        self.duration = QSpinBox()
        self.duration.setMinimum(1)
        self.duration.setSuffix(self.tr(' Minutes'))
        self.duration.setValue(10)
        self.vbox.addWidget(self.duration)
        self.setLayout(self.vbox)
class CompLaps(QWidget):
    """Duration selector in laps (spin box), used by lap-based modes."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.vbox = QVBoxLayout(self)
        self.dtext = QLabel(self.tr('Duration in laps'))
        self.vbox.addWidget(self.dtext)
        # Accessed externally via .duration.value().
        self.duration = QSpinBox()
        self.duration.setMinimum(1)
        self.duration.setSuffix(self.tr(' Laps'))
        self.duration.setValue(20)
        self.vbox.addWidget(self.duration)
        self.setLayout(self.vbox)
class RaceParams(QWidget):
    """Race setup: a Laps/Time tab pair selecting mode and duration."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.vbox = QVBoxLayout(self)
        self.modetab = QTabWidget()
        self.vbox.addWidget(self.modetab)
        self.complaps = CompLaps()
        self.modetab.addTab(self.complaps, self.tr('Laps'))
        self.comptime = CompTime()
        self.modetab.addTab(self.comptime, self.tr('Time'))
        self.setLayout(self.vbox)
    def getCompMode(self):
        # Competition-mode constant matching the currently selected tab.
        if self.modetab.currentWidget() == self.complaps:
            return COMP_MODE__RACE_LAPS
        if self.modetab.currentWidget() == self.comptime:
            return COMP_MODE__RACE_TIME
    def getDuration(self):
        # Duration in laps or minutes, depending on the selected tab.
        if self.modetab.currentWidget() == self.complaps:
            return self.complaps.duration.value()
        if self.modetab.currentWidget() == self.comptime:
            return self.comptime.duration.value()
class QualifyingParams(QWidget):
    """Qualifying setup: Laps/Time tabs plus a 'Sequential' checkbox."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.vbox = QVBoxLayout(self)
        self.modetab = QTabWidget()
        self.vbox.addWidget(self.modetab)
        self.complaps = CompLaps()
        self.modetab.addTab(self.complaps, self.tr('Laps'))
        self.comptime = CompTime()
        self.modetab.addTab(self.comptime, self.tr('Time'))
        self.sequential = QCheckBox()
        self.sequential.setText(self.tr('Sequential'))
        self.vbox.addWidget(self.sequential)
        self.setLayout(self.vbox)
    def getCompMode(self):
        # Mode constant derived from the active tab and the sequential flag.
        if self.modetab.currentWidget() == self.complaps:
            if self.sequential.isChecked():
                return COMP_MODE__QUALIFYING_LAPS_SEQ
            return COMP_MODE__QUALIFYING_LAPS
        if self.modetab.currentWidget() == self.comptime:
            if self.sequential.isChecked():
                return COMP_MODE__QUALIFYING_TIME_SEQ
            return COMP_MODE__QUALIFYING_TIME
    def getDuration(self):
        # Laps or minutes, matching the active tab.
        if self.modetab.currentWidget() == self.complaps:
            return self.complaps.duration.value()
        if self.modetab.currentWidget() == self.comptime:
            return self.comptime.duration.value()
class ControllerSet(QWidget):
    """Grid of six controller slots: enable checkbox, driver name, car picker."""
    def __init__(self, parent=None, database=None):
        super().__init__(parent)
        self.database = database
        self.controller = QGridLayout()
        # Parallel per-slot widget lists, indexed by controller address 0-5.
        self.controller_ok = []
        self.controller_name = []
        self.controller_car = []
        cars = self.database.getAllCars()
        # Placeholder and separator entries shown first in each car combo box.
        self.carlbl = self.tr('Select Car')
        self.carsep = '---'
        for i in range(0, 6):
            ok = QCheckBox()
            ok.setSizePolicy(QSizePolicy.Expanding,
                             QSizePolicy.Maximum)
            ok.setText(self.tr('Controller ') + str(i+1))
            self.controller.addWidget(ok, 0, i)
            self.controller_ok.append(ok)
            name = QLineEdit()
            name.setSizePolicy(QSizePolicy.Expanding,
                               QSizePolicy.Maximum)
            self.controller.addWidget(name, 1, i)
            self.controller_name.append(name)
            car = QComboBox()
            car.addItem(self.carlbl)
            car.addItem(self.carsep)
            for c in cars:
                car.addItem(c.name)
            self.controller.addWidget(car, 2, i)
            self.controller_car.append(car)
        self.setLayout(self.controller)
    def getCar(self, addr):
        # Selected car name for slot *addr*; shows a message box and raises
        # KeyError when no real car is chosen.
        t = self.controller_car[addr].currentText()
        if t in [self.carlbl, self.carsep]:
            QMessageBox.information(
                self,
                self.tr("No car selected"),
                str(self.tr("Please select a car for Controller ")
                    + str(addr+1) + '.'),
                QMessageBox.Ok)
            raise KeyError
        return self.controller_car[addr].currentText()
    def getOk(self, addr):
        # Whether slot *addr* is enabled.
        return self.controller_ok[addr].isChecked()
    def getName(self, addr):
        # Driver name for slot *addr*; shows a message box and raises
        # KeyError when the field is empty.
        name = self.controller_name[addr].text()
        if name is None or len(name) <= 0:
            QMessageBox.information(
                self,
                self.tr("Driver name missing"),
                str(self.tr("Please enter a driver name for Controller ")
                    + str(addr+1) + '.'),
                QMessageBox.Ok)
            raise KeyError
        return name
    def setCar(self, addr, car):
        index = self.controller_car[addr].findText(car)
        if index >= 0:
            self.controller_car[addr].setCurrentIndex(index)
    def setOk(self, addr, checked):
        self.controller_ok[addr].setChecked(checked)
    def setName(self, addr, name):
        self.controller_name[addr].setText(name)
    def buildCarList(self):
        # Refresh every car combo box from the database, preserving the
        # current selection when it still exists.
        cars = self.database.getAllCars()
        for i in range(0, 6):
            cw = self.controller_car[i]
            car = cw.currentText()
            cw.clear()
            cw.addItem(self.carlbl)
            cw.addItem(self.carsep)
            for c in cars:
                cw.addItem(c.name)
            index = cw.findText(car)
            if index >= 0:
                cw.setCurrentIndex(index)
class Home(QWidget):
    """Main screen: controller setup plus training/qualifying/race launchers."""
    def __init__(self, parent=None, database=None):
        super().__init__(parent)
        self.database = database
        self.initUI()
    def initUI(self):
        # Build the complete home-screen widget tree (order defines layout).
        self.controller = ControllerSet(self, self.database)
        self.vml = QVBoxLayout()
        self.vml.setSpacing(10)
        self.headFont = QFont()
        self.headFont.setPointSize(45)
        self.headFont.setBold(True)
        self.headline = QLabel(self.tr('Carrera RMS'))
        self.headline.setFont(self.headFont)
        self.vml.addWidget(self.headline)
        self.vml.addWidget(HSep())
        self.vml.addWidget(self.controller)
        self.vml.addWidget(HSep())
        self.starts = QHBoxLayout()
        self.vml.addLayout(self.starts)
        self.vml.addWidget(HSep())
        # Training start button (no parameters).
        self.start_training = QPushButton()
        self.start_training.setText(self.tr('Training'))
        self.start_training.clicked.connect(self.startTraining_click)
        self.start_training.setSizePolicy(QSizePolicy.Expanding,
                                          QSizePolicy.Expanding)
        self.starts.addWidget(self.start_training)
        self.starts.addWidget(VSep())
        # Qualifying parameters + start button.
        self.qualifyingparams = QualifyingParams()
        self.qualifyingparams.setSizePolicy(QSizePolicy.Expanding,
                                            QSizePolicy.Maximum)
        self.qhbox = QVBoxLayout()
        self.qhbox.addWidget(self.qualifyingparams)
        self.start_qualifying = QPushButton()
        self.start_qualifying.setText(self.tr('Qualifying'))
        self.start_qualifying.clicked.connect(self.startQualifying_click)
        self.start_qualifying.setSizePolicy(QSizePolicy.Expanding,
                                            QSizePolicy.Expanding)
        self.qhbox.addWidget(self.start_qualifying)
        self.starts.addLayout(self.qhbox)
        self.starts.addWidget(VSep())
        # Race parameters + start button.
        self.raceparams = RaceParams()
        self.raceparams.setSizePolicy(QSizePolicy.Expanding,
                                      QSizePolicy.Maximum)
        self.rhbox = QVBoxLayout()
        self.rhbox.addWidget(self.raceparams)
        self.start_race = QPushButton()
        self.start_race.setText(self.tr('Race'))
        self.start_race.clicked.connect(self.startRace_click)
        self.start_race.setSizePolicy(QSizePolicy.Expanding,
                                      QSizePolicy.Expanding)
        self.rhbox.addWidget(self.start_race)
        self.starts.addLayout(self.rhbox)
        # Bottom rows: fullscreen / statistics / settings, then exit.
        self.btnrow = QHBoxLayout()
        self.fullscreen = QPushButton()
        self.fullscreen.setText(self.tr('Fullscreen'))
        self.fullscreen.clicked.connect(self.fullscreen_click)
        self.btnrow.addWidget(self.fullscreen)
        self.statistics = QPushButton()
        self.statistics.setText(self.tr('Statistics'))
        self.btnrow.addWidget(self.statistics)
        self.settings = QPushButton()
        self.settings.setText(self.tr('Settings'))
        self.settings.clicked.connect(self.settings_click)
        self.btnrow.addWidget(self.settings)
        self.vml.addLayout(self.btnrow)
        self.exitrms = QPushButton()
        self.exitrms.setText(self.tr('Exit'))
        self.exitrms.clicked.connect(self.exitrms_click)
        self.vml.addWidget(self.exitrms)
        self.setLayout(self.vml)
    @pyqtSlot()
    def settings_click(self):
        # Delegate to the main window (two levels up in the widget tree).
        self.parent().parent().showSettings()
    @pyqtSlot()
    def exitrms_click(self):
        self.parent().parent().close()
    @pyqtSlot()
    def fullscreen_click(self):
        # Toggle fullscreen on the main window and relabel the button.
        if self.parent().parent().windowState() & Qt.WindowFullScreen:
            if self.parent().parent().cuv not in DUMMY_IDS:
                self.parent().parent().showMaximized()
            else:
                self.parent().parent().showNormal()
            self.fullscreen.setText(self.tr('Fullscreen'))
        else:
            self.parent().parent().showFullScreen()
            self.fullscreen.setText(self.tr('Exit Fullscreen'))
    def getDrivers(self):
        # Collect enabled controllers as {addr: driver dict}. getCar/getName
        # raise KeyError (after alerting the user) when input is missing.
        d = {}
        for i in range(0, 6):
            if self.getOk(i):
                c = self.getCar(i)
                p = {'pos': 0, 'name': self.getName(i), 'car': c}
                if self.qualifyingparams.getCompMode() in [
                        COMP_MODE__QUALIFYING_LAPS_SEQ,
                        COMP_MODE__QUALIFYING_TIME_SEQ]:
                    p['qualifying_cu_driver'] = None
                d[i] = p
        return d
    @pyqtSlot()
    def startRace_click(self):
        try:
            self.parent().parent().drivers = self.getDrivers()
            self.parent().parent().startRace(self.raceparams.getCompMode(),
                                             self.raceparams.getDuration())
        except KeyError:
            # getDrivers() already reported the problem to the user.
            pass
    @pyqtSlot()
    def startQualifying_click(self):
        try:
            self.parent().parent().drivers = self.getDrivers()
            self.parent().parent().startQualifying(
                self.qualifyingparams.getCompMode(),
                self.qualifyingparams.getDuration())
        except KeyError:
            pass
    @pyqtSlot()
    def startTraining_click(self):
        try:
            self.parent().parent().drivers = self.getDrivers()
            self.parent().parent().startTraining()
        except KeyError:
            pass
    # Thin delegation wrappers to the embedded ControllerSet.
    def getCar(self, addr):
        return self.controller.getCar(addr)
    def getOk(self, addr):
        return self.controller.getOk(addr)
    def getName(self, addr):
        return self.controller.getName(addr)
    def setCar(self, addr, car):
        self.controller.setCar(addr, car)
    def setOk(self, addr, checked):
        self.controller.setOk(addr, checked)
    def setName(self, addr, name):
        self.controller.setName(addr, name)
    def buildCarList(self):
        self.controller.buildCarList()
|
nilq/baby-python
|
python
|
import twoLSTMcuda as t
# Load the serialized model and print accuracy on the training and
# validation sets exposed by the twoLSTMcuda module.
model = t.torch.load(open("twoLSTMentireModel.npy", 'rb'))
print(t.checkAcc(model, t.data, t.labels))
print(t.checkAcc(model, t.valData, t.valLabels))
|
nilq/baby-python
|
python
|
###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ..util import *
import os
import re
@slash.requires(have_ffmpeg)
@slash.requires(have_ffmpeg_vaapi_accel)
class TranscoderTest(slash.Test):
    """ffmpeg/VAAPI transcode test: decode+encode in hw/sw combinations,
    then compare the result against the source via PSNR."""

    def before(self):
        # Fresh reference context for each test case.
        self.refctx = []

    def transcode_1to1(self):
        """Transcode the source once according to self.mode, then verify
        hardware usage, convert both files to YUV, and check PSNR."""
        self.decoded = get_media()._test_artifact(
            "{case}_{width}x{height}_{mode}.{dstextension}".format(**vars(self)))
        if vars(self).get("mode", None) == 'hwhw':
            self.output = call(
                "ffmpeg -hwaccel vaapi -hwaccel_device /dev/dri/renderD128 -v verbose"
                " -hwaccel_output_format vaapi -i {source} -an -c:v {mcodec}"
                " -vframes {frames} -y {decoded}".format(**vars(self)))
        elif vars(self).get("mode", None) == 'hwsw':
            self.output = call(
                "ffmpeg -hwaccel vaapi -hwaccel_device /dev/dri/renderD128 -v verbose"
                " -hwaccel_output_format vaapi -i {source} -vf 'hwdownload,format=nv12' -an -c:v {mcodec}"
                " -vframes {frames} -y {decoded}".format(**vars(self)))
        elif vars(self).get("mode", None) == 'swhw':
            self.output = call(
                "ffmpeg -vaapi_device /dev/dri/renderD128 -v verbose "
                "-i {source} -vf 'format=nv12,hwupload' -an -c:v {mcodec}"
                " -vframes {frames} -y {decoded}".format(**vars(self)))
        else:
            # BUG FIX: the original `assert "non supported transcoding"` was a
            # truthy no-op and could never fail; fail explicitly instead.
            assert False, "non supported transcoding"
        self.check_output()
        self.convert_toyuv()
        self.check_metrics()

    def check_output(self):
        # The transcode must actually have used hardware decode.
        m = re.search(
            "not supported for hardware decode", self.output, re.MULTILINE)
        assert m is None, "Failed to use hardware decode"
        m = re.search(
            "hwaccel initialisation returned error", self.output, re.MULTILINE)
        assert m is None, "Failed to use hardware decode"

    def convert_toyuv(self):
        """Convert both the transcoded file and the source to raw YUV420."""
        self.decodedYUV = get_media()._test_artifact(
            "{case}_{width}x{height}_{mode}_{mcodec}.yuv".format(**vars(self)))
        call("ffmpeg -i {decoded} -pix_fmt yuv420p -vframes {frames} -y {decodedYUV}".format(**vars(self)))
        # src file to yuv
        self.referenceFile = get_media()._test_artifact(
            "{case}_{width}x{height}_{mode}_ref.yuv".format(**vars(self)))
        call("ffmpeg -i {source} -pix_fmt yuv420p -vframes {frames} -y {referenceFile}".format(**vars(self)))

    def check_metrics(self):
        # Compare against the stored PSNR baseline for this case.
        get_media().baseline.check_psnr(
            psnr = calculate_psnr(
                self.referenceFile, self.decodedYUV,
                self.width, self.height,
                self.frames),
            context = self.refctx,
        )
|
nilq/baby-python
|
python
|
from django.shortcuts import *
from django.http import *
from django.shortcuts import *
from django.urls import *
import traceback
import json
from .models import *
from django.db.utils import IntegrityError
from django.db.models import F
from .crypto import *
import math
from account_info.models import User
from wallet_info.models import Wallet
from task_info.models import Task
from accept_task_info.models import AcceptTask
# Create your views here.
MAX_PAGE_ITEMS = 6
def dealResponse(status_code, res_text=None):
    """Build an encrypted JSON HttpResponse carrying *status_code*.

    Args:
        status_code: one of the application status codes mapped below.
        res_text: optional dict payload; 'status_code' is added to it
            before it is JSON-encoded and encrypted.

    Returns:
        HttpResponse whose body is the encrypted JSON payload.
    """
    # BUG FIX: the original used a mutable default (res_text={}) and then
    # mutated it, leaking 'status_code' across calls; use None as sentinel.
    if res_text is None:
        res_text = {}
    dic = {
        400 : 'Decode Failed',
        406 : 'Verification Failed',
        200 : 'Standard Successed',
        201 : 'Create Resource Successed',
        409 : 'Confict Field or MultipleAcceptance',
        500 : 'Unknown Server Error',
        404 : 'Not Exist',
        416 : 'OutOfRange or CompletedTask',
        401 : 'Unauthorized',
        403 : 'TaskBeenFinished'
    }
    # BUG FIX: traceback.print_exc() was called twice; once is enough.
    traceback.print_exc()
    print('[+] ' + dic[status_code])
    res_text['status_code'] = status_code
    resp = HttpResponse(encrypt(json.dumps(res_text)))
    # resp.status_code = status_code
    return resp
def operate_task(request):
    """Fetch (GET) or create (POST) a task.

    GET: expects an encrypted 'taskID' query parameter and returns the
    task's fields. POST: expects an encrypted JSON body describing the
    new task and returns its generated taskID. All responses are built
    by dealResponse (encrypted JSON payloads with a status code).
    """
    if request.method == 'GET':
        try:
            id = decrypt(request.GET['taskID'])
        except:
            # Missing parameter or failed decryption -> 400.
            return dealResponse(400)
        try:
            result = Task.objects.get(taskID=id)
        except Task.DoesNotExist:
            return dealResponse(404)
        res_text = {
            'data':{
                'title' : result.title,
                'content' : result.content,
                'type' : result.types,
                'issuer' : result.issuer.username,
                'reward' : result.reward,
                'deadline' : result.deadline,
                'repeatTime' : result.repeatTime,
                'isCompleted' : result.isCompleted,
            }
        }
        return dealResponse(200, res_text)
    elif request.method == 'POST':
        try:
            raw_string = decrypt(str(request.body, 'utf-8'))
            content = json.loads(raw_string)
            ttitle = content['title']
            tcontent = content['content']
            ttype = content['type']
            tissuer = content['issuer']
            treward = content['reward']
            trepeatTime = content['repeatTime']
            tdeadline = content['deadline']
        except:
            return dealResponse(400)
        try:
            user = User.objects.get(username=tissuer)
        except User.DoesNotExist:
            return dealResponse(404)
        task = Task(title=ttitle, content=tcontent, types=ttype,\
                issuer=user, reward=treward, repeatTime=trepeatTime,\
                deadline=tdeadline, )
        task.save()
        return dealResponse(201, {"data":{"taskID":task.taskID}})
    # NOTE(review): the string below is disabled DELETE-handler code kept as
    # an inline "comment"; it executes as a harmless expression when the
    # method is neither GET nor POST.
    """
    elif request.method == 'DELETE':
        try:
            id = decrypt(request.DELETE['taskID'])
            username = decrypt(request.DELETE['issuer'])
        except:
            return dealResponse(400)
        try:
            result = Task.objects.get(taskID=id)
        except Task.DoesNotExist:
            return dealResponse(404)
        if result.issuer.username != username:
            return dealResponse(401)
        result.isCompleted = True
        result.save()
        return dealResponse(200)
    """
def task_finished(request):
    """Mark a task as completed; only its issuer may do so (else 401)."""
    try:
        # id = decrypt(request.POST['taskID'])
        # username = decrypt(request.POST['issuer'])
        raw_string = decrypt(str(request.body, 'utf-8'))
        content = json.loads(raw_string)
        id = content['taskID']
        username = content['issuer']
    except:
        return dealResponse(400)
    try:
        result = Task.objects.get(taskID=id)
    except Task.DoesNotExist:
        return dealResponse(404)
    if result.issuer.username != username:
        # Only the issuing user may close their own task.
        return dealResponse(401)
    result.isCompleted = True
    result.save()
    return dealResponse(200)
def get_tasks(request):
    """Search tasks with optional encrypted filters and return one page.

    Query parameters (all encrypted): page (1-based, required), title,
    type, issuer, content, isComplete. Returns up to MAX_PAGE_ITEMS
    tasks plus the total page count; 416 when page is out of range.
    """
    try:
        page = int(decrypt(request.GET['page']))
        title = decrypt(request.GET.get('title', default=''))
        types = decrypt(request.GET.get('type', default=''))
        issuer = decrypt(request.GET.get('issuer', default=''))
        content = decrypt(request.GET.get('content', default=''))
        isCompleted = decrypt(request.GET.get('isComplete', default=''))
    except:
        return dealResponse(400)
    # Build the ORM filter kwargs from the non-empty parameters only.
    dic = {}
    if title != '':
        dic['title'] = title
    if types != '':
        dic['types'] = types
    if issuer != '':
        dic['issuer'] = issuer
    if content != '':
        dic['content'] = content
    if isCompleted != '':
        if isCompleted == 'true':
            dic['isCompleted'] = True
        elif isCompleted == 'false':
            dic['isCompleted'] = False
    result = Task.objects.filter(**dic)
    # for item in result:
    # print(item.taskID)
    max_pages = math.ceil(float(len(result)) / MAX_PAGE_ITEMS)
    if page > max_pages or page <= 0:
        return dealResponse(416, {"data": {"max_pages": max_pages}})
    page = page - 1
    resp = {"data" : {
            "tasks" : [],
            "max_pages" : max_pages
        }
    }
    # Slice the page's rows out of the result set.
    startid = page * MAX_PAGE_ITEMS
    endid = min(len(result), (page+1)*MAX_PAGE_ITEMS)
    for i in range(startid, endid):
        oner = {
            "taskID": result[i].taskID,
            "title": result[i].title,
            "content": result[i].content,
            "type": result[i].types,
            "issuer": result[i].issuer.username,
            "reward": result[i].reward,
            "deadline": result[i].deadline,
            "repeatTime": result[i].repeatTime,
            # "isCompleted": result[i].isCompleted,
        }
        resp['data']['tasks'].append(oner)
    return dealResponse(200, resp)
def operate_created_tasks(request):
    """List (GET, paged) or edit (POST) the tasks created by an issuer."""
    if request.method == 'GET':
        try:
            page = int(decrypt(request.GET['page']))
            tusername = decrypt(request.GET['issuer'])
        except:
            return dealResponse(400)
        try:
            fuser = User.objects.get(username=tusername)
        except User.DoesNotExist:
            return dealResponse(404)
        result = Task.objects.filter(issuer=fuser)
        max_pages = math.ceil(float(len(result)) / MAX_PAGE_ITEMS)
        if page > max_pages or page <= 0:
            return dealResponse(416, {"data": {"max_pages": max_pages}})
        page = page - 1
        resp = {"data" : {
                "tasks" : [],
                "max_pages" : max_pages
            }
        }
        # Slice the requested page out of the issuer's tasks.
        startid = page * MAX_PAGE_ITEMS
        endid = min(len(result), (page+1)*MAX_PAGE_ITEMS)
        for i in range(startid, endid):
            oner = {
                "taskID": result[i].taskID,
                "title": result[i].title,
                "content": result[i].content,
                "type": result[i].types,
                "issuer": result[i].issuer.username,
                "reward": result[i].reward,
                "deadline": result[i].deadline,
                "repeatTime": result[i].repeatTime,
                "isFinished": result[i].isCompleted,
            }
            resp['data']['tasks'].append(oner)
        return dealResponse(200, resp)
    elif request.method == 'POST':
        try:
            raw_string = decrypt(str(request.body, 'utf-8'))
            content = json.loads(raw_string)
            ttaskID = content['taskID']
            ttitle = content['title']
            tissuer = content['issuer']
            treward = content['reward']
            tdeadline = content['deadline']
        except:
            return dealResponse(400)
        try:
            task = Task.objects.get(taskID=ttaskID)
            user = User.objects.get(username=tissuer)
        except(Task.DoesNotExist, User.DoesNotExist):
            return dealResponse(404)
        if user != task.issuer:
            # Only the original issuer may edit the task.
            return dealResponse(401)
        task.title = ttitle
        task.reward = treward
        task.deadline = tdeadline
        task.save()
        return dealResponse(200)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import unittest
from utils.logger import Logger
class UtLogger(unittest.TestCase):
    """Smoke test: emit one message at each severity level."""
    def setUp(self):
        # A named logger scoped to this test class.
        self.logger = Logger().getLogger("test.utils.UtLogger")
    def testLogger(self):
        # Exercise every level; the test passes as long as nothing raises.
        self.logger.debug("1")
        self.logger.info("2")
        self.logger.warn("3")
        self.logger.error("4")
        self.logger.critical("5")
if __name__ == '__main__': unittest.main()
|
nilq/baby-python
|
python
|
from base_n_treble import db, auth
from base_n_treble.models.user import User
def setup_admin():
    """Create the TEST_ADMIN fixture user, ensure the 'admin' group exists,
    add the user to it, and return the insert result."""
    from tests.fixtures import TEST_ADMIN
    db._adapter.reconnect()
    # Reuse the existing admin group or create it on first run.
    admins = auth.id_group("admin") if auth.id_group("admin") else auth.add_group("admin")
    print("Admin group id: '{}'".format(admins))
    db._adapter.reconnect()
    admin = db.User.validate_and_insert(
        email=TEST_ADMIN.email,
        first_name=TEST_ADMIN.first_name,
        last_name=TEST_ADMIN.last_name,
        password=TEST_ADMIN.password
    )
    db.commit()
    db._adapter.reconnect()
    auth.add_membership(admins, admin.id)
    print("Admin created: \n{}".format(admin.as_dict()))
    db.commit()
    return admin
def remove_admin():
    """Delete the TEST_ADMIN fixture user from the database."""
    from tests.fixtures import TEST_ADMIN
    db._adapter.reconnect()
    print("\nRemoving test admin.")
    dev_admin = db(User.email == TEST_ADMIN.email).select()
    print("Admin: {}\n".format(dev_admin.as_dict()))
    db(User.email == TEST_ADMIN.email).delete()
    print("Test admin successfully deleted.")
    db.commit()
def setup_user():
    """Create the TEST_USER fixture user and return the insert result."""
    from tests.fixtures import TEST_USER
    db._adapter.reconnect()
    user = db.User.validate_and_insert(
        email=TEST_USER.email,
        first_name=TEST_USER.first_name,
        last_name=TEST_USER.last_name,
        password=TEST_USER.password
    )
    db.commit()
    return user
def remove_user():
    """Delete the TEST_USER fixture user from the database."""
    from tests.fixtures import TEST_USER
    db._adapter.reconnect()
    print("\nRemoving test user.")
    dev_user = db(User.email == TEST_USER.email).select()
    print("Admin: {}\n".format(dev_user.as_dict()))
    db(User.email == TEST_USER.email).delete()
    print("Test user successfully deleted.")
    db.commit()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import sqlite3
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
from contextlib import closing
# configuration (UPPERCASE names are picked up by app.config.from_object)
DATABASE = "/tmp/flaskr.db"  # SQLite database file
DEBUG = True
SECRET_KEY = "development key"  # NOTE(review): development-only secret
USERNAME= "admin"
PASSWORD = "default"
JSON_AS_ASCII = False  # presumably to allow the Japanese flash messages below
app = Flask(__name__)
app.config.from_object(__name__)
def connect_db():
    """Open a new SQLite connection to the configured database file."""
    db_path = app.config["DATABASE"]
    return sqlite3.connect(db_path)
def init_db():
    """(Re)create the schema by executing schema.sql on a fresh connection."""
    with closing(connect_db()) as db:
        with app.open_resource("schema.sql", mode="r") as f:
            schema_sql = f.read()
        db.cursor().executescript(schema_sql)
        db.commit()
@app.before_request
def before_request():
    # Open one database connection for the lifetime of this request.
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request database connection, if one was opened."""
    connection = getattr(g, "db", None)
    if connection is not None:
        connection.close()
@app.route("/")
def show_entries():
cur = g.db.execute("SELECT title, text FROM entries ORDER BY id DESC")
entries = [dict(title=row[0],text=row[1]) for row in cur.fetchall()]
return render_template("show_entries.html", entries=entries)
@app.route("/add", methods=["POST"])
def add_entry():
if not session.get("logged_in"):
abort(401)
g.db.execute("INSERT INTO entries (title, text) values (?, ?)", [request.form["title"], request.form["text"]])
g.db.commit()
flash(u"新しいエントリが投稿されました. New entry was successfully posted.")
return redirect(url_for("show_entries"))
@app.route("/login", methods=["GET", "POST"])
def login():
error = None
if request.method == "POST":
if request.form["username"] != app.config["USERNAME"]:
error = u"Invalid username, user nameが間違っています"
elif request.form["password"] != app.config["PASSWORD"]:
error = u"Invalid password, パスワードが間違っています"
else:
session["logged_in"] = True
flash(u"You were logged in, ログインしました")
return redirect(url_for("show_entries"))
return render_template("login.html", error=error)
@app.route("/logout")
def logout():
session.pop("logged_in", None)
flash(u"Your were logged out, ログアウトしました")
return redirect(url_for("show_entries"))
if __name__ == "__main__":
app.run()
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
import sys
import os
import disttools
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
from strparser import *
from filehdl import *
from pyparser import *
def make_string_slash_ok(s):
    """Escape backslashes in *s* line by line, preserving a trailing
    line-continuation backslash by swapping '$' through a random marker.

    NOTE(review): when a line does NOT end with a backslash, cexpr stays
    None and is passed as the re.sub replacement, which raises TypeError —
    confirm the intended indentation of the three re.sub lines below.
    """
    sarr = re.split('\n', s)
    rets = ''
    for l in sarr:
        l = l.rstrip('\r\n')
        cexpr = None
        if len(l) > 0 and l[-1] == '\\':
            # Pick a random marker guaranteed not to occur in the line.
            cexpr = get_random_name(20)
            cont = True
            while cont:
                cont = False
                matchl = re.sub(cexpr, '', l)
                if matchl != l:
                    cont = True
                    cexpr = get_random_name(20)
        l = re.sub('\\$', cexpr, l)
        l = re.sub(r'\\', r'\\\\', l)
        if cexpr is not None:
            l = re.sub(cexpr, r'\\', l)
        rets += '%s\n'%(l)
    return rets
def get_import_file(fname):
    """Return the text between '##extractcode_start' and '##extractcode_end'
    marker lines in *fname* (markers excluded), newline-terminated per line."""
    collected = ''
    inside = False
    with open(fname, 'r+b') as f:
        for raw in f:
            if sys.version[0] == '3':
                raw = raw.decode('utf8')
            line = raw.rstrip('\r\n')
            if inside:
                if line.startswith('##extractcode_end'):
                    inside = False
                else:
                    collected += line + '\n'
            elif line.startswith('##extractcode_start'):
                inside = True
    return collected
def make_filters_out(ims, files):
    """Remove, in place, every entry of *ims* whose .frommodule is listed in
    *files*; returns the (mutated) list. Restarts the scan after each
    deletion, logging every removal."""
    removed = True
    while removed:
        removed = False
        for pos, entry in enumerate(ims):
            if entry.frommodule in files:
                jdx = files.index(entry.frommodule)
                logging.info('del [%d] jdx [%d] [%s]'%(pos,jdx,entry))
                del ims[pos]
                logging.info('%s'%(ims))
                removed = True
                break
    return ims
def fromat_ext_import_files(origfile,files):
    """Collect import statements used by *files* that are not already
    imported by *origfile* (and that do not import from any of *files*
    themselves), returned as newline-joined source text.

    NOTE(review): the name is misspelled ('fromat'); kept for callers.
    NOTE(review): curbase and seccont are computed but never used.
    """
    curbase = re.sub('\.py$','',os.path.basename(origfile))
    allims = []
    for f in files:
        allims.extend(get_import_names(f))
    curims= get_import_names(origfile)
    curims = packed_import(curims)
    curims = make_filters_out(curims, files)
    logging.info('curims %s'%(curims))
    allims = packed_import(allims)
    allims = make_filters_out(allims, files)
    logging.info('allims %s'%(allims))
    cont = True
    seccont = True
    # Drop from allims every import already present in curims; restart the
    # scan from the top after each deletion.
    while cont:
        cont = False
        idx = 0
        while idx < len(allims) :
            jdx = 0
            while jdx < len(curims) :
                if allims[idx].frommodule == curims[jdx].frommodule and \
                    allims[idx].module == curims[jdx].module:
                    cont = True
                    #logging.info('del [%d] %s'%(idx,allims[idx]))
                    del allims[idx]
                    break
                jdx += 1
            if cont:
                break
            idx += 1
    rets = ''
    for m in allims:
        rets += '%s\n'%(format_import(m))
    return rets
class ReleaseFiles(object):
    """Accumulates Python source files and the placeholder->content
    replacement map used to inline them into a single release file."""
    def __init__(self, basefile=__file__):
        self.__includes = []
        self.__basefile = basefile
        self.__repls = dict()
        return
    def add_python_file(self,path,rex):
        # Extract the marked code from *path* and store it (slash-escaped)
        # as the replacement text for placeholder *rex*.
        c = get_import_file(path)
        self.__repls[rex] = make_string_slash_ok(c)
        self.__includes.append(path)
        return
    def add_repls(self,k,v):
        # Register a raw replacement pair without file extraction.
        self.__repls[k]= v
        return
    def get_repls(self):
        return self.__repls
    def get_includes(self):
        return self.__includes
|
nilq/baby-python
|
python
|
import pytest
import abjad
import abjadext.nauert
class Job:
    """Callable wrapper computing all integer compositions of *number*."""

    ### INITIALIZER ###

    def __init__(self, number):
        self.number = number

    ### SPECIAL METHODS ###

    def __call__(self):
        # Materialize the generator so the result can be inspected later.
        self.result = list(
            abjad.math.yield_all_compositions_of_integer(self.number)
        )
@pytest.mark.skip()
def test_ParallelJobHandler___call___01():
    # Smoke test: run ten composition jobs through the parallel handler.
    jobs = [Job(x) for x in range(1, 11)]
    job_handler = abjadext.nauert.ParallelJobHandler()
    job_handler(jobs)
@pytest.mark.skip()
def test_ParallelJobHandler___call___02():
    """Serial and parallel job handlers must produce identical q_grids."""
    job_id = 1
    definition = {2: {2: {2: None}, 3: None}, 5: None}
    search_tree = abjadext.nauert.UnweightedSearchTree(definition)
    # (offset, attachment) pairs; index is the 1-based position.
    specs = [
        (0, "A"), ((1, 5), "B"), ((1, 4), "C"), ((1, 3), "D"),
        ((2, 5), "E"), ((1, 2), "F"), ((3, 5), "G"), ((2, 3), "H"),
        ((3, 4), "I"), ((4, 5), "J"), (1, "K"),
    ]
    q_event_proxies = [
        abjadext.nauert.QEventProxy(
            abjadext.nauert.SilentQEvent(offset, [label], index=pos), 0, 1
        )
        for pos, (offset, label) in enumerate(specs, start=1)
    ]
    job_a = abjadext.nauert.QuantizationJob(job_id, search_tree, q_event_proxies)
    job_b = abjadext.nauert.QuantizationJob(job_id, search_tree, q_event_proxies)
    assert job_a == job_b
    a_jobs = abjadext.nauert.SerialJobHandler()([job_a])
    b_jobs = abjadext.nauert.ParallelJobHandler()([job_b])
    assert len(a_jobs) == len(b_jobs)
    a_rtms = sorted(q_grid.root_node.rtm_format for q_grid in a_jobs[0].q_grids)
    b_rtms = sorted(q_grid.root_node.rtm_format for q_grid in b_jobs[0].q_grids)
    assert a_rtms == b_rtms
    def by_rtm(q_grid):
        return q_grid.root_node.rtm_format
    assert sorted(a_jobs[0].q_grids, key=by_rtm) == sorted(
        b_jobs[0].q_grids, key=by_rtm
    )
nilq/baby-python
|
python
|
"""
===========
04. Run ICA
===========
This fits ICA on epoched data high-pass filtered at 1 Hz (the filtering is
applied for this purpose only). Separate ICAs are fitted and stored for
MEG and EEG data.
To actually remove the designated ICA components from your data, you will
have to run 05a-apply_ica.py.
"""
import itertools
import logging
from typing import List, Optional, Iterable, Literal
from tqdm import tqdm
import pandas as pd
import numpy as np
import mne
from mne.report import Report
from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
from mne.parallel import parallel_func
from mne_bids import BIDSPath
import config
from config import make_epochs, gen_log_message, on_error, failsafe_run
logger = logging.getLogger('mne-bids-pipeline')
def filter_for_ica(
    *,
    raw: mne.io.BaseRaw,
    subject: str,
    session: str,
    run: Optional[str] = None
) -> None:
    """Apply a high-pass filter to *raw* (in place) if needed.

    When config.ica_l_freq is None the data is assumed to be filtered
    already and only an informational message is logged.
    """
    if config.ica_l_freq is None:
        msg = (f'Not applying high-pass filter (data is already filtered, '
               f'cutoff: {raw.info["highpass"]} Hz).')
        logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                    session=session, run=run))
    else:
        msg = f'Applying high-pass filter with {config.ica_l_freq} Hz cutoff …'
        logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                    session=session, run=run))
        raw.filter(l_freq=config.ica_l_freq, h_freq=None)
def fit_ica(epochs, subject, session):
    """Fit ICA on *epochs* with the configured algorithm and return it.

    'extended_infomax' is mapped to MNE's 'infomax' with extended=True;
    'picard' gets five FastICA warm-up iterations.
    """
    algorithm = config.ica_algorithm
    fit_params = None
    if algorithm == 'picard':
        fit_params = dict(fastica_it=5)
    elif algorithm == 'extended_infomax':
        algorithm = 'infomax'
        fit_params = dict(extended=True)
    ica = ICA(method=algorithm, random_state=config.random_state,
              n_components=config.ica_n_components, fit_params=fit_params,
              max_iter=config.ica_max_iterations)
    ica.fit(epochs, decim=config.ica_decim, reject=config.get_ica_reject())
    # Proportion of PCA variance explained by the retained components.
    explained_var = (ica.pca_explained_variance_[:ica.n_components_].sum() /
                     ica.pca_explained_variance_.sum())
    msg = (f'Fit {ica.n_components_} components (explaining '
           f'{round(explained_var * 100, 1)}% of the variance) in '
           f'{ica.n_iter_} iterations.')
    logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                session=session))
    return ica
def make_ecg_epochs(
    *,
    raw: mne.io.BaseRaw,
    subject: str,
    session: str,
    run: Optional[str] = None
) -> Optional[mne.Epochs]:
    """Create ECG epochs for artifact detection.

    Returns None when no ECG/magnetometer channel is available or when no
    ECG events could be detected; otherwise the (amplitude-unrejected)
    ECG epochs.
    """
    # ECG either needs an ecg channel, or avg of the mags (i.e. MEG data)
    if ('ecg' in raw.get_channel_types() or 'meg' in config.ch_types or
        'mag' in config.ch_types):
        msg = 'Creating ECG epochs …'
        logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                    session=session, run=run))
        # Do not reject epochs based on amplitude.
        ecg_epochs = create_ecg_epochs(raw, reject=None,
                                       baseline=(None, -0.2),
                                       tmin=-0.5, tmax=0.5)
        if len(ecg_epochs) == 0:
            msg = ('No ECG events could be found. Not running ECG artifact '
                   'detection.')
            logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                        session=session, run=run))
            ecg_epochs = None
    else:
        msg = ('No ECG or magnetometer channels are present. Cannot '
               'automate artifact detection for ECG')
        logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                    session=session, run=run))
        ecg_epochs = None
    return ecg_epochs
def make_eog_epochs(
    *,
    raw: mne.io.BaseRaw,
    eog_channels: Optional[Iterable[str]],
    subject: str,
    session: str,
    run: Optional[str] = None
) -> Optional[mne.Epochs]:
    """Create EOG epochs from the raw data, or return None when no EOG
    channel is available or no EOG events could be found.

    If ``eog_channels`` is provided it is used directly (every channel must
    exist in the recording); otherwise EOG channels are auto-picked.
    """
    if eog_channels:
        ch_names = eog_channels
        assert all(ch_name in raw.ch_names for ch_name in ch_names)
    else:
        picks = mne.pick_types(raw.info, meg=False, eog=True)
        ch_names = [raw.ch_names[i] for i in picks]
    if not ch_names:
        msg = ('No EOG channel is present. Cannot automate IC detection '
               'for EOG')
        logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                    session=session, run=run))
        return None
    msg = 'Creating EOG epochs …'
    logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                session=session, run=run))
    # Create the epochs. It's important not to reject epochs based on
    # amplitude!
    eog_epochs = create_eog_epochs(raw, ch_name=ch_names,
                                   baseline=(None, -0.2))
    if len(eog_epochs) == 0:
        msg = ('No EOG events could be found. Not running EOG artifact '
               'detection.')
        logger.warning(gen_log_message(message=msg, step=4,
                                       subject=subject,
                                       session=session, run=run))
        return None
    return eog_epochs
def detect_bad_components(
    *,
    which: Literal['eog', 'ecg'],
    epochs: mne.BaseEpochs,
    ica: mne.preprocessing.ICA,
    subject: str,
    session: str,
    report: mne.Report
) -> List[int]:
    """Automatically detect EOG- or ECG-related ICs, mark them for
    exclusion on ``ica``, and add diagnostic figures to ``report``.

    Returns the list of detected component indices (possibly empty).
    """
    evoked = epochs.average()
    artifact = which.upper()
    msg = f'Performing automated {artifact} artifact detection …'
    logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                session=session))
    if which == 'eog':
        inds, scores = ica.find_bads_eog(
            epochs,
            threshold=config.ica_eog_threshold
        )
    else:
        inds, scores = ica.find_bads_ecg(
            epochs, method='ctps',
            threshold=config.ica_ctps_ecg_threshold
        )
    if not inds:
        # Bug fix: refer to the setting by its plain config name for both
        # artifact types — the ECG branch previously said
        # "config.ica_ctps_ecg_threshold", inconsistent with the EOG branch.
        adjust_setting = ('ica_eog_threshold' if which == 'eog'
                          else 'ica_ctps_ecg_threshold')
        warn = (f'No {artifact}-related ICs detected, this is highly '
                f'suspicious. A manual check is suggested. You may wish to '
                f'lower "{adjust_setting}".')
        logger.warning(gen_log_message(message=warn, step=4,
                                       subject=subject,
                                       session=session))
    else:
        msg = (f'Detected {len(inds)} {artifact}-related ICs in '
               f'{len(epochs)} {artifact} epochs.')
        logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                    session=session))
    # Mark the artifact-related components for removal
    ica.exclude = inds
    # Plot scores
    fig = ica.plot_scores(scores, labels=which, show=config.interactive)
    report.add_figs_to_section(figs=fig, captions=f'Scores - {artifact}',
                               section=f'sub-{subject}')
    # Plot source time course
    fig = ica.plot_sources(evoked, show=config.interactive)
    report.add_figs_to_section(figs=fig,
                               captions=f'Source time course - {artifact}',
                               section=f'sub-{subject}')
    # Plot original & corrected data
    fig = ica.plot_overlay(evoked, show=config.interactive)
    report.add_figs_to_section(figs=fig, captions=f'Corrections - {artifact}',
                               section=f'sub-{subject}')
    return inds
def run_ica(subject, session=None):
    """Run ICA.

    Fit ICA on task epochs concatenated across all runs of one subject /
    session, automatically flag ECG- and EOG-related components, and write
    the ICA solution (.fif), a component-status TSV, and an HTML report to
    the derivatives folder.
    """
    task = config.get_task()
    bids_basename = BIDSPath(subject=subject,
                             session=session,
                             task=task,
                             acquisition=config.acq,
                             recording=config.rec,
                             space=config.space,
                             datatype=config.get_datatype(),
                             root=config.get_deriv_root(),
                             check=False)
    # Derived output / input paths, all sharing the same BIDS entities.
    raw_fname = bids_basename.copy().update(processing='filt', suffix='raw')
    ica_fname = bids_basename.copy().update(suffix='ica', extension='.fif')
    ica_components_fname = bids_basename.copy().update(processing='ica',
                                                       suffix='components',
                                                       extension='.tsv')
    report_fname = bids_basename.copy().update(processing='ica+components',
                                               suffix='report',
                                               extension='.html')
    # Generate a list of raw data paths (i.e., paths of individual runs)
    # we want to create epochs from.
    raw_fnames = []
    for run in config.get_runs(subject=subject):
        raw_fname.run = run
        # Prefer the first split when the run was saved as a split recording.
        if raw_fname.copy().update(split='01').fpath.exists():
            raw_fname.update(split='01')
        # NOTE(review): ``raw_fname`` is mutated in place and appended
        # without ``.copy()``, so every entry of ``raw_fnames`` aliases the
        # same BIDSPath object (and thus the last run) — confirm intended.
        raw_fnames.append(raw_fname)
    # Generate a unique event name -> event code mapping that can be used
    # across all runs.
    event_name_to_code_map = config.annotations_to_events(raw_paths=raw_fnames)
    # Now, generate epochs from each individual run
    epochs_all_runs = []
    eog_epochs_all_runs = []
    ecg_epochs_all_runs = []
    for run, raw_fname in zip(config.get_runs(subject=subject), raw_fnames):
        msg = f'Loading filtered raw data from {raw_fname} and creating epochs'
        # NOTE(review): logged with step=3 while the rest of this script
        # logs step=4 — confirm which step number is correct.
        logger.info(gen_log_message(message=msg, step=3, subject=subject,
                                    session=session, run=run))
        raw = mne.io.read_raw_fif(raw_fname, preload=True)
        # EOG epochs
        eog_epochs = make_eog_epochs(raw=raw, eog_channels=config.eog_channels,
                                     subject=subject, session=session, run=run)
        if eog_epochs is not None:
            eog_epochs_all_runs.append(eog_epochs)
        # ECG epochs
        ecg_epochs = make_ecg_epochs(raw=raw, subject=subject, session=session,
                                     run=run)
        if ecg_epochs is not None:
            ecg_epochs_all_runs.append(ecg_epochs)
        # Produce high-pass filtered version of the data for ICA.
        # Sanity check – make sure we're using the correct data!
        if config.resample_sfreq is not None:
            assert np.allclose(raw.info['sfreq'], config.resample_sfreq)
        if config.l_freq is not None:
            assert np.allclose(raw.info['highpass'], config.l_freq)
        filter_for_ica(raw=raw, subject=subject, session=session, run=run)
        # Only keep the subset of the mapping that applies to the current run
        event_id = event_name_to_code_map.copy()
        for event_name in event_id.copy().keys():
            if event_name not in raw.annotations.description:
                del event_id[event_name]
        msg = 'Creating task-related epochs …'
        # NOTE(review): step=3 again; see note above.
        logger.info(gen_log_message(message=msg, step=3, subject=subject,
                                    session=session, run=run))
        epochs = make_epochs(
            raw=raw,
            event_id=event_id,
            tmin=config.epochs_tmin,
            tmax=config.epochs_tmax,
            metadata_tmin=config.epochs_metadata_tmin,
            metadata_tmax=config.epochs_metadata_tmax,
            metadata_keep_first=config.epochs_metadata_keep_first,
            metadata_keep_last=config.epochs_metadata_keep_last,
            event_repeated=config.event_repeated,
            decim=config.decim
        )
        epochs_all_runs.append(epochs)
        del raw, epochs, eog_epochs, ecg_epochs  # free memory
    # Lastly, we can concatenate the epochs and set an EEG reference
    epochs = mne.concatenate_epochs(epochs_all_runs)
    if eog_epochs_all_runs:
        epochs_eog = mne.concatenate_epochs(eog_epochs_all_runs)
    else:
        epochs_eog = None
    if ecg_epochs_all_runs:
        epochs_ecg = mne.concatenate_epochs(ecg_epochs_all_runs)
    else:
        epochs_ecg = None
    del epochs_all_runs, eog_epochs_all_runs, ecg_epochs_all_runs
    epochs.load_data()
    if "eeg" in config.ch_types:
        projection = True if config.eeg_reference == 'average' else False
        epochs.set_eeg_reference(config.eeg_reference, projection=projection)
    # Now actually perform ICA.
    msg = 'Calculating ICA solution.'
    logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                session=session))
    ica = fit_ica(epochs, subject=subject, session=session)
    # Start a report
    title = f'ICA – sub-{subject}'
    if session is not None:
        title += f', ses-{session}'
    if task is not None:
        title += f', task-{task}'
    # NOTE(review): ``info_fname`` receives the Epochs object itself rather
    # than a filename — confirm this is what mne.Report expects here.
    report = Report(info_fname=epochs, title=title, verbose=False)
    # ECG and EOG component detection
    if epochs_ecg:
        ecg_ics = detect_bad_components(which='ecg', epochs=epochs_ecg,
                                        ica=ica,
                                        subject=subject, session=session,
                                        report=report)
    else:
        ecg_ics = []
    if epochs_eog:
        eog_ics = detect_bad_components(which='eog', epochs=epochs_eog,
                                        ica=ica,
                                        subject=subject, session=session,
                                        report=report)
    else:
        eog_ics = []
    # Save ICA to disk.
    # We also store the automatically identified ECG- and EOG-related ICs.
    msg = 'Saving ICA solution and detected artifacts to disk.'
    logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                session=session))
    ica.exclude = sorted(set(ecg_ics + eog_ics))
    ica.save(ica_fname)
    # Create TSV.
    tsv_data = pd.DataFrame(
        dict(component=list(range(ica.n_components_)),
             type=['ica'] * ica.n_components_,
             description=['Independent Component'] * ica.n_components_,
             status=['good'] * ica.n_components_,
             status_description=['n/a'] * ica.n_components_))
    # Flag the automatically detected components as 'bad' in the TSV.
    for component in ecg_ics:
        row_idx = tsv_data['component'] == component
        tsv_data.loc[row_idx, 'status'] = 'bad'
        tsv_data.loc[row_idx,
                     'status_description'] = 'Auto-detected ECG artifact'
    for component in eog_ics:
        row_idx = tsv_data['component'] == component
        tsv_data.loc[row_idx, 'status'] = 'bad'
        tsv_data.loc[row_idx,
                     'status_description'] = 'Auto-detected EOG artifact'
    tsv_data.to_csv(ica_components_fname, sep='\t', index=False)
    # Lastly, plot all ICs, and add them to the report for manual inspection.
    msg = 'Adding diagnostic plots for all ICs to the HTML report …'
    logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                session=session))
    for component_num in tqdm(range(ica.n_components_)):
        fig = ica.plot_properties(epochs,
                                  picks=component_num,
                                  psd_args={'fmax': 60},
                                  show=False)
        caption = f'IC {component_num}'
        if component_num in eog_ics and component_num in ecg_ics:
            caption += ' (EOG & ECG)'
        elif component_num in eog_ics:
            caption += ' (EOG)'
        elif component_num in ecg_ics:
            caption += ' (ECG)'
        report.add_figs_to_section(fig, section=f'sub-{subject}',
                                   captions=caption)
    open_browser = True if config.interactive else False
    report.save(report_fname, overwrite=True, open_browser=open_browser)
    msg = (f"ICA completed. Please carefully review the extracted ICs in the "
           f"report {report_fname.basename}, and mark all components you wish "
           f"to reject as 'bad' in {ica_components_fname.basename}")
    logger.info(gen_log_message(message=msg, step=4, subject=subject,
                                session=session))
@failsafe_run(on_error=on_error)
def main():
    """Run ICA for every (subject, session) pair, in parallel."""
    msg = 'Running Step 4: Compute ICA'
    logger.info(gen_log_message(step=4, message=msg))
    if config.spatial_filter == 'ica':
        # Fan the per-(subject, session) work out over N_JOBS workers.
        parallel, run_func, _ = parallel_func(run_ica, n_jobs=config.N_JOBS)
        parallel(run_func(subject, session) for subject, session in
                 itertools.product(config.get_subjects(),
                                   config.get_sessions()))
    # NOTE(review): the completion message is logged even when
    # spatial_filter != 'ica' and nothing was computed — confirm intended.
    msg = 'Completed Step 4: Compute ICA'
    logger.info(gen_log_message(step=4, message=msg))
# Allow running this pipeline step directly as a script.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import logging
import typing
import copy
from typing import Any, Dict, List, Text
from rasa.nlu.components import Component
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.tokenizers.tokenizer import Token, Tokenizer
from rasa.nlu.training_data import Message, TrainingData
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
pass
class MicroAddonsTokenizer(Tokenizer):
    """Rasa tokenizer component for Chinese ("zh") text, backed by the
    MicroTokenizer segmentation package.

    Bug fix: ``__init__`` previously called ``component_config.pop`` even
    when the default ``None`` was passed, raising ``AttributeError``; an
    empty dict is now substituted first.
    """

    provides = ["tokens"]
    language_list = ["zh"]

    defaults = {
        # default don't load custom dictionary
        "custom_dict": None,
        # Flag to check whether to split intents
        "intent_tokenization_flag": False,
        # Symbol on which intent should be split
        "intent_split_symbol": "_",
    }

    def __init__(self, component_config: Dict[Text, Any] = None) -> None:
        """Create the tokenizer, optionally loading a user dictionary.

        ``custom_dict`` is removed from the config before handing the rest
        to the base class.
        """
        if component_config is None:
            component_config = {}
        self.custom_dict = component_config.pop("custom_dict", None)
        if self.custom_dict:
            self.load_custom_dictionary(self.custom_dict)
        super().__init__(component_config)

    @staticmethod
    def load_custom_dictionary(custom_dict: Text) -> None:
        """Load a user-supplied dictionary file into MicroTokenizer."""
        import MicroTokenizer
        MicroTokenizer.load_userdict(custom_dict)

    @classmethod
    def required_packages(cls) -> List[Text]:
        """Third-party packages this component needs at runtime."""
        return ["MicroTokenizer"]

    def tokenize(self, message: Message, attribute: Text) -> List[Token]:
        """Segment the message attribute's text into Tokens with offsets.

        NOTE(review): offsets assume the concatenation of the segments
        equals the original text — confirm MicroTokenizer.cut never drops
        characters (e.g. whitespace).
        """
        import MicroTokenizer
        text = message.get(attribute)
        tokenized = MicroTokenizer.cut(text)
        tokens = []
        offset = 0
        for word in tokenized:
            tokens.append(Token(word, offset))
            offset += len(word)
        return tokens
|
nilq/baby-python
|
python
|
import datetime
import enum
from sqlalchemy import (
Column,
Integer,
String,
Enum,
TIMESTAMP,
ForeignKey,
Index,
Float,
)
from sqlalchemy.orm import relationship
from .base import Base
class SystemUpdate(Base):
    """ORM model recording one attempt to update a transit system."""
    __tablename__ = "system_update"
    pk = Column(Integer, primary_key=True)
    # Owning system; pairs with the System.updates relationship below.
    system_pk = Column(Integer, ForeignKey("system.pk"))
    class Status(enum.Enum):
        """Lifecycle of an update, from scheduling to completion."""
        SCHEDULED = 1
        IN_PROGRESS = 2
        SUCCESS = 3
        FAILED = 4
    # native_enum=False stores the value as VARCHAR, so adding states does
    # not require a database-level enum migration.
    status = Column(Enum(Status, native_enum=False), nullable=False)
    status_message = Column(String)  # TODO: rename stack trace?
    # Duration of the update — presumably seconds; TODO confirm units.
    total_duration = Column(Float)
    scheduled_at = Column(TIMESTAMP(timezone=True), default=datetime.datetime.utcnow)
    completed_at = Column(TIMESTAMP(timezone=True))
    # Snapshot of the configuration used for this particular update.
    config = Column(String)
    config_template = Column(String)
    config_parameters = Column(String)
    config_source_url = Column(String)
    transiter_version = Column(String)
    system = relationship("System", back_populates="updates")
    __table_args__ = (
        # Composite index supporting "updates of a system, by pk" queries.
        Index("system_update_system_pk_system_update_pk_idx", system_pk, pk),
    )
|
nilq/baby-python
|
python
|
from italian_csv_type_prediction.simple_types import IntegerType
import numpy as np
def test_integer_type():
    """IntegerType must accept integers (and their string forms) and
    reject non-numeric strings, dates, booleans, None and NaN — both as
    raw values and as strings."""
    predictor = IntegerType()
    valids = [
        3,
        6,
        0,
        "1.000.000.00"
    ]
    invalids = [
        "ciao",
        "12.12.94",
        "12.12.1994",
        False,
        True,
        None,
        np.nan
    ]
    for value in valids:
        for candidate in (value, str(value)):
            assert predictor.validate(candidate)
    for value in invalids:
        for candidate in (value, str(value)):
            assert not predictor.validate(candidate)
|
nilq/baby-python
|
python
|
class Node:
    """A binary-tree node holding a value and two child links."""

    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None


class BinarySearchTree:
    """Unbalanced binary search tree with insert, lookup and the three
    depth-first traversals.

    Bug fixes relative to the original:
    * ``insert`` looped forever when inserting a value already present
      (neither the ``<`` nor the ``>`` branch was taken); duplicates are
      now silently ignored.
    * the DFS methods crashed with AttributeError when called on an empty
      tree (``root is None``); they now return the accumulator unchanged.
    """

    def __init__(self):
        self.root = None

    def insert(self, data):
        """Insert ``data`` into the tree; duplicate values are ignored."""
        new_node = Node(data)
        if self.root is None:
            self.root = new_node
            return
        current_node = self.root
        while True:
            if data < current_node.data:
                if current_node.left is None:
                    current_node.left = new_node
                    return
                current_node = current_node.left
            elif data > current_node.data:
                if current_node.right is None:
                    current_node.right = new_node
                    return
                current_node = current_node.right
            else:
                # Value already present — nothing to do (previously this
                # case spun forever).
                return

    def lookup(self, data):
        """Return True if ``data`` is stored in the tree, else False."""
        current_node = self.root
        while current_node is not None:
            if current_node.data == data:
                return True
            if data < current_node.data:
                current_node = current_node.left
            else:
                current_node = current_node.right
        return False

    def dfs_in_order(self, current_node, my_list):
        """Append values to ``my_list`` in sorted (left, node, right) order."""
        if current_node is None:  # empty tree / missing child
            return my_list
        if current_node.left:
            self.dfs_in_order(current_node.left, my_list)
        my_list.append(current_node.data)
        if current_node.right:
            self.dfs_in_order(current_node.right, my_list)
        return my_list

    def dfs_pre_order(self, current_node, my_list):
        """Append values to ``my_list`` in (node, left, right) order."""
        if current_node is None:
            return my_list
        my_list.append(current_node.data)
        if current_node.left:
            self.dfs_pre_order(current_node.left, my_list)
        if current_node.right:
            self.dfs_pre_order(current_node.right, my_list)
        return my_list

    def dfs_post_order(self, current_node, my_list):
        """Append values to ``my_list`` in (left, right, node) order."""
        if current_node is None:
            return my_list
        if current_node.left:
            self.dfs_post_order(current_node.left, my_list)
        if current_node.right:
            self.dfs_post_order(current_node.right, my_list)
        my_list.append(current_node.data)
        return my_list
# Build a small demo tree and print each depth-first traversal order.
binary_search_tree = BinarySearchTree()
for value in (9, 4, 6, 20, 170, 15, 1):
    binary_search_tree.insert(value)
print(binary_search_tree.dfs_in_order(binary_search_tree.root, []))
print(binary_search_tree.dfs_pre_order(binary_search_tree.root, []))
print(binary_search_tree.dfs_post_order(binary_search_tree.root, []))
|
nilq/baby-python
|
python
|
"""Parameters for the model of marine particle microbial degradation
and coupling of degradation with sinking speed, part of the paper
"Sinking enhances the degradation of organic particle by marine bacteria"
Uria Alcolombri, François J. Peaudecerf, Vicente Fernandez, Lars Behrendt, Kang Soo Lee, Roman Stocker
Nature Geosciences (2021)
See Extended Data for more details on chosen parameter values.
Author: Francois Peaudecerf
Creation: 25.02.2019
History of modification
08.10.2019: modification of chosen parameter values
26.11.2019: modification of value of gamma_1 following new fit of experimental data
05.07.2021: editing for publication on Github
"""
# from __future__ import division
###### Numerical parameters #########
# All quantities are SI; the trailing comment on each line gives units and
# provenance (see the paper's Extended Data for chosen values).
mu = 1e-3  # kg/m/s, dynamic viscosity of water
Drho = 0.54  # kg/m^3, difference of volumic mass
g = 10  # m/s^2, gravity
R_0 = 4.4e-4  # m, initial radius in experiments
gamma = 1.06e-10  # m/s, radius shrinking rate from experiments with no flow
R_l = 125e-6  # m, minimum cut-off radius for size distribution
R_g = 750e-6  # m, maximum cut-off radius for size distribution
z_0 = 100  # m, depth of particle distribution
C = 200e3  # m-3, total particle abundance
beta = 4.0  # power law parameter for particle distribution
omega = 0.26  # exponent in the power law for settling velocity with radius (Alldredge 1988)
B = 4.18e-3  # m^(1-omega) s-1, pre-factor for settling velocity with radius (Alldredge 1988)
nu = 1.20e-6  # m2/s, kinematic viscosity modified for sea water at 15 degrees C in v1
D = 1e-9  # m2/s, diffusivity for Peclet estimation
delta = 0.412  # exponent in the Sherwood expression
gamma1 = 4.35e-10  # coefficient in the fit of degradation rate with Sh
# Aggregate coefficient combining gamma1 with the Schmidt-number and
# shear-rate factors of the Sherwood correlation.
gamma2 = 0.619*gamma1*(nu/D)**(1.0/3.0)*(2.0/nu)**delta  # aggregate coefficient for gamma
E = 4.55e-5  # kg/m^{chi}, pre-factor in the power law for dry mass as function of size (Alldredge 1988)
chi = 1.125  # exponent in the power law for dry mass as function of size (Alldredge 1988)
rho_dry = 15  # kg/m^3, dry mass per unit volume of alginate particles
|
nilq/baby-python
|
python
|
import os
import json
import shutil
import grp
from pathlib import Path
def change_key(my_json, old_key, new_key):
    """Rename ``old_key`` to ``new_key`` in ``my_json``, keeping its value.

    Prints an error and leaves the dict untouched when ``old_key`` is
    absent; prints an error (but still overwrites) when ``new_key`` is
    already present.
    """
    if old_key not in my_json:
        print ("ERROR: old_key "+old_key+" is not in the dictionary")
        return
    # Remove the old entry, remembering its value.
    value = my_json.pop(old_key)
    # error checking
    if new_key in my_json:
        print ("ERROR: new_key "+new_key+" is already in the dictionary")
    # Re-add the value under the new key.
    my_json[new_key] = value
def change_value(my_json, old_key, new_value):
    """Overwrite the value stored under ``old_key``; print an error and do
    nothing when the key is absent."""
    if old_key not in my_json:
        print ("ERROR: old_key "+old_key+" is not in the dictionary")
        return
    my_json[old_key] = new_value
def up(config):
    """Forward migration: rename the legacy hw* system users and groups to
    the submitty_* naming scheme, move their home directories, and rewrite
    every configuration file that still references the old names.

    Must run with root privileges; services are stopped first and have to
    be restarted manually afterwards (see the printed warning).
    """
    submitty_filename = str(Path(config.submitty['submitty_install_dir'], 'config', 'submitty.json'))
    submitty_filename_tmp = str(Path(config.submitty['submitty_install_dir'], 'config', 'submitty_tmp.json'))
    submitty_users_filename = str(Path(config.submitty['submitty_install_dir'], 'config', 'submitty_users.json'))
    submitty_users_filename_tmp = str(Path(config.submitty['submitty_install_dir'], 'config', 'submitty_users_tmp.json'))
    killall_path = Path(config.submitty['submitty_install_dir'], 'sbin', 'killall.py')
    INSTALL_SUBMITTY_filename = str(Path(config.submitty['submitty_install_dir'], '.setup', 'INSTALL_SUBMITTY.sh'))
    print ("my path",killall_path)
    # stop the old scheduler daemon, if it is still in use -- was deprecated in early Spring 2018
    os.system("systemctl stop submitty_grading_scheduler")
    try:
        os.remove("/etc/systemd/system/submitty_grading_scheduler.service")
    except OSError:
        # Unit file already gone — nothing to clean up.
        pass
    # stop all jobs that are using hwphp and hwcron
    os.system("systemctl stop submitty_autograding_worker")
    os.system("systemctl stop submitty_autograding_shipper")
    os.system("systemctl stop apache2.service")
    os.system("systemctl stop php7.0-fpm.service")
    os.system("su -c 'crontab -r' hwcron")
    os.system("su -c '"+str(killall_path)+"' hwcron")
    # change the usernames
    os.system("usermod -l submitty_php hwphp")
    os.system("usermod -l submitty_cgi hwcgi")
    os.system("usermod -l submitty_daemon hwcron")
    # change the group names
    os.system("groupmod --new-name submitty_daemon hwcron")
    os.system("groupmod --new-name submitty_php hwphp")
    os.system("groupmod --new-name submitty_cgi hwcgi")
    os.system("groupmod --new-name submitty_daemonphp hwcronphp")
    os.system("groupmod --new-name submitty_course_builders course_builders")
    # cannot restart until the submitty code is installed
    print ("WARNING: You will need to manually restart the website/shipper/worker")
    print (" systemctl start apache2.service")
    print (" systemctl start php7.0-fpm.service")
    print (" systemctl start submitty_autograding_shipper")
    print (" systemctl start submitty_autograding_worker")
    # Move each home directory to match the new user name.
    if os.path.exists("/home/hwcron"):
        shutil.move("/home/hwcron","/home/submitty_daemon")
    if os.path.exists("/home/hwphp"):
        shutil.move("/home/hwphp","/home/submitty_php")
    if os.path.exists("/home/hwcgi"):
        shutil.move("/home/hwcgi","/home/submitty_cgi")
    # edit the variables stored by configure submitty/installation
    with open (submitty_filename,"r") as open_file:
        my_json = json.load(open_file)
        change_value(my_json,"submitty_repository","/usr/local/submitty/GIT_CHECKOUT/Submitty")
    with open (submitty_filename_tmp,"w") as open_file:
        json.dump(my_json,open_file,indent=4)
    # write to another file & then remove the write permissions
    shutil.move(submitty_filename_tmp,submitty_filename)
    os.chmod(submitty_filename, 0o440)
    os.chown(submitty_filename, os.getuid(), grp.getgrnam('submitty_daemonphp').gr_gid)
    # Rewrite submitty_users.json keys/values to the new naming scheme.
    with open (submitty_users_filename,"r") as open_file:
        my_json = json.load(open_file)
        change_key(my_json,"hwcron_uid","daemon_uid")
        change_key(my_json,"hwcron_gid","daemon_gid")
        change_key(my_json,"hwcron_user","daemon_user")
        change_value(my_json,"daemon_user","submitty_daemon")
        change_value(my_json,"course_builders_group","submitty_course_builders")
        change_key(my_json,"hwphp_uid","php_uid")
        change_key(my_json,"hwphp_gid","php_gid")
        change_key(my_json,"hwphp_user","php_user")
        change_value(my_json,"php_user","submitty_php")
        change_key(my_json,"hwcgi_user","cgi_user")
        change_value(my_json,"cgi_user","submitty_cgi")
        change_key(my_json,"hwcronphp_group","daemonphp_group")
        change_value(my_json,"daemonphp_group","submitty_daemonphp")
    with open (submitty_users_filename_tmp,"w") as open_file:
        json.dump(my_json,open_file,indent=4)
    # write to another file & then remove the write permissions
    shutil.move(submitty_users_filename_tmp,submitty_users_filename)
    os.chmod(submitty_users_filename, 0o440)
    os.chown(submitty_users_filename, os.getuid(), grp.getgrnam('submitty_daemonphp').gr_gid)
    # Patch the installer script in place (temporarily made writable).
    os.chmod(INSTALL_SUBMITTY_filename, 0o700)
    os.system("sed -i -e \"s|'course_builders'|'submitty_course_builders'|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|HWCRON_USER='hwcron'|DAEMON_USER='submitty_daemon'|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|HWCRON_UID|DAEMON_UID|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|HWCRON_GID|DAEMON_GID|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|HWPHP_USER='hwphp'|PHP_USER='submitty_php'|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|HWPHP_UID|PHP_UID|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|HWPHP_GID|PHP_GID|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|HWCGI_USER='hwcgi'|CGI_USER='submitty_cgi'|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|HWCRONPHP_GROUP='hwcronphp'|DAEMONPHP_GROUP='submitty_daemonphp'|g\" "+INSTALL_SUBMITTY_filename)
    os.chmod(INSTALL_SUBMITTY_filename, 0o500)
    # repair & restart apache & phpfpm
    APACHE_FILENAME="/etc/apache2/sites-enabled/submitty.conf"
    os.system("sed -i -e \"s|hwcgi|submitty_cgi|g\" "+APACHE_FILENAME)
    APACHE_FILENAME2="/etc/apache2/sites-enabled/submitty_http.conf"
    os.system("sed -i -e \"s|hwcgi|submitty_cgi|g\" "+APACHE_FILENAME2)
    APACHE_FILENAME3="/etc/apache2/sites-enabled/vcs.conf"
    os.system("sed -i -e \"s|hwcgi|submitty_cgi|g\" "+APACHE_FILENAME3)
    PHPFPM_FILENAME="/etc/php/7.0/fpm/pool.d/submitty.conf"
    os.system("sed -i -e \"s|hwphp|submitty_php|g\" "+PHPFPM_FILENAME)
    os.system("systemctl start apache2.service")
    os.system("systemctl start php7.0-fpm.service")
    print ("finished migration changing system user names")
    pass
def down(config):
    """Rollback migration: restore the original hw* user/group names,
    home directories, and config-file references — the exact inverse of
    :func:`up`. Must run with root privileges; services are stopped and
    need a manual restart afterwards."""
    submitty_users_filename = str(Path(config.submitty['submitty_install_dir'], 'config', 'submitty_users.json'))
    submitty_users_filename_tmp = str(Path(config.submitty['submitty_install_dir'], 'config', 'submitty_users_tmp.json'))
    killall_path = Path(config.submitty['submitty_install_dir'], 'sbin', 'killall.py')
    INSTALL_SUBMITTY_filename = str(Path(config.submitty['submitty_install_dir'], '.setup', 'INSTALL_SUBMITTY.sh'))
    # stop all jobs that are using submitty_php and submitty_daemon
    os.system("systemctl stop submitty_autograding_worker")
    os.system("systemctl stop submitty_autograding_shipper")
    os.system("systemctl stop apache2.service")
    os.system("systemctl stop php7.0-fpm.service")
    os.system("su -c 'crontab -r' submitty_daemon")
    os.system("su -c '"+str(killall_path)+"' submitty_daemon")
    # change the usernames
    os.system("usermod -l hwphp submitty_php")
    os.system("usermod -l hwcgi submitty_cgi")
    os.system("usermod -l hwcron submitty_daemon")
    # change the group names
    os.system("groupmod --new-name hwcron submitty_daemon")
    os.system("groupmod --new-name hwphp submitty_php")
    os.system("groupmod --new-name hwcgi submitty_cgi")
    os.system("groupmod --new-name hwcronphp submitty_daemonphp")
    os.system("groupmod --new-name course_builders submitty_course_builders")
    # cannot restart until the submitty code is installed
    print ("WARNING: You will need to manually restart the website/shipper/worker")
    print (" systemctl start apache2.service")
    print (" systemctl start php7.0-fpm.service")
    print (" systemctl start submitty_autograding_shipper")
    print (" systemctl start submitty_autograding_worker")
    # Move each home directory back to its legacy name.
    if os.path.exists("/home/submitty_daemon"):
        shutil.move("/home/submitty_daemon","/home/hwcron")
    if os.path.exists("/home/submitty_php"):
        shutil.move("/home/submitty_php","/home/hwphp")
    if os.path.exists("/home/submitty_cgi"):
        shutil.move("/home/submitty_cgi","/home/hwcgi")
    # edit the variables stored by configure submitty/installation
    with open (submitty_users_filename,"r") as open_file:
        my_json = json.load(open_file)
        change_key(my_json,"daemon_uid","hwcron_uid")
        change_key(my_json,"daemon_gid","hwcron_gid")
        change_key(my_json,"daemon_user","hwcron_user")
        change_value(my_json,"hwcron_user","hwcron")
        change_value(my_json,"course_builders_group","course_builders")
        change_key(my_json,"php_uid","hwphp_uid")
        change_key(my_json,"php_gid","hwphp_gid")
        change_key(my_json,"php_user","hwphp_user")
        change_value(my_json,"hwphp_user","hwphp")
        change_key(my_json,"cgi_user","hwcgi_user")
        change_value(my_json,"hwcgi_user","hwcgi")
        change_key(my_json,"daemonphp_group","hwcronphp_group")
        change_value(my_json,"hwcronphp_group","hwcronphp")
    # write to another file & then remove the write permissions
    with open (submitty_users_filename_tmp,"w") as open_file:
        json.dump(my_json,open_file,indent=4)
    shutil.move(submitty_users_filename_tmp,submitty_users_filename)
    os.chmod(submitty_users_filename, 0o440)
    os.chown(submitty_users_filename, os.getuid(), grp.getgrnam('hwcronphp').gr_gid)
    # Patch the installer script back (temporarily made writable).
    os.chmod(INSTALL_SUBMITTY_filename, 0o700)
    os.system("sed -i -e \"s|'submitty_course_builders'|'course_builders'|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|DAEMON_USER='submitty_daemon'|HWCRON_USER='hwcron'|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|DAEMON_UID|HWCRON_UID|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|DAEMON_GID|HWCRON_GID|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|PHP_USER='submitty_php'|HWPHP_USER='hwphp'|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|PHP_UID|HWPHP_UID|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|PHP_GID|HWPHP_GID|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|CGI_USER='submitty_cgi'|HWCGI_USER='hwcgi'|g\" "+INSTALL_SUBMITTY_filename)
    os.system("sed -i -e \"s|DAEMONPHP_GROUP='submitty_daemonphp'|HWCRONPHP_GROUP='hwcronphp'|g\" "+INSTALL_SUBMITTY_filename)
    os.chmod(INSTALL_SUBMITTY_filename, 0o500)
    # repair & restart apache & phpfpm
    APACHE_FILENAME="/etc/apache2/sites-enabled/submitty.conf"
    os.system("sed -i -e \"s|submitty_cgi|hwcgi|g\" "+APACHE_FILENAME)
    APACHE_FILENAME2="/etc/apache2/sites-enabled/submitty_http.conf"
    os.system("sed -i -e \"s|submitty_cgi|hwcgi|g\" "+APACHE_FILENAME2)
    APACHE_FILENAME3="/etc/apache2/sites-enabled/vcs.conf"
    os.system("sed -i -e \"s|submitty_cgi|hwcgi|g\" "+APACHE_FILENAME3)
    PHPFPM_FILENAME="/etc/php/7.0/fpm/pool.d/submitty.conf"
    os.system("sed -i -e \"s|submitty_php|hwphp|g\" "+PHPFPM_FILENAME)
    os.system("systemctl start apache2.service")
    os.system("systemctl start php7.0-fpm.service")
    print ("finished rollback of migration changing system user names")
    pass
|
nilq/baby-python
|
python
|
from pytari2600.pytari2600 import new_atari
# Boot the emulator on the Dragster cartridge with video output enabled.
emulator = new_atari("../../roms/dragster.a26", headless=False)
# Step the emulated core forever; terminate the process to stop.
while True:
    emulator.core.step()
    # print("---AFTER EXECUTION---" + str(emulator.stella.clocks.system_clock))
    # print(emulator.stella.display_cache)
|
nilq/baby-python
|
python
|
# # -*- coding: utf-8 -*-
# """
# Created on Fri May 8 17:08:43 2020
#%%
import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import pi, c
from numpy.fft import fft, ifft, fftshift
#%%
def calculate_spectrum(z, E, H, f=3.7e9):
    """FFT-based 1D power density spectrum of the field pair (E, H)
    sampled along the (uniformly spaced) coordinate ``z``.

    Returns ``(nz, p)`` where ``nz`` is the parallel refractive index
    (kz/k0) of each spectral bin and ``p`` the complex power density.
    """
    k0 = 2*pi*f/c
    lambda0 = c/f
    # fourier domain points
    B = 2**18
    Efft = np.fft.fftshift(np.fft.fft(E, B))
    Hfft = np.fft.fftshift(np.fft.fft(H, B))
    # fourier domain bins
    dz = z[1] - z[0]  # assumes spatial period is constant
    df = 1/(B*dz)
    K = np.arange(-B/2, +B/2)
    # spatial frequency bins
    Fz = K*df
    # parallel index is kz/k0
    nz = (2*pi/k0)*Fz
    # ~ power density spectrum (Poynting-like 1/2 E x H*)
    p = (dz)**2/lambda0 * (1/2*Efft*np.conj(Hfft))
    return (nz, p)


#%%
def spectrum_from_HFSS_file(Ereal, Eimag, Hreal, Himag, f):
    """Load HFSS ``.fld`` exports of E and H along a field line and return
    ``(p, nz)`` — the power density spectrum and parallel-index bins.

    Bug fixes relative to the original:
    * ``Hx`` is now padded from the interpolated ``Hx`` — it was
      mistakenly padded from ``Ey``, so H was a copy of the E field;
    * the user-supplied frequency ``f`` is forwarded to
      ``calculate_spectrum`` — it previously used the global ``f0``,
      silently ignoring the ``f`` argument.
    """
    x, y, z, Ex_re, Ey_re, Ez_re = np.loadtxt(Ereal, skiprows=2, unpack=True)
    x, y, z, Ex_im, Ey_im, Ez_im = np.loadtxt(Eimag, skiprows=2, unpack=True)
    x, y, z, Hx_re, Hy_re, Hz_re = np.loadtxt(Hreal, skiprows=2, unpack=True)
    x, y, z, Hx_im, Hy_im, Hz_im = np.loadtxt(Himag, skiprows=2, unpack=True)
    # create a curvilinear abscissa along the exported field line
    _s = np.sqrt((x - x[0])**2 + (y - y[0])**2 + (z - z[0])**2)
    # Only the transverse pair (Ey, Hx) enters the spectrum below;
    # replace NaN (points outside the solved region) with 0.
    _Ey = np.nan_to_num(Ey_re + 1j*Ey_im)
    _Hx = np.nan_to_num(Hx_re + 1j*Hx_im)
    # resample on a regular grid whose length is the next power of two
    nb_s = int(2**np.ceil(np.log2(len(_s))))
    s = np.linspace(np.min(_s), np.max(_s), num=nb_s)
    Ey = np.interp(s, _s, _Ey)
    Hx = np.interp(s, _s, _Hx)
    # zero-pad the fields (and extend the abscissa linearly) to refine the
    # spectral resolution
    N = 1000
    s = np.pad(s, N, mode='reflect', reflect_type='odd')
    Ey = np.pad(Ey, N)
    Hx = np.pad(Hx, N)  # bug fix: was np.pad(Ey, N)
    nz, p = calculate_spectrum(s, Ey, -Hx, f=f)  # bug fix: was f=f0
    return p, nz
#%% monopole and dipole in curved model
# Operating frequency and derived free-space wavenumber.
f0 = 55e6
w0 = 2*pi*f0
k0 = w0/c
Ereal = 'WEST_ICRH_Curved_Vacuum_monopole_Ereal.fld'
Eimag = 'WEST_ICRH_Curved_Vacuum_monopole_Eimag.fld'
Hreal = 'WEST_ICRH_Curved_Vacuum_monopole_Hreal.fld'
Himag = 'WEST_ICRH_Curved_Vacuum_monopole_Himag.fld'
p_curved_monopole, nz_curved_monopole = spectrum_from_HFSS_file(Ereal, Eimag, Hreal, Himag, f=f0)
Ereal = 'WEST_ICRH_Curved_Vacuum_dipole_Ereal.fld'
Eimag = 'WEST_ICRH_Curved_Vacuum_dipole_Eimag.fld'
Hreal = 'WEST_ICRH_Curved_Vacuum_dipole_Hreal.fld'
Himag = 'WEST_ICRH_Curved_Vacuum_dipole_Himag.fld'
p_curved_dipole, nz_curved_dipole = spectrum_from_HFSS_file(Ereal, Eimag, Hreal, Himag, f=f0)
#%%
# cut values over than |k|>100
_kz_all = np.real(k0*nz_curved_dipole)
_kz = _kz_all[np.abs(_kz_all)<100]
_pz = p_curved_dipole[np.abs(_kz_all)<100]
np.savetxt('WEST_ICRH_Spectrum_vacuum.csv', np.vstack([_kz, _pz]).T, header='kz \t 1D power density spectrum [a.u.]')
#%%
# Plot the dipole spectrum (other curves kept commented for reference).
fig, ax = plt.subplots()
# ax.plot(k0*nz_flat, np.abs(p_flat)/np.max(np.abs(p_flat)) )
# ax.plot(k0*nz_curved_dipole, np.abs(p_curved_dipole)/np.max(np.abs(p_curved_dipole)), lw=2 )
# ax.plot(k0*nz_curved_monopole, np.abs(p_curved_monopole)/np.max(np.abs(p_curved_monopole)), lw=2 )
ax.plot(k0*nz_curved_dipole, np.abs(p_curved_dipole), lw=2 )
# ax.plot(k0*nz_curved_monopole, np.abs(p_curved_monopole), lw=2 )
ax.set_xlim(-30, +30)
ax.set_ylabel('Power spectrum density [a.u.]', fontsize=14)
ax.set_xlabel('Toroidal wavenumber $k_z$ [$m^{-1}$]', fontsize=14)
ax.set_title('WEST ICRH Antenna Power Spectrum Density (Vacuum)', fontsize=14)
ax.tick_params(labelsize=14)
ax.grid(True, alpha=0.2)
fig.tight_layout()
fig.savefig('WEST_ICRH_Spectrum_Vacuum.png', dpi=150)
# % Power conservation checking
# disp(['Power conservation checking : total transmited power [W] :', num2str(dnz*sum(real(dP_nz)))])
# #%% flat model - dielectric medium
# Ereal = 'WEST_ICRH_Flat_Dielectric_Ereal.fld'
# Eimag = 'WEST_ICRH_Flat_Dielectric_Eimag.fld'
# Hreal = 'WEST_ICRH_Flat_Dielectric_Hreal.fld'
# Himag = 'WEST_ICRH_Flat_Dielectric_Himag.fld'
# p_flat_dielectric, nz_flat_dielectric = spectrum_from_HFSS_file(Ereal, Eimag, Hreal, Himag, f=f0)
# #%%
# fig, ax = plt.subplots()
# ax.plot(k0*nz_flat_dielectric, np.abs(p_flat_dielectric)/np.max(np.abs(p_flat_dielectric)) )
# ax.set_xlim(-30, +30)
# ax.set_ylabel('Power spectrum density [a.u.]', fontsize=14)
# ax.set_xlabel('Toroidal wavenumber $k_z$ [$m^{-1}$]', fontsize=14)
# ax.set_title('WEST ICRH Antenna Power Spectrum Density (Vacuum)', fontsize=14)
# ax.tick_params(labelsize=14)
# ax.grid(True, alpha=0.2)
# fig.tight_layout()
nilq/baby-python
|
python
|
from dataclasses import dataclass
from bindings.csw.animate_type import AnimateType
# Target XML namespace for the SMIL 2.0 Language profile.
__NAMESPACE__ = "http://www.w3.org/2001/SMIL20/Language"
@dataclass
class Animate1(AnimateType):
    """SMIL ``animate`` element bound to the SMIL 2.0 Language namespace.

    Extends :class:`AnimateType`; all animation attributes live on the base
    class. ``Meta`` carries the element name / namespace — presumably
    consumed by the XML (de)serialization framework that generated this
    binding; confirm against the generator's conventions.
    """
    class Meta:
        name = "animate"
        namespace = "http://www.w3.org/2001/SMIL20/Language"
|
nilq/baby-python
|
python
|
import unittest
import electricity
# NOTE(review): class name looks like a typo for "VersionTestCase"; left
# unchanged so that any external selection of tests by name keeps working.
class VersionTestCas(unittest.TestCase):
    """Checks that the package exposes the expected version string."""

    def test_version(self):
        # Pinned literal: must be bumped together with electricity.__version__.
        self.assertEqual(electricity.__version__, '0.1')
|
nilq/baby-python
|
python
|
from keris.layers.merge import Concatenate, Sum
from keris.layers.core import Input, Dense
from keris.layers.convolution import Conv2D
from keris.layers.dropout import Dropout
from keris.layers.pool import MaxPooling2D, GlobalAveragePooling2D
|
nilq/baby-python
|
python
|
from functools import wraps
def tags(tag_name):
    """Decorator factory: wrap a function's string result in <tag_name>...</tag_name>.

    The returned decorator preserves the wrapped function's metadata via
    functools.wraps.
    """
    def decorate(func):
        @wraps(func)
        def wrapper(name):
            body = func(name)
            return "<{0}>{1}</{0}>".format(tag_name, body)
        return wrapper
    return decorate
@tags("p")
def get_text(name):
"""returns some text"""
return "Hello "+name
# Demonstrate that @wraps preserved the wrapped function's metadata.
# Fixed: the originals were Python-2-only `print` statements; the call form
# below is valid on both Python 2 and Python 3.
print(get_text.__name__)    # get_text
print(get_text.__doc__)     # returns some text
print(get_text.__module__)  # __main__
|
nilq/baby-python
|
python
|
import os
import torch
import numpy as np
from torch.autograd import Variable
from .base_trainer import BaseTrainer
from model import networks
from model.loss import AttnDiscriminatorLoss, AttnGeneratorLoss, KLLoss
from utils.util import convert_back_to_text
from collections import OrderedDict
dirname = os.path.dirname(__file__)
class AttnGANtrainer(BaseTrainer):
    """AttnGAN text-to-image trainer.

    Wires together a multi-stage generator, one discriminator per image
    resolution (64/128/256 px as used in set_input), pretrained DAMSM
    text/image encoders, and their losses/optimizers, and drives one
    D-then-G update per optimize_parameters() call.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add new dataset-specific options, and rewrite default values for existing options.
        Parameters:
            parser -- original option parser
            is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
        Returns:
            the modified parser.
        For pix2pix, we do not use image buffer
        The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
        By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
        """
        # changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
        # NOTE(review): 'exp_namem' looks like a typo for 'exp_name' — as written
        # it just adds an unused default attribute; confirm against the option
        # definitions before fixing.
        parser.set_defaults(exp_namem='AttnGAN', netG='synthesis', netD='synthesis')
        if is_train:
            parser.add_argument('--gamma1', type=float, default=4.0, help='gamma 1 for damsm')
            parser.add_argument('--gamma2', type=float, default=5.0, help='gamma 2 for damsm')
            parser.add_argument('--gamma3', type=float, default=10.0, help='gamma 3 for damsm')
            parser.add_argument('--g_lambda', type=float, default=5.0, help='gamma 3 for damsm')
        return parser

    def __init__(self, opt):
        """Initialize the pix2pix class.
        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        super(AttnGANtrainer, self).__init__(opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['G', 'D']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        self.visual_names = ['fake_imgs', 'real_imgs']
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        self.model_names = ['G', 'D']
        # define networks (both generator and discriminator)
        self.netG = networks.define_G(opt=opt, gpu_ids=self.gpu_ids)
        self.netD = networks.define_D(opt=opt, gpu_ids=self.gpu_ids)
        self.rnn_encoder, self.cnn_encoder = networks.define_DAMSM(opt=opt, gpu_ids=self.gpu_ids)
        self.generator_loss = AttnGeneratorLoss(opt)
        self.discriminator_loss = AttnDiscriminatorLoss()
        self.KL_loss = KLLoss()
        # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
        self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.g_lr, betas=(opt.beta_1, 0.999))
        self.optimizers.append(self.optimizer_G)
        # one Adam optimizer per discriminator stage (netD is a list)
        self.optimizer_D = []
        for i in range(len(self.netD)):
            self.optimizer_D.append(torch.optim.Adam(self.netD[i].parameters(), lr=opt.g_lr, betas=(opt.beta_1, 0.999)))
            self.optimizers.append(self.optimizer_D[i])
        # setup noise
        # NOTE(review): Variable(..., volatile=True) is deprecated/removed in
        # modern PyTorch; inference-time no-grad should use torch.no_grad().
        self.noise = Variable(torch.FloatTensor(self.batch_size, 100), volatile=True)
        self.noise.to(self.device)
        # setup labels
        self.real_labels, self.fake_labels, self.match_labels = self.prepare_labels()

    def set_input(self, data):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.
        Parameters:
            input (dict): include the data itself and its metadata information.
        """
        # one real image per resolution, ordered coarse-to-fine to match netD
        self.real_imgs = []
        self.real_imgs.append(data["right_images_64"].to(self.device))
        self.real_imgs.append(data["right_images_128"].to(self.device))
        self.real_imgs.append(data["right_images_256"].to(self.device))
        self.right_captions = data["right_captions"].to(self.device)
        self.right_caption_lengths = data["right_caption_lengths"].to(self.device)
        self.class_ids = np.array(data['class_id'])
        self.labels = torch.LongTensor(range(self.batch_size)).to(self.device)
        # # other image
        # self.wrong_images = []
        # self.wrong_images.append(data["wrong_images_64"].to(self.device))
        # self.wrong_images.append(data["wrong_images_128"].to(self.device))
        # self.wrong_images.append(data["wrong_images_256"].to(self.device))
        # self.wrong_captions = data["wrong_captions"].to(self.device)
        # self.wrong_caption_lengths = data["wrong_caption_lengths"].to(self.device)

    def prepare_labels(self):
        """Build constant real/fake/match label tensors (moved to GPU if available)."""
        real_labels = Variable(torch.FloatTensor(self.batch_size).fill_(1))
        fake_labels = Variable(torch.FloatTensor(self.batch_size).fill_(0))
        match_labels = Variable(torch.LongTensor(range(self.batch_size)))
        if torch.cuda.is_available():
            real_labels = real_labels.cuda()
            fake_labels = fake_labels.cuda()
            match_labels = match_labels.cuda()
        return real_labels, fake_labels, match_labels

    def forward(self):
        """Encode the captions with the (frozen) DAMSM text encoder and generate fakes."""
        # words_embs: batch_size x nef x seq_len
        # sent_emb: batch_size x nef
        self.words_embs, self.sent_emb = self.rnn_encoder(self.right_captions, self.right_caption_lengths)
        # detach: the DAMSM encoders are not trained here
        self.words_embs, self.sent_emb = self.words_embs.detach(), self.sent_emb.detach()
        # mask out padding tokens (id 0), truncated to the embedded length
        mask = (self.right_captions == 0)
        num_words = self.words_embs.size(2)
        if mask.size(1) > num_words:
            mask = mask[:, :num_words]
        #######################################################
        # (2) Generate fake images
        ######################################################
        self.noise.data.normal_(0, 1)
        self.fake_imgs, _, self.mu, self.logvar = self.netG(self.noise, self.sent_emb, self.words_embs, mask)

    def backward_D(self):
        """Calculate loss for the discriminator"""
        #######################################################
        # (3) calculate D network loss
        ######################################################
        self.loss_D = 0
        for i in range(len(self.netD)):
            self.netD[i].zero_grad()
            loss = self.discriminator_loss(self.netD[i], self.real_imgs[i], self.fake_imgs[i],
                                           self.sent_emb, self.real_labels, self.fake_labels)
            # backward and update parameters
            loss.backward()
            # optimizersD[i].step()
            self.loss_D += loss

    def backward_G(self):
        """Accumulate the generator loss (adversarial + DAMSM + KL) and backprop."""
        #######################################################
        # (4) Update G network: maximize log(D(G(z)))
        ######################################################
        # compute total loss for training G
        # do not need to compute gradient for Ds
        # self.set_requires_grad_value(netsD, False)
        self.netG.zero_grad()
        self.loss_G = self.generator_loss(self.netD, self.cnn_encoder, self.fake_imgs, self.real_labels,
                                          self.words_embs, self.sent_emb, self.match_labels,
                                          self.right_caption_lengths, self.class_ids, self.opt)
        kl_loss = self.KL_loss(self.mu, self.logvar)
        self.loss_G += kl_loss
        # backward and update parameters
        self.loss_G.backward()

    def optimize_parameters(self):
        """One full training step: forward, update every D stage, then update G."""
        self.forward()  # compute the fake images from text embedding: G(s, w)
        # update D
        self.set_requires_grad(self.netD, True)
        # set D's gradients to zero
        for i in range(len(self.netD)):
            self.optimizer_D[i].zero_grad()
        self.backward_D()  # calculate gradients for D
        # update D's weights
        for i in range(len(self.netD)):
            self.optimizer_D[i].step()
        # update G
        self.set_requires_grad(self.netD, False)
        self.optimizer_G.zero_grad()  # set G's gradients to zero
        self.backward_G()  # calculate graidents for G
        self.optimizer_G.step()  # udpate G's weights

    def get_current_visuals(self, vocab):
        """Return visualization images. train.py will display these images with visdom, and save the images to a HTML"""
        visual_ret = OrderedDict()
        wordidarray = self.right_captions.detach().cpu().numpy()
        for j, name in enumerate(self.visual_names):
            if isinstance(name, str):
                results = getattr(self, name)
                if type(results) is list:
                    # multi-resolution output: one entry per stage
                    for i, size in enumerate(['64', '128', '256']):
                        title = name + '-' + size
                        if i == 0 and j == 0 :
                            # first panel is titled with the decoded caption
                            title = convert_back_to_text(wordidarray[0], vocab)
                        visual_ret[title] = results[i]
                else:
                    visual_ret[name] = results
        return visual_ret
|
nilq/baby-python
|
python
|
import os
from setuptools import setup


# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
    """Return the text of *fname*, resolved relative to this setup.py."""
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


def parse_requirements_txt(fname="requirements.txt"):
    """Return the requirement specifiers listed in a pip requirements file.

    Replaces the former ``pip.req.parse_requirements`` call: that function
    was removed from pip's public API in pip 10, which made this setup.py
    crash on any modern pip. Blank lines and ``#`` comment lines are skipped.
    """
    specs = []
    for line in read(fname).splitlines():
        line = line.strip()
        if line and not line.startswith("#"):
            specs.append(line)
    return specs


# parse requirements
reqs = parse_requirements_txt()

# first line of the version file is the version string
version = read('tensorbuilder/version.txt').split("\n")[0]

setup(
    name = "tensorbuilder",
    version = version,
    author = "Cristian Garcia",
    author_email = "cgarcia.e88@gmail.com",
    description = ("A light wrapper over TensorFlow that enables you to easily create complex deep neural networks using the Builder Pattern through a functional fluent immutable API"),
    license = "MIT",
    keywords = ["tensorflow", "deep learning", "neural networks"],
    url = "https://github.com/cgarciae/tensorbuilder",
    packages = [
        'tensorbuilder',
        'tensorbuilder.tensordata',
        'tensorbuilder.patches',
        'tensorbuilder.tests'
    ],
    package_data={
        '': ['LICENCE', 'requirements.txt', 'README.md', 'CHANGELOG.md'],
        'tensorbuilder': ['version.txt', 'README-template.md']
    },
    download_url = 'https://github.com/cgarciae/tensorbuilder/tarball/{0}'.format(version),
    include_package_data = True,
    long_description = read('README.md'),
    install_requires = reqs
)
|
nilq/baby-python
|
python
|
from packetbeat import BaseTest
"""
Tests for trimming long results in pgsql.
"""
class Test(BaseTest):
    """Exercises pgsql response trimming against a canned long-result pcap.

    The capture (pgsql_long_result.pcap) always yields one transaction with
    15 rows; each test checks how much of that is retained in the exported
    ``response`` field under a different trimming configuration.
    """

    def test_default_settings(self):
        """
        Should store the entire rows but only
        10 rows with default settings.
        """
        self.render_config_template(
            pgsql_ports=[5432],
            pgsql_send_response=True
        )
        self.run_packetbeat(pcap="pgsql_long_result.pcap")
        objs = self.read_output()
        assert len(objs) == 1
        res = objs[0]
        assert res["pgsql.num_rows"] == 15
        lines = res["response"].strip().split("\n")
        assert len(lines) == 11  # 10 plus header
        # rows start after the header/separator lines; each full row is 237 chars
        for line in lines[4:]:
            print(line, len(line))
            assert len(line) == 237

    def test_max_row_length(self):
        """
        Should be able to cap the row length.
        """
        self.render_config_template(
            pgsql_ports=[5432],
            pgsql_send_response=True,
            pgsql_max_row_length=79
        )
        self.run_packetbeat(pcap="pgsql_long_result.pcap",
                            debug_selectors=["pgsqldetailed"])
        objs = self.read_output()
        assert len(objs) == 1
        res = objs[0]
        assert res["pgsql.num_rows"] == 15
        lines = res["response"].strip().split("\n")
        assert len(lines) == 11  # 10 plus header
        for line in lines[4:]:
            print(line, len(line))
            assert len(line) == 83  # 79 plus two separators and two quotes

    def test_max_rows(self):
        """
        Should be able to cap the number of rows
        """
        self.render_config_template(
            pgsql_ports=[5432],
            pgsql_send_response=True,
            pgsql_max_row_length=79,
            pgsql_max_rows=5
        )
        self.run_packetbeat(pcap="pgsql_long_result.pcap",
                            debug_selectors=["pgsqldetailed"])
        objs = self.read_output()
        assert len(objs) == 1
        res = objs[0]
        assert res["pgsql.num_rows"] == 15
        lines = res["response"].strip().split("\n")
        assert len(lines) == 6  # 5 plus header
    def test_larger_max_rows(self):
        """
        Should be able to cap the number of rows
        """
        self.render_config_template(
            pgsql_ports=[5432],
            pgsql_send_response=True,
            pgsql_max_rows=2000
        )
        self.run_packetbeat(pcap="pgsql_long_result.pcap",
                            debug_selectors=["pgsqldetailed"])
        objs = self.read_output()
        assert len(objs) == 1
        res = objs[0]
        assert res["pgsql.num_rows"] == 15
        lines = res["response"].strip().split("\n")
        # a cap larger than the result leaves everything in place
        assert len(lines) == 16  # 15 plus header
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.3 on 2018-03-16 19:42
from django.db import migrations
class Migration(migrations.Migration):
    """Renames RawResults.role_type to user_type.

    Auto-generated by Django; do not edit the operations of a migration that
    may already be applied.
    """

    dependencies = [
        ('nps', '0021_auto_20180316_1239'),
    ]
    operations = [
        migrations.RenameField(
            model_name='rawresults',
            old_name='role_type',
            new_name='user_type',
        ),
    ]
|
nilq/baby-python
|
python
|
# Import Abaqus and External Modules
from abaqusConstants import *
from abaqus import *
import random
import regionToolset
import mesh
import step
import part
# Abaqus/CAE scripting (Python 2 inside Abaqus): builds a 2D hexagonal-grain
# microstructure model of a strip, meshes it, applies tension or bending BCs
# and writes an input deck per model realisation.
# NOTE(review): randomSeed is never used below — confirm whether grain
# randomisation was removed intentionally.
randomSeed=[41557]
for eachModel in range(0,1):
    #
    # Create Model Database
    VerFile=Mdb(pathName="MStructure")
    VerModel=VerFile.models['Model-1']
    VerAssembly=VerModel.rootAssembly
    #
    # Underlying Geometry
    xSize=0.1
    ySize=0.05
    #
    # Microstructure Geometry
    charLength=0.00595 #Grain Side Length
    numX=15
    numY=5
    #
    # Other Parametersvgrain vumat
    meshSize=0.001
    analysis='Tension' # Options: Tension, Bending
    #
    # Draw Base Part
    BasePart=VerModel.Part(name='Base', dimensionality=THREE_D,type=DEFORMABLE_BODY)
    BaseSketch = VerModel.ConstrainedSketch(name='Base',sheetSize=200.0)
    #
    # rectangular outline of the strip
    BaseSketch.Line(point1=(0.,0.),point2=(xSize,0.))
    BaseSketch.Line(point1=(xSize,0.),point2=(xSize,ySize))
    BaseSketch.Line(point1=(xSize,ySize),point2=(0.,ySize))
    BaseSketch.Line(point1=(0.,ySize),point2=(0.,0.))
    BasePart.BaseSolidExtrude(sketch=BaseSketch, depth=0.006)
    BasePart=VerModel.parts['Base']
    #
    # Draw Microstructure and Partition Base Part
    ParSketch=VerModel.ConstrainedSketch(name='Par',sheetSize=200)
    # hexagon edge projections (30 degree half-angle)
    yLength=sin(radians(30.))*charLength
    xLength=cos(radians(30.))*charLength
    offsetX=0.
    for i in range(0,numX):
        offsetY=0.
        for j in range(0,numY):
            # odd rows are shifted half a cell to tile the hexagons
            if j%2==0:
                xPos=offsetX
            else:
                xPos=offsetX+xLength
            ParSketch.Line(point1=(xLength+xPos,-yLength+offsetY),point2=(xLength+xPos,yLength+offsetY))
            ParSketch.Line(point1=(xLength+xPos,+yLength+offsetY),point2=(xPos,2.*yLength+offsetY))
            ParSketch.Line(point1=(xLength+xPos,-yLength+offsetY),point2=(xPos,-2.*yLength+offsetY))
            offsetY=offsetY+3.*yLength
        offsetX=offsetX+2.*xLength
    # find the front face (normal +z) to receive the partition sketch
    for eachFace in BasePart.faces:
        if eachFace.getNormal()==(0.0,0.0,1.0):
            targetFace=eachFace
    # NOTE(review): Python 2 print statement — this script only runs inside
    # Abaqus' Python 2 interpreter.
    print targetFace
    BasePart.PartitionFaceBySketch(faces=targetFace, sketch=ParSketch)
    #
    # Generate Sections and Section Assignments
    # one section/material label per grain face: Mat1, Mat2, ...
    labelcount=1
    regions=BasePart.faces
    for eachregion in regions:
        mlabel='Mat'+str(labelcount)
        VerModel.PEGSection(name=mlabel, material=mlabel, thickness=0.01,
            wedgeAngle1=0.0, wedgeAngle2=0.0)
        BasePart.SectionAssignment(region=(eachregion,),
            sectionName=mlabel, offset=0.0, offsetField='')
        labelcount=labelcount+1
    #
    # Mesh Part
    BasePart.ReferencePoint(point=(0.0, 0.0, 0.0))
    offsetX=0.
    offsetY=0.
    # NOTE(review): partTransform is never defined anywhere in this script —
    # this line raises NameError as written; confirm the intended transform.
    ParSketch2=VerModel.ConstrainedSketch(name='Hex',sheetSize=200, transform=partTransform)
    for i in range(0,2*numX):
        ParSketch2.Line(point1=(offsetX,0.),point2=(offsetX,2.*charLength*numY))
        offsetX=offsetX+xLength
    for i in range(0,numY):
        ParSketch2.Line(point1=(0.,offsetY),point2=(2.*charLength*numX,offsetY))
        offsetY=offsetY+3.*yLength
    BasePart.PartitionFaceBySketch(faces=BasePart.faces, sketch=ParSketch2)
    BasePart.setMeshControls(regions=BasePart.faces, elemShape=QUAD, technique=SWEEP)
    BasePart.seedPart(size=meshSize)
    pickedRegions =(BasePart.faces, )
    elemType1 = mesh.ElemType(elemCode=CPEG8R, elemLibrary=STANDARD)
    BasePart.setElementType(regions=pickedRegions, elemTypes=(elemType1,))
    BasePart.generateMesh()
    #
    #Steps
    VerModel.StaticStep(name='Step-1', previous='Initial',
        maxNumInc=100000, initialInc=0.03, minInc=1e-07, maxInc=0.15, nlgeom=ON, timePeriod=20.)
    VerModel.fieldOutputRequests['F-Output-1'].setValues(variables=(
        'LE', 'RF', 'S', 'U'), timeInterval=0.2, timeMarks=OFF)
    #
    #Boundary Conditions
    VerAssembly.Instance(name='Strut',part=BasePart, dependent=ON)
    iNodes=VerAssembly.instances['Strut'].nodes
    # node sets picked by bounding boxes with a mesh-relative tolerance
    toler=0.01*meshSize
    Left=iNodes.getByBoundingBox(xMin=-toler,xMax=toler,yMin=-toler,yMax=ySize+toler)
    BLeft=iNodes.getByBoundingBox(xMin=-toler,xMax=toler,yMin=-toler,yMax=toler)
    Right=iNodes.getByBoundingBox(xMin=xSize-toler,xMax=xSize+toler,yMin=toler,yMax=ySize+toler)
    BRight=iNodes.getByBoundingBox(xMin=xSize-toler,xMax=xSize+toler,yMin=-toler,yMax=toler)
    #
    Lregion=regionToolset.Region(nodes=Left)
    BLregion=regionToolset.Region(nodes=BLeft)
    Rregion=regionToolset.Region(nodes=Right)
    BRregion=regionToolset.Region(nodes=BRight)
    #
    VerModel.SmoothStepAmplitude(name='Amp-1', timeSpan=TOTAL, data=(( 0.0, 0.0), (24.00, 1.0)))
    VerModel.DisplacementBC(name='LeftX', createStepName='Initial',
        region=Lregion, u1=0.0, u2=UNSET, u3=UNSET, ur1=UNSET, ur2=UNSET,
        ur3=UNSET, amplitude=UNSET, fixed=OFF, distributionType=UNIFORM)
    VerModel.DisplacementBC(name='BottomY1', createStepName='Initial',
        region=BLregion, u1=UNSET, u2=0.0, u3=UNSET, ur1=UNSET, ur2=UNSET,
        ur3=UNSET, amplitude=UNSET, fixed=OFF, distributionType=UNIFORM)
    if analysis=='Tension':
        VerModel.DisplacementBC(name='Tension', createStepName='Step-1',
            region=BRregion, u1=0.5*xSize, u2=UNSET, u3=UNSET, ur1=UNSET, ur2=UNSET,
            ur3=UNSET, amplitude=UNSET, fixed=OFF, distributionType=UNIFORM)
        VerModel.DisplacementBC(name='BottomY2', createStepName='Initial',
            region=BRregion, u1=UNSET, u2=0.0, u3=UNSET, ur1=UNSET, ur2=UNSET,
            ur3=UNSET, amplitude=UNSET, fixed=OFF, distributionType=UNIFORM)
        VerModel.boundaryConditions['Tension'].setValues(amplitude='Amp-1')
    else:
        VerModel.DisplacementBC(name='Bending', createStepName='Step-1',
            region=BRregion, u1=UNSET, u2=UNSET, u3=UNSET, ur1=UNSET, ur2=UNSET,
            ur3=-6., amplitude=UNSET, fixed=OFF, distributionType=UNIFORM)
        VerModel.boundaryConditions['Bending'].setValues(amplitude='Amp-1')
    #
    VerAssembly.Set(nodes=Right, name='Right')
    VerAssembly.Set(nodes=BRight, name='BRight')
    if analysis=='Tension':
        # keep the right edge plane: tie its x-displacement to the corner node
        VerModel.Equation(name='Constraint-1', terms=((1.0, 'Right', 1), ( -1.0, 'BRight', 1)))
    else:
        region1=VerAssembly.sets['BRight']
        region2=VerAssembly.sets['Right']
        VerModel.MultipointConstraint(name='Constraint-2',
            controlPoint=region1, surface=region2, mpcType=BEAM_MPC,
            userMode=DOF_MODE_MPC, userType=0, csys=None)
    #
    #Create Job and write input file
    # NOTE(review): grainType is never defined in this script — this branch
    # raises NameError as written; this file only builds hexagonal grains.
    if grainType=='Square':
        letter1='S'
    elif grainType=='Hexagon':
        letter1='H'
    elif grainType=='Voronoi':
        letter1='V'
    if analysis=='Tension':
        letter2='T'
    else:
        letter2='B'
    label='W'+str(numY)+'L'+str(numX)+letter1+letter2+str(eachModel)
    VerFile.Job(name=label, model='Model-1', type=ANALYSIS,userSubroutine='ucrystal.for')
    VerFile.jobs[label].writeInput(consistencyChecking=OFF)
    # VerFile.close()
|
nilq/baby-python
|
python
|
'''
@author: Jakob Prange (jakpra)
@copyright: Copyright 2020, Jakob Prange
@license: Apache 2.0
'''
import sys
import json
import argparse as ap
from pathlib import Path
from .mode import Mode
# Module-level parser shared by parse_args(); `mode` holds the sub-command
# (e.g. train) under dest='mode'.
argparser = ap.ArgumentParser()
mode = argparser.add_subparsers(help='mode', dest='mode')
def str2bool(v):
    """Argparse type: accept a bool or a yes/no-style string and return a bool.

    Raises argparse.ArgumentTypeError for anything unrecognised.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in {'yes', 'true', 't', 'y', '1'}:
        return True
    if lowered in {'no', 'false', 'f', 'n', '0'}:
        return False
    raise ap.ArgumentTypeError('Boolean value expected.')
def load_json(v):
    """Argparse type: parse the file at path *v* as JSON.

    Any failure (missing file, bad JSON, ...) is re-raised as an
    argparse.ArgumentTypeError so the CLI reports it cleanly.
    """
    try:
        with open(v) as handle:
            return json.load(handle)
    except Exception as exc:
        raise ap.ArgumentTypeError(exc)
def parse_args():
    """Define the full CLI (shared options, file groups, model architecture,
    CUDA flags, and the `train` sub-command's hyperparameters) on the
    module-level parser and return the parsed namespace."""
    global argparser, mode
    argparser.add_argument('--format', type=str, default='auto')
    argparser.add_argument('-o', '--out', type=str, default=None)
    argparser.add_argument('--labels', type=str, default=None)
    argparser.add_argument('--freq-threshold', type=int, default=10)
    argparser.add_argument('-m', '--model', type=str, default='ccg-model')
    argparser.add_argument('--derivation', action='store_true', help='print derivations as they are read')
    argparser.add_argument('-i', '--interactive', action='store_true')
    argparser.add_argument('-O', '--oracle-scoring', action='store_true')
    argparser.add_argument('--oracle-structure', action='store_true')
    argparser.add_argument('--oracle-supertags', action='store_true')
    argparser.add_argument('-a', '--max-category-depth', type=int, default=6, help='maximum depth of categories')
    argparser.add_argument('-k', '--global-beam', type=int, default=None, help='log-2 beam size')
    argparser.add_argument('-K', '--local-beam', type=int, default=None, help='log-2 beam size')
    argparser.add_argument('--lbda', type=float, default=0.1, help='minimum cost / optimal heuristic factor lambda')
    argparser.add_argument('--cheap', type=float, default=1, help='cost multiplier')
    argparser.add_argument('--penalty', type=float, default=100, help='cost multiplier')
    argparser.add_argument('--high-penalty', type=float, default=1000, help='cost multiplier')
    test_files = argparser.add_argument_group('Testing files')
    test_files.add_argument('-T', '--testing-files', type=str, nargs='+', default=['sample_data/test.auto'])
    test_files.add_argument('--testing-format', type=str, default='auto')
    train_files = argparser.add_argument_group('Training files')
    train_files.add_argument('-t', '--training-files', type=str, nargs='+', default=['sample_data/train.auto'])
    train_files.add_argument('--training-format', type=str, default='auto')
    train_files.add_argument('--training-ids', type=load_json, default=None, help='json file containing list of sentence ids')
    train_files.add_argument('-D', '--development-files', type=str, nargs='+', default=['sample_data/train.auto'])
    train_files.add_argument('--development-format', type=str, default='auto')
    # learning architecture
    arch = argparser.add_argument_group('Learning Architecture')
    arch.add_argument('--span-encoder', type=str, choices=['rnn', 'transformer', 'bert', 'roberta'], default='roberta')
    arch.add_argument('--word-vectors', type=str, default='word_vectors/glove.6B/6B.50')
    arch.add_argument('--pretrained-bert', type=str, default='roberta-base', help='model identifier')
    arch.add_argument('--attention-heads', type=int, default=1)
    arch.add_argument('--transformer-layers', type=int, default=2)
    arch.add_argument('-d', '--embedding-dim', type=int, default=50)
    arch.add_argument('--feat-embedding-dim', type=int, default=12)
    arch.add_argument('--feat-chars', type=int, default=4)
    arch.add_argument('--feat-freq-cutoff', type=int, default=3)
    arch.add_argument('--embedding-dropout', type=float, default=0.2)
    arch.add_argument('--span-hidden-dims', type=int, nargs='+', default=[768, 768])
    arch.add_argument('--bidirectional', type=str2bool, nargs='?', const=True, default=True)
    arch.add_argument('--span-dropout', type=float, nargs='*', default=[0.2, 0.1])
    arch.add_argument('--hidden-dims', type=int, nargs='*', default=[])
    arch.add_argument('--dropout', type=float, nargs='*', default=[])
    arch.add_argument('--tasks', type=str, nargs='*', default=['tasks/addrmlp_att_rebank'])
    arch.add_argument('--tree-hidden-dim', type=int, default=64)
    arch.add_argument('--enc-attention', action='store_true')
    arch.add_argument('--dec-attention', action='store_true')
    arch.add_argument('-b', '--batch-size', type=int, default=1)
    arch.add_argument('--seed', type=int, default=42)
    # CUDA
    cuda = argparser.add_argument_group('CUDA')
    cuda.add_argument('--cuda', action='store_true')
    cuda.add_argument('--cuda-devices', type=int, nargs='*', default=[])
    argparser.add_argument('-n', '--n-print', type=int, default=100)
    # `train` sub-command with its own hyperparameter group
    train = mode.add_parser(Mode.train)
    # hyperparams
    hyp = train.add_argument_group('Hyperparameters')
    hyp.add_argument('-e', '--epochs', type=int, default=10)
    hyp.add_argument('--max-batches', type=int, default=None)
    hyp.add_argument('--loss-fxn', type=str, choices=['crossent', 'avg', 'all'],
                     default='crossent')
    hyp.add_argument('--teacher-forcing', type=str, choices=['global', 'dynamic_best', 'dynamic_random'],  # add local?
                     default='global')
    hyp.add_argument('--omega-native-atom', type=float, default=0.0)
    hyp.add_argument('--omega-atom', type=float, default=0.0)
    hyp.add_argument('--omega-full', type=float, default=0.0)
    hyp.add_argument('--lambda-enc', type=float, default=0.0)
    hyp.add_argument('--lambda-dec', type=float, default=0.0)
    hyp.add_argument('--optimizer', type=str, default='adamw')
    hyp.add_argument('--learning-rate', type=float, default=1e-4)
    hyp.add_argument('--bert-learning-rate', type=float, default=1e-5)
    hyp.add_argument('--momentum', type=float, default=0.7)
    hyp.add_argument('--epsilon', type=float, default=1e-6)
    hyp.add_argument('--decay', type=float, default=0.01)
    hyp.add_argument('--use-schedule', action='store_true', default=False)
    hyp.add_argument('--pct-start', type=float, default=0.3)
    hyp.add_argument('--anneal-strategy', type=str, default='cos')
    hyp.add_argument('--finetune', type=str2bool, nargs='?', const=True, default=True)
    return argparser.parse_args()
def get_filepaths_from_path(paths, filename, suffix):
    """Collect files recursively below each directory in *paths*.

    Matches *filename* exactly when given, otherwise any ``*.suffix`` file.
    On AttributeError, collection stops and whatever was gathered so far is
    returned (best-effort, mirroring the sibling helpers).
    """
    pattern = f'**/{filename}' if filename else f'**/*.{suffix}'
    collected = []
    try:
        for entry in paths:
            collected.extend(Path(entry).glob(pattern))
    except AttributeError:
        pass
    return collected
def get_filepaths_from_glob(globs):
    """Expand shell-style *globs* relative to the CWD and return sorted paths.

    Best-effort: an AttributeError aborts collection and the paths gathered
    so far are sorted and returned.
    """
    found = []
    try:
        cwd = Path()
        for pattern in globs:
            found.extend(cwd.glob(pattern))
    except AttributeError:
        pass
    return sorted(found)
def get_filepaths_from_args(args):
    """Resolve the file-glob arguments on *args* in place.

    Also records whether '<model>.pt' already exists (args.model_exists) and
    echoes the parsed arguments to stderr for traceability.
    """
    args.model_exists = Path(f'{args.model}.pt').is_file()
    print(args, file=sys.stderr)
    for attr in ('testing_files', 'training_files', 'development_files'):
        setattr(args, attr, get_filepaths_from_glob(getattr(args, attr)))
def main():
    """Parse CLI args, resolve file paths, and pick the torch device."""
    args = parse_args()
    get_filepaths_from_args(args)
    # imported lazily so argument parsing works without a torch install
    import torch.cuda as cuda
    use_cuda = args.cuda and cuda.is_available()
    args.device = 'cuda' if use_cuda else 'cpu'
    return args
# Script entry point: parse arguments and resolve paths/device.
if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
# SPDX-FileCopyrightText: 2020 Melissa LeBlanc-Williams, written for Adafruit Industries
#
# SPDX-License-Identifier: Unlicense
"""
`adafruit_matrixportal.network`
================================================================================
Helper library for the MatrixPortal M4 or Adafruit RGB Matrix Shield + Metro M4 Airlift Lite.
* Author(s): Melissa LeBlanc-Williams
Implementation Notes
--------------------
**Hardware:**
* `Adafruit MatrixPortal M4 <https://www.adafruit.com/product/4745>`_
* `Adafruit Metro M4 Express AirLift <https://www.adafruit.com/product/4000>`_
* `Adafruit RGB Matrix Shield <https://www.adafruit.com/product/2601>`_
* `64x32 RGB LED Matrix <https://www.adafruit.com/product/2278>`_
**Software and Dependencies:**
* Adafruit CircuitPython firmware for the supported boards:
https://github.com/adafruit/circuitpython/releases
"""
import gc
import neopixel
from adafruit_portalbase.network import NetworkBase
from adafruit_portalbase.wifi_coprocessor import WiFi
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_MatrixPortal.git"
class Network(NetworkBase):
    """Class representing the Adafruit RGB Matrix Portal.

    :param status_neopixel: The pin for the status NeoPixel. Use ``board.NEOPIXEL`` for the on-board
                            NeoPixel. Defaults to ``None``, not the status LED
    :param esp: A passed ESP32 object, Can be used in cases where the ESP32 chip needs to be used
                 before calling the pyportal class. Defaults to ``None``.
    :param busio.SPI external_spi: A previously declared spi object. Defaults to ``None``.
    :param bool extract_values: If true, single-length fetched values are automatically extracted
                                from lists and tuples. Defaults to ``True``.
    :param debug: Turn on debug print outs. Defaults to False.
    """

    def __init__(self, **kwargs):
        # Pop our own options before forwarding the remaining kwargs to the
        # WiFi coprocessor (idiomatic dict.pop with defaults replaces the
        # previous membership-test-then-pop blocks).
        extract_values = kwargs.pop("extract_values", True)
        debug = kwargs.pop("debug", False)

        if "status_neopixel" in kwargs:
            # Build the status LED from the given pin; dim so it is not glaring.
            status_neopixel = kwargs.pop("status_neopixel")
            status_led = neopixel.NeoPixel(status_neopixel, 1, brightness=0.2)
        else:
            status_led = None
        kwargs["status_led"] = status_led
        wifi = WiFi(**kwargs)

        super().__init__(
            wifi,
            extract_values=extract_values,
            debug=debug,
        )

        # Reclaim memory after setup — important on memory-constrained boards.
        gc.collect()

    @property
    def ip_address(self):
        """Return the IP Address nicely formatted"""
        return self._wifi.esp.pretty_ip(self._wifi.esp.ip_address)
nilq/baby-python
|
python
|
"""Tests for the NumericValue data class."""
from onyx_client.data.animation_keyframe import AnimationKeyframe
from onyx_client.data.animation_value import AnimationValue
from onyx_client.data.numeric_value import NumericValue
class TestNumericValue:
    """Unit tests for NumericValue.create / update_with / equality."""

    def test_create(self):
        """A full payload (value, bounds, read_only, animation) round-trips."""
        expected = NumericValue(
            10,
            10,
            100,
            True,
            AnimationValue(10, 10, [AnimationKeyframe("linear", 10, 10, 10)]),
        )
        assert (
            NumericValue.create(
                {
                    "value": 10,
                    "minimum": 10,
                    "maximum": 100,
                    "read_only": True,
                    "animation": {
                        "start": 10,
                        "current_value": 10,
                        "keyframes": [
                            {
                                "interpolation": "linear",
                                "delay": 10,
                                "duration": 10,
                                "value": 10,
                            }
                        ],
                    },
                }
            )
            == expected
        )

    def test_create_value_only(self):
        """Omitted fields fall back to defaults: min 0, max 100, not read-only."""
        expected = NumericValue(10, 0, 100, False)
        assert (
            NumericValue.create(
                {
                    "value": 10,
                }
            )
            == expected
        )

    def test_create_no_value(self):
        """An empty payload yields a value of None with default bounds."""
        expected = NumericValue(None, 0, 100, False)
        assert NumericValue.create({}) == expected

    def test_create_none(self):
        """create(None) short-circuits to None rather than raising."""
        assert NumericValue.create(None) is None

    def test_update_with(self):
        """update_with overwrites every populated field of the receiver."""
        value = NumericValue(
            1,
            1,
            10,
            False,
            AnimationValue(1, 1, [AnimationKeyframe("linear", 1, 1, 1)]),
        )
        expected = NumericValue(
            10,
            10,
            100,
            True,
            AnimationValue(10, 10, [AnimationKeyframe("linear", 10, 10, 10)]),
        )
        value.update_with(expected)
        assert value == expected

    def test_update_with_only_existing(self):
        """None fields in the update are ignored; only set fields are applied."""
        value = NumericValue(
            10,
            10,
            100,
            True,
        )
        expected = NumericValue(
            10,
            10,
            100,
            True,
            AnimationValue(10, 10, [AnimationKeyframe("linear", 10, 10, 10)]),
        )
        value.update_with(
            NumericValue(
                None,
                None,
                None,
                None,
                AnimationValue(10, 10, [AnimationKeyframe("linear", 10, 10, 10)]),
            )
        )
        assert value == expected

    def test_not_eq(self):
        """Comparison against a non-NumericValue is never equal."""
        assert NumericValue(10, 10, 100, True) != 10
nilq/baby-python
|
python
|
from .config_diff import Config
import yaml
def save(filename: str, config: Config):
    """Write *config* to *filename* as block-style YAML."""
    with open(filename, 'w') as stream:
        yaml.dump(config, stream, default_flow_style=False)
|
nilq/baby-python
|
python
|
import os
# Spreedly auth token: read from the environment in real deployments, with an
# obvious dummy fallback for local development.
SPREEDLY_AUTH_TOKEN = os.environ.get('SPREEDLY_AUTH_TOKEN','asdfasdf')
# Short site name of the Spreedly account to talk to.
SPREEDLY_SITE_NAME = 'jamesr-c-test'
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Advent of Code 2020
Day 25, Part 1
"""
def _find_loop_size(public_key, subject_number=7, modulus=20201227):
    """Brute-force the secret loop size: smallest n with subject^n % modulus == public_key."""
    value = 1
    loops = 0
    while True:
        loops += 1
        value = value * subject_number % modulus
        if value == public_key:
            return loops


def main():
    """Solve AoC 2020 day 25 part 1.

    Reads the card and door public keys from ``in.txt``, prints each device's
    loop size, then prints the shared encryption key. The two near-identical
    brute-force loops of the original are factored into _find_loop_size, and
    the final key uses 3-argument pow() (modular exponentiation) instead of
    an O(loop) multiply loop — same three output lines.
    """
    with open('in.txt') as f:
        card_key, door_key = map(int, f.readlines())

    card_loop = _find_loop_size(card_key)
    print(card_loop)

    door_loop = _find_loop_size(door_key)
    print(door_loop)

    # encryption key = card_key ^ door_loop mod 20201227
    print(pow(card_key, door_loop, 20201227))


if __name__ == '__main__':
    main()
|
nilq/baby-python
|
python
|
import threading
from time import sleep
lock = threading.Lock()
# Ticker: waits one second, then releases the lock so the reader can run.
def wait():
    global lock
    while True:
        sleep(1)
        # NOTE(review): releasing a Lock that is not currently held raises
        # RuntimeError — this relies on the reader re-acquiring within 1 s.
        lock.release()
def LerVelocidade():
    """Block on the shared lock and print a speed reading each time the ticker releases it."""
    global lock
    while True:
        lock.acquire()
        print('Leitura da Velocidade')
print('cheguei')
# ---------------- creating the threads
# The main thread takes the lock first so the reader blocks until the
# ticker's first release.
lock.acquire()
t = threading.Thread(target=wait, name='Wait')
t1 = threading.Thread(target=LerVelocidade, name='Velocidade')
t.start()
t1.start()
|
nilq/baby-python
|
python
|
# PyInstaller hook: gunicorn resolves its logger and worker classes by dotted
# name at runtime, so they must be declared as hidden imports to be bundled.
print('gunicorn hook')
hiddenimports = ['gunicorn.glogging', 'gunicorn.workers.sync']
|
nilq/baby-python
|
python
|
from unittest import mock
from bx_py_utils.test_utils.datetime import parse_dt
from django.core.exceptions import ValidationError
from django.core.validators import validate_slug
from django.db.utils import IntegrityError
from django.test import TestCase
from django.utils import timezone
from bx_django_utils.models.manipulate import (
STORE_BEHAVIOR_IGNORE,
STORE_BEHAVIOR_SET_IF_EMPTY,
STORE_BEHAVIOR_SKIP_EMPTY,
InvalidStoreBehavior,
create,
create_or_update,
create_or_update2,
)
from bx_django_utils.test_utils.datetime import MockDatetimeGenerator
from bx_django_utils.test_utils.model_clean_assert import AssertModelCleanCalled
from bx_django_utils_tests.test_app.models import CreateOrUpdateTestModel, StoreSaveModel, TimetrackingTestModel
class ModelManipulateTestCase(TestCase):
    """Tests for bx_django_utils.models.manipulate: create(),
    create_or_update() (deprecated) and create_or_update2(), including
    per-field store_behavior handling and save_kwargs pass-through."""

    def test_deprecated_create_or_update(self):
        """The legacy create_or_update() still works but emits a DeprecationWarning."""
        with self.assertWarns(DeprecationWarning):
            instance, created, updated_fields = create_or_update(
                ModelClass=CreateOrUpdateTestModel, name='foo', slug='bar'
            )
        assert isinstance(instance, CreateOrUpdateTestModel)
        assert instance.name == 'foo'
        assert created is True
        assert updated_fields is None  # None and not []

    @mock.patch.object(timezone, 'now', MockDatetimeGenerator())
    def test_create_or_update2(self):
        """create_or_update2() creates, updates only changed fields, and
        leaves update_dt untouched when nothing changed (mocked clock
        advances one year per call)."""
        # create a new entry:
        with AssertModelCleanCalled() as cm:
            result = create_or_update2(
                ModelClass=CreateOrUpdateTestModel,
                lookup={'id': 1},
                name='First entry',
                slug='first'
            )
        instance = result.instance
        assert isinstance(instance, CreateOrUpdateTestModel)
        assert instance.id == 1
        assert instance.name == 'First entry'
        assert instance.slug == 'first'
        assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
        assert instance.update_dt == parse_dt('2001-01-01T00:00:00+0000')
        assert result.created is True
        assert result.updated_fields == []
        assert result.ignored_fields == []
        assert result.not_overwritten_fields == []
        cm.assert_no_missing_cleans()
        # Change only 'slug'
        with AssertModelCleanCalled() as cm:
            result = create_or_update2(
                ModelClass=CreateOrUpdateTestModel,
                lookup={'id': 1},
                name='First entry',
                slug='change-value'
            )
        instance = result.instance
        assert isinstance(instance, CreateOrUpdateTestModel)
        assert instance.id == 1
        assert instance.name == 'First entry'
        assert instance.slug == 'change-value'
        assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')  # not changed!
        assert instance.update_dt == parse_dt('2002-01-01T00:00:00+0000')
        assert result.created is False
        assert result.updated_fields == ['slug']
        assert result.ignored_fields == []
        assert result.not_overwritten_fields == []
        cm.assert_no_missing_cleans()
        # Change 'name' and 'slug':
        with AssertModelCleanCalled() as cm:
            result = create_or_update2(
                ModelClass=CreateOrUpdateTestModel,
                lookup={'id': 1},
                name='New name !',
                slug='new-slug'
            )
        instance = result.instance
        assert isinstance(instance, CreateOrUpdateTestModel)
        assert instance.id == 1
        assert instance.name == 'New name !'
        assert instance.slug == 'new-slug'
        assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')  # not changed!
        assert instance.update_dt == parse_dt('2003-01-01T00:00:00+0000')
        assert result.created is False
        assert result.updated_fields == ['name', 'slug']
        assert result.ignored_fields == []
        assert result.not_overwritten_fields == []
        cm.assert_no_missing_cleans()
        # Nothing changed:
        result = create_or_update2(
            ModelClass=CreateOrUpdateTestModel,
            lookup={'id': 1},
            name='New name !',
            slug='new-slug'
        )
        instance = result.instance
        assert isinstance(instance, CreateOrUpdateTestModel)
        assert instance.id == 1
        assert instance.name == 'New name !'
        assert instance.slug == 'new-slug'
        assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
        assert instance.update_dt == parse_dt('2003-01-01T00:00:00+0000')  # not changed!
        assert result.created is False
        assert result.updated_fields == []
        assert result.ignored_fields == []
        assert result.not_overwritten_fields == []

    def test_non_valid(self):
        """Invalid field values raise ValidationError on create and on update."""
        msg = str(validate_slug.message)
        with self.assertRaisesMessage(ValidationError, msg):
            create_or_update2(
                ModelClass=CreateOrUpdateTestModel,
                lookup={'id': 1},
                name='foo',
                slug='this is no Slug !'
            )
        # Update existing entry with non-valid values should also not work:
        CreateOrUpdateTestModel(id=1, name='foo', slug='bar')
        with self.assertRaisesMessage(ValidationError, msg):
            create_or_update2(
                ModelClass=CreateOrUpdateTestModel,
                lookup={'id': 1},
                name='foo',
                slug='this is no Slug !'
            )

    def test_disable_full_clean(self):
        """call_full_clean=False skips model validation on create and update."""
        # Create a new entry without "full_clean()" call:
        with AssertModelCleanCalled() as cm:
            result = create_or_update2(
                ModelClass=CreateOrUpdateTestModel,
                lookup={'id': 1},
                call_full_clean=False,
                slug='This is not a valid slug!'
            )
        instance = result.instance
        assert isinstance(instance, CreateOrUpdateTestModel)
        assert instance.id == 1
        assert instance.slug == 'This is not a valid slug!'
        assert result.created is True
        assert result.updated_fields == []
        assert cm.called_cleans == []
        assert len(cm.missing_cleans) == 1
        # Change existing without "full_clean()" call:
        with AssertModelCleanCalled() as cm:
            result = create_or_update2(
                ModelClass=CreateOrUpdateTestModel,
                lookup={'id': 1},
                call_full_clean=False,
                slug='Also no valid slug!'
            )
        instance = result.instance
        assert isinstance(instance, CreateOrUpdateTestModel)
        assert instance.id == 1
        assert instance.slug == 'Also no valid slug!'
        assert result.created is False
        assert result.updated_fields == ['slug']
        assert cm.called_cleans == []
        assert len(cm.missing_cleans) == 1

    @mock.patch.object(timezone, 'now', MockDatetimeGenerator())
    def test_create_or_update_without_lookup(self):
        """lookup=None always creates a fresh instance."""
        # create a new entry:
        with AssertModelCleanCalled() as cm:
            result = create_or_update2(
                ModelClass=CreateOrUpdateTestModel,
                lookup=None,
                name='First entry',
                slug='first'
            )
        instance = result.instance
        assert isinstance(instance, CreateOrUpdateTestModel)
        assert instance.pk is not None
        assert instance.name == 'First entry'
        assert instance.slug == 'first'
        assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
        assert instance.update_dt == parse_dt('2001-01-01T00:00:00+0000')
        assert result.created is True
        assert result.updated_fields == []
        cm.assert_no_missing_cleans()

    @mock.patch.object(timezone, 'now', MockDatetimeGenerator())
    def test_create(self):
        """create() stores a new instance and refuses duplicate primary keys."""
        # create a new entry:
        with AssertModelCleanCalled() as cm:
            instance = create(
                ModelClass=CreateOrUpdateTestModel,
                name='First entry',
                slug='first'
            )
        assert isinstance(instance, CreateOrUpdateTestModel)
        assert instance.pk is not None
        assert instance.name == 'First entry'
        assert instance.slug == 'first'
        assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
        assert instance.update_dt == parse_dt('2001-01-01T00:00:00+0000')
        cm.assert_no_missing_cleans()
        # Cannot create an already existing model
        with self.assertRaises(IntegrityError):
            create(
                ModelClass=CreateOrUpdateTestModel,
                id=instance.id,
                name='second create',
                slug='second'
            )

    @mock.patch.object(timezone, 'now', MockDatetimeGenerator())
    def test_store_behavior(self):
        """store_behavior drives per-field IGNORE / SET_IF_EMPTY / SKIP_EMPTY rules."""
        test_relation1 = TimetrackingTestModel(
            create_dt=parse_dt('2002-02-02T00:00:00+0000'),
            update_dt=parse_dt('2003-03-03T00:00:00+0000')
        )
        test_relation1.save(update_dt=False)
        test_relation2 = TimetrackingTestModel(
            create_dt=parse_dt('2004-04-04T00:00:00+0000'),
            update_dt=parse_dt('2005-05-05T00:00:00+0000')
        )
        test_relation2.save(update_dt=False)
        # Create object and respect "store_behavior"
        result = create_or_update2(
            ModelClass=CreateOrUpdateTestModel,
            lookup=None,  # force create object!
            store_behavior={
                # 'name' is missing here -> normal behavior: overwrite existing values
                'slug': STORE_BEHAVIOR_SET_IF_EMPTY,
                'many2one_rel': STORE_BEHAVIOR_SET_IF_EMPTY,
                'blank_field': STORE_BEHAVIOR_IGNORE,
                'null_field': STORE_BEHAVIOR_IGNORE,
            },
            name='name1',
            slug='slug1',
            many2one_rel=test_relation1,
            blank_field='ignored',
            null_field='ignored',
        )
        assert result.created is True
        assert result.updated_fields == []  # Object created!
        assert sorted(result.ignored_fields) == ['blank_field', 'null_field']
        assert result.not_overwritten_fields == []
        assert result.skip_empty_values == []
        instance = result.instance
        assert instance.name == 'name1'
        assert instance.slug == 'slug1'
        assert instance.many2one_rel.create_dt == parse_dt('2002-02-02T00:00:00+0000')
        assert instance.many2one_rel.update_dt == parse_dt('2003-03-03T00:00:00+0000')
        assert instance.blank_field == ''
        assert instance.null_field is None
        assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
        assert instance.update_dt == parse_dt('2001-01-01T00:00:00+0000')
        # Update existing instance
        result = create_or_update2(
            ModelClass=CreateOrUpdateTestModel,
            lookup={'pk': instance.pk},
            store_behavior={
                # 'name' is missing here -> normal behavior: overwrite existing values
                'slug': STORE_BEHAVIOR_SET_IF_EMPTY,
                'many2one_rel': STORE_BEHAVIOR_SKIP_EMPTY,  # given relation is not empty
                'blank_field': STORE_BEHAVIOR_SET_IF_EMPTY,
                'null_field': STORE_BEHAVIOR_SET_IF_EMPTY,
            },
            name='name2',
            slug='not-overwritten',
            many2one_rel=test_relation2,
            blank_field='set blank field 1',
            null_field='set null field 1',
        )
        instance = result.instance
        assert result.created is False
        assert instance.name == 'name2'
        assert instance.slug == 'slug1'
        assert instance.many2one_rel.create_dt == parse_dt('2004-04-04T00:00:00+0000')  # updated
        assert instance.many2one_rel.update_dt == parse_dt('2005-05-05T00:00:00+0000')  # updated
        assert instance.blank_field == 'set blank field 1'
        assert instance.null_field == 'set null field 1'
        assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
        assert instance.update_dt == parse_dt('2002-01-01T00:00:00+0000')
        assert sorted(result.updated_fields) == [
            'blank_field', 'many2one_rel', 'name', 'null_field'
        ]
        assert result.ignored_fields == []
        assert result.not_overwritten_fields == ['slug']
        assert result.skip_empty_values == []
        # Skip empty values
        result = create_or_update2(
            ModelClass=CreateOrUpdateTestModel,
            lookup={'pk': instance.pk},
            store_behavior={
                'slug': STORE_BEHAVIOR_IGNORE,
                'many2one_rel': STORE_BEHAVIOR_SKIP_EMPTY,
                'blank_field': STORE_BEHAVIOR_SKIP_EMPTY,
                'null_field': STORE_BEHAVIOR_SKIP_EMPTY,
            },
            name='name3',
            slug='will-be-ignored',
            many2one_rel=None,
            blank_field='',  # a empty value
            null_field=None,  # a empty value
        )
        instance = result.instance
        assert result.created is False
        assert instance.name == 'name3'  # new name
        assert instance.slug == 'slug1'  # unchanged
        assert instance.many2one_rel.create_dt == parse_dt('2004-04-04T00:00:00+0000')  # unchanged
        assert instance.many2one_rel.update_dt == parse_dt('2005-05-05T00:00:00+0000')  # unchanged
        assert instance.blank_field == 'set blank field 1'  # unchanged
        assert instance.null_field == 'set null field 1'  # unchanged
        assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
        assert instance.update_dt == parse_dt('2003-01-01T00:00:00+0000')
        assert result.updated_fields == ['name']
        assert result.ignored_fields == ['slug']
        assert result.not_overwritten_fields == []
        assert sorted(result.skip_empty_values) == [
            'blank_field', 'many2one_rel', 'null_field'
        ]
        # Store empty values
        result = create_or_update2(
            ModelClass=CreateOrUpdateTestModel,
            lookup={'pk': instance.pk},
            store_behavior={
                'name': STORE_BEHAVIOR_IGNORE,
                'slug': STORE_BEHAVIOR_IGNORE,
            },
            name='Not Overwritten !',
            # "slug" missing here, but can be set in "store_behavior"
            many2one_rel=None,  # can be set to "empty"
            blank_field='',  # can be set to "empty"
            null_field=None,  # can be set to "empty"
        )
        instance = result.instance
        assert result.created is False
        assert instance.name == 'name3'  # unchanged
        assert instance.slug == 'slug1'  # unchanged
        assert instance.many2one_rel is None
        assert instance.blank_field == ''
        assert instance.null_field is None
        assert instance.create_dt == parse_dt('2001-01-01T00:00:00+0000')
        assert instance.update_dt == parse_dt('2004-01-01T00:00:00+0000')
        assert sorted(result.updated_fields) == ['blank_field', 'many2one_rel', 'null_field']
        assert result.ignored_fields == ['name']
        assert result.not_overwritten_fields == []
        # We accept only existing field names in store_behavior:
        err_msg = (
            "store_behavior field name 'wrong' is not one of:"
            " ['blank_field', 'create_dt', 'id', 'many2one_rel',"
            " 'name', 'null_field', 'slug', 'update_dt']"
        )
        with self.assertRaisesMessage(InvalidStoreBehavior, err_msg):
            create_or_update2(
                ModelClass=CreateOrUpdateTestModel,
                store_behavior={
                    'name': STORE_BEHAVIOR_IGNORE,
                    'slug': STORE_BEHAVIOR_SET_IF_EMPTY,
                    # We check the field names:
                    'wrong': STORE_BEHAVIOR_IGNORE,
                },
            )
        assert CreateOrUpdateTestModel.objects.count() == 1

    @mock.patch.object(timezone, 'now', MockDatetimeGenerator())
    def test_save_kwargs(self):
        """save_kwargs are passed through to Model.save() on every store."""
        obj = create_or_update2(
            ModelClass=StoreSaveModel,
            name='foobar',
            save_kwargs={'arg': 'original'},
        ).instance
        assert obj.name == 'foobar'
        create_or_update2(
            ModelClass=StoreSaveModel,
            lookup={'pk': obj.pk},
            name='bazqux',
            save_kwargs={'other_arg': 'changed'},
        )
        obj.refresh_from_db()
        assert obj.name == 'bazqux'
        create_or_update2(
            ModelClass=StoreSaveModel,
            lookup={'pk': obj.pk},
            name='final',
            save_kwargs={},
        )
        obj.refresh_from_db()
        assert obj.name == 'final'
        assert obj._save_calls.saves == [
            {'arg': 'original'},
            {'other_arg': 'changed'},
            {},
        ]
|
nilq/baby-python
|
python
|
import bge
scn = bge.logic.getCurrentScene()
def CamAdapt(cont):
    """Look up the underground lamp and 'Naball' scene objects.

    The objects are fetched but not yet used — the function is a stub.
    """
    lum = scn.objects['und.lum1']
    nab = scn.objects['Naball_gerUnderground']
def LoadPart1(cont):
    """Stub: will collect the objects to load for part 1."""
    loadObj = []
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
'''Written by Corkine Ma
这个模块的大部分是子类化了一个QDialog,用来接受用户的输入,并且将其保存到daily.setting文件夹中
除此之外,还有一个函数,这个函数负责从daily.setting读取数据,并且使用checkandsend.py模块中的
两个函数来判断在数据库位置是否存在监视文件夹中符合正则表达式规则的文件,如果文件夹中有这样的文件
但是数据库中没有,就判定是一篇新日记,然后调用邮件发送程序发送邮件,其会返回一个bool值,大部分情况,
只要参数文件和日志文件不出问题,返回的都是true,至于发送邮件出错,依旧会返回true(因为考虑到可能存在
发送多个文件,并且有些文件可能无法打开,有些不能发送,所以统一返回true,不过对于每个文件的处理信息
都会保存在stdout中,如果你打开了log,则会保存在daily.log中),第四个参数会返回详细的处理信息,
包括成功和失败的。
'''
import sys,os,io,shelve,traceback,time
from tkinter import Tk
from tkinter.messagebox import showwarning
import PyQt5
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import ui_setting
from checkandsend import *
# os.chdir(r"C:\Users\Administrator\Desktop\pyBook\Project_EveryDayNotice")
__VERSION__ = '0.2.6'
class Form(QDialog,ui_setting.Ui_Dialog):
    """Settings dialog for the daily-diary notifier.

    Lets the user pick the watched folder, the shelve database, an e-mail
    address and a filename regex, and persists them as one comma-separated
    line in 'daily.setting'.
    """
    def __init__(self,parent=None):
        super(Form,self).__init__(parent)
        self.setupUi(self)
        self.pushButton.clicked.connect(self.selectDb)
        self.pushButton_2.clicked.connect(self.selectCWD)
        # self.pushButton_4.clicked.connect(self.selectAlert)
        self.address=''
        self.dbaddress =''
        # self.alertaddress = ''
        self.buttonBox.accepted.connect(self.saveIt)
        try:
            # Pre-fill the widgets from a previously saved settings file.
            loadfile = open('daily.setting','r')
            thefile = loadfile.read()
            # print(thefile)
            self.address=str(thefile.split(",")[0])
            self.dbaddress=str(thefile.split(",")[1])
            # self.alertaddress = str(thefile.split(",")[4])
            self.label_3.setText(self.dbaddress)
            self.label_4.setText(self.address)
            self.lineEdit.setText(str(thefile.split(",")[2]))
            self.lineEdit_2.setText(str(thefile.split(",")[3]))
            # self.label_5.setText(self.alertaddress)
        except:
            # NOTE(review): bare except also hides real errors; on first run
            # the settings file simply does not exist yet.
            QMessageBox.warning(self,"WARN",'从之前的文件中读取出错,如果你第一次使用此程序,请忽略此条消息')
    def selectCWD(self):
        """Choose the folder to be monitored for new diary files."""
        address=QFileDialog.getExistingDirectory(self,"选择需要监视的文件夹",os.getcwd(),QFileDialog.ShowDirsOnly)
        if address != None:
            self.address = address
            self.label_4.setText(self.address)
        else:
            self.label_4.setText('未选择')
    def selectDb(self):
        """Create a new shelve database or pick an existing *.dat one."""
        choose = QMessageBox.information(self,'选项',"你是否需要新建一个数据库文件?如果没有,请点击'OK',否则点击'Cancel'选择你的数据库问卷",QMessageBox.Ok|QMessageBox.Cancel)
        if choose == QMessageBox.Ok:
            address=QFileDialog.getExistingDirectory(self,"选择需要监视的文件夹",os.getcwd(),QFileDialog.ShowDirsOnly)
            db=shelve.open(address+'/mydailydata')
            # Seed the new database with a dummy entry.
            db['1999年1月1日.docx']='Update at NOTIME'
            self.dbaddress = address+'/mydailydata'
            self.label_3.setText(self.dbaddress)
        else:
            filename,type = QFileDialog.getOpenFileName(self,"选择你的数据库文件",'',"cmData files (*.dat)")
            # print(filename)
            if filename != None:
                if '.bak' in filename[-4:] or '.dat' in filename[-4:] or '.dir' in filename[-4:]:
                    # shelve expects the path without the backend extension.
                    filename = filename[:-4]
                self.dbaddress = filename
                self.label_3.setText(self.dbaddress)
                # print(self.dbaddress)
            else:
                self.label_3.setText('未选择')
                QMessageBox.warning(self,"WARN",'无效文件,请重新选取')
    def contextMenuEvent(self, event):
        """Right-click menu exposing a single 'run test' action."""
        menu1 = QMenu()
        runAction = menu1.addAction("测试程序运行情况(&R)")
        runAction.triggered.connect(self.runTest)
        menu1.exec_(event.globalPos())
    def runTest(self):
        """Run runCheck() and display its detailed result text."""
        result_bool,result_1,result_2,result_txt = runCheck()
        QMessageBox.information(self,'输出测试',result_txt)
    # def selectAlert(self):
    #     filename,type = QFileDialog.getOpenFileName(self,"选择你的提醒程序",'',"cmEXE files (*.exe)")
    #     if filename != None:
    #         self.alertaddress = filename
    #         self.label_5.setText(self.alertaddress)
    def saveIt(self):
        """Validate the inputs and write them, comma-separated, to daily.setting."""
        emailaddress = str(self.lineEdit.text())
        regularexp = str(self.lineEdit_2.text())
        if emailaddress == '' or regularexp == '' or self.dbaddress =='' or self.address == '' :  # the alert program is deliberately not validated
            QMessageBox.warning(self,"WARN",'输入数据无效,请检查后再试')
        else:
            try:
                # print(emailaddress,regularexp,self.address,self.dbaddress,self.alertaddress)
                savedata = open('daily.setting','w')
                savedata.write('%s,%s,%s,%s'%(self.address,self.dbaddress,emailaddress,regularexp))
                savedata.close()
                QMessageBox.information(self,"Info",'设置数据保存在daily.setting文件中')
                # print(os.getcwd())
            except Exception as _err:
                print(traceback.format_exc())
                QMessageBox.warning(self,"WARN",'数据保存失败')
                # print(os.getcwd())
# print(os.getcwd())
def runCheck(settingsfile='daily.setting',log=True,logfile='daily.log',sendmail=True):
    '''Load the saved settings and look for new diary files to mail out.

    Reads the watched-folder path, database path, e-mail address and
    filename regex from *settingsfile*, calls checkDaily() to find files
    matching the regex that are not yet in the shelve database, and — when
    *sendmail* is true — mails any new files via sendMail(). When *log* is
    true, stdout is redirected into *logfile* for the duration of the run.

    Returns a 4-tuple ``(ok, processinfo, errinfo, result_txt)``; *ok* is
    False only when the settings could not be loaded or checkDaily() failed.
    '''
    try:
        if log == True:
            # Redirect stdout into the log file; restored before returning.
            tmp = sys.stdout
            sys.stdout = open(logfile,'a')
        else:
            pass
        print('\n\n','='*100)
        print('=============================================',time.ctime(),'======================================')
        print('='*100,'\n\n')
        loadfile = open(settingsfile,'r')
        thefile = loadfile.read()
        address=str(thefile.split(",")[0])
        dbaddress=str(thefile.split(",")[1])
        # alertaddress = str(thefile.split(",")[4])
        emailaddress=str(thefile.split(",")[2])
        regular=str(thefile.split(",")[3])
        result,infomation,clist,notedict= checkDaily(address=address+'/',
            regular=regular,dbaddress=dbaddress)
        processinfo = errinfo = result_txt = ''
        if result == True:
            if clist != []:
                if sendmail == True:
                    print('需要写入的数据',clist)
                    result_2,result_num,result_txt,processinfo,errinfo= sendMail(clist,address=address+'/',emailaddress=emailaddress,
                        dbaddress=dbaddress,preparenotedict=notedict)
                    print(result_2,'\n',processinfo,'\n',errinfo,'\n',result_txt)
                    if log == True:
                        sys.stdout.close()
                        sys.stdout = tmp
                    else:pass
                    return True,processinfo,errinfo,result_txt
                else:
                    return True,'','','成功检索并发现新数据,但你选择了不发送邮件'
            else:
                print("成功检索数据,但未发现新数据")
                print(infomation,clist)
                if log == True:
                    sys.stdout.close()
                    sys.stdout = tmp
                else:pass
                return True,'','','成功检索数据,但未发现新数据'
            # Changing this requires updating the matching checks in noticedlg
        else:
            return False,'','','未能成功调用checkDaily()函数,可能是因为参数传递错误。'
    except:
        # NOTE(review): on this path (and on the sendmail==False path above)
        # the redirected stdout is never restored and files are left open.
        return False,'','',str(traceback.format_exc())
if __name__=="__main__":
app = QApplication(sys.argv)
app.setApplicationName("Daily Notice")
app.setOrganizationName("Marvin Studio")
app.setOrganizationDomain("http://www.marvinstudio.cn")
form = Form()
form.show()
# runCheck()
# print(runCheck(sendmail=False)
app.exec_()
|
nilq/baby-python
|
python
|
from flask import Blueprint
from flask import Blueprint
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import or_,and_
from .form import *
from utils import *
from decorators import admin_required, permission_required
# from .. import app
from flask import current_app
from wtforms import ValidationError, validators
from app import db, bcrypt, login_manager
from flask_login import (
UserMixin,
login_required,
login_user,
LoginManager,
current_user,
logout_user,
login_required,
)
from flask import (
Flask,
render_template,
request,
redirect,
flash,
url_for,
abort,
send_from_directory,
)
from werkzeug.routing import BuildError
from sqlalchemy.exc import (
IntegrityError,
DataError,
DatabaseError,
InterfaceError,
InvalidRequestError,
)
from PIL import Image
from flask_bcrypt import generate_password_hash, check_password_hash
from models import *
bp = Blueprint("blog", __name__, url_prefix="/blog")
# url = "http://localhost:5000/{url_for('blog.article',post_id=post.id,uname=post.author.uname,slug=post.slug)}"
@bp.route("/", methods=("GET", "POST"), strict_slashes=False)
def blog():
# All blog posts - order >> descending
page = request.args.get("page", 1, type=int)
posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=6)
if not posts:
flash("No Posts are available", "info")
# blog posts - order >> trending
# keyword = request.args.get('sval','')
# results = Post.query.filter(Post.title.contains(keyword) |
# Post.body.contains(keyword))
trending = (
Post.query.filter(Post.views >= 1000)
.order_by(Post.date_posted.desc())
.paginate(page=page, per_page=6)
)
return render_template(
"blog/blog.html", posts=posts, trending=trending, title="Devsate | Home"
)
@bp.route("/search/", strict_slashes=False, methods=("GET", "POST"))
def search():
# keyword = request.args.get("sval")
keyword = request.form.get('sval')
if request.method == 'POST':
posts = Post.query.filter(or_(Post.title.ilike(f'%{keyword}%'), Post.body.ilike(f'%{keyword}%'))).all()
if not posts:
flash("No results matched your search", "info")
return render_template(
"blog/search_results.html",
label="Search Results",
posts=posts,
title="Devsate | Home",
)
return render_template(
"blog/search_results.html",
label="Search Results",
title="Devsate | Home",
)
@bp.route("/Technology/", strict_slashes=False)
def tech():
form = Search()
page = request.args.get("page", 1, type=int)
posts = (
Post.query.filter_by(category="Technology")
# .filter_by(s_category = 'Technology')
.paginate(page=page, per_page=10)
)
if not posts:
flash("No Posts are available", "info")
trending = (
Post.query.filter(Post.views >= 1000)
.order_by(Post.date_posted.desc())
.paginate(page=page, per_page=6)
)
# blog posts - order >> featured
return render_template(
"blog/blog.html",
posts=posts,
trending=trending,
form=form,
title="Devsate | Home",
)
@bp.route("/Business/", strict_slashes=False)
def bs():
form = Search()
page = request.args.get("page", 1, type=int)
posts = Post.query.filter_by(category="Business").paginate(page=page, per_page=10)
if not posts:
flash("No Posts are available", "info")
trending = (
Post.query.filter(Post.views >= 1000)
.order_by(Post.date_posted.desc())
.paginate(page=page, per_page=6)
)
return render_template(
"blog/blog.html",
posts=posts,
trending=trending,
form=form,
title="Devsate | Home",
)
@bp.route(
    "/<int:post_id>/<string:uname>/<string:slug>",
    methods=("GET", "POST"),
    strict_slashes=False,
)
def article(post_id, uname, slug):
    """Show a single post with its comments; a POST adds a new comment.

    Every request also bumps the post's view counter.
    """
    form = CommentPost()
    post = Post.query.filter_by(id=post_id).first()
    comments = Comments.query.filter_by(post_id=post.id).all()
    replies = Replies.query.filter_by(id=Replies.id).all()
    # Each page load counts as one view.
    post.views += 1
    db.session.commit()
    url = "http://127.0.0.1:5000/"
    read_time = estimate_reading_time(url)
    if request.method == "POST": #and form.validate_on_submit():
        # NOTE(review): form validation is disabled and there is no
        # @login_required, yet current_user attributes are read below —
        # confirm anonymous users cannot reach this branch.
        message = form.comment.data
        comment = Comments(
            message=message,
            post_id=post.id,
            respondent=current_user.uname,
            rimage=current_user.image,
        )
        db.session.add(comment)
        post.count += 1
        flash("Comment posted", "success")
        db.session.commit()
    return render_template(
        "blog/article.html",
        post=post,
        read_time=read_time,
        form=form,
        comments=comments,
        replies=replies,
        title="Devsate | Blog",
    )
@bp.route("/add", methods=("GET", "POST"), strict_slashes=False)
@login_required
@admin_required
def new_post():
posts = Post.query.order_by(Post.date_posted.desc()).all()
form = AddPost()
if form.validate_on_submit():
try:
if form.postImage.data:
picture_file = upload_img(form.postImage.data)
title = form.title.data
postImage = picture_file
body = form.body.data
category = form.category.data
s_category = form.s_category.data
post = Post(
title=title,
postImage=postImage,
body=body,
category=category,
s_category=s_category,
user_id=current_user.id,
)
db.session.add(post)
db.session.commit()
flash(f"Post succesfully published", "success")
return redirect(url_for("blog.blog"))
except InvalidRequestError:
db.session.rollback()
flash(f"Something went wrong!", "danger")
except IntegrityError:
db.session.rollback()
flash(f"User already exists!.", "warning")
except DataError:
db.session.rollback()
flash(f"Invalid Entry", "warning")
except InterfaceError:
db.session.rollback()
flash(f"Error connecting to the database", "danger")
except DatabaseError:
db.session.rollback()
flash(f"Error connecting to the database", "danger")
except BuildError:
db.session.rollback()
flash(f"An error occured !", "danger")
return render_template(
"blog/add.html",
form=form,
posts=posts,
title="Devsate | Blog",
legend="Create a new blog article",
)
@bp.route(
    "/<int:post_id>/<string:slug>/update",
    methods=("GET", "POST"),
    strict_slashes=False,
)
@login_required
@admin_required
def update_article(post_id, slug):
    """Admin-only: edit an existing post's title and body (author only)."""
    post = Post.query.filter_by(id=post_id).first()
    if post.author != current_user:
        abort(403)
    form = AddPost()
    if form.validate_on_submit():
        try:
            post.title = form.title.data
            post.body = form.body.data
            db.session.commit()
            flash("Post succesfully Updated", "success")
            return redirect(url_for("blog.blog"))
        except InvalidRequestError:
            db.session.rollback()
            flash(f"Something went wrong!", "danger")
        except IntegrityError:
            db.session.rollback()
            flash(f"User already exists!.", "warning")
        except DataError:
            db.session.rollback()
            flash(f"Invalid Entry", "warning")
        except InterfaceError:
            db.session.rollback()
            flash(f"Error connecting to the database", "danger")
        except DatabaseError:
            db.session.rollback()
            flash(f"Error connecting to the database", "danger")
        except BuildError:
            db.session.rollback()
            flash(f"An error occured !", "danger")
    elif request.method == "GET":
        # Pre-fill the form with the post's current values.
        form.title.data = post.title
        form.body.data = post.body
    return render_template(
        "blog/add.html",
        form=form,
        post=post,
        title="Devsate|Blog-update post",
        legend="Update Post",
    )
@bp.route("/<int:post_id>/<string:slug>/delete",methods=("GET", "POST"),strict_slashes=False,)
@login_required
@admin_required
def delete_article(post_id, slug):
post = Post.query.filter_by(id=post_id).first()
if post.author != current_user:
abort(403)
flash("Post has been deleted succesfully ", "success")
db.session.delete(post)
db.session.commit()
return redirect(url_for("blog.blog"))
@bp.route("/user/<string:uname>", methods=("GET", "POST"), strict_slashes=False)
def profile(uname):
user = User.query.filter_by(uname=uname).first_or_404()
page = request.args.get("page", 1, type=int)
posts = (
Post.query.filter_by(author=user)
.order_by(Post.date_posted.desc())
.paginate(page=page, per_page=6)
)
if not posts:
flash("Ooops! You don't have any posts yet.", "info")
image = url_for("static", filename="images/ProfileImages/" + user.image)
return render_template(
"blog/user_profile.html",
image=image,
posts=posts,
user=user,
title="Devsate | Profile",
)
@bp.route("/subscribe/", methods=("GET", "POST"), strict_slashes=False)
def subscribe():
form = Subscribe()
if request.method == "POST":
if form.validate_on_submit():
try:
email = form.email.data
email = Subscribers(
email=email,
)
db.session.add(email)
db.session.commit()
except InvalidRequestError:
db.session.rollback()
flash(f"Something went wrong!", "danger")
except IntegrityError:
db.session.rollback()
flash(f"You are already a member !.", "warning")
except DataError:
db.session.rollback()
flash(f"Invalid Entry", "warning")
except InterfaceError:
db.session.rollback()
flash(f"Error connecting to the database", "danger")
except DatabaseError:
db.session.rollback()
flash(f"Error connecting to the database", "danger")
except BuildError:
db.session.rollback()
flash(f"An error occured !", "danger")
return ("", 204)
return render_template(
"blog/article.html",
form=form,
title="Devsate | Blog",
)
# Comments reply route handler
@bp.route("/<int:comment_id>/replyComment/", methods=("GET", "POST"), strict_slashes=False)
def replyHandler(comment_id):
    """POST a reply to an existing comment; returns 204 on success.

    NOTE(review): no @login_required here, yet current_user.fname is read
    below — an anonymous user would fail; confirm intended access control.
    """
    form = ReplyComment()
    comment = Comments.query.filter_by(id=comment_id).first()
    replies = Replies.query.filter_by(comment_id=comment.id).all()
    if request.method == "POST":
        message = form.reply.data
        author = current_user.fname
        # Name reuse: 'message' becomes the Replies instance.
        message = Replies(
            message = message,
            author = author,
            comment_id = comment_id
        )
        db.session.add(message)
        db.session.commit()
        flash("Reply succesfully posted", "success")
        return ("",204)
# Handles javascript image uploads from tinyMCE
@bp.route("/imageuploader", methods=["POST"])
@login_required
@admin_required
def imageuploader():
    """Receive an image from the TinyMCE editor, store it plus a 160x160
    thumbnail under static/images/blog-posts, and return its location.

    Returns JSON ``{"location": filename}`` on success, or a 404 response
    whose "Error" header describes the failure.

    NOTE(review): `make_response` and `jsonify` are used here but are not in
    this module's `from flask import (...)` list — confirm they are imported.
    """
    file = request.files.get("file")
    if file:
        filename = file.filename.lower()
        fn, ext = filename.split(".")
        # truncate filename (excluding extension) to 30 characters
        fn = fn[:30]
        filename = fn + "." + ext
        if ext in ["jpg", "gif", "png", "jpeg"]:
            try:
                # everything looks good, save file
                img_fullpath = os.path.join(
                    current_app.root_path, "static/images/blog-posts", filename
                )
                file.save(img_fullpath)
                # get the file size to save to db
                file_size = os.stat(img_fullpath).st_size
                size = 160, 160
                # read image into pillow
                im = Image.open(img_fullpath)
                # get image dimension to save to db
                file_width, file_height = im.size
                # convert to thumbnail
                im.thumbnail(size)
                thumbnail = fn + "-thumb.jpg"
                # BUG FIX: write the thumbnail to its own path — the original
                # reused `filename` here, overwriting the full-size upload
                # with the 160x160 thumbnail.
                tmb_fullpath = os.path.join(
                    current_app.root_path, "static/images/blog-posts", thumbnail
                )
                # PNG is index while JPG needs RGB
                if not im.mode == "RGB":
                    im = im.convert("RGB")
                # save thumbnail
                im.save(tmb_fullpath, "JPEG")
                # # save to db
                # img = Images(
                #     filename=filename,
                #     thumbnail=thumbnail,
                #     file_size=file_size,
                #     file_width=file_width,
                #     file_height=file_height,
                # )
                # db.session.add(img)
                # db.session.commit()
            except IOError:
                output = make_response(404)
                output.headers["Error"] = "Cannot create thumbnail for " + filename
                return output
            # BUG FIX: only report success for an accepted extension; a
            # rejected extension now falls through to the 404 below instead
            # of returning a bogus "location".
            return jsonify({"location": filename})
    # fail, image did not upload
    output = make_response(404)
    output.headers["Error"] = "Filename needs to be JPG, JPEG, GIF or PNG"
    return output
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from distutils.core import setup
import uritemplate
base_url = "http://github.com/uri-templates/uritemplate-py/"
setup(
name = 'uritemplate',
version = uritemplate.__version__,
description = 'URI Templates',
author = 'Joe Gregorio',
author_email = 'joe@bitworking.org',
url = base_url,
download_url = \
'%starball/uritemplate-py-%s' % (base_url, uritemplate.__version__),
packages = ['uritemplate'],
provides = ['uritemplate'],
long_description=open("README.rst").read(),
install_requires = ['simplejson >= 2.5.0'],
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Operating System :: POSIX',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
nilq/baby-python
|
python
|
#
# Copyright (C) 2019 Luca Pasqualini
# University of Siena - Artificial Intelligence Laboratory - SAILab
#
#
# USienaRL is licensed under a BSD 3-Clause.
#
# You should have received a copy of the license along with this
# work. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
# Import scripts
from .pass_through import PassThroughInterface
|
nilq/baby-python
|
python
|
from appcontroller import AppController
class CustomAppController(AppController):
    """AppController subclass that reports the switch's forward counter
    when the app stops (Python 2 syntax)."""
    def __init__(self, *args, **kwargs):
        AppController.__init__(self, *args, **kwargs)
    def start(self):
        print "Calling the default controller to populate table entries"
        AppController.start(self)
    def stop(self):
        # Register index 0 of 'forward_count_register' holds the total
        # number of forwarded packets.
        reg_val = self.readRegister('forward_count_register', 0)
        print "The switch forwarded a total of %d packets" % reg_val
        AppController.stop(self)
|
nilq/baby-python
|
python
|
import json
import os, re
import numpy as np
import pandas as pd
from scipy.stats import describe
from tqdm import tqdm

# Parse per-request inference timing/power records from every profiling log
# under `path`, then report latency percentiles and the peak (fractional)
# request concurrency observed in any sliding TIME_WINDOW-second window.
path = "/home/ubuntu/model-inference/rayserve/gpt-optim"
dirs = os.listdir(path)

# requests maps request uuid -> list of per-batch records parsed from the logs.
requests = {}
for file in dirs:
    filepath = os.path.join(path, file)
    # Only files whose name contains "e" hold inference logs.
    # NOTE(review): this filter looks fragile — confirm the naming convention.
    if "e" not in file:
        continue
    print(filepath)
    with open(filepath, "r") as fp:
        text = fp.read()
    # Each match: [uuid, ...] ... (bsz) inference (start_t, end_t, start_p, end_p)
    groups = re.findall(
        r"\[(.*),.*\].*\((\d+)\) inference \((.*), (.*), (.*), (.*)\)", text
    )
    for group in groups:
        uuid, bsz, start_time, end_time, start_power, end_power = group
        requests.setdefault(uuid, []).append(
            {
                "uuid": uuid,
                "bsz": int(bsz),
                "start_time": float(start_time),
                "end_time": float(end_time),
                "start_power": int(start_power),
                "end_power": int(end_power),
            }
        )

# Aggregate each request's records into a single [start, end] span.
agg_requests = {}
for uuid in requests:
    # BUG FIX: the original assigned the min to a typo'd name (`starty_time`)
    # and took the span end as max of 'start_time', so agg_requests silently
    # stored the stale start_time/end_time left over from the parsing loop.
    start_time = min(record["start_time"] for record in requests[uuid])
    end_time = max(record["end_time"] for record in requests[uuid])
    bsz = requests[uuid][0]["bsz"]
    agg_requests[uuid] = {
        "uuid": uuid,
        "bsz": bsz,
        "start_time": start_time,
        "end_time": end_time,
    }

# Total bsz==1 latency per request; report p99, p50 and summary statistics.
latency_list = [
    sum(
        record["end_time"] - record["start_time"]
        for record in records
        if record["bsz"] == 1
    )
    for uuid, records in requests.items()
]
print(np.percentile(latency_list, 99))
print(np.percentile(latency_list, 50))
print(describe(latency_list))

df_time = pd.DataFrame(list(agg_requests.values()))
print(df_time)
df_time = df_time.sort_values(by="start_time")

# Slide a TIME_WINDOW-second window anchored at each request start and count
# how many requests fall inside it, crediting partial overlap fractionally.
max_count = 0
max_records = None
TIME_WINDOW = 1.0
for t in tqdm(df_time["start_time"].to_numpy()):
    win_l = t
    win_h = t + TIME_WINDOW
    tmp_records = []
    tmp_counts = 0
    for idx, row in df_time.iterrows():
        if row["end_time"] <= win_h and win_l <= row["start_time"]:
            # Request lies entirely inside the window: counts as 1.
            tmp_counts += 1
            tmp_records.append(row)
            print("enclosed", tmp_counts)
        elif row["end_time"] > win_h and row["start_time"] < win_h:
            # Request overhangs the right edge: credit the overlapped fraction.
            tmp_counts += (win_h - row["start_time"]) / (
                row["end_time"] - row["start_time"]
            )
            tmp_records.append(row)
            print("high", tmp_counts)
        elif row["end_time"] > win_l and win_l > row["start_time"]:
            # Request overhangs the left edge: credit the overlapped fraction.
            tmp_counts += (row["end_time"] - win_l) / (
                row["end_time"] - row["start_time"]
            )
            tmp_records.append(row)
            print("low", tmp_counts)
    if tmp_counts > max_count:
        max_count = tmp_counts
        max_records = tmp_records
    print("tmp_counts", tmp_counts)

# The `* 2` presumably converts the windowed request count into a rate for
# this workload — TODO confirm against the serving setup before relying on it.
print("max_count", max_count / TIME_WINDOW * 2)
# (A commented-out per-request energy-integration analysis that used
# max_records' start/end power samples was removed as dead code.)
|
nilq/baby-python
|
python
|
# Print every even number in the interval 1..50 on one line, separated by
# ", " and terminated by a period after the final value (50).
print(', '.join(str(par) for par in range(2, 51, 2)) + '.')
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.