input
stringlengths
2.65k
237k
output
stringclasses
1 value
"""Script to run pose and shape evaluation for different datasets and methods.""" import argparse import os from datetime import datetime import time from typing import List, Optional, Tuple import random import sys from scipy.spatial.transform import Rotation import numpy as np import matplotlib.pyplot as plt import open3d as o3d import tikzplotlib import torch from tqdm import tqdm import yoco from cpas_toolbox import metrics, pointset_utils, quaternion_utils, camera_utils, utils from cpas_toolbox.method_wrappers import MethodWrapper, PredictionDict def visualize_estimation( color_image: torch.Tensor, depth_image: torch.Tensor, local_cv_position: torch.Tensor, local_cv_orientation_q: torch.Tensor, camera: camera_utils.Camera, instance_mask: Optional[torch.Tensor] = None, extents: Optional[torch.Tensor] = None, reconstructed_points: Optional[torch.Tensor] = None, reconstructed_mesh: Optional[o3d.geometry.TriangleMesh] = None, vis_camera_json: Optional[str] = None, render_options_json: Optional[str] = None, vis_path: Optional[str] = None, ) -> None: """Visualize prediction and ask for confirmation. Args: color_image: The unmasked color image. Shape (H,W,3), RGB, 0-1, float. depth_image: The unmasked depth image. Shape (H,W), float (meters along z). local_cv_position: The position in the OpenCV camera frame. Shape (3,). local_cv_orientation_q: The orientation in the OpenCV camera frame. Scalar last, shape (4,). extents: Extents of the bounding box. Not visualized if None. Shape (3,). instance_mask: The instance mask. No masking if None. Shape (H,W). reconstructed_points: Reconstructed points in object coordinate frame. Not visualized if None. The points must already metrically scaled. Shape (M,3). reconstructed_mesh: Reconstructed mesh in object coordinate frame. Not visualized if None. The mesh must already metrically scaled. vis_camera_json: Path to open3d camera options json file that will be applied. Generated by pressing p in desired view. 
No render options will be applied if None. vis_path: If not None, the image will be rendered off screen and saved at the specified path. Returns: True if confirmation was positive. False if negative. """ o3d_geometries = [] local_cv_position = local_cv_position.cpu().double().numpy() # shape (3,) local_cv_orientation_q = local_cv_orientation_q.cpu().double().numpy() # shape (4,) if instance_mask is not None: valid_depth_mask = (depth_image != 0) * instance_mask else: valid_depth_mask = depth_image != 0 pointset_colors = color_image[valid_depth_mask] masked_pointset = pointset_utils.depth_to_pointcloud( depth_image, camera, normalize=False, mask=instance_mask, convention="opencv", ) o3d_points = o3d.geometry.PointCloud( points=o3d.utility.Vector3dVector(masked_pointset.cpu().numpy()) ) o3d_points.colors = o3d.utility.Vector3dVector(pointset_colors.cpu().numpy()) o3d_geometries.append(o3d_points) # coordinate frame local_cv_orientation_m = Rotation.from_quat(local_cv_orientation_q).as_matrix() o3d_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1) o3d_frame.rotate( local_cv_orientation_m, center=np.array([0.0, 0.0, 0.0])[:, None], ) o3d_frame.translate(local_cv_position[:, None]) o3d_geometries.append(o3d_frame) o3d_cam_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.3) o3d_geometries.append(o3d_cam_frame) if extents is not None: extents = extents.cpu().double().numpy() o3d_obb = o3d.geometry.OrientedBoundingBox( center=local_cv_position[:, None], R=local_cv_orientation_m, extent=extents[:, None], ) o3d_geometries.append(o3d_obb) if reconstructed_points is not None: o3d_rec_points = o3d.geometry.PointCloud( points=o3d.utility.Vector3dVector(reconstructed_points.cpu().numpy()) ) o3d_rec_points.rotate( local_cv_orientation_m, center=np.array([0.0, 0.0, 0.0])[:, None], ) o3d_rec_points.translate(local_cv_position[:, None]) o3d_geometries.append(o3d_rec_points) if reconstructed_mesh is not None: # copy the mesh to keep original unmoved 
posed_mesh = o3d.geometry.TriangleMesh(reconstructed_mesh) posed_mesh.rotate( local_cv_orientation_m, center=np.array([0.0, 0.0, 0.0])[:, None], ) posed_mesh.translate(local_cv_position[:, None]) posed_mesh.compute_vertex_normals() o3d_geometries.append(posed_mesh) vis = o3d.visualization.Visualizer() if vis_camera_json is not None: vis_camera = o3d.io.read_pinhole_camera_parameters(vis_camera_json) width = vis_camera.intrinsic.width height = vis_camera.intrinsic.height else: width = 800 height = 600 vis_camera = None vis.create_window(width=width, height=height, visible=(vis_path is None)) for g in o3d_geometries: vis.add_geometry(g) if vis_camera is not None: view_control = vis.get_view_control() view_control.convert_from_pinhole_camera_parameters(vis_camera) if render_options_json is not None: render_option = vis.get_render_option() render_option.load_from_json(render_options_json) if vis_path is not None: vis.poll_events() vis.update_renderer() vis.capture_screen_image(vis_path, do_render=True) else: vis.run() class Evaluator: """Class to evaluate various pose and shape estimation algorithms.""" # ShapeNetV2 convention for all objects and datasets assumed # for simplicity assume all cans, bowls and bottles to be rotation symmetric SYMMETRY_AXIS_DICT = { "mug": None, "laptop": None, "camera": None, "can": 1, "bowl": 1, "bottle": 1, } def __init__(self, config: dict) -> None: """Initialize model wrappers and evaluator.""" self._parse_config(config) def _parse_config(self, config: dict) -> None: """Read config and initialize method wrappers.""" self._init_dataset(config["dataset_config"]) self._visualize_input = config["visualize_input"] self._visualize_prediction = config["visualize_prediction"] self._visualize_gt = config["visualize_gt"] self._fast_eval = config["fast_eval"] self._store_visualization = config["store_visualization"] self._run_name = ( f"{self._dataset_name}_eval_{config['run_name']}_" f"{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}" ) 
self._out_folder = config["out_folder"] self._metrics = config["metrics"] self._num_gt_points = config["num_gt_points"] self._vis_camera_json = config["vis_camera_json"] self._render_options_json = config["render_options_json"] self._cam = camera_utils.Camera(**config["camera"]) self._init_wrappers(config["methods"]) self._config = config def _init_dataset(self, dataset_config: dict) -> None: """Initialize reading of dataset. This includes sanity checks whether the provided path is correct. """ self._dataset_name = dataset_config["name"] print(f"Initializing {self._dataset_name} dataset...") dataset_type = utils.str_to_object(dataset_config["type"]) self._dataset = dataset_type(config=dataset_config["config_dict"]) # Faster but probably only worth it if whole evaluation supports batches # self._dataloader = DataLoader(self._dataset, 1, num_workers=8) if len(self._dataset) == 0: print(f"No images found for dataset {self._dataset_name}") exit() print(f"{len(self._dataset)} samples found for dataset {self._dataset_name}.") def _init_wrappers(self, method_configs: dict) -> None: """Initialize method wrappers.""" self._wrappers = {} for method_dict in method_configs.values(): method_name = method_dict["name"] print(f"Initializing {method_name}...") wrapper_type = utils.str_to_object(method_dict["wrapper_type"]) self._wrappers[method_name] = wrapper_type( config=method_dict["config_dict"], camera=self._cam ) def _eval_method(self, method_name: str, method_wrapper: MethodWrapper) -> None: """Run and evaluate method on all samples.""" print(f"Run {method_name}...") self._init_metrics() indices = list(range(len(self._dataset))) random.seed(0) random.shuffle(indices) for i in tqdm(indices): if self._fast_eval and i % 10 != 0: continue sample = self._dataset[i] if self._visualize_input: _, ((ax1, ax2), (ax3, _)) = plt.subplots(2, 2) ax1.imshow(sample["color"].numpy()) ax2.imshow(sample["depth"].numpy()) ax3.imshow(sample["mask"].numpy()) plt.show() t_start = time.time() 
prediction = method_wrapper.inference( color_image=sample["color"], depth_image=sample["depth"], instance_mask=sample["mask"], category_str=sample["category_str"], ) inference_time = time.time() - t_start self._runtime_data["total"] += inference_time self._runtime_data["count"] += 1 if self._visualize_gt: visualize_estimation( color_image=sample["color"], depth_image=sample["depth"], local_cv_position=sample["position"], local_cv_orientation_q=sample["quaternion"], reconstructed_mesh=self._dataset.load_mesh(sample["obj_path"]), extents=sample["scale"], camera=self._cam, vis_camera_json=self._vis_camera_json, render_options_json=self._render_options_json, ) if self._visualize_prediction: visualize_estimation( color_image=sample["color"], depth_image=sample["depth"], local_cv_position=prediction["position"], local_cv_orientation_q=prediction["orientation"], extents=prediction["extents"], reconstructed_points=prediction["reconstructed_pointcloud"], reconstructed_mesh=prediction["reconstructed_mesh"], camera=self._cam, vis_camera_json=self._vis_camera_json, render_options_json=self._render_options_json, ) if self._store_visualization: out_folder = os.path.join( self._out_folder, self._run_name, "visualization" ) os.makedirs(out_folder, exist_ok=True) vis_path = os.path.join(out_folder, f"{i:06}_{method_name}.jpg") visualize_estimation( color_image=sample["color"], depth_image=sample["depth"], local_cv_position=prediction["position"], local_cv_orientation_q=prediction["orientation"], extents=prediction["extents"], reconstructed_points=prediction["reconstructed_pointcloud"], reconstructed_mesh=prediction["reconstructed_mesh"], camera=self._cam, vis_camera_json=self._vis_camera_json, render_options_json=self._render_options_json, vis_path=vis_path, ) self._eval_prediction(prediction, sample) self._finalize_metrics(method_name) def _eval_prediction(self, prediction: PredictionDict, sample: dict) -> None: """Evaluate all metrics for a prediction.""" # correctness metric for 
metric_name in self._metrics.keys(): self._eval_metric(metric_name, prediction, sample) def _init_metrics(self) -> None: """Initialize metrics.""" self._metric_data = {} self._runtime_data = { "total": 0.0, "count": 0.0, } for metric_name, metric_config_dict in self._metrics.items(): self._metric_data[metric_name] = self._init_metric_data(metric_config_dict) def _init_metric_data(self, metric_config_dict: dict) -> dict: """Create data structure necessary to compute a metric.""" metric_data = {} if "position_thresholds" in metric_config_dict: pts = metric_config_dict["position_thresholds"] dts = metric_config_dict["deg_thresholds"] its = metric_config_dict["iou_thresholds"] fts = metric_config_dict["f_thresholds"] metric_data["correct_counters"] = np.zeros( ( len(pts), len(dts), len(its), len(fts), self._dataset.num_categories + 1, ) ) metric_data["total_counters"] = np.zeros(self._dataset.num_categories + 1) elif "pointwise_f" in metric_config_dict: metric_data["means"] = np.zeros(self._dataset.num_categories + 1) metric_data["m2s"] = np.zeros(self._dataset.num_categories + 1) metric_data["counts"] = np.zeros(self._dataset.num_categories + 1) else: raise NotImplementedError("Unsupported metric configuration.") return metric_data def _eval_metric( self, metric_name: str, prediction: PredictionDict, sample: dict ) -> None: """Evaluate and update single metric for a single prediction. Args: metric_name: Name of metric to evaluate. prediction: Dictionary containing prediction data. sample: Sample containing ground truth information. """ metric_config_dict = self._metrics[metric_name] if "position_thresholds" in metric_config_dict: # correctness metrics self._eval_correctness_metric(metric_name, prediction, sample) elif "pointwise_f" in metric_config_dict: # pointwise reconstruction metrics self._eval_pointwise_metric(metric_name, prediction, sample) else: raise NotImplementedError( f"Unsupported metric configuration with name {metric_name}." 
) def _eval_correctness_metric( self, metric_name: str, prediction: PredictionDict, sample: dict ) -> None: """Evaluate and update single correctness metric for a single prediction. Args: metric_name: Name of metric to evaluate. prediction: Dictionary containing prediction data. sample: Sample containing ground truth information. """ metric_dict = self._metrics[metric_name] correct_counters = self._metric_data[metric_name]["correct_counters"] total_counters = self._metric_data[metric_name]["total_counters"] category_id = sample["category_id"] total_counters[category_id] += 1 total_counters[-1] += 1 gt_points, pred_points = self._get_points(sample, prediction, True) for pi, p in enumerate(metric_dict["position_thresholds"]): for di, d in enumerate(metric_dict["deg_thresholds"]): for ii, i in enumerate(metric_dict["iou_thresholds"]): for fi, f in enumerate(metric_dict["f_thresholds"]): correct = metrics.correct_thresh( position_gt=sample["position"].cpu().numpy(), position_prediction=prediction["position"].cpu().numpy(), orientation_gt=Rotation.from_quat(sample["quaternion"]), orientation_prediction=Rotation.from_quat( prediction["orientation"] ), extent_gt=sample["scale"].cpu().numpy(), extent_prediction=prediction["extents"].cpu().numpy(), points_gt=gt_points, points_prediction=pred_points, position_threshold=p, degree_threshold=d, iou_3d_threshold=i, fscore_threshold=f, rotational_symmetry_axis=self.SYMMETRY_AXIS_DICT[ sample["category_str"] ], ) correct_counters[pi, di, ii, fi, category_id] += correct correct_counters[pi, di, ii, fi, -1] += correct # all def _eval_pointwise_metric( self, metric_name: str, prediction: PredictionDict, sample: dict ) -> None: """Evaluate and update single pointwise metric for a single prediction. Args: metric_name: Name of metric to evaluate. prediction: Dictionary containing prediction data. sample: Sample containing ground truth information. 
""" metric_config_dict = self._metrics[metric_name] means = self._metric_data[metric_name]["means"] m2s = self._metric_data[metric_name]["m2s"] counts = self._metric_data[metric_name]["counts"] category_id = sample["category_id"] point_metric = utils.str_to_object(metric_config_dict["pointwise_f"]) gt_points, pred_points = self._get_points( sample, prediction, metric_config_dict["posed"] ) result = point_metric( gt_points.numpy(), pred_points.numpy(), **metric_config_dict["kwargs"] ) # Use Welfords algorithm to update mean and variance # for category counts[category_id] += 1 delta = result - means[category_id] means[category_id] += delta / counts[category_id] delta2 = result - means[category_id] m2s[category_id] += delta * delta2 # for all counts[-1] += 1 delta = result - means[-1] means[-1] += delta / counts[-1] delta2 = result - means[-1] m2s[-1] += delta * delta2 def _get_points( self, sample: dict, prediction: PredictionDict, posed: bool ) ->
#!/usr/bin/env python """Train HMMs for alignment of signal data from the MinION """ from __future__ import print_function, division import sys import os import urlparse import textwrap import yaml import h5py from argparse import ArgumentParser from random import shuffle from shutil import copyfile from multiprocessing import Process, current_process, Manager from signalalign import parseFofn, DEFAULT_TRAINMODELS_OPTIONS from signalalign.signalAlignment import SignalAlignment from signalalign.hiddenMarkovModel import ContinuousPairHmm, HdpSignalHmm from signalalign.utils import processReferenceFasta from signalalign.utils.fileHandlers import FolderHandler from signalalign.utils.bwaWrapper import getBwaIndex class AbstractSamples(object): def __init__(self, source, reference_map): self.source = source self.reference_map = reference_map def _parse(self): raise NotImplementedError def getFiles(self): raise NotImplementedError def getKey(self): return self.source def getReferenceMap(self): return self.reference_map class Fast5Directory(AbstractSamples): def __init__(self, source, reference_map): AbstractSamples.__init__(self, source, reference_map) self.files = self._parse() def _parse(self): return [self.source + x for x in os.listdir(self.source) if x.endswith(".fast5")] def getFiles(self): return self.files class FileOfFilenames(AbstractSamples): def __init__(self, source, reference_map): AbstractSamples.__init__(self, source, reference_map) self.files = self._parse() def _parse(self): return parseFofn(self.source) def getFiles(self): return self.files def parse_args(): parser = ArgumentParser(description=__doc__) # required arguments parser.add_argument('--file_directory', '-d', action='append', default=None, dest='files_dir', required=True, type=str, help="directories with fast5 files to train on. 
example: ../reads/") parser.add_argument('--ref', '-r', action='store', default=None, dest='ref', required=True, type=str, help="location of refrerence sequence in FASTA, example: ../ref.fasta") parser.add_argument('--output_location', '-o', action='store', dest='out', default=None, required=True, type=str, help="directory to put the trained model, and use for working directory. example: ./scratch/") # optional arguments parser.add_argument("--2d", action='store_true', dest="twoD", default=False, help="flag, reads are 2D chemistry.") parser.add_argument("--bwt", action='store', dest="bwt", default=None, help="path to BWT files. example: ../ref.fasta") parser.add_argument('--stateMachineType', '-smt', action='store', dest='stateMachineType', type=str, default="threeState", required=False, help="StateMachine options: threeState, threeStateHdp") parser.add_argument("--file_of_files", "-fofn", action="append", required=False, default=None, dest="fofn", type=str, help="text file with absolute paths of files to use") parser.add_argument('--iterations', '-i', action='store', dest='iter', default=10, required=False, type=int, help='number of iterations to perform') parser.add_argument('--train_amount', '-a', action='store', dest='amount', default=15000, required=False, type=int, help="limit the total length of sequence to use in training (batch size).") parser.add_argument('--in_template_hmm', '-T', action='store', dest='in_T_Hmm', required=True, type=str, help="template model to bootstrap from, find a starting model in the " "models directory") parser.add_argument('--in_complement_hmm', '-C', action='store', dest='in_C_Hmm', required=True, type=str, help="complement model to bootstrap from, find a starting model in the " "models directory") parser.add_argument('--templateHDP', '-tH', action='store', dest='templateHDP', default=None, help="path to template HDP model to use") parser.add_argument('--complementHDP', '-cH', action='store', dest='complementHDP', default=None, 
help="path to complement HDP model to use") parser.add_argument('--jobs', '-j', action='store', dest='nb_jobs', required=False, default=4, type=int, help="number of jobs to run concurrently") parser.add_argument('--test', action='store_true', default=False, dest='test', help="Used for CI testing") parser.add_argument('--ambiguity_positions', '-p', action='store', required=False, default=None, dest='substitution_file', help="Substitution positions") parser.add_argument("--motif", action="store", dest="motif_key", default=None) parser.add_argument('--ambig_char', '-X', action='append', required=False, default=None, type=str, dest='labels', help="Character to substitute at positions, default is 'X'.") parser.add_argument('--diagonalExpansion', '-e', action='store', dest='diag_expansion', type=int, required=False, default=None, help="number of diagonals to expand around each anchor default: 50") parser.add_argument('--constraintTrim', '-m', action='store', dest='constraint_trim', type=int, required=False, default=None, help='amount to remove from an anchor constraint') parser.add_argument('--debug', action='store_true', dest="DEBUG", default=False) args = parser.parse_args() return args def get_2d_length(fast5): read = h5py.File(fast5, 'r') read_length = 0 twoD_read_sequence_address = "/Analyses/Basecall_2D_000/BaseCalled_2D/Fastq" if not (twoD_read_sequence_address in read): print("This read didn't have a 2D read", fast5, end='\n', file=sys.stderr) read.close() return 0 else: read_length = len(read[twoD_read_sequence_address][()].split()[2]) read.close() return read_length def get_1d_length(fast5): read = h5py.File(fast5, "r") read_length = 0 template_fastq_address = "/Analyses/Basecall_1D_000/BaseCalled_template/Fastq" if not (template_fastq_address in read): print("Read %s has not been basecalled" % fast5) read.close() return 0 else: read_length = len(read[template_fastq_address][()].split()[2]) print("read %s has %s bases" % (fast5, read_length)) read.close() 
return read_length def cull_training_files(samples, training_amount, twoD): print("trainModels - culling training files.\n", end="", file=sys.stderr) training_files = [] for sample in samples: shuffle(sample.getFiles()) total_amount = 0 file_count = 0 get_seq_len_fcn = get_2d_length if twoD else get_1d_length # loop over files and add them to training list, break when we have enough bases to complete a batch # make a list of tuples [(fast5_path, (plus_ref_seq, minus_ref_seq))] for f in sample.getFiles(): training_files.append((f, sample.getReferenceMap())) file_count += 1 total_amount += get_seq_len_fcn(f) if total_amount >= training_amount: break print("Culled {file_count} training files, for {bases} from {sample}." .format(file_count=file_count, bases=total_amount, sample=sample.getKey()), end="\n", file=sys.stderr) shuffle(training_files) return training_files # [(path_to_fast5, reference_map)...] def get_expectations(work_queue, done_queue): try: for f in iter(work_queue.get, 'STOP'): alignment = SignalAlignment(**f) alignment.run(get_expectations=True) except Exception, e: done_queue.put("%s failed with %s" % (current_process().name, e.message)) def get_model(model_type, model_file): assert (model_type in ["threeState", "threeStateHdp"]), "Unsupported StateMachine type" # todo clean this up if model_type == "threeState": assert model_file is not None, "Need to have starting lookup table for {} HMM".format(type) model = ContinuousPairHmm(model_type=model_type) model.load_model(model_file=model_file) return model if model_type == "threeStateHdp": model = HdpSignalHmm(model_type=model_type) model.load_model(model_file=model_file) return model def add_and_norm_expectations(path, files, model, hmm_file, update_transitions=False, update_emissions=False): if update_emissions is False and update_transitions is False: print("[trainModels] NOTICE: Training transitions by default\n", file=sys.stderr) update_transitions = True model.likelihood = 0 files_added_successfully 
= 0 files_with_problems = 0 for f in files: try: success = model.add_expectations_file(path + f) os.remove(path + f) if success: files_added_successfully += 1 else: files_with_problems += 1 except Exception as e: print("Problem adding expectations file {file} got error {e}".format(file=path + f, e=e), file=sys.stderr) os.remove(path + f) files_with_problems += 1 model.normalize(update_transitions=update_transitions, update_emissions=update_emissions) model.write(hmm_file) model.running_likelihoods.append(model.likelihood) if type(model) is HdpSignalHmm: model.reset_assignments() print("[trainModels] NOTICE: Added {success} expectations files successfully, {problem} files had problems\n" "".format(success=files_added_successfully, problem=files_with_problems), file=sys.stderr) def build_hdp(template_hdp_path, complement_hdp_path, template_assignments, complement_assignments, samples, burn_in, thinning, verbose=False): assert (template_assignments is not None) and (complement_assignments is not None), \ "trainModels - ERROR: missing assignments" if verbose is True: verbose_flag = "--verbose " else: verbose_flag = "" command = "./buildHdpUtil {verbose}-v {tHdpP} -w {cHdpP} -E {tExpectations} -W {cExpectations} " \ "-n {samples} -I {burnIn} -t {thinning}".format(tHdpP=template_hdp_path, cHdpP=complement_hdp_path, tExpectations=template_assignments, cExpectations=complement_assignments, samples=samples, burnIn=burn_in, thinning=thinning, verbose=verbose_flag) print("[trainModels] Running command:{}".format(command), file=sys.stderr) os.system(command) # todo try checkoutput print("trainModels - built HDP.", file=sys.stderr) return def validateConfig(config): # check for inputs if config["fast5_dir"] is None and config["fofn"] is None: raise RuntimeError("Need to provide a directory of Fast5 files or a file of filenames (fofn)") # check for valid paths (if local) ref_url = urlparse.urlparse(config["reference_url"]) if ref_url.scheme == "file": if not 
os.path.exists(ref_url.path): raise RuntimeError("Cannot find file: %s" % config["reference_url"]) return def generateConfig(config_path): if os.path.exists(config_path): raise RuntimeError config_content = textwrap.dedent("""\ # SignalAlign model training config file output_dir: ../tests/ samples: [ { fast5_dir: ../tests/minion_test_reads/C/, fofn:, positions_file:, motif:, label:, } ] reference: ../tests/test_sequences/zymo_sequence.fasta bwt: stateMachineType: threeState in_T_Hmm: ../models/testModelR73_acegot_template.model in_C_Hmm: ../models/testModelR73_acegot_complement.model templateHdp: complementHdp: iterations: 2 training_bases: 1000 job_count: 4 diagonal_expansion: constraint_trim: twoD: true DEBUG: true TEST: """) fH = open(config_path, "w") fH.write(config_content) fH.flush() fH.close() def trainModelTransitions(config): def process_sample(sample): options = dict(**DEFAULT_TRAINMODELS_OPTIONS) options.update(sample) if options["fast5_dir"] is None and options["fofn"] is None: raise RuntimeError("Need to provide path to .fast5 files or file with filenames (fofn)") reference_map = processReferenceFasta(fasta=config["reference"], work_folder=working_folder, motif_key=options["motif"], sub_char=options["label"], positions_file=options["positions_file"]) if options["fast5_dir"] is not None: if options["fofn"] is not None: print("WARNING Only using files is directory %s ignoring fofn %s" % (options["files_dir"], options["fofn"])) sample = Fast5Directory(options["fast5_dir"], reference_map) else: sample = FileOfFilenames(options["fofn"], reference_map) return sample # make directory to put the files we're using working_folder = FolderHandler() working_folder_path = working_folder.open_folder(config["output_dir"] + "temp_trainModels") samples = [process_sample(s) for s in config["samples"]] if config["bwt"] is not None: print("[trainModels]Using provided BWT") bwa_ref_index = config["bwt"] else: print("signalAlign - indexing reference", file=sys.stderr) 
bwa_ref_index = getBwaIndex(config["reference"], working_folder_path) print("signalAlign - indexing reference, done", file=sys.stderr) template_model_path = config["in_T_Hmm"] complement_model_path = config["in_C_Hmm"] assert os.path.exists(template_model_path) and os.path.exists(complement_model_path), \ "Missing input models %s and %s" % (template_model_path, complement_model_path) template_model = get_model(config["stateMachineType"], template_model_path) complement_model = get_model(config["stateMachineType"], complement_model_path) if config["twoD"] else None # get the input HDP, if we're using it if config["stateMachineType"] == "threeStateHdp": template_hdp = working_folder.add_file_path("%s" % config["templateHdp"].split("/")[-1]) copyfile(config["templateHdp"], template_hdp) if config["twoD"]: complement_hdp = working_folder.add_file_path("%s" % config["complementHdp"].split("/")[-1]) copyfile(config["complementHdp"], complement_hdp) else: complement_hdp = None else: template_hdp = None complement_hdp = None # make some paths to files to hold the HMMs template_hmm = working_folder.add_file_path("template_trained.hmm") complement_hmm = working_folder.add_file_path("complement_trained.hmm") trained_models = [template_hmm, complement_hmm] untrained_models = [template_model_path, complement_model_path] for default_model, trained_model in zip(untrained_models, trained_models): assert os.path.exists(default_model), "Didn't find default model {}".format(default_model) copyfile(default_model, trained_model) assert os.path.exists(trained_model), "Problem copying default model to {}".format(trained_model) # start iterating i = 0 while i < config["iterations"]: # first cull a set of files to get expectations on training_files = cull_training_files(samples=samples, training_amount=config["training_bases"], twoD=config["twoD"]) # setup workers = config["job_count"] work_queue = Manager().Queue() done_queue = Manager().Queue() jobs = [] # get expectations for all the 
files in the queue # file_ref_tuple should be (fast5, (plus_ref_seq, minus_ref_seq)) for fast5, ref_map in training_files: alignment_args = { "reference_map": ref_map, "destination": working_folder_path, "stateMachineType": config["stateMachineType"], "bwa_index": bwa_ref_index, "in_templateHmm": template_hmm, "in_complementHmm": complement_hmm, "in_templateHdp": template_hdp, "in_complementHdp": complement_hdp, "in_fast5": fast5, "threshold": 0.01, "diagonal_expansion": config["diagonal_expansion"], "constraint_trim": config["constraint_trim"],
#!/usr/bin/python ############################################################################# # Classes of CyRIS features ############################################################################# INSTANTIATION_DIR = "instantiation" # External imports from entities import Command ######################################################################### # Class Modules is the parent class of every other modules / features class Modules(object): def __init__(self, name, abspath): self.name = name self.abspath = abspath def getName(self): return self.name def getAbsPath(self): return self.abspath ############################################################ # Copy ssh keygen from the local machine to a remote one class SSHKeygenHostname(Modules): def __init__(self, vm_addr, root_passwd, hostname, mstnode_account, abspath, os_type): Modules.__init__(self, "SSHKeygen", abspath) self.vm_addr = vm_addr self.root_passwd = <PASSWORD> self.hostname = hostname self.mstnode_account = mstnode_account self.os_type =os_type def command(self): desc = "Generate ssh keys and do hostname setup" if self.os_type=="windows.7": command_string ="{0}{1}/sshkey_hostname_setup/sshkey_setup_win_cmd.sh {0} {1} {2} {3} {4};".format(self.getAbsPath(), INSTANTIATION_DIR, self.vm_addr, self.root_passwd, self.mstnode_account) elif self.os_type in ["windows.8.1","windows.10"] : command_string ="{0}{1}/sshkey_hostname_setup/sshkey_setup_win_unix.sh {0} {1} {2} {3} {4};".format(self.getAbsPath(), INSTANTIATION_DIR, self.vm_addr, self.root_passwd, self.mstnode_account) else: command_string = "{0}{5}/sshkey_hostname_setup/sshkey_setup.sh {1} {2} {3}; {0}{5}/sshkey_hostname_setup/hostname_setup.sh {1} {2} {4};".format(self.getAbsPath(), self.vm_addr, self.root_passwd, self.mstnode_account, self.hostname, INSTANTIATION_DIR) command = Command(command_string, desc) return command ######################################################################### # Manage users in the system. 
# Contains functions for adding new accounts
# and editing info of existing accounts.
class ManageUsers(Modules):
    """Add new user accounts and modify existing ones on a guest VM."""

    def __init__(self, addr, abspath):
        Modules.__init__(self, "ManageUsers", abspath)
        self.addr = addr  # address of the target VM

    def add_account(self, new_account, new_passwd, full_name, os_type, basevm_type):
        """Build the Command that creates `new_account` on the VM.

        NOTE(review): if basevm_type/os_type fall outside the handled cases,
        `command_string` is never assigned (NameError) -- behavior kept as in
        the original.
        """
        desc = "Add user account '{0}'".format(new_account)
        if full_name:
            full_name_arg = full_name
        else:
            full_name_arg = ""
        if basevm_type == 'kvm':
            if os_type == "windows.7":
                command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'net user {2} {3} /ADD' ;".format(self.addr, self.getAbsPath(), new_account, new_passwd)
                command_string += "sshpass -p {0} ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {1}@{2} 'dir' ;".format(new_passwd, new_account, self.addr)
                command_string += "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'net localgroup \"Remote Desktop Users\" {2} /ADD'".format(self.addr, self.getAbsPath(), new_account)
            else:
                command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'bash -s' < {1}{5}/users_managing/add_user.sh {2} {3} yes {4}".format(self.addr, self.getAbsPath(), new_account, new_passwd, full_name_arg, INSTANTIATION_DIR)
        elif basevm_type == 'aws':
            if os_type == "windows":
                command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'net user {2} {3} /ADD' ;".format(self.addr, self.getAbsPath(), new_account, new_passwd)
                command_string += "sshpass -p {0} ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {1}@{2} 'dir' ;".format(new_passwd, new_account, self.addr)
                command_string += "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'net localgroup \"Remote Desktop Users\" {2} /ADD'".format(self.addr, self.getAbsPath(), new_account)
            elif os_type in ['amazon_linux', 'amazon_linux2', 'red_hat']:
                command_string = "ssh -i TESTKEY.pem -o StrictHostKeyChecking=no ec2-user@{0} 'sudo -s' 'bash -s' < {1}{5}/users_managing/add_user.sh {2} {3} yes {4}".format(self.addr, self.getAbsPath(), new_account, new_passwd, full_name_arg, INSTANTIATION_DIR)
            elif os_type in ['ubuntu_16', 'ubuntu_18', 'ubuntu_20']:
                command_string = "ssh -i TESTKEY.pem -o StrictHostKeyChecking=no ubuntu@{0} 'sudo -s' 'bash -s' < {1}{5}/users_managing/add_user.sh {2} {3} yes {4}".format(self.addr, self.getAbsPath(), new_account, new_passwd, full_name_arg, INSTANTIATION_DIR)
        command = Command(command_string, desc)
        return command

    def modify_account(self, account, new_account, new_passwd, os_type, basevm_type):
        """Build the Command that renames and/or re-passwords `account`.
        The string "null" for new_account/new_passwd marks 'no change'."""
        sub_desc = "new name: {0} new password: {1}".format(new_account, new_passwd)
        if new_account == "null":
            sub_desc = "new password: {0}".format(new_passwd)
        elif new_passwd == "null":
            sub_desc = "new name: {0}".format(new_account)
        desc = "Modify user account '{0}': {1}".format(account, sub_desc)
        if basevm_type == 'kvm':
            if os_type == "windows.7":
                command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'net user {1} {2} ' ".format(self.addr, account, new_passwd)
            else:
                command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'bash -s' < {1}{5}/users_managing/modify_user.sh {2} {3} {4}".format(self.addr, self.getAbsPath(), account, new_account, new_passwd, INSTANTIATION_DIR)
        elif basevm_type == 'aws':
            if os_type == "windows":
                command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{0} 'net user {1} {2} ' ".format(self.addr, account, new_passwd)
            elif os_type in ['amazon_linux', 'amazon_linux2', 'red_hat']:
                command_string = "ssh -i TESTKEY.pem -o StrictHostKeyChecking=no ec2-user@{0} 'sudo -s' 'bash -s' < {1}{5}/users_managing/modify_user.sh {2} {3} {4}".format(self.addr, self.getAbsPath(), account, new_account, new_passwd, INSTANTIATION_DIR)
            elif os_type in ['ubuntu_16', 'ubuntu_18', 'ubuntu_20']:
                command_string = "ssh -i TESTKEY.pem -o StrictHostKeyChecking=no ubuntu@{0} 'sudo -s' 'bash -s' < {1}{5}/users_managing/modify_user.sh {2} {3} {4}".format(self.addr, self.getAbsPath(), account, new_account, new_passwd, INSTANTIATION_DIR)
        command = Command(command_string, desc)
        return command


#########################################################################
# Install tools from (i) package manager (apt-get, yum, etc.), (ii) source
class InstallTools(Modules):
    def __init__(self, addr, account, abspath):
        Modules.__init__(self, "InstallTools", abspath)
        self.addr = addr        # target VM address, or "host" for the local machine
        self.account = account  # account used to log in for the install

    def package_install_command(self, package_manager, tool_name, version, os_type, basevm_type):
        """Return the Command (or, for the host, the raw command string) that
        installs `tool_name` via the given package manager."""
        if self.addr != "host":
            if version == "":
                desc = "Install package '{0}'".format(tool_name)
            else:
                desc = "Install package '{0}' version {1}".format(tool_name, version)
            if basevm_type == 'kvm':
                # Handle Windows package manager
                if package_manager == "chocolatey":
                    if version == "":
                        command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{1} {2} install -y {3}".format(self.account, self.addr, package_manager, tool_name)
                    else:
                        command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@{1} {2} install -y {3} --version {4}".format(self.account, self.addr, package_manager, tool_name, version)
                # Handle other OS package managers
                else:
                    command_string = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {0}@{1} {2} install -y {3} {4}".format(self.account, self.addr, package_manager, tool_name, version)
            elif basevm_type == 'aws':
                # Handle RedHat-like package manager
                if os_type in ['amazon_linux', 'amazon_linux2', 'red_hat']:
                    command_string = "ssh -i TESTKEY.pem -o StrictHostKeyChecking=no ec2-user@{1} 'sudo -s' '{2} install -y {3} {4}'".format(self.account, self.addr, package_manager, tool_name, version)
                # Handle Ubuntu package manager
                elif os_type in ['ubuntu_16', 'ubuntu_18', 'ubuntu_20']:
                    command_string = "ssh -i TESTKEY.pem -o StrictHostKeyChecking=no ubuntu@{1} 'sudo apt-get update; sudo {2} install -y {3} {4}'".format(self.account, self.addr, package_manager, tool_name, version)
            command = Command(command_string, desc)
            return command
        else:
            # Local (host) install: return the raw shell string, as originally.
            return "sudo {0} install -y {1} {2}".format(package_manager, tool_name, version)

    def source_install_command(self, chdir, compiler):
        return "Install source '{0}' using '{1}'".format(chdir, compiler)


class EmulateAttacks(Modules):
    """Emulate attacks (currently only ssh dictionary attacks) on a target VM."""

    def __init__(self, attack_type, target_addr, target_account, number, attack_time, abspath, basevm_type):
        Modules.__init__(self, "EmulateAttacks", abspath)
        self.attack_type = attack_type
        self.target_addr = target_addr
        self.target_account = target_account
        self.number = number            # how many attack repetitions
        self.attack_time = attack_time
        self.basevm_type = basevm_type

    def command(self):
        # NOTE(review): returns None for unknown attack types -- kept as in
        # the original.
        if self.attack_type == "ssh_attack":
            desc = "Perform ssh attack on account '{0}' (repeat {1} times)".format(self.target_account, self.number)
            command_string = "{0}{5}/attacks_emulation/install_paramiko.sh; python {0}{5}/attacks_emulation/attack_paramiko_ssh.py {1} {2} {3} {4} {6}".format(self.getAbsPath(), self.target_addr, self.target_account, self.number, self.attack_time, INSTANTIATION_DIR, self.basevm_type)
            command = Command(command_string, desc)
            return command


class GenerateTrafficCaptureFiles(Modules):
    """Generate pcap traffic-capture files containing attack traces plus noise."""

    def __init__(self, virbr_addr, image_addr, image_passwd, attack_type, noise_level, file_path, file_name, abspath, cr_dir, basevm_type):
        Modules.__init__(self, "LogsPreparation", abspath)
        self.virbr_addr = virbr_addr
        self.image_addr = image_addr
        # FIX: the source contained the sanitization artifact `<PASSWORD>`;
        # restore the assignment from the constructor argument.
        self.image_passwd = image_passwd
        self.attack_type = attack_type
        self.noise_level = noise_level
        self.file_path = file_path
        self.file_name = file_name
        self.cr_dir = cr_dir
        self.basevm_type = basevm_type

    def ssh_attack(self, target_account, attack_source, number):
        desc = "Generate traffic capture file containing ssh attack trace"
        command_string = "{0}{11}/logs_preparation/pcap_sshattack_generator.sh {0} {1} {2} {3} {4} {5} {6} {7} {8} {9} {10} {12}".format(self.getAbsPath(), self.virbr_addr, target_account, self.image_addr, self.image_passwd, attack_source, number, self.noise_level, self.file_path, self.file_name, self.cr_dir, INSTANTIATION_DIR, self.basevm_type)
        command = Command(command_string, desc)
        return command

    def ddos_attack(self):
        desc = "Generate traffic capture file containing DDoS attack trace"
        command_string = "{0}{8}/logs_preparation/pcap_ddosattack_generator.sh {0} {1} {2} {3} {4} {5} {6} {7}".format(self.getAbsPath(), self.virbr_addr, self.image_addr, self.image_passwd, self.noise_level, self.file_path, self.file_name, self.cr_dir, INSTANTIATION_DIR)
        command = Command(command_string, desc)
        return command

    def dos_attack(self, attack_source, dport):
        desc = "Generate traffic capture file containing DoS attack trace"
        command_string = "{0}{10}/logs_preparation/pcap_dosattack_generator.sh {0} {1} {2} {3} {4} {5} {6} {7} {8} {9}".format(self.getAbsPath(), self.virbr_addr, self.image_addr, self.image_passwd, self.noise_level, self.file_path, self.file_name, attack_source, dport, self.cr_dir, INSTANTIATION_DIR)
        command = Command(command_string, desc)
        return command


class EmulateMalware(Modules):
    """Deploy a dummy malware process on a guest VM."""

    def __init__(self, addr, malware_name, mode, crspd_option, abspath, basevm_type, os_type):
        Modules.__init__(self, "EmulateMalware", abspath)
        self.addr = addr
        self.malware_name = malware_name
        self.mode = mode
        self.crspd_option = crspd_option
        self.basevm_type = basevm_type
        self.os_type = os_type

    def command(self):
        desc = "Deploy dummy malware"
        command_string = "{0}{5}/malware_creation/malware_launch.sh {1} {2} {3} {4} {6} {0} {7}".format(self.getAbsPath(), self.addr, self.malware_name, self.mode, self.crspd_option, INSTANTIATION_DIR, self.basevm_type, self.os_type)
        command = Command(command_string, desc)
        return command


class ModifyRuleset(Modules):
    """Apply a firewall ruleset file to a guest VM."""

    def __init__(self, image_addr, ruleset_file, abspath, os_type, basevm_type):
        Modules.__init__(self, "ModifyRuleset", abspath)
        self.image_addr = image_addr
        self.ruleset_file = ruleset_file
        self.basevm_type = basevm_type
        self.os_type = os_type

    def command(self):
        desc = "Modify firewall ruleset"
        command_string = "{0}{3}/ruleset_modification/ruleset_modify.sh {0} {1} {2} {4} {5}".format(self.getAbsPath(), self.image_addr, self.ruleset_file, INSTANTIATION_DIR, self.basevm_type, self.os_type)
        command = Command(command_string, desc)
        return command


class CopyContent(Modules):
    """Copy a file or directory from the host into a guest VM."""

    def __init__(self, src, dst, image_addr, image_passwd, abspath, os_type, basevm_type):
        Modules.__init__(self, "CopyContent", abspath)
        self.src = src
        self.dst = dst
        self.image_addr = image_addr
        # FIX: restore assignment lost to the `<PASSWORD>` sanitization artifact.
        self.image_passwd = image_passwd
        self.os_type = os_type
        self.basevm_type = basevm_type

    def command(self):
        desc = "Copy file '{0}'".format(self.src)
        if (self.os_type == "windows.7"):
            command_string = "{0}{5}/content_copy_program_run/copy_content_win.sh {1} \" {2} \" {3} {4}".format(self.getAbsPath(), self.src, self.dst, self.image_addr, self.image_passwd, INSTANTIATION_DIR)
        else:
            command_string = "{0}{4}/content_copy_program_run/copy_content.sh {1} {2} {3} {5} {6}".format(self.getAbsPath(), self.src, self.dst, self.image_addr, INSTANTIATION_DIR, self.basevm_type, self.os_type)
        command = Command(command_string, desc)
        return command


class ExecuteProgram(Modules):
    """Run a program (via an interpreter) inside a guest VM."""

    def __init__(self, program, interpreter, args, image_addr, image_passwd, log_file, abspath, os_type, comtag="-"):
        Modules.__init__(self, "ExecuteProgram", abspath)
        self.program = program
        self.interpreter = interpreter
        self.args = args
        self.image_addr = image_addr
        # FIX: restore assignment lost to the `<PASSWORD>` sanitization artifact.
        self.image_passwd = image_passwd
        self.log_file = log_file
        self.os_type = os_type
        self.comtag = comtag

    def getProgram(self):
        return self.program

    # This command_post_clone is for tasks that are required to be executed
    # after the cloning step.
    def command_post_clone(self, image_addr):
        desc = "Execute program post-cloning '{0}'".format(self.program)
        # NOTE(review): the remainder of this method (building and returning
        # the Command) is truncated in the available source; it continued with
        # a commented-out line beginning:
        # command_string = "python {0}{7}/content_copy_program_run/run_program.py \"{1}\" {2} {3} {4} {5} {6}...
-1: command = 'ALTER TABLE ' + db + ' ADD ' + column + ' varchar(3000)' else: command = 'ALTER TABLE ' + db + ' ADD ' + column + ' varchar(100)' print "save_fit| command=",command c.execute(command) except: print 'save_fit| traceback.print_exc(file=sys.stdout)=',traceback.print_exc(file=sys.stdout) for column in floatvars: stop = False for key in db_keys: if key.lower() == column.lower(): stop = True if not stop: try: command = 'ALTER TABLE ' + db + ' ADD ' + column + ' float(15)' print "save_fit| command=",command c.execute(command) except: print 'save_fit| traceback.print_exc(file=sys.stdout)=',traceback.print_exc(file=sys.stdout) # insert new observation #print 'save_fit| db_keys=',db_keys OBJNAME = dict_fit['OBJNAME'] FILTER = dict_fit['FILTER'] PPRUN = dict_fit['PPRUN'] sample = dict_fit['sample'] sample_size = dict_fit['sample_size'] command = "SELECT OBJNAME from " + db + " where OBJNAME = '" + OBJNAME + "' and FILTER = '" + FILTER + "' and PPRUN='" + PPRUN + "' and sample='" + str(sample) + "' and sample_size='" + str(sample_size) + "'" #print 'save_fit| command=',command c.execute(command) #print 'save_fit| OBJNAME,=',OBJNAME, FILTER, PPRUN results = c.fetchall() #print 'save_fit| results=',results if len(results) > 0: print 'save_fit| already added' else: command = "INSERT INTO " + db + " (OBJNAME,FILTER,PPRUN,sample,sample_size) VALUES ('" + dict_fit['OBJNAME'] + "','" + dict_fit['FILTER'] + "','" + dict_fit['PPRUN'] + "','" + dict_fit['sample'] + "','" + dict_fit['sample_size'] + "')" #print 'save_fit| ',command c.execute(command) vals = '' for key in stringvars.keys(): #print 'save_fit| key,=',key, stringvars[key] vals += ' ' + key + "='" + str(stringvars[key]) + "'," for key in floatvars.keys(): #print 'save_fit| key,=',key, floatvars[key] vals += ' ' + key + "='" + floatvars[key] + "'," vals = vals[:-1] if len(vals) > 1: command = "UPDATE " + db + " set " + vals + " WHERE OBJNAME='" + dict_fit['OBJNAME'] + "' AND FILTER='" + dict_fit['FILTER'] + "' 
AND PPRUN='" + dict_fit['PPRUN'] + "' and sample='" + str(sample) + "' and sample_size='" + str(sample_size) + "'" #print 'save_fit| command=',command c.execute(command) #print 'save_fit| vals=',vals #names = reduce(lambda x,y: x + ',' + y, [x for x in floatvars.keys()]) #values = reduce(lambda x,y: str(x) + ',' + str(y), [floatvars[x] for x in floatvars.keys()]) #names += ',' + reduce(lambda x,y: x + ',' + y, [x for x in stringvars.keys()]) #values += ',' + reduce(lambda x,y: x + ',' + y, ["'" + str(stringvars[x]) + "'" for x in stringvars.keys()]) #command = "INSERT INTO "+illum_db+" (" + names + ") VALUES (" + values + ")" #print 'save_fit| ',command #os.system(command) print 'save_fit| DONE with func\n' def sdss_coverage(SUPA,FLAT_TYPE): #intermediate #step3_run_fit '''inputs: SUPA,FLAT_TYPE returns: cov, starcat purpose: Determines if the SUPA is in the RefCat field (cov=True if it is, else cov=False). Returns `cov` and path to star RefCat catalogs calls: get_files,initialize,get_files,save_exposure called_by: match_OBJNAME''' print 'sdss_coverage| START the func. 
inputs: SUPA=',SUPA , ' FLAT_TYPE=',FLAT_TYPE dict_sdss = get_files(SUPA,FLAT_TYPE) search_params = initialize(dict_sdss['FILTER'],dict_sdss['OBJNAME']) search_params.update(dict_sdss) if search_params['CRVAL1'] is None: length(search_params['SUPA'],search_params['FLAT_TYPE']) dict_sdss = get_files(SUPA,FLAT_TYPE) search_params.update(dict_sdss) print "sdss_coverage| search_params['CRVAL1']=",search_params['CRVAL1'] crval1 = float(search_params['CRVAL1']) crval2 = float(search_params['CRVAL2']) query = 'select ra, dec from star where ra between ' + str(crval1-0.1) + ' and ' + str(crval1+0.1) + ' and dec between ' + str(crval2-0.1) + ' and ' + str(crval2+0.1) print 'sdss_coverage| query=',query import sqlcl lines = sqlcl.query(query).readlines() print 'sdss_coverage| lines=',lines if len(lines) > 1: sdss_coverage=True else: sdss_coverage=False save_exposure({'sdss_coverage':sdss_coverage},SUPA,FLAT_TYPE) db2,c = connect_except() command = "select cov from sdss_db where OBJNAME='" + dict_sdss['OBJNAME'] + "'" c.execute(command) results=c.fetchall() print 'sdss_coverage| results=',results if len(results) == 0: get_sdss_cats(dict_sdss['OBJNAME']) command = "select cov from sdss_db where OBJNAME='" + dict_sdss['OBJNAME'] + "'" c.execute(command) results=c.fetchall() print 'sdss_coverage| results=',results sdss_coverage = results[0][0] if string.find(sdss_coverage,'True') != -1: cov = True else: cov=False starcat=data_path+'PHOTOMETRY/sdssstar.cat' print "sdss_coverage| DONE with func" return cov, starcat def match_many(input_list,PPRUN,color=False): '''inputs: input_list,color=False purpose: runs ldacaddkey, associate, and make_ssc commands which do all of the matching among all of my catalogs and the RefCat catalog. 
IMPORTANT: final cat output is saved in: /gpfs/slac/kipac/fs1/u/awright/SUBARU/RXJ2129/tmp_simple_ic_RefCat/final.cat output assoc cats output are saved in files like: /gpfs/slac/kipac/fs1/u/awright/SUBARU/RXJ2129/tmp_simple_ic_RefCat//assoc/pasted_SUPA0135159_W-C-RC_1.0.filt.cat.assoc1.assd calls: make_ssc_config_few called_by: match_OBJNAME''' print 'match_many| START the func. inputs: input_list=',input_list , ' color=',color if color: make_ssc_config_colors(input_list) print 'match_many| color=',color else: make_ssc_config_few(input_list) os.system('rm -rf ' + tmpdir + '/assoc/') os.system('mkdir -p ' + tmpdir + '/assoc/') files = [] for file,prefix,rot in input_list: print 'match_many| file=',file command_ldacaddkey = '%(p_ldacaddkey)s -i %(inputcat)s -t OBJECTS -o %(outputcat)s -k A_WCS_assoc 0.0003 FLOAT "" \ B_WCS_assoc 0.0003 FLOAT "" \ Theta_assoc 0.0 FLOAT "" \ Flag_assoc 0 SHORT "" ' % {'p_ldacaddkey':progs_path['p_ldacaddkey'],'inputcat':file,'outputcat':file + '.assoc1'} print 'match_many| command_ldacaddkey=',command_ldacaddkey ooo=os.system(command_ldacaddkey) if ooo!=0: raise Exception("the line os.system(command_ldacaddkey) failed\ncommand_ldacaddkey="+command_ldacaddkey) files.append(file+'.assoc1') files_input = reduce(lambda x,y:x + ' ' + y,files) files_output = reduce(lambda x,y:x + ' ' + y,[tmpdir + '/assoc/'+re.split('\/',z)[-1] +'.assd' for z in files]) print 'match_many| files=',files print 'match_many| files_input=',files_input,' files_output=', files_output command_associate = progs_path['p_associate']+' -i %(inputcats)s -o %(outputcats)s -t OBJECTS -c %(bonn)s/photconf/fullphotom.alpha.associate' % {'inputcats':files_input,'outputcats':files_output, 'bonn':os.environ['bonn']} print 'match_many| command_associate=',command_associate ooo=os.system(command_associate) if ooo!=0: raise Exception("the line os.system(command_associate) failed\ncommand_associate="+command_associate) print 'match_many| associated' outputcat = tmpdir + 
'/final_'+PPRUN+'.cat' command_make_ssc = '%(p_make_ssc)s -i %(inputcats)s -o %(outputcat)s -t OBJECTS -c %(tmpdir)s/tmp.ssc ' % {'p_make_ssc':progs_path['p_makessc'],'tmpdir': tmpdir, 'inputcats':files_output,'outputcat':outputcat} print 'match_many| command_make_ssc=',command_make_ssc ooo=os.system(command_make_ssc) print 'match_many| adam-look program=%(p_make_ssc)s wrote outputcat=%(outputcat)s' % {'p_make_ssc':progs_path['p_makessc'],'outputcat':outputcat} if ooo!=0: raise Exception("the line os.system(command_make_ssc) failed\ncommand_make_ssc="+command_make_ssc) print 'match_many| DONE with func' #adam-watch# checkout what's going on with `make_ssc_config_few` func and make_ssc command def make_ssc_config_few(input_list): '''inputs: input_list purpose: writes key values to the file `tmpdir + '/tmp.ssc'` calls: called_by: match_many''' print 'make_ssc_config_few| START the func. inputs: input_list=',input_list ofile = tmpdir + '/tmp.cat' out = open(tmpdir + '/tmp.ssc','w') key_list = ['CHIP','Flag','MAG_AUTO','MAGERR_AUTO','MAG_APER2','MAGERR_APER2','Xpos','Ypos','Xpos_ABS','Ypos_ABS','CLASS_STAR','MaxVal','BackGr','stdMag_corr','stdMagErr_corr','stdMagColor_corr','stdMagClean_corr','stdMagStar_corr','Star_corr','ALPHA_J2000','DELTA_J2000'] keys = [] i = -1 for file_name,prefix,rot in input_list: i += 1 print 'make_ssc_config_few| file_name=',file_name print 'make_ssc_config_few| RUNNING: ldacdesc -t OBJECTS -i ' + file_name + ' > ' + ofile os.system('ldacdesc -t OBJECTS -i ' + file_name + ' > ' + ofile) file = open(ofile,'r').readlines() for line in file: if string.find(line,"Key name") != -1 : red = re.split('\.+',line) key = red[1].replace(' ','').replace('\n','') out_key = prefix + key if reduce(lambda x,y: x+ y, [string.find(out_key,k)!=-1 for k in key_list]): out.write("COL_NAME = " + out_key + '\nCOL_INPUT = ' + key + '\nCOL_MERGE = AVE_REG\nCOL_CHAN = ' + str(i) + "\n#\n") #print ' key=',key keys.append(key) out.close() print "make_ssc_config_few| 
DONE with func" def getTableInfo(PPRUN,exclude=[]): #simple #step3 '''purpose: returns a dict with keys=rotations, and values=images corresponding to those rotations returns: ROTS calls: called_by: match_OBJNAME,linear_fit''' print '\ngetTableInfo| START the func. No inputs for this func!' p = pyfits.open(tmpdir + '/final_'+PPRUN+'.cat') #final_'+PPRUN+'.cat is the output from match_many func #tbdata = p[1].data ; types = [] ; KEYS = {} ROTS = {} for column in p[1].columns: if string.find(column.name,'$') != -1: res = re.split('\$',column.name) ROT = res[0] IMAGE = res[1] if IMAGE in exclude: continue if not ROTS.has_key(ROT): ROTS[ROT] = [] if not len(filter(lambda x:x==IMAGE,ROTS[ROT])): ROTS[ROT].append(IMAGE) print "getTableInfo| DONE with func\n" return ROTS def find_config(GID): #simple '''inputs: GID returns: CONFIG_IM purpose: based on GABODSID, figure out which configuration the images are from calls: called_by: match_OBJNAME,match_OBJNAME''' #print 'find_config| START the func. inputs: GID=',GID config_list = [[575,691,'8'],[691,871,'9'],[817,1309,'10_1'],[1309,3470,'10_2'],[3470,7000,'10_3']] CONFIG_IM = None for config in config_list: if config[0] < GID < config[1]: CONFIG_IM = config[2] break if config is None: raise Exception('find_config: no configuration found for GID=%s, may need to define a new configuration for your data' % (GID) ) #print "find_config| DONE with func" return CONFIG_IM def selectGoodStars(EXPS,match,LENGTH1,LENGTH2,CONFIG,PPRUN): #intermediate #step3 '''inputs: EXPS,match,LENGTH1,LENGTH2,CONFIG returns: EXPS, star_good(=list of indicies of "good stars"), supas(=list of "good star" info dicts), totalstars(=# of "good stars"), mdn_background(=median background of exposures) purpose: find the quality detections in "final_'+PPRUN+'.cat". Here are some filtering things that happen: 1.) I exclude exposures with <300 good stars and ROTations with <2 exposures 2.) 
calculate the "mag"(mag=zp-magnitude ; w/ zp=median({magnitudes in exposure}) for each star 3.) quality stars identified by: (1) in at least two exposures w/ consistent magnitudes (2) in proper flux/magnitude range (3) within the center of the RADIAL ring portion of the image and without any flags at that point (4) with fairly certain magnitudes (Mag_err<.1) if match==True: see if there is an "external ref cat" match
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import tensorflow as tf
from sklearn.preprocessing import OneHotEncoder
import os
import csv
import gc
from sklearn.metrics import mean_squared_error
import math
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, RBF
from sklearn.gaussian_process.kernels import DotProduct, WhiteKernel
from sklearn.gaussian_process.kernels import RationalQuadratic
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedKFold
from sklearn import linear_model
from xgboost.sklearn import XGBRegressor
from sklearn.decomposition import PCA
import copy
import pyflux as pf
import datetime
# Baseline helpers
from sklearn import metrics
import matplotlib.pyplot as plt

# Silence TensorFlow info/warning logs.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

# Input data locations (hard-coded to the original author's machine).
PRICED_BITCOIN_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/pricedBitcoin2009-2018.csv"
DAILY_OCCURRENCE_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/dailyOccmatrices/"
betti0_input_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/betti_0(100).csv"
betti1_input_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/betti_1(100).csv"
DAILY_FILTERED_OCCURRENCE_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/filteredDailyOccMatrices/"

ROW = -1
COLUMN = -1
TEST_SPLIT = 0.01
ALL_YEAR_INPUT_ALLOWED = False
YEAR = 2017
START_YEAR = 2017
END_YEAR = 2018
SLIDING_BATCH_SIZE = 7


def exclude_days(train, test):
    """Split off the trailing `day` column of the train/test matrices.

    Returns (x_train, x_test, train_days, test_days); day columns are
    returned as (n, 1) arrays.
    """
    row, column = train.shape
    train_days = np.asarray(train[:, -1]).reshape(-1, 1)
    x_train = train[:, 0:column - 1]
    test_days = np.asarray(test[:, -1]).reshape(-1, 1)
    x_test = test[:, 0:column - 1]
    return x_train, x_test, train_days, test_days


def merge_data(occurrence_data, daily_occurrence_normalized_matrix, aggregation_of_previous_days_allowed):
    """Fold one day's occurrence matrix into the accumulator: element-wise
    sum when aggregation is allowed, otherwise row-wise stacking."""
    if aggregation_of_previous_days_allowed:
        if occurrence_data.size == 0:
            occurrence_data = daily_occurrence_normalized_matrix
        else:
            occurrence_data = np.add(occurrence_data, daily_occurrence_normalized_matrix)
    else:
        if occurrence_data.size == 0:
            occurrence_data = daily_occurrence_normalized_matrix
        else:
            occurrence_data = np.concatenate((occurrence_data, daily_occurrence_normalized_matrix), axis=0)
    return occurrence_data


def get_normalized_matrix_from_file(day, year, totaltx):
    """Load the occurrence matrix for (year, day), flatten to a row vector,
    and normalize by the day's total transaction count."""
    daily_occurrence_matrix_path_name = DAILY_OCCURRENCE_FILE_PATH + "occ" + str(year) + '{:03}'.format(day) + '.csv'
    daily_occurence_matrix = pd.read_csv(daily_occurrence_matrix_path_name, sep=",", header=None).values
    return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size) / totaltx


def fl_get_normalized_matrix_from_file(day, year, totaltx, n_components):
    """Load and horizontally concatenate the filtered occurrence matrices
    (filters 0,10,20,30,40) for (year, day), flattened to one row.

    NOTE(review): `totaltx` and `n_components` are accepted but unused (the
    normalization is commented out in the original) -- kept for interface
    compatibility.
    """
    daily_occurence_matrix = np.asarray([], dtype=np.float32)
    for filter_number in range(0, 50, 10):
        daily_occurrence_matrix_path_name = DAILY_FILTERED_OCCURRENCE_FILE_PATH + "occ" + str(year) + '{:03}'.format(day) + "_" + str(filter_number) + '.csv'
        daily_occurence_matrix_read = pd.read_csv(daily_occurrence_matrix_path_name, sep=",", header=None).values
        if daily_occurence_matrix.size == 0:
            daily_occurence_matrix = daily_occurence_matrix_read
        else:
            daily_occurence_matrix = np.concatenate((daily_occurence_matrix, daily_occurence_matrix_read), axis=1)
    return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)


def get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
    """Build the feature row for `current_row`: (price, totaltx) of every
    other day in the window, then the current price and day number.

    NOTE(review): indentation was lost in the source; this assumes the
    occurrence_input construction sits outside the flag check, which only
    works when is_price_of_previous_days_allowed is True -- confirm against
    the caller.
    """
    previous_price_data = np.array([], dtype=np.float32)
    occurrence_data = np.array([], dtype=np.float32)
    for index, row in priced_bitcoin.iterrows():
        if not ((row.values == current_row.values).all()):
            previous_price_data = np.append(previous_price_data, row['price'])
            previous_price_data = np.append(previous_price_data, row['totaltx'])
    if is_price_of_previous_days_allowed:
        occurrence_data = np.asarray(previous_price_data).reshape(1, -1)
    occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1, 1)), axis=1)
    occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1, 1)), axis=1)
    return occurrence_input


def betti_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
    """Like get_daily_occurrence_matrices, but appends Betti-0 and Betti-1
    features (50 each) for every previous day in the window."""
    previous_price_data = np.array([], dtype=np.float32)
    occurrence_data = np.array([], dtype=np.float32)
    for index, row in priced_bitcoin.iterrows():
        if not ((row.values == current_row.values).all()):
            previous_price_data = np.append(previous_price_data, row['price'])
            previous_price_data = np.append(previous_price_data, row['totaltx'])
            betti0_50 = read_betti(betti0_input_path, row['day'])
            occurrence_data = np.append(occurrence_data, np.asarray(betti0_50).reshape(1, -1))
            betti1_50 = read_betti(betti1_input_path, row['day'])
            occurrence_data = np.append(occurrence_data, np.asarray(betti1_50).reshape(1, -1))
    if is_price_of_previous_days_allowed:
        occurrence_data = np.concatenate((np.asarray(previous_price_data).reshape(1, -1), occurrence_data.reshape(1, -1)), axis=1)
    occurrence_input = np.concatenate((occurrence_data.reshape(1, -1), np.asarray(current_row['price']).reshape(1, 1)), axis=1)
    occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1, 1)), axis=1)
    return occurrence_input


def betti_der_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
    """Like betti_get_daily_occurrence_matrices, additionally appending the
    first differences of the Betti-0/Betti-1 series for each previous day."""
    previous_price_data = np.array([], dtype=np.float32)
    occurrence_data = np.array([], dtype=np.float32)
    for index, row in priced_bitcoin.iterrows():
        if not ((row.values == current_row.values).all()):
            previous_price_data = np.append(previous_price_data, row['price'])
            previous_price_data = np.append(previous_price_data, row['totaltx'])
            betti0_50 = read_betti(betti0_input_path, row['day'])
            occurrence_data = np.append(occurrence_data, np.asarray(betti0_50).reshape(1, -1))
            betti1_50 = read_betti(betti1_input_path, row['day'])
            occurrence_data = np.append(occurrence_data, np.asarray(betti1_50).reshape(1, -1))
            betti0_50_diff1 = betti0_50.diff(1).dropna()
            occurrence_data = np.concatenate((occurrence_data.reshape(1, -1), np.asarray(betti0_50_diff1).reshape(1, -1)), axis=1)
            betti1_50_diff1 = betti1_50.diff(1).dropna()
            occurrence_data = np.concatenate((occurrence_data, np.asarray(betti1_50_diff1).reshape(1, -1)), axis=1)
    if is_price_of_previous_days_allowed:
        occurrence_data = np.concatenate((np.asarray(previous_price_data).reshape(1, -1), occurrence_data.reshape(1, -1)), axis=1)
    occurrence_input = np.concatenate((occurrence_data.reshape(1, -1), np.asarray(current_row['price']).reshape(1, 1)), axis=1)
    occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1, 1)), axis=1)
    return occurrence_input


def fl_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
    """Like get_daily_occurrence_matrices, but uses the filtered occurrence
    matrices (fl_get_normalized_matrix_from_file) as features."""
    previous_price_data = np.array([], dtype=np.float32)
    occurrence_data = np.array([], dtype=np.float32)
    for index, row in priced_bitcoin.iterrows():
        if not ((row.values == current_row.values).all()):
            previous_price_data = np.append(previous_price_data, row['price'])
            previous_price_data = np.append(previous_price_data, row['totaltx'])
            daily_occurrence_normalized_matrix = fl_get_normalized_matrix_from_file(row['day'], row['year'], row['totaltx'], 20)
            occurrence_data = merge_data(occurrence_data, daily_occurrence_normalized_matrix, aggregation_of_previous_days_allowed)
    if is_price_of_previous_days_allowed:
        occurrence_data = np.concatenate((np.asarray(previous_price_data).reshape(1, -1), occurrence_data.reshape(1, -1)), axis=1)
    occurrence_input = np.concatenate((occurrence_data.reshape(1, -1), np.asarray(current_row['price']).reshape(1, 1)), axis=1)
    occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1, 1)), axis=1)
    return occurrence_input


def read_betti(file_path, day):
    """Return the first 50 Betti numbers for `day` (1-indexed) from file_path."""
    day = day - 1
    betti = pd.read_csv(file_path, index_col=0)
    try:
        betti_50 = betti.iloc[day, 0:50]
    except IndexError:
        # FIX: the original used a bare `except:` that printed the day and
        # then fell through to return an unbound variable (NameError).
        # Log the offending day and re-raise the real error instead.
        print("day:", day)
        raise
    return betti_50


def rf_mode(train_input, train_target, test_input, test_target):
    """Random-forest baseline: fit on train, return predictions on test."""
    param = {
        'n_estimators': 400,
    }
    rf_regression = RandomForestRegressor(**param)
    rf_regression.fit(train_input, train_target.ravel())
    rf_predicted = rf_regression.predict(test_input)
    return rf_predicted


def gp_mode(train_input, train_target, test_input, test_target):
    """Gaussian-process baseline (DotProduct + WhiteKernel)."""
    param = {
        'kernel': DotProduct() + WhiteKernel(),
        'n_restarts_optimizer': 2,
    }
    gpr = GaussianProcessRegressor(**param)
    gpr.fit(train_input, train_target.ravel())
    gp_predicted = gpr.predict(test_input)
    return gp_predicted


def enet_mode(train_input, train_target, test_input, test_target):
    """Elastic-net baseline (alpha=10, pure L1)."""
    param = {
        'alpha': 10,
        'l1_ratio': 1,
    }
    elastic = linear_model.ElasticNet(**param)
    elastic.fit(train_input, train_target.ravel())
    enet_predicted = elastic.predict(test_input)
    return enet_predicted


def xgbt_mode(train_input, train_target, test_input, test_target):
    """Gradient-boosted-trees baseline (XGBoost)."""
    param = {
        'n_estimators': 1000,
        'learning_rate': 0.01,
        'objective': 'reg:squarederror',
    }
    xgbt = XGBRegressor(**param)
    xgbt.fit(train_input, train_target.ravel())
    xgbt_predicted = xgbt.predict(test_input)
    return xgbt_predicted


def arimax_initialize_setting(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
    """Prepare the train/test split for the ARIMAX experiments.

    Relies on `preprocess_data`, which is defined elsewhere in this file.
    Uses the first 100 rows for training and the next `prediction_horizon`
    rows for testing.
    """
    data = preprocess_data(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
    train = data[0:100, :]
    test = data[100:100 + prediction_horizon, :]
    x_train, x_test, train_days, test_days = exclude_days(train, test)
    row, column = x_train.shape
    train_target = np.asarray(x_train[:, -1]).reshape(-1)
    train_input = x_train[:, 0:column - 1]
    test_target = x_test[:, -1]
    test_input = x_test[:, 0:column - 1]
    return train_input, train_target, test_input, test_target, train_days, test_days


def arimax_base_rmse_mode(train_input, train_target, test_input, test_target):
    train_input_diff_arr = np.array([])
    train_columns_name = []
    train_input_column = int(train_input.shape[1])
    # NOTE(review): the source is truncated here mid-statement ("for i in ...");
    # the remainder of this function is lost in the available chunk.
range(train_input_column): if(i%2==0): train_columns_name.append('price_' + str(i)) else: train_columns_name.append('totaltx_' + str(i)) train_input_diff = np.diff(train_input[:,i] ) if i == 0: train_input_diff_arr = train_input_diff else: train_input_diff_arr = np.dstack((train_input_diff_arr, train_input_diff)) columns_name = copy.deepcopy(train_columns_name) columns_name.append('current_price') train_target_diff = np.diff(train_target ) train_input_diff_arr = np.dstack((train_input_diff_arr, train_target_diff)) train_input_diff_arr = pd.DataFrame(train_input_diff_arr[0], columns = columns_name) model = pf.ARIMAX(data=train_input_diff_arr,formula="current_price~totaltx_5",ar=1,ma=2,integ=0) model_1 = model.fit("MLE") model_1.summary() test_input_pd = pd.DataFrame(test_input, columns = train_columns_name) test_target_pd = pd.DataFrame(test_target, columns = ['current_price']) test_input_target = pd.concat([test_input_pd, test_target_pd], axis=1) pred = model.predict(h=test_input_target.shape[0], oos_data=test_input_target, intervals=True, ) arimax_base_rmse = mean_squared_error([test_input_target.iloc[0, 6]],[(train_target[99])+pred.current_price[99]]) print("arimax_base_rmse:",arimax_base_rmse) return arimax_base_rmse def run_print_model(train_input, train_target, test_input, test_target, train_days, test_days): rf_prediction = rf_mode(train_input, train_target, test_input, test_target) xgbt_prediction = xgbt_mode(train_input, train_target, test_input, test_target) gp_prediction = gp_mode(train_input, train_target, test_input, test_target) enet_prediction = enet_mode(train_input, train_target, test_input, test_target) return rf_prediction, xgbt_prediction, gp_prediction, enet_prediction #print_results(predicted, test_target, original_log_return, predicted_log_return, cost, test_days, rmse) #return rf_base_rmse def filter_data(priced_bitcoin, window_size): end_day_of_previous_year = max(priced_bitcoin[priced_bitcoin['year'] == START_YEAR-1]["day"].values) 
start_index_of_previous_year = end_day_of_previous_year - window_size previous_year_batch = priced_bitcoin[(priced_bitcoin['year'] == START_YEAR-1) & (priced_bitcoin['day'] > start_index_of_previous_year)] input_batch = priced_bitcoin[(priced_bitcoin['year'] >= START_YEAR) & (priced_bitcoin['year'] <= END_YEAR)] filtered_data = previous_year_batch.append(input_batch) filtered_data.insert(0, 'index', range(0, len(filtered_data))) filtered_data = filtered_data.reset_index(drop=True) return filtered_data def preprocess_data(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed): priced_bitcoin = pd.read_csv(PRICED_BITCOIN_FILE_PATH, sep=",") if(ALL_YEAR_INPUT_ALLOWED): pass else: #priced_bitcoin = filter_data(priced_bitcoin, window_size) priced_bitcoin = priced_bitcoin[priced_bitcoin['year']==YEAR].reset_index(drop=True) #print("priced_bitcoin:",priced_bitcoin) daily_occurrence_input = np.array([],dtype=np.float32) temp = np.array([], dtype=np.float32) for current_index, current_row in priced_bitcoin.iterrows(): if(current_index<(window_size+prediction_horizon-1)): pass else: start_index = current_index - (window_size + prediction_horizon) + 1 end_index = current_index - prediction_horizon if(dataset_model=="base"): temp = get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed) elif(dataset_model=="betti"): temp = betti_get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed) elif(dataset_model=="fl"): temp = fl_get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed) elif(dataset_model=="betti_der"): temp = betti_der_get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, 
is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed) else: sys.exit("Dataset model support only baseline, betti, fl and betti_der!") if(daily_occurrence_input.size == 0): daily_occurrence_input = temp else: daily_occurrence_input = np.concatenate((daily_occurrence_input, temp), axis=0) return daily_occurrence_input def initialize_setting( features, price, day, test_start, dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed): train_target = price[test_start : test_start + 100] train_days = day[test_start : test_start + 100] pca_features = features[test_start : test_start + 100+1, :] train_input = pca_features[0 : 100, :] #x_test = x_test.reshape(-1,1) test_target = price[test_start + 100] test_days = day[test_start + 100] #print(pca_features, pca_features.shape) test_input = pca_features[ 100, :].reshape(1, -1) #print(pca_features, pca_features.shape) #print("***"*20) #print(train_input,train_input.shape, train_target, test_input,test_input.shape, test_target, train_days, test_days) return train_input, train_target, test_input, test_target, train_days, test_days def split_process(data,dataset_model,window_size): baseline_features = data[:, 0:window_size*2] fl_features = data[: , window_size*2:-2] price = data[:, -2] day = data[:,-1] return baseline_features, fl_features, price, day parameter_dict = {#0: dict({'is_price_of_previous_days_allowed':True, 'aggregation_of_previous_days_allowed':True})} 1: dict({'is_price_of_previous_days_allowed':True, 'aggregation_of_previous_days_allowed':False})} for step in parameter_dict: t = datetime.datetime.now() dir_name = t.strftime('%m_%d___%H_%M') drive_path = "drive/MyDrive/Colab Notebooks/ChainNet/processed_data/"+dir_name if not os.path.exists(dir_name): os.makedirs(drive_path) print("drive_path: ", drive_path) result_path = drive_path + "/" names = locals() gc.collect() evalParameter = parameter_dict.get(step) 
is_price_of_previous_days_allowed = evalParameter.get('is_price_of_previous_days_allowed') aggregation_of_previous_days_allowed = evalParameter.get('aggregation_of_previous_days_allowed') print("IS_PRICE_OF_PREVIOUS_DAYS_ALLOWED: ", is_price_of_previous_days_allowed) print("AGGREGATION_OF_PREVIOUS_DAYS_ALLOWED: ", aggregation_of_previous_days_allowed) window_size_array = [3, 5, 7] horizon_size_array = [1, 2, 5, 7, 10, 15, 20, 25, 30] dataset_model_array = ["base","betti", "betti_der","fl"] for dataset_model in dataset_model_array: print('dataset_model: ', dataset_model) for window_size in window_size_array: print('WINDOW_SIZE: ', window_size) for prediction_horizon in horizon_size_array: data = preprocess_data(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed) baseline_features, fl_features, price, day = split_process(data, dataset_model, window_size) if dataset_model == "base": #print("features: ", baseline_features, fl_features, price, day) features = baseline_features #print("pca features: ", features, features.shape) else: #print("features: ", baseline_features, fl_features, price, day) pca = PCA(n_components = 20) pca.fit(fl_features) fl_features =
# NOTE(review): interior of a PanDA/iDDS work class -- every ``def`` below is a
# method (takes ``self``); the class header lies outside this chunk.  The
# collapsed one-line extraction has been re-laid-out; code tokens are unchanged,
# only comments/docstrings were added.

        # Tail of a method whose beginning is cut off above this chunk:
        # stamps the submission time once a task id is known.
        if task_id:
            proc.submitted_at = datetime.datetime.utcnow()

    def get_panda_task_id(self, processing):
        """Find this work's JEDI task id by matching self.task_name among tasks
        submitted in the last ten hours; store it as the processing's workload_id."""
        from pandatools import Client
        # look back ten hours for tasks of our type
        start_time = datetime.datetime.utcnow() - datetime.timedelta(hours=10)
        start_time = start_time.strftime('%Y-%m-%d %H:%M:%S')
        status, results = Client.getJobIDsJediTasksInTimeRange(start_time, task_type=self.task_type, verbose=False)
        if status != 0:
            self.logger.warn("Error to poll latest tasks in last ten hours: %s, %s" % (status, results))
            return None

        proc = processing['processing_metadata']['processing']
        task_id = None
        for req_id in results:
            task_name = results[req_id]['taskName']
            if proc.workload_id is None and task_name == self.task_name:
                task_id = results[req_id]['jediTaskID']
                # processing['processing_metadata']['task_id'] = task_id
                # processing['processing_metadata']['workload_id'] = task_id
                proc.workload_id = task_id
                if task_id:
                    proc.submitted_at = datetime.datetime.utcnow()
        return task_id

    def poll_panda_task_status(self, processing):
        """Return the raw PanDA task status string, 'failed' when the status
        call errs, or None when no processing metadata is attached."""
        if 'processing' in processing['processing_metadata']:
            from pandatools import Client
            proc = processing['processing_metadata']['processing']
            status, task_status = Client.getTaskStatus(proc.workload_id)
            if status == 0:
                return task_status
            else:
                return 'failed'
        return None

    def get_processing_status_from_panda_status(self, task_status):
        """Map a PanDA task status string onto an iDDS ProcessingStatus."""
        if task_status in ['registered', 'defined', 'assigning']:
            processing_status = ProcessingStatus.Submitting
        elif task_status in ['ready', 'pending', 'scouting', 'scouted', 'prepared', 'topreprocess', 'preprocessing']:
            processing_status = ProcessingStatus.Submitted
        elif task_status in ['running', 'toretry', 'toincexec', 'throttled']:
            processing_status = ProcessingStatus.Running
        elif task_status in ['done']:
            processing_status = ProcessingStatus.Finished
        elif task_status in ['finished', 'paused']:
            # finished, finishing, waiting it to be done
            processing_status = ProcessingStatus.SubFinished
        elif task_status in ['failed', 'aborted', 'broken', 'exhausted']:
            # aborting, tobroken
            processing_status = ProcessingStatus.Failed
        else:
            # finished, finishing, aborting, topreprocess, preprocessing, tobroken
            # toretry, toincexec, rerefine, paused, throttled, passed
            processing_status = ProcessingStatus.Submitted
        return processing_status

    def is_all_contents_terminated_and_with_missing(self, input_output_maps):
        """True only when every output content is in a terminal-failure state
        and at least one of them is Missing."""
        with_missing = False
        for map_id in input_output_maps:
            outputs = input_output_maps[map_id]['outputs']
            for content in outputs:
                if not content['status'] in [ContentStatus.Failed, ContentStatus.FinalFailed,
                                             ContentStatus.Lost, ContentStatus.Deleted,
                                             ContentStatus.Missing]:
                    return False
                if not with_missing and content['status'] in [ContentStatus.Missing]:
                    with_missing = True
        if with_missing:
            return True
        return False

    def reactive_contents(self, input_output_maps):
        """For every map whose outputs are not all Available, reset its inputs,
        outputs and unfinished input dependencies back to New; return the updates."""
        updated_contents = []
        for map_id in input_output_maps:
            inputs = input_output_maps[map_id]['inputs'] if 'inputs' in input_output_maps[map_id] else []
            outputs = input_output_maps[map_id]['outputs'] if 'outputs' in input_output_maps[map_id] else []
            inputs_dependency = input_output_maps[map_id]['inputs_dependency'] if 'inputs_dependency' in input_output_maps[map_id] else []

            all_outputs_available = True
            for content in outputs:
                if not content['status'] in [ContentStatus.Available]:
                    all_outputs_available = False
                    break

            if not all_outputs_available:
                for content in inputs + outputs:
                    update_content = {'content_id': content['content_id'],
                                      'status': ContentStatus.New,
                                      'substatus': ContentStatus.New}
                    updated_contents.append(update_content)
                for content in inputs_dependency:
                    if content['status'] not in [ContentStatus.Available]:
                        update_content = {'content_id': content['content_id'],
                                          'status': ContentStatus.New,
                                          'substatus': ContentStatus.New}
                        updated_contents.append(update_content)
        return updated_contents

    def sort_panda_jobids(self, input_output_maps):
        """Group registered panda ids by content status; also return the map ids
        that have no panda id yet and a panda_id -> map_id index."""
        panda_job_ids = {}
        panda_id_to_map_ids = {}
        map_id_without_panda_ids = []
        for map_id in input_output_maps:
            outputs = input_output_maps[map_id]['outputs']
            for content in outputs:
                if content['status'] not in panda_job_ids:
                    panda_job_ids[content['status']] = []

                if 'panda_id' in content['content_metadata']:
                    panda_job_ids[content['status']].append(content['content_metadata']['panda_id'])
                    panda_id_to_map_ids[content['content_metadata']['panda_id']] = map_id
                else:
                    map_id_without_panda_ids.append(map_id)

        return panda_job_ids, map_id_without_panda_ids, panda_id_to_map_ids

    def get_registered_panda_jobids(self, input_output_maps):
        """Partition registered panda ids into terminated (finished+failed) and
        unterminated lists; pass through the unregistered map ids and index."""
        panda_job_ids, map_id_without_panda_ids, panda_id_to_map_ids = self.sort_panda_jobids(input_output_maps)
        unterminated_panda_ids = []
        finished_panda_ids = []
        failed_panda_ids = []
        for key in panda_job_ids:
            if key in [ContentStatus.Available]:
                finished_panda_ids += panda_job_ids[key]
            elif key in [ContentStatus.Failed, ContentStatus.FinalFailed,
                         ContentStatus.Lost, ContentStatus.Deleted, ContentStatus.Missing]:
                failed_panda_ids += panda_job_ids[key]
            else:
                unterminated_panda_ids += panda_job_ids[key]
        return finished_panda_ids + failed_panda_ids, unterminated_panda_ids, map_id_without_panda_ids, panda_id_to_map_ids

    def get_map_id_from_input(self, input_output_maps, input_file):
        """Return the newest map id whose inputs contain input_file (search in
        reverse so later maps win), or None."""
        map_keys = list(input_output_maps.keys())
        map_keys.reverse()
        for map_id in map_keys:
            inputs = input_output_maps[map_id]['inputs']
            # outputs = input_output_maps[map_id]['outputs']
            for content in inputs:
                if content['name'] == input_file:
                    return map_id
        return None

    def get_content_status_from_panda_status(self, job_info):
        """Translate a panda job status into a ContentStatus; failures become
        FinalFailed only after both panda's and our own attempt limits are hit."""
        jobstatus = job_info.jobStatus
        if jobstatus in ['finished', 'merging']:
            return ContentStatus.Available
        elif jobstatus in ['failed', 'closed', 'cancelled', 'lost', 'broken', 'missing']:
            attempt_nr = int(job_info.attemptNr) if job_info.attemptNr else 0
            max_attempt = int(job_info.maxAttempt) if job_info.maxAttempt else 0
            if (attempt_nr >= max_attempt) and (attempt_nr >= self.maxAttempt):
                return ContentStatus.FinalFailed
            else:
                return ContentStatus.Failed
        else:
            return ContentStatus.Processing

    def get_update_contents_from_map_id(self, map_id, input_output_maps, job_info):
        """Refresh substatus and panda_id of map_id's outputs from job_info,
        keeping a history of superseded panda ids; return the touched contents."""
        outputs = input_output_maps[map_id]['outputs']
        update_contents = []
        for content in outputs:
            status = self.get_content_status_from_panda_status(job_info)
            content['substatus'] = status

            if 'panda_id' in content['content_metadata'] and content['content_metadata']['panda_id']:
                # if content['content_metadata']['panda_id'] != job_info.PandaID:
                if content['content_metadata']['panda_id'] < job_info.PandaID:
                    # new panda id is the bigger one.
                    if 'old_panda_id' not in content['content_metadata']:
                        content['content_metadata']['old_panda_id'] = []
                    if content['content_metadata']['panda_id'] not in content['content_metadata']['old_panda_id']:
                        content['content_metadata']['old_panda_id'].append(content['content_metadata']['panda_id'])

            content['content_metadata']['panda_id'] = job_info.PandaID

            update_contents.append(content)
        return update_contents

    def map_panda_ids(self, unregistered_job_ids, input_output_maps):
        """Poll panda for unregistered jobs in chunks of 2000, match each job's
        pseudo_input file back to a map id and collect content updates."""
        self.logger.debug("map_panda_ids, unregistered_job_ids[:10]: %s" % str(unregistered_job_ids[:10]))
        from pandatools import Client

        # updated_map_ids = []
        full_update_contents = []
        chunksize = 2000
        chunks = [unregistered_job_ids[i:i + chunksize] for i in range(0, len(unregistered_job_ids), chunksize)]
        for chunk in chunks:
            jobs_list = Client.getJobStatus(chunk, verbose=0)[1]
            for job_info in jobs_list:
                if job_info and job_info.Files and len(job_info.Files) > 0:
                    for job_file in job_info.Files:
                        # if job_file.type in ['log']:
                        if job_file.type not in ['pseudo_input']:
                            continue
                        if ':' in job_file.lfn:
                            pos = job_file.lfn.find(":")
                            input_file = job_file.lfn[pos + 1:]
                            # input_file = job_file.lfn.split(':')[1]
                        else:
                            input_file = job_file.lfn
                        map_id = self.get_map_id_from_input(input_output_maps, input_file)
                        if map_id:
                            update_contents = self.get_update_contents_from_map_id(map_id, input_output_maps, job_info)
                            full_update_contents += update_contents
        return full_update_contents

    def get_status_changed_contents(self, unterminated_job_ids, input_output_maps, panda_id_to_map_ids):
        """Poll panda for still-running registered jobs (chunks of 2000) and
        collect content updates via the panda_id -> map_id index."""
        self.logger.debug("get_status_changed_contents, unterminated_job_ids[:10]: %s" % str(unterminated_job_ids[:10]))
        from pandatools import Client

        full_update_contents = []
        chunksize = 2000
        chunks = [unterminated_job_ids[i:i + chunksize] for i in range(0, len(unterminated_job_ids), chunksize)]
        for chunk in chunks:
            jobs_list = Client.getJobStatus(chunk, verbose=0)[1]
            for job_info in jobs_list:
                panda_id = job_info.PandaID
                map_id = panda_id_to_map_ids[panda_id]
                update_contents = self.get_update_contents_from_map_id(map_id, input_output_maps, job_info)
                full_update_contents += update_contents
        return full_update_contents

    def get_final_update_contents(self, input_output_maps):
        """Force every non-terminal output content to FinalFailed (remembering
        its previous substatus) when the task is being wrapped up."""
        update_contents = []
        for map_id in input_output_maps:
            outputs = input_output_maps[map_id]['outputs'] if 'outputs' in input_output_maps[map_id] else []
            for content in outputs:
                if (content['substatus'] not in [ContentStatus.Available, ContentStatus.FakeAvailable, ContentStatus.FinalFailed]):
                    content['content_metadata']['old_final_status'] = content['substatus']
                    content['substatus'] = ContentStatus.FinalFailed
                    update_contents.append(content)
        return update_contents

    def poll_panda_task_old(self, processing=None, input_output_maps=None):
        """Poll a task's status and its jobs, reconciling panda job states with
        iDDS contents.  Returns (ProcessingStatus, content updates); on any
        exception returns (Submitting, []) so the poll is retried later."""
        task_id = None
        try:
            from pandatools import Client

            jobs_ids = None
            if processing:
                proc = processing['processing_metadata']['processing']
                task_id = proc.workload_id
                if task_id is None:
                    task_id = self.get_panda_task_id(processing)

                if task_id:
                    # ret_ids = Client.getPandaIDsWithTaskID(task_id, verbose=False)
                    self.logger.debug("poll_panda_task, task_id: %s" % str(task_id))
                    task_info = Client.getJediTaskDetails({'jediTaskID': task_id}, True, True, verbose=False)
                    self.logger.debug("poll_panda_task, task_info[0]: %s" % str(task_info[0]))
                    if task_info[0] != 0:
                        self.logger.warn("poll_panda_task %s, error getting task status, task_info: %s" % (task_id, str(task_info)))
                        return ProcessingStatus.Submitting, {}

                    task_info = task_info[1]

                    processing_status = self.get_processing_status_from_panda_status(task_info["status"])

                    if processing_status in [ProcessingStatus.SubFinished]:
                        if self.retry_number < self.num_retries:
                            self.reactivate_processing(processing)
                            processing_status = ProcessingStatus.Submitted
                            self.retry_number += 1

                    jobs_ids = task_info['PandaID']
                    ret_get_registered_panda_jobids = self.get_registered_panda_jobids(input_output_maps)
                    terminated_job_ids, unterminated_job_ids, map_id_without_panda_ids, panda_id_to_map_ids = ret_get_registered_panda_jobids

                    registered_job_ids = terminated_job_ids + unterminated_job_ids
                    unregistered_job_ids = []
                    for job_id in jobs_ids:
                        if job_id not in registered_job_ids:
                            unregistered_job_ids.append(job_id)

                    map_update_contents = self.map_panda_ids(unregistered_job_ids, input_output_maps)
                    status_changed_update_contents = self.get_status_changed_contents(unterminated_job_ids, input_output_maps, panda_id_to_map_ids)
                    final_update_contents = []

                    if processing_status in [ProcessingStatus.SubFinished, ProcessingStatus.Finished, ProcessingStatus.Failed]:
                        if (unregistered_job_ids or unterminated_job_ids):
                            # there are still polling contents, should not terminate the task.
                            log_warn = "Processing (%s) with panda id (%s) is %s, however there are still unregistered_job_ids(%s) or unterminated_job_ids(%s)" % (processing['processing_id'],
                                                                                                                                                                  task_id,
                                                                                                                                                                  processing_status,
                                                                                                                                                                  str(unregistered_job_ids),
                                                                                                                                                                  str(unterminated_job_ids))
                            log_warn = log_warn + ". Keep the processing status as running now."
                            self.logger.warn(log_warn)
                            processing_status = ProcessingStatus.Running
                        else:
                            final_update_contents = self.get_final_update_contents(input_output_maps)
                            if final_update_contents:
                                processing_status = ProcessingStatus.Running
                    return processing_status, map_update_contents + status_changed_update_contents + final_update_contents
            else:
                return ProcessingStatus.Failed, {}
        except Exception as ex:
            msg = "Failed to check the processing (%s) status: %s" % (str(processing['processing_id']), str(ex))
            self.logger.error(msg)
            self.logger.error(ex)
            self.logger.error(traceback.format_exc())
            # raise exceptions.IDDSException(msg)
        return ProcessingStatus.Submitting, []

    def poll_panda_jobs(self, job_ids):
        """Poll panda job statuses in configurable chunks and map each job's
        pseudo_input file name to {'panda_id', 'status'}."""
        job_ids = list(job_ids)
        self.logger.debug("poll_panda_jobs, poll_panda_jobs_chunk_size: %s, job_ids[:10]: %s" % (self.poll_panda_jobs_chunk_size, str(job_ids[:10])))
        from pandatools import Client

        # updated_map_ids = []
        inputname_jobid_map = {}
        chunksize = self.poll_panda_jobs_chunk_size
        chunks = [job_ids[i:i + chunksize] for i in range(0, len(job_ids), chunksize)]
        for chunk in chunks:
            jobs_list = Client.getJobStatus(chunk, verbose=0)[1]
            self.logger.debug("poll_panda_jobs, input jobs: %s, output_jobs: %s" % (len(chunk), len(jobs_list)))
            for job_info in jobs_list:
                job_status = self.get_content_status_from_panda_status(job_info)
                if job_info and job_info.Files and len(job_info.Files) > 0:
                    for job_file in job_info.Files:
                        # if job_file.type in ['log']:
                        if job_file.type not in ['pseudo_input']:
                            continue
                        if ':' in job_file.lfn:
                            pos = job_file.lfn.find(":")
                            input_file = job_file.lfn[pos + 1:]
                            # input_file = job_file.lfn.split(':')[1]
                        else:
                            input_file = job_file.lfn
                        inputname_jobid_map[input_file] = {'panda_id': job_info.PandaID, 'status': job_status}
        return inputname_jobid_map

    def get_job_maps(self, input_output_maps):
        """Return (terminated panda ids, input name -> {map_id, outputs-summary})
        built from the current input/output maps."""
        inputname_mapid_map = {}
        finished_jobs, failed_jobs = [], []
        for map_id in input_output_maps:
            inputs = input_output_maps[map_id]['inputs']
            outputs = input_output_maps[map_id]['outputs']
            outputs_short = []
            for content in outputs:
                outputs_short.append({'content_id': content['content_id'],
                                      'status': content['status'],
                                      'substatus': content['substatus'],
                                      'content_metadata': content['content_metadata']})
                if content['status'] in [ContentStatus.Available]:
                    if 'panda_id' in content['content_metadata']:
                        finished_jobs.append(content['content_metadata']['panda_id'])
                elif content['status'] in [ContentStatus.Failed, ContentStatus.FinalFailed,
                                           ContentStatus.Lost, ContentStatus.Deleted,
                                           ContentStatus.Missing]:
                    if 'panda_id' in content['content_metadata']:
                        failed_jobs.append(content['content_metadata']['panda_id'])
            for content in inputs:
                inputname_mapid_map[content['name']] = {'map_id': map_id, 'outputs': outputs_short}
        return finished_jobs + failed_jobs, inputname_mapid_map

    def get_update_contents(self, inputnames, inputname_mapid_map, inputname_jobid_map):
        """Sync each input's output contents' substatus with the polled panda
        job status for that input."""
        self.logger.debug("get_update_contents, inputnames[:5]: %s" % str(inputnames[:5]))
        update_contents = []
        num_updated_contents, num_unupdated_contents = 0, 0
        for inputname in inputnames:
            panda_id_status = inputname_jobid_map[inputname]
            panda_id = panda_id_status['panda_id']
            panda_status = panda_id_status['status']
            map_id_contents = inputname_mapid_map[inputname]
            contents = map_id_contents['outputs']
            for content in contents:
                if content['substatus'] != panda_status:
                    if 'panda_id' in content['content_metadata'] and content['content_metadata']['panda_id']:
                        # if content['content_metadata']['panda_id'] != job_info.PandaID:
                        if content['content_metadata']['panda_id'] < panda_id:
                            # new panda
                            # NOTE(review): the source chunk is truncated here;
                            # the remainder of this method lies outside this view.
# <reponame>HPCCS/PARIS   (extraction artifact: repository tag, not code)
# NOTE(review): Python 2 script (print statements; imports the removed
# sklearn.cross_validation module).  Re-laid-out from a collapsed one-line
# extraction; code tokens unchanged, only comments added.
import pdb
import csv
import numpy as np
from numpy import linalg as LA
from scipy import stats
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.kernel_ridge import KernelRidge
from sklearn.neural_network import MLPRegressor
from sklearn import preprocessing
from sklearn.neighbors import RadiusNeighborsRegressor
from sklearn.model_selection import cross_validate
from sklearn.cross_validation import cross_val_score, cross_val_predict
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.decomposition import PCA
from sklearn.feature_selection import RFECV
from sklearn import metrics
from sklearn.svm import SVR,NuSVR,LinearSVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import GradientBoostingRegressor,RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.preprocessing import StandardScaler

# #############################################################################
# reading in X, Y
# labels and feature matrix for the "small program" benchmark set
y=np.loadtxt(open("small_program_label.csv", "rb"), delimiter=",", skiprows=0, dtype="float")
#y=np.loadtxt(open("y_labels_cut.csv", "rb"), delimiter=",", skiprows=0, dtype="float")
#x=np.loadtxt(open("x_features_cut2.csv", "rb"), delimiter=",", skiprows=0,dtype="float")
#x=np.loadtxt(open("small_program_features_90_unif.csv", "rb"), delimiter=",", skiprows=0,dtype="float")
#x=np.loadtxt(open("small_program_features_90.csv", "rb"), delimiter=",", skiprows=0,dtype="float")
x=np.loadtxt(open("small_program_features.csv", "rb"), delimiter=",", skiprows=0,dtype="float")
#pdb.set_trace()

# feature selection (disabled experiments)
#lda=LinearDiscriminantAnalysis(n_components=15)
#x_lda=lda.fit(x,y).transform(x)
#pca=PCA(n_components=15)
#x=pca.fit(x).transform(x)

# held-out HPC benchmark labels/features used as an extra test set below
y_hpc=np.loadtxt(open("small_program_label_hpc.csv", "rb"), delimiter=",", skiprows=0, dtype="float")
#x_hpc=np.loadtxt(open("x_hpc_features_cut.csv", "rb"), delimiter=",", skiprows=0,dtype="float")
x_hpc=np.loadtxt(open("small_program_features_hpc.csv", "rb"), delimiter=",", skiprows=0,dtype="float")
##x_hpc=np.loadtxt(open("small_program_features_hpc_unif.csv", "rb"), delimiter=",", skiprows=0,dtype="float")
#pca1=PCA(n_components=20)
#x_hpc=pca1.fit(x_hpc).transform(x_hpc)

# z_scaler is created but scaling is currently disabled
z_scaler = StandardScaler()
#x=z_scaler.fit_transform(x)
#x_hpc=z_scaler.fit_transform(x_hpc)

# #############################################################################
# Fitting Regressions and an Original Linear Regression for comparison
# Least Square Linear Regression
#'''
#ridgereg=linear_model.Ridge(alpha=.5)
#ridgereg.fit(x,y)
plt.figure(1)
print "Least Square Linear Regression is On ..."
oreg=linear_model.LinearRegression()
#selector=RFECV(oreg,step=1,cv=5)
#selector=selector.fit(x,y)
#print selector.support_
#print selector.ranking_
#print selector.n_features_
#print selector.grid_scores_
#print selector.estimator
oreg.fit(x,y)
print oreg.coef_
# in-sample R2 on training data and on the HPC hold-out
osc=oreg.score(x,y)
osc_hpc=oreg.score(x_hpc,y_hpc)
print "R2 score is: ", osc
print "R2 score on hpc is: ", osc_hpc
# 10-fold cross-validated scores and predictions
o_cv_res=cross_val_score(oreg,x,y,cv=10)
print o_cv_res
predictions = cross_val_predict(oreg,x,y,cv=10)
#np.clip(predictions,0,1,out=predictions)
print predictions
plt.scatter(y, predictions)
plt.title("Least Square Linear")
plt.xlabel("True Values")
plt.ylabel("Predictions")
plt.show()
accuracy = metrics.r2_score(y, predictions)
print "Cross-Predicted Accuracy:", accuracy
# mean absolute relative error (assumes y has no zeros)
err=np.mean(abs((predictions-y)/y))
print "Cross-Predicted Error: ", err
predictions_h=oreg.predict(x_hpc)
err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc))
print "Cross-Predicted Error for hpc: ", err_h
print "Least Square Linear Regression is Off ..."
print "\n\n"

# Ridge Regression
plt.figure(2)
print "Ridge Regression is On ..."
rr=linear_model.Ridge(alpha=0.5)
rr.fit(x,y)
print rr.coef_
rrsc=rr.score(x,y)
rrsc_hpc=rr.score(x_hpc,y_hpc)
print "R2 score is: ", rrsc
print "R2 score on hpc is: ", rrsc_hpc
rr_cv_res=cross_val_score(rr,x,y,cv=10)
print rr_cv_res
predictions = cross_val_predict(rr,x,y,cv=10)
#np.clip(predictions,0,1,out=predictions)
plt.scatter(y, predictions)
plt.title("Ridge Regression")
plt.xlabel("True Values")
plt.ylabel("Predictions")
plt.show()
accuracy = metrics.r2_score(y, predictions)
print "Cross-Predicted Accuracy:", accuracy
err=np.mean(abs((predictions-y)/y))
print "Cross-Predicted Error: ", err
predictions_h=rr.predict(x_hpc)
err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc))
print "Cross-Predicted Error for hpc: ", err_h
print "Ridge Regression is Off ..."
print "\n\n"

# Support Vector Regression *3*
#svr=SVR(kernel='linear')
#selector=RFECV(svr,step=1,cv=5)
#selector=selector.fit(x,y)
#print selector.support_
#print selector.ranking_
#pdb.set_trace()
#plt.figure(0)
print "SV Regression is On ..."
# NOTE(review): the source chunk is truncated here; the SVR section continues
# outside this view.
C_range = np.arange(0, 1, 1) gamma_range = np.logspace(-1, 0, 2) degree_range = np.arange(1,2,1) nu_range=np.arange(0.01,1,0.01) for C in [0]: for nu in [3]: # for degree in degree_range: # print "**************************************" # print "C: ", C # print "gamma: ", gamma print "nu: ", nu #sv=SVR(kernel='rbf', C=120, gamma=1e-11) #sv=NuSVR(nu=0.5, kernel='rbf', C=120, gamma=1e-11) #sv=LinearSVR(C=1000, max_iter=1500,loss='epsilon_insensitive',epsilon=0.1) # #sv=SVR(kernel='sigmoid', C=C, gamma=gamma) sv=SVR() sv.fit(x,y) #print sv.coef_ svsc=sv.score(x,y) # svsc_hpc=sv.score(x_hpc,y_hpc) print "R2 score is: ", svsc # print "R2 score on hpc is: ", svsc_hpc sv_cv_res=cross_val_score(sv,x,y,cv=10) #print sv_cv_res predictions = cross_val_predict(sv,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) #plt.scatter(y, predictions) #plt.title("SV Regression") #plt.xlabel("True Values") #plt.ylabel("Predictions") #plt.show() predictions_h=sv.predict(x_hpc) #np.clip(predictions_h,0,1,out=predictions_h) # accuracy = metrics.r2_score(y, predictions) # print "Cross-Predicted Accuracy:", accuracy err=np.mean(abs((predictions-y)/y)) err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error: ", err print "Cross-Predicted Error for hpc: ", err_h print "SV Regression is Off ..." print "\n\n" # Decision Tree Regressor *2* #plt.figure(11) print "Decision Tree Regression is On ..." 
lrate = np.arange(0.015, 0.03, 0.001) al = np.arange(0.1,0.9,0.1) n_est = np.arange(90, 120, 1) depth=np.arange(1,8,1) for max_depth in [1]: for learning_rate in [1]: print "**************************************" # print "lrate: ", learning_rate # print "alpha: ", alpha print "depth: ", max_depth #dt=DecisionTreeRegressor(max_depth=2,criterion="friedman_mse",splitter="random",min_samples_split=4) dt=DecisionTreeRegressor() dt.fit(x,y) #print gb.coef_ dtsc=dt.score(x,y) print "R2 score is: ", dtsc dtsc_hpc=dt.score(x_hpc,y_hpc) print "R2 score on hpc is: ", dtsc_hpc dt_cv_res=cross_val_score(dt,x,y,cv=10) print dt_cv_res predictions = cross_val_predict(dt,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) # plt.scatter(y, predictions) # plt.title("GradientBoostingRegressor") # plt.xlabel("True Values") # plt.ylabel("Predictions") # plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy predictions_h=dt.predict(x_hpc) #np.clip(predictions_h,0,1,out=predictions_h) err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print "Gradient Boosting Regression is Off ..." print "\n\n" # Passive Aggressive Regressor #plt.figure(12) print "Passive Aggressive Regression is On ..." 
lrate = np.arange(0.015, 0.03, 0.001) al = np.arange(0.1,0.9,0.1) n_est = np.arange(90, 120, 1) depth=np.arange(1,8,1) for max_depth in [1]: for learning_rate in [1]: print "**************************************" # print "lrate: ", learning_rate # print "alpha: ", alpha # print "depth: ", max_depth #pa=linear_model.PassiveAggressiveRegressor(C=0.01,loss="epsilon_insensitive",max_iter=2000) pa=linear_model.PassiveAggressiveRegressor() pa.fit(x,y) #print pa.coef_ pasc=pa.score(x,y) print "R2 score is: ", pasc pasc_hpc=pa.score(x_hpc,y_hpc) print "R2 score on hpc is: ", pasc_hpc pa_cv_res=cross_val_score(pa,x,y,cv=10) print pa_cv_res predictions = cross_val_predict(pa,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) # plt.scatter(y, predictions) # plt.title("GradientBoostingRegressor") # plt.xlabel("True Values") # plt.ylabel("Predictions") # plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy predictions_h=pa.predict(x_hpc) #np.clip(predictions_h,0,1,out=predictions_h) err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print "Passive Aggressive Regression is Off ..." print "\n\n" #''' and None # Gradient Boosting Regressor *1* #plt.figure(10) print "Gradient Boosting Regression is On ..." 
lrate = np.arange(0.019, 0.031, 0.001) al = np.arange(0.1,0.9,0.01) n_est = np.arange(90, 125, 1) for n_estimators in [0]: for alpha in [0]: print "**************************************" # print "lrate: ", learning_rate print "alpha: ", alpha print "n_est: ", n_estimators #gb=GradientBoostingRegressor(max_depth=1,learning_rate=0.04,n_estimators=100) #gb=GradientBoostingRegressor(loss='lad',max_depth=1,learning_rate=0.05,n_estimators=440) # -x- gb=GradientBoostingRegressor(loss='huber',max_depth=1,learning_rate=0.45,n_estimators=200,alpha=alpha) #gb=GradientBoostingRegressor(loss='quantile',max_depth=1,learning_rate=0.028,n_estimators=109,alpha=0.36,criterion="friedman_mse") #gb=GradientBoostingRegressor(loss='quantile',max_depth=1,learning_rate=0.028,n_estimators=109,alpha=0.36,criterion="friedman_mse",subsample=0.6) gb=GradientBoostingRegressor() gb.fit(x,y) gbsc=gb.score(x,y) print "R2 score is: ", gbsc gbsc_hpc=gb.score(x_hpc,y_hpc) print "R2 score on hpc is: ", gbsc_hpc gb_cv_res=cross_val_score(gb,x,y,cv=10) print gb_cv_res predictions = cross_val_predict(gb,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) # plt.scatter(y, predictions) # plt.title("GradientBoostingRegressor") # plt.xlabel("True Values") # plt.ylabel("Predictions") # plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy predictions_h=gb.predict(x_hpc) #np.clip(predictions_h,0,1,out=predictions_h) err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print predictions_h print "Cross-Predicted Error for hpc: ", err_h print "Gradient Boosting Regression is Off ..." print "\n\n" # Random Forest Regressor #plt.figure(13) print "Random Forest Regression is On ..." 
lrate = np.arange(0.019, 0.031, 0.001) al = np.arange(0.1,0.9,0.01) n_est = np.arange(10, 100, 2) for n_estimators in [1]: for alpha in [0]: print "**************************************" # print "lrate: ", learning_rate # print "alpha: ", alpha print "n_est: ", n_estimators #gb=GradientBoostingRegressor(max_depth=1,learning_rate=0.04,n_estimators=100) #gb=GradientBoostingRegressor(loss='lad',max_depth=1,learning_rate=0.05,n_estimators=440) # -x- gb=GradientBoostingRegressor(loss='huber',max_depth=1,learning_rate=0.45,n_estimators=200,alpha=alpha) #gb=GradientBoostingRegressor(loss='quantile',max_depth=1,learning_rate=0.028,n_estimators=109,alpha=0.36,criterion="friedman_mse") #rf=RandomForestRegressor(n_estimators=88) rf=RandomForestRegressor() rf.fit(x,y) #print rf.coef_ rfsc=rf.score(x,y) print "R2 score is: ", rfsc rfsc_hpc=rf.score(x_hpc,y_hpc) print "R2 score on hpc is: ", rfsc_hpc rf_cv_res=cross_val_score(rf,x,y,cv=10) print rf_cv_res predictions = cross_val_predict(rf,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) # plt.scatter(y, predictions) # plt.title("GradientBoostingRegressor") # plt.xlabel("True Values") # plt.ylabel("Predictions") # plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy predictions_h=rf.predict(x_hpc) #np.clip(predictions_h,0,1,out=predictions_h) err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print predictions_h print "Gradient Boosting Regression is Off ..." print "\n\n" #''' # Lasso plt.figure(3) print "Lasso Regression is On ..." 
#la=linear_model.Lasso(alpha=0.1) la=linear_model.Lasso() la.fit(x,y) print la.coef_ lasc=la.score(x,y) print "R2 score is: ", lasc lasc_hpc=la.score(x_hpc,y_hpc) print "R2 score on hpc is: ", lasc_hpc la_cv_res=cross_val_score(la,x,y,cv=10) print la_cv_res predictions = cross_val_predict(la,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) plt.scatter(y, predictions) plt.title("Lasso") plt.xlabel("True Values") plt.ylabel("Predictions") plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err predictions_h=la.predict(x_hpc) err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print "Lasso Regression is Off ..." print "\n\n" # Elastic Net plt.figure(4) print "Elastic Net Regression is On ..." #en=linear_model.ElasticNet(alpha=0.5,l1_ratio=0.5) en=linear_model.ElasticNet() en.fit(x,y) print(en.coef_) ensc=en.score(x,y) print "R2 score is: ", ensc ensc_hpc=en.score(x_hpc,y_hpc) print "R2 score on hpc is: ", ensc_hpc en_cv_res=cross_val_score(en,x,y,cv=10) print en_cv_res predictions = cross_val_predict(en,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) plt.scatter(y, predictions) plt.title("Elastic Net") plt.xlabel("True Values") plt.ylabel("Predictions") plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err predictions_h=en.predict(x_hpc) err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print "Elastic Net Regression is Off ..." print "\n\n" # Bayesian Ridge Lasso plt.figure(5) print "Bayesian Ridge Regression is On ..." 
#br=linear_model.BayesianRidge(compute_score=True) br=linear_model.BayesianRidge() br.fit(x,y) print br.coef_ brsc=br.score(x,y) print "R2 score is: ", brsc brsc_hpc=br.score(x_hpc,y_hpc) print "R2 score on hpc is: ", brsc_hpc br_cv_res=cross_val_score(br,x,y,cv=10) print br_cv_res predictions = cross_val_predict(br,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) plt.scatter(y, predictions) plt.title("Bayesian Ridge") plt.xlabel("True Values") plt.ylabel("Predictions") plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err predictions_h=br.predict(x_hpc) err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print "Bayesian Ridge Regression is Off ..." print "\n\n" # RANSAC Regression plt.figure(6) print "RANSAC Regression is On ..." # rr is replacable #RR=linear_model.RANSACRegressor(rr, random_state=42) RR=linear_model.RANSACRegressor() RR.fit(x,y) #print RR.coef_ RRsc=RR.score(x,y) print "R2 score is: ", RRsc RRsc_hpc=RR.score(x_hpc,y_hpc) print "R2 score on hpc is: ", RRsc_hpc RR_cv_res=cross_val_score(RR,x,y,cv=10) print RR_cv_res predictions = cross_val_predict(RR,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) plt.scatter(y, predictions) plt.title("RANSAC") plt.xlabel("True Values") plt.ylabel("Predictions") plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err predictions_h=RR.predict(x_hpc) err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print "RANSAC Regression is Off ..." print "\n\n" # TheilSen Regression plt.figure(7) print "TheilSen Regression is On ..." 
#tsr=linear_model.TheilSenRegressor(random_state=42) tsr=linear_model.TheilSenRegressor() tsr.fit(x,y) print tsr.coef_ tsrsc=tsr.score(x,y) print "R2 score is: ", tsrsc tsrsc_hpc=tsr.score(x_hpc,y_hpc) print "R2 score on hpc is: ", tsrsc_hpc tsr_cv_res=cross_val_score(tsr,x,y,cv=10) print tsr_cv_res predictions = cross_val_predict(tsr,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) plt.scatter(y, predictions) plt.title("TheilSen") plt.xlabel("True Values") plt.ylabel("Predictions") plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy err=np.mean(abs((predictions-y)/y)) predictions_h=tsr.predict(x_hpc) print "Cross-Predicted Error: ", err err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print "TheilSen Regression is Off ..." print "\n\n" # Huber Regression plt.figure(8) print "Huber Regression is On ..." hr=linear_model.HuberRegressor() hr.fit(x,y) print hr.coef_ hrsc=hr.score(x,y) print "R2 score is: ", hrsc hrsc_hpc=hr.score(x_hpc,y_hpc) print "R2 score on hpc is: ", hrsc_hpc hr_cv_res=cross_val_score(hr,x,y,cv=10) print hr_cv_res predictions = cross_val_predict(hr,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) plt.scatter(y, predictions) plt.title("Huber") plt.xlabel("True Values") plt.ylabel("Predictions") plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err predictions_h=hr.predict(x_hpc) err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print "TheilSen Regression is Off ..." print "\n\n" # SGD regression print "SGD Regression is On ..." 
hr=linear_model.SGDRegressor() hr.fit(x,y) print hr.coef_ hrsc=hr.score(x,y) print "R2 score is: ", hrsc hrsc_hpc=hr.score(x_hpc,y_hpc) print "R2 score on hpc is: ", hrsc_hpc hr_cv_res=cross_val_score(hr,x,y,cv=10) print hr_cv_res predictions = cross_val_predict(hr,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) plt.scatter(y, predictions) plt.title("Huber") plt.xlabel("True Values") plt.ylabel("Predictions") plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err predictions_h=hr.predict(x_hpc) err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print "SGD Regression is Off ..." print "\n\n" # Kneighbors Regression print "Kneighbors Regression is On ..." hr=KNeighborsRegressor() hr.fit(x,y) #print hr.coef_ hrsc=hr.score(x,y) print "R2 score is: ", hrsc hrsc_hpc=hr.score(x_hpc,y_hpc) print "R2 score on hpc is: ", hrsc_hpc hr_cv_res=cross_val_score(hr,x,y,cv=10) print hr_cv_res predictions = cross_val_predict(hr,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) plt.scatter(y, predictions) plt.title("Huber") plt.xlabel("True Values") plt.ylabel("Predictions") plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err predictions_h=hr.predict(x_hpc) err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print "Kneighbors Regression is Off ..." print "\n\n" # MLP Regression print "MLP Regression is On ..." 
hr=MLPRegressor(hidden_layer_sizes=3) hr.fit(x,y) #print hr.coef_ hrsc=hr.score(x,y) print "R2 score is: ", hrsc hrsc_hpc=hr.score(x_hpc,y_hpc) print "R2 score on hpc is: ", hrsc_hpc hr_cv_res=cross_val_score(hr,x,y,cv=10) print hr_cv_res predictions = cross_val_predict(hr,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) plt.scatter(y, predictions) plt.title("Huber") plt.xlabel("True Values") plt.ylabel("Predictions") plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err predictions_h=hr.predict(x_hpc) err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print "MLP Regression is Off ..." print "\n\n" # KernelRidge Regression print "KernelRidge Regression is On ..." hr=KernelRidge() hr.fit(x,y) #print hr.coef_ hrsc=hr.score(x,y) print "R2 score is: ", hrsc #hrsc_hpc=hr.score(x_hpc,y_hpc) #print "R2 score on hpc is: ", hrsc_hpc #hr_cv_res=cross_val_score(hr,x,y,cv=10) #print hr_cv_res predictions = cross_val_predict(hr,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) plt.scatter(y, predictions) plt.title("Huber") plt.xlabel("True Values") plt.ylabel("Predictions") plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err predictions_h=hr.predict(x_hpc) err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print "KernelRidge Regression is Off ..." print "\n\n" # NuSVR regressor print "NuSVR Regression is On ..." 
hr=NuSVR() hr.fit(x,y) #print hr.coef_ hrsc=hr.score(x,y) print "R2 score is: ", hrsc hrsc_hpc=hr.score(x_hpc,y_hpc) print "R2 score on hpc is: ", hrsc_hpc hr_cv_res=cross_val_score(hr,x,y,cv=10) print hr_cv_res predictions = cross_val_predict(hr,x,y,cv=10) #np.clip(predictions,0,1,out=predictions) plt.scatter(y, predictions) plt.title("Huber") plt.xlabel("True Values") plt.ylabel("Predictions") plt.show() accuracy = metrics.r2_score(y, predictions) print "Cross-Predicted Accuracy:", accuracy err=np.mean(abs((predictions-y)/y)) print "Cross-Predicted Error: ", err predictions_h=hr.predict(x_hpc) err_h=np.mean(abs((predictions_h-y_hpc)/y_hpc)) print "Cross-Predicted Error for hpc: ", err_h print "NuSVR Regression is Off ..." print "\n\n" # ############################################################################# # This preprocessor transforms an input data matrix into polynomial form # such that X can be used into any model ''' poly=preprocessing.PolynomialFeatures(degree=3) x=poly.fit_transform(x) # Least Square Linear Regression #ridgereg=linear_model.Ridge(alpha=.5) #ridgereg.fit(x,y) plt.figure(9) print "Least Square Linear Regression is On ..." lr=linear_model.LinearRegression() oreg=linear_model.LinearRegression() oreg.fit(x,y) print oreg.coef_ osc=oreg.score(x,y) print "R2 score is: ", osc osc_hpc=oreg.score(x_hpc,y_hpc) print "R2 score on hpc
input, target): assert input.dim() in [4, 5] num_class = input.size(1) if input.dim() == 4: input = input.permute(0, 2, 3, 1).contiguous() input_flatten = input.view(-1, num_class) elif input.dim() == 5: input = input.permute(0, 2, 3, 4, 1).contiguous() input_flatten = input.view(-1, num_class) target_flatten = target.view(-1) return input_flatten, target_flatten def lovasz_softmax_flat(self, output, target): num_classes = output.size(1) losses = [] for c in range(num_classes): target_c = (target == c).float() if num_classes == 1: input_c = output[:, 0] else: input_c = output[:, c] loss_c = (torch.autograd.Variable(target_c) - input_c).abs() loss_c_sorted, loss_index = torch.sort(loss_c, 0, descending=True) target_c_sorted = target_c[loss_index] losses.append(torch.dot(loss_c_sorted, torch.autograd.Variable(_lovasz_grad(target_c_sorted)))) losses = torch.stack(losses) if self.reduction == 'none': loss = losses elif self.reduction == 'sum': loss = losses.sum() else: loss = losses.mean() return loss def flatten_binary_scores(self, scores, labels, ignore=None): """ Flattens predictions in the batch (binary case) Remove labels equal to 'ignore' """ scores = scores.view(-1) labels = labels.view(-1) if ignore is None: return scores, labels valid = (labels != ignore) vscores = scores[valid] vlabels = labels[valid] return vscores, vlabels def lovasz_hinge(self, logits, labels, per_image=True, ignore=None): """ Binary Lovasz hinge loss logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty) labels: [B, H, W] Tensor, binary ground truth masks (0 or 1) per_image: compute the loss per image instead of per batch ignore: void class id """ if per_image: loss = (self.lovasz_hinge_flat(*self.flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore)) for log, lab in zip(logits, labels)).mean() else: loss = self.lovasz_hinge_flat(*self.flatten_binary_scores(logits, labels, ignore)) return loss def lovasz_hinge_flat(self, logits, labels): """ Binary 
Lovasz hinge loss logits: [P] Variable, logits at each prediction (between -\infty and +\infty) labels: [P] Tensor, binary ground truth labels (0 or 1) ignore: label to ignore """ if len(labels) == 0: # only void pixels, the gradients should be 0 return logits.sum() * 0. signs = 2. * labels.float() - 1. errors = (1. - logits * torch.tensor(signs, requires_grad=True)) errors_sorted, perm = torch.sort(errors, dim=0, descending=True) perm = perm.data gt_sorted = labels[perm] grad = _lovasz_grad(gt_sorted) loss = torch.dot(F.relu(errors_sorted), grad) return loss def forward(self, output, target) -> 'loss': # print(output.shape, target.shape) # (batch size, class_num, x,y,z), (batch size, 1, x,y,z) self.num_classes = output.size(1) output, target = self.prob_flatten(output, target) # print(output.shape, target.shape) losses = self.lovasz_softmax_flat(output, target) if self.num_classes > 2 else self.lovasz_hinge_flat(output, target) return losses class TripletLoss(_Loss): r"""Creates a criterion that measures the triplet loss given an input tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`. This is used for measuring a relative similarity between samples. A triplet is composed by `a`, `p` and `n` (i.e., `anchor`, `positive examples` and `negative examples` respectively). The shapes of all input tensors should be :math:`(N, D)`. The distance swap is described in detail in the paper `Learning shallow convolutional feature descriptors with triplet losses`_ by <NAME>, <NAME> et al. The loss function for each sample in the mini-batch is: .. math:: L(a, p, n) = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\} where .. math:: d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p Args: margin (float, optional): Default: :math:`1`. p (int, optional): The norm degree for pairwise distance. Default: :math:`2`. 
swap (bool, optional): The distance swap is described in detail in the paper `Learning shallow convolutional feature descriptors with triplet losses` by <NAME>, <NAME> et al. Default: ``False``. size_average (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when reduce is ``False``. Default: ``True`` reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`. Default: ``True`` reduction (string, optional): Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`. Default: ``'mean'`` Shape: - Input: :math:`(N, D)` where :math:`D` is the vector dimension. - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N)`. >>> triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2) >>> anchor = torch.randn(100, 128, requires_grad=True) >>> positive = torch.randn(100, 128, requires_grad=True) >>> negative = torch.randn(100, 128, requires_grad=True) >>> output = triplet_loss(anchor, positive, negative) >>> output.backward() .. 
_Learning shallow convolutional feature descriptors with triplet losses: http://www.bmva.org/bmvc/2016/papers/paper119/index.html """ __constants__ = ['margin', 'p', 'eps', 'swap', 'reduction'] margin: float p: float eps: float swap: bool def __init__(self, margin: float = 1.0, p: float = 2., eps: float = 1e-6, swap: bool = False, reduction: str = 'mean'): super(TripletLoss, self).__init__(reduction=reduction) self.margin = margin self.p = p self.eps = eps self.swap = swap def forward(self, anchor, positive, negative): return F.triplet_margin_loss(anchor, positive, negative, margin=self.margin, p=self.p, eps=self.eps, swap=self.swap, reduction=self.reduction) TripletMarginLoss = TripletLoss class HardTripletLoss(_Loss): """Hard/Hardest Triplet Loss (pytorch implementation of https://omoindrot.github.io/triplet-loss) For each anchor, we get the hardest positive and hardest negative to form a triplet. """ def __init__(self, margin=0.1, hardest=False, squared=False): """ Args: margin: margin for triplet loss hardest: If true, loss is considered only hardest triplets. squared: If true, output is the pairwise squared euclidean distance matrix. If false, output is the pairwise euclidean distance matrix. """ super(HardTripletLoss, self).__init__() self.margin = margin self.hardest = hardest self.squared = squared def _pairwise_distance(self, x, squared=False, eps=1e-16): # Compute the 2D matrix of distances between all the embeddings. cor_mat = torch.matmul(x, x.t()) norm_mat = cor_mat.diag() distances = norm_mat.unsqueeze(1) - 2 * cor_mat + norm_mat.unsqueeze(0) distances = F.relu(distances) if not squared: mask = torch.eq(distances, 0.0).float() distances = distances + mask * eps distances = torch.sqrt(distances) distances = distances * (1.0 - mask) return distances def _get_anchor_positive_triplet_mask(self, labels): # Return a 2D mask where mask[a, p] is True iff a and p are distinct and have same label. 
        # NOTE(review): masks are built on CUDA when available regardless of
        # where `labels` lives -- confirm callers keep labels on that device.
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Identity matrix XOR 1 -> byte mask that is 0 on the diagonal
        # (excludes a == p).
        indices_not_equal = torch.eye(labels.shape[0]).to(device).byte() ^ 1
        # Check if labels[i] == labels[j]
        labels_equal = torch.unsqueeze(labels, 0) == torch.unsqueeze(labels, 1)
        mask = indices_not_equal * labels_equal
        return mask

    def _get_anchor_negative_triplet_mask(self, labels):
        # Return a 2D mask where mask[a, n] is True iff a and n have distinct labels.
        # Check if labels[i] != labels[k]
        labels_equal = torch.unsqueeze(labels, 0) == torch.unsqueeze(labels, 1)
        # XOR 1 flips the 0/1 byte mask, i.e. logical NOT of equality.
        mask = labels_equal ^ 1
        return mask

    def _get_triplet_mask(self, labels):
        """Return a 3D mask where mask[a, p, n] is True iff the triplet
        (a, p, n) is valid.

        A triplet (i, j, k) is valid if:
            - i, j, k are distinct
            - labels[i] == labels[j] and labels[i] != labels[k]
        """
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Check that i, j and k are distinct: broadcast the off-diagonal mask
        # along each of the three axes and AND them together.
        indices_not_same = torch.eye(labels.shape[0]).to(device).byte() ^ 1
        i_not_equal_j = torch.unsqueeze(indices_not_same, 2)
        i_not_equal_k = torch.unsqueeze(indices_not_same, 1)
        j_not_equal_k = torch.unsqueeze(indices_not_same, 0)
        distinct_indices = i_not_equal_j * i_not_equal_k * j_not_equal_k
        # Check if labels[i] == labels[j] and labels[i] != labels[k]
        label_equal = torch.eq(torch.unsqueeze(labels, 0),
                               torch.unsqueeze(labels, 1))
        i_equal_j = torch.unsqueeze(label_equal, 2)
        i_equal_k = torch.unsqueeze(label_equal, 1)
        valid_labels = i_equal_j * (i_equal_k ^ 1)
        mask = distinct_indices * valid_labels  # Combine the two masks
        return mask

    def forward(self, embeddings, labels):
        """
        Args:
            labels: labels of the batch, of size (batch_size,)
            embeddings: tensor of shape (batch_size, embed_dim)

        Returns:
            triplet_loss: scalar tensor containing the triplet loss
        """
        pairwise_dist = self._pairwise_distance(embeddings,
                                                squared=self.squared)

        if self.hardest:
            # Get the hardest positive pairs: zero out distances to
            # non-positives, then take the row-wise max.
            mask_anchor_positive = self._get_anchor_positive_triplet_mask(
                labels).float()
            valid_positive_dist =
pairwise_dist * mask_anchor_positive hardest_positive_dist, _ = torch.max(valid_positive_dist, dim=1, keepdim=True) # Get the hardest negative pairs mask_anchor_negative = self._get_anchor_negative_triplet_mask(labels).float() max_anchor_negative_dist, _ = torch.max(pairwise_dist, dim=1, keepdim=True) anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative) hardest_negative_dist, _ = torch.min(anchor_negative_dist, dim=1, keepdim=True) # Combine biggest d(a, p) and smallest d(a, n) into final
'''Defines the basic client and methods for creating one. This client is
completely unopinionated, and provides an easy-to-use wrapper around the TD
Ameritrade HTTP API.'''

from abc import ABC, abstractmethod
from enum import Enum

import datetime
import json
import logging
import pickle
import tda
import time
import warnings

from tda.orders.generic import OrderBuilder

from ..utils import EnumEnforcer


def get_logger():
    '''Return this module's logger.'''
    return logging.getLogger(__name__)


##########################################################################
# Client


class BaseClient(EnumEnforcer):
    # This docstring will appear as documentation for __init__
    '''A basic, completely unopinionated client. This client provides the most
    direct access to the API possible. All methods return the raw response
    which was returned by the underlying API call, and the user is responsible
    for checking status codes. For methods which support responses, they can
    be found in the response object's ``json()`` method.'''

    def __init__(self, api_key, session, *, enforce_enums=True,
                 token_metadata=None):
        '''Create a new client with the given API key and session. Set
        `enforce_enums=False` to disable strict input type checking.'''
        super().__init__(enforce_enums)

        self.api_key = api_key
        self.session = session

        # Logging-related fields
        self.logger = get_logger()
        self.request_number = 0

        # Keep the API key out of emitted logs.
        tda.LOG_REDACTOR.register(api_key, 'API_KEY')

        self.token_metadata = token_metadata

    # XXX: This class's tests perform monkey patching to inject synthetic
    # values of utcnow(). To avoid being confused by this, capture these
    # values here so we can use them later.
_DATETIME = datetime.datetime _DATE = datetime.date def _log_response(self, resp, req_num): self.logger.debug('Req {}: GET response: {}, content={}'.format( req_num, resp.status_code, resp.text)) def _req_num(self): self.request_number += 1 return self.request_number def _assert_type(self, name, value, exp_types): value_type = type(value) value_type_name = '{}.{}'.format( value_type.__module__, value_type.__name__) exp_type_names = ['{}.{}'.format( t.__module__, t.__name__) for t in exp_types] if not any(isinstance(value, t) for t in exp_types): if len(exp_types) == 1: error_str = "expected type '{}' for {}, got '{}'".format( exp_type_names[0], name, value_type_name) else: error_str = "expected type in ({}) for {}, got '{}'".format( ', '.join(exp_type_names), name, value_type_name) raise ValueError(error_str) def _format_datetime(self, var_name, dt): '''Formats datetime objects appropriately, depending on whether they are naive or timezone-aware''' self._assert_type(var_name, dt, [self._DATETIME]) tz_offset = dt.strftime('%z') tz_offset = tz_offset if tz_offset else '+0000' return dt.strftime('%Y-%m-%dT%H:%M:%S') + tz_offset def _format_date(self, var_name, dt): '''Formats datetime objects appropriately, depending on whether they are naive or timezone-aware''' self._assert_type(var_name, dt, [self._DATE, self._DATETIME]) d = datetime.date(year=dt.year, month=dt.month, day=dt.day) return d.isoformat() def _datetime_as_millis(self, var_name, dt): 'Converts datetime objects to compatible millisecond values' self._assert_type(var_name, dt, [self._DATETIME]) return int(dt.timestamp() * 1000) def ensure_updated_refresh_token(self, update_interval_seconds=None): ''' The client automatically performs a token refresh ''' if not self.token_metadata: return None new_session = self.token_metadata.ensure_refresh_token_update( self.api_key, self.session, update_interval_seconds) if new_session: self.session = new_session return new_session is not None 
########################################################################## # Orders def cancel_order(self, order_id, account_id): '''Cancel a specific order for a specific account. `Official documentation <https://developer.tdameritrade.com/account-access/apis/delete/ accounts/%7BaccountId%7D/orders/%7BorderId%7D-0>`__.''' path = '/v1/accounts/{}/orders/{}'.format(account_id, order_id) return self._delete_request(path) def get_order(self, order_id, account_id): '''Get a specific order for a specific account by its order ID. `Official documentation <https://developer.tdameritrade.com/account-access/apis/get/accounts/ %7BaccountId%7D/orders/%7BorderId%7D-0>`__.''' path = '/v1/accounts/{}/orders/{}'.format(account_id, order_id) return self._get_request(path, {}) class Order: class Status(Enum): '''Order statuses passed to :meth:`get_orders_by_path` and :meth:`get_orders_by_query`''' AWAITING_PARENT_ORDER = 'AWAITING_PARENT_ORDER' AWAITING_CONDITION = 'AWAITING_CONDITION' AWAITING_MANUAL_REVIEW = 'AWAITING_MANUAL_REVIEW' ACCEPTED = 'ACCEPTED' AWAITING_UR_OUR = 'AWAITING_UR_OUR' PENDING_ACTIVATION = 'PENDING_ACTIVATION' QUEUED = 'QUEUED' WORKING = 'WORKING' REJECTED = 'REJECTED' PENDING_CANCEL = 'PENDING_CANCEL' CANCELED = 'CANCELED' PENDING_REPLACE = 'PENDING_REPLACE' REPLACED = 'REPLACED' FILLED = 'FILLED' EXPIRED = 'EXPIRED' def _make_order_query(self, *, max_results=None, from_entered_datetime=None, to_entered_datetime=None, status=None, statuses=None): status = self.convert_enum(status, self.Order.Status) statuses = self.convert_enum_iterable(statuses, self.Order.Status) if status is not None and statuses is not None: raise ValueError('at most one of status or statuses may be set') if from_entered_datetime is None: from_entered_datetime = datetime.datetime( year=1971, month=1, day=1) if to_entered_datetime is None: to_entered_datetime = datetime.datetime.utcnow() params = { 'fromEnteredTime': self._format_datetime( 'from_entered_datetime', from_entered_datetime), 
'toEnteredTime': self._format_datetime( 'to_entered_datetime', to_entered_datetime), } if max_results: params['maxResults'] = max_results if status: params['status'] = status if statuses: params['status'] = ','.join(statuses) return params def get_orders_by_path(self, account_id, *, max_results=None, from_entered_datetime=None, to_entered_datetime=None, status=None, statuses=None): '''Orders for a specific account. At most one of ``status`` and ``statuses`` may be set. `Official documentation <https://developer.tdameritrade.com/account-access/apis/get/accounts/ %7BaccountId%7D/orders-0>`__. :param max_results: The maximum number of orders to retrieve. :param from_entered_datetime: Specifies that no orders entered before this time should be returned. Date must be within 60 days from today's date. ``toEnteredTime`` must also be set. :param to_entered_datetime: Specifies that no orders entered after this time should be returned. ``fromEnteredTime`` must also be set. :param status: Restrict query to orders with this status. See :class:`Order.Status` for options. :param statuses: Restrict query to orders with any of these statuses. See :class:`Order.Status` for options. ''' path = '/v1/accounts/{}/orders'.format(account_id) return self._get_request(path, self._make_order_query( max_results=max_results, from_entered_datetime=from_entered_datetime, to_entered_datetime=to_entered_datetime, status=status, statuses=statuses)) def get_orders_by_query(self, *, max_results=None, from_entered_datetime=None, to_entered_datetime=None, status=None, statuses=None): '''Orders for all linked accounts. At most one of ``status`` and ``statuses`` may be set. `Official documentation <https://developer.tdameritrade.com/account-access/apis/get/orders-0>`__. :param max_results: The maximum number of orders to retrieve. :param from_entered_datetime: Specifies that no orders entered before this time should be returned. Date must be within 60 days from today's date. 
``toEnteredTime`` must also be set. :param to_entered_datetime: Specifies that no orders entered after this time should be returned. ``fromEnteredTime`` must also be set. :param status: Restrict query to orders with this status. See :class:`Order.Status` for options. :param statuses: Restrict query to orders with any of these statuses. See :class:`Order.Status` for options. ''' path = '/v1/orders' return self._get_request(path, self._make_order_query( max_results=max_results, from_entered_datetime=from_entered_datetime, to_entered_datetime=to_entered_datetime, status=status, statuses=statuses)) def place_order(self, account_id, order_spec): '''Place an order for a specific account. If order creation was successful, the response will contain the ID of the generated order. See :meth:`tda.utils.Utils.extract_order_id` for more details. Note unlike most methods in this library, responses for successful calls to this method typically do not contain ``json()`` data, and attempting to extract it will likely result in an exception. `Official documentation <https://developer.tdameritrade.com/account-access/apis/post/accounts/ %7BaccountId%7D/orders-0>`__. ''' if isinstance(order_spec, OrderBuilder): order_spec = order_spec.build() path = '/v1/accounts/{}/orders'.format(account_id) return self._post_request(path, order_spec) def replace_order(self, account_id, order_id, order_spec): '''Replace an existing order for an account. The existing order will be replaced by the new order. Once replaced, the old order will be canceled and a new order will be created. 
`Official documentation <https://developer.tdameritrade.com/account-access/apis/put/accounts/ %7BaccountId%7D/orders/%7BorderId%7D-0>`__.''' if isinstance(order_spec, OrderBuilder): order_spec = order_spec.build() path = '/v1/accounts/{}/orders/{}'.format(account_id, order_id) return self._put_request(path, order_spec) ########################################################################## # Saved Orders def create_saved_order(self, account_id, order_spec): '''Save an order for a specific account. `Official documentation <https://developer.tdameritrade.com/account-access/apis/post/accounts/ %7BaccountId%7D/savedorders-0>`__.''' if isinstance(order_spec, OrderBuilder): order_spec = order_spec.build() path = '/v1/accounts/{}/savedorders'.format(account_id) return self._post_request(path, order_spec) def delete_saved_order(self, account_id, order_id): '''Delete a specific saved order for a specific account. `Official documentation <https://developer.tdameritrade.com/account-access/apis/delete/ accounts/%7BaccountId%7D/savedorders/%7BsavedOrderId%7D-0>`__.''' path = '/v1/accounts/{}/savedorders/{}'.format(account_id, order_id) return self._delete_request(path) def get_saved_order(self, account_id, order_id): '''Specific saved order by its ID, for a specific account. `Official documentation <https://developer.tdameritrade.com/account-access/apis/get/accounts/ %7BaccountId%7D/savedorders/%7BsavedOrderId%7D-0>`__.''' path = '/v1/accounts/{}/savedorders/{}'.format(account_id, order_id) return self._get_request(path, {}) def get_saved_orders_by_path(self, account_id): '''Saved orders for a specific account. `Official documentation <https://developer.tdameritrade.com/account-access/apis/get/accounts/ %7BaccountId%7D/savedorders-0>`__.''' path = '/v1/accounts/{}/savedorders'.format(account_id) return self._get_request(path, {}) def replace_saved_order(self, account_id, order_id, order_spec): '''Replace an existing saved order for an account. 
The existing saved order will be replaced by the new order. `Official documentation <https://developer.tdameritrade.com/account-access/apis/put/accounts/ %7BaccountId%7D/savedorders/%7BsavedOrderId%7D-0>`__.''' if isinstance(order_spec, OrderBuilder): order_spec = order_spec.build() path = '/v1/accounts/{}/savedorders/{}'.format(account_id, order_id) return self._put_request(path, order_spec) ########################################################################## # Accounts class Account: class Fields(Enum): '''Account fields passed to :meth:`get_account` and :meth:`get_accounts`''' POSITIONS = 'positions' ORDERS = 'orders' def get_account(self, account_id, *, fields=None): '''Account balances, positions, and orders for a specific account. `Official documentation <https://developer.tdameritrade.com/account-access/apis/get/accounts/ %7BaccountId%7D-0>`__. :param fields: Balances displayed by default, additional fields can be added here by adding values from :class:`Account.Fields`. ''' fields = self.convert_enum_iterable(fields, self.Account.Fields) params = {} if fields: params['fields'] = ','.join(fields) path = '/v1/accounts/{}'.format(account_id) return self._get_request(path, params) def get_accounts(self, *, fields=None): '''Account balances, positions, and orders for all linked accounts. `Official documentation <https://developer.tdameritrade.com/account-access/apis/get/ accounts-0>`__. :param fields: Balances displayed by default, additional fields can be added here by adding values from :class:`Account.Fields`. ''' fields = self.convert_enum_iterable(fields, self.Account.Fields) params = {} if fields: params['fields'] = ','.join(fields) path = '/v1/accounts' return self._get_request(path, params) ########################################################################## # Instruments class Instrument: class Projection(Enum): '''Search query type for :func:`search_instruments`. 
See the `official documentation <https://developer.tdameritrade.com/instruments/apis/get/ instruments>`__ for details on the semantics of each.''' SYMBOL_SEARCH = 'symbol-search' SYMBOL_REGEX = 'symbol-regex' DESC_SEARCH = 'desc-search' DESC_REGEX = 'desc-regex' FUNDAMENTAL = 'fundamental' def search_instruments(self, symbols, projection): '''Search or retrieve instrument data, including fundamental data. `Official documentation <https://developer.tdameritrade.com/instruments/apis/get/ instruments>`__. :param projection:
new angle parameters new_indices = [self._topology_proposal.old_to_new_atom_map[old_atomid] for old_atomid in old_angle_parameters[:3]] new_angle_parameters = self._find_angle_parameters(new_system_angle_force, new_indices) if not new_angle_parameters: new_angle_parameters = [0, 0, 0, old_angle_parameters[3], 0.0*unit.kilojoule_per_mole/unit.radian**2] #add to the hybrid force: #the parameters at indices 3 and 4 represent theta0 and k, respectively. hybrid_force_parameters = [old_angle_parameters[3], old_angle_parameters[4], new_angle_parameters[3], new_angle_parameters[4]] self._hybrid_system_forces['core_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters) # Check if the atoms are neither all core nor all environment, which would mean they involve unique old interactions elif not hybrid_index_set.issubset(self._atom_classes['environment_atoms']): _logger.debug(f"\t\thandle_harmonic_angles: angle_index {angle_index} is an environment or core with unique_old...") # Check if we are softening angles, and not softening only new angles: if self._soften_angles and not self._soften_only_new: _logger.debug(f"\t\t\thandle_harmonic_angles: softening (to custom angle force)") # If we are, then we need to generate the softened parameters (at lambda=1 for old atoms) # We do this by using the same equilibrium angle, and scaling the force constant at the non-interacting # endpoint: if angle_index in self.neglected_old_angle_terms: _logger.debug("\t\t\tsoften angles on but angle is in neglected old, so softening constant is set to zero.") hybrid_force_parameters = [old_angle_parameters[3], old_angle_parameters[4], old_angle_parameters[3], 0.0 * old_angle_parameters[4]] self._hybrid_system_forces['custom_neglected_old_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters) else: _logger.debug(f"\t\t\thandle_harmonic_angles: softening (to custom angle force)") 
hybrid_force_parameters = [old_angle_parameters[3], old_angle_parameters[4], old_angle_parameters[3], self._angle_softening_constant * old_angle_parameters[4]] self._hybrid_system_forces['core_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters) # If not, we can just add this to the standard angle force else: if angle_index in self.neglected_old_angle_terms: _logger.debug(f"\t\t\tangle in neglected_old_angle_terms; K_2 is set to zero") hybrid_force_parameters = [old_angle_parameters[3], old_angle_parameters[4], old_angle_parameters[3], 0.0 * old_angle_parameters[4]] self._hybrid_system_forces['custom_neglected_old_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters) else: _logger.debug(f"\t\t\thandle_harmonic_bonds: no softening (to standard angle force)") self._hybrid_system_forces['standard_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], old_angle_parameters[3], old_angle_parameters[4]) #otherwise, only environment atoms are in this interaction, so add it to the standard angle force else: _logger.debug(f"\t\thandle_harmonic_angles: angle_index {angle_index} is an environment (to standard angle force)") self._hybrid_system_forces['standard_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], old_angle_parameters[3], old_angle_parameters[4]) #finally, loop through the new system force to add any unique new angles _logger.info("\thandle_harmonic_angles: looping through new_system to add relevant terms...") for angle_index in range(new_system_angle_force.getNumAngles()): _logger.debug(f"\t\thandle_harmonic_angles: new angle_index: {angle_index}") new_angle_parameters = new_system_angle_force.getAngleParameters(angle_index) #get the indices in the hybrid system hybrid_index_list = [self._new_to_hybrid_map[new_atomid] for new_atomid in new_angle_parameters[:3]] 
hybrid_index_set = set(hybrid_index_list) #if the intersection of this hybrid set with the unique new atoms is nonempty, it must be added: if len(hybrid_index_set.intersection(self._atom_classes['unique_new_atoms'])) > 0: _logger.debug(f"\t\thandle_harmonic_bonds: angle_index {angle_index} is a core-unique_new or unique_new-unique_new...") # Check to see if we are softening angles: if self._soften_angles: _logger.info(f"\t\t\thandle_harmonic_bonds: softening (to custom angle force)") if angle_index in self.neglected_new_angle_terms: _logger.debug("\t\t\tsoften angles on but angle is in neglected new, so softening constant is set to zero.") hybrid_force_parameters = [new_angle_parameters[3], new_angle_parameters[4] * 0.0, new_angle_parameters[3], new_angle_parameters[4]] self._hybrid_system_forces['custom_neglected_new_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters) else: _logger.debug(f"\t\t\thandle_harmonic_angles: softening (to custom angle force)") hybrid_force_parameters = [new_angle_parameters[3], new_angle_parameters[4] * self._angle_softening_constant, new_angle_parameters[3], new_angle_parameters[4]] self._hybrid_system_forces['core_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters) # Otherwise, just add to the nonalchemical force else: if angle_index in self.neglected_new_angle_terms: _logger.debug(f"\t\t\tangle in neglected_new_angle_terms; K_1 is set to zero") hybrid_force_parameters = [new_angle_parameters[3], 0.0 * new_angle_parameters[4], new_angle_parameters[3], new_angle_parameters[4]] self._hybrid_system_forces['custom_neglected_new_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters) else: _logger.debug(f"\t\t\thandle_harmonic_bonds: no softening (to standard angle force)") self._hybrid_system_forces['standard_angle_force'].addAngle(hybrid_index_list[0], 
hybrid_index_list[1], hybrid_index_list[2], new_angle_parameters[3], new_angle_parameters[4]) if hybrid_index_set.issubset(self._atom_classes['core_atoms']): _logger.debug(f"\t\thandle_harmonic_angles: angle_index {angle_index} is a core (to custom angle force).") if not self._find_angle_parameters(self._hybrid_system_forces['core_angle_force'], hybrid_index_list): _logger.debug(f"\t\t\thandle_harmonic_angles: angle_index {angle_index} NOT previously added...adding now...THERE IS A CONSIDERATION NOT BEING MADE!") hybrid_force_parameters = [new_angle_parameters[3], 0.0*unit.kilojoule_per_mole/unit.radian**2, new_angle_parameters[3], new_angle_parameters[4]] self._hybrid_system_forces['core_angle_force'].addAngle(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_force_parameters) def handle_periodic_torsion_force(self): """ Handle the torsions in the hybrid system in the same way as the angles and bonds. """ old_system_torsion_force = self._old_system_forces['PeriodicTorsionForce'] new_system_torsion_force = self._new_system_forces['PeriodicTorsionForce'] #first, loop through all the torsions in the old system to determine what to do with them. We will only use the #custom torsion force if all atoms are part of "core." Otherwise, they are either unique to one system or never #change. #we need to keep track of what torsions we added so that we do not double count. 
added_torsions = [] _logger.info("\thandle_periodic_torsion_forces: looping through old_system to add relevant terms...") for torsion_index in range(old_system_torsion_force.getNumTorsions()): _logger.debug(f"\t\thandle_harmonic_torsion_forces: old torsion_index: {torsion_index}") torsion_parameters = old_system_torsion_force.getTorsionParameters(torsion_index) _logger.debug(f"\t\thandle_harmonic_torsion_forces: old_torsion parameters: {torsion_parameters}") #get the indices in the hybrid system hybrid_index_list = [self._old_to_hybrid_map[old_index] for old_index in torsion_parameters[:4]] _logger.debug(f"\t\thandle_harmonic_torsion_forces: hybrid torsion index: {hybrid_index_list}") hybrid_index_set = set(hybrid_index_list) #if all atoms are in the core, we'll need to find the corresponding parameters in the old system and #interpolate if hybrid_index_set.issubset(self._atom_classes['core_atoms']): _logger.debug(f"\t\thandle_periodic_torsion_forces: torsion_index {torsion_index} is a core (to custom torsion force).") torsion_indices = torsion_parameters[:4] #if we've already added these indices (they may appear >once for high periodicities) #then just continue to the next torsion. if torsion_indices in added_torsions: continue #it doesn't matter if the torsion indices are already in the new hybrid torsion force object...some torsions have high periodicity #get the new indices so we can get the new angle parameters, as well as all old parameters of the old torsion #The reason we do it like this is to take care of varying periodicity between new and old system. 
torsion_parameters_list = self._find_torsion_parameters(old_system_torsion_force, torsion_indices) _logger.debug(f"\t\thandle_periodic_torsion_forces: old torsion parameters: {torsion_parameters_list}") new_indices = [self._topology_proposal.old_to_new_atom_map[old_index] for old_index in torsion_indices] _logger.debug(f"\t\thandle_periodic_torsion_forces: new indices: {new_indices}") new_torsion_parameters_list = self._find_torsion_parameters(new_system_torsion_force, new_indices) _logger.debug(f"\t\thandle_periodic_torsion_forces: new torsion parameters: {new_torsion_parameters_list}") #for old torsions, have the energy scale from full at lambda=0 to off at lambda=1 for torsion_parameters in torsion_parameters_list: hybrid_force_parameters = [torsion_parameters[4], torsion_parameters[5], torsion_parameters[6], 0.0, 0.0, 0.0] self._hybrid_system_forces['core_torsion_force'].addTorsion(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_index_list[3], hybrid_force_parameters) #for new torsions, have the energy scale from 0 at lambda=0 to full at lambda=1 for torsion_parameters in new_torsion_parameters_list: #add to the hybrid force: #the parameters at indices 3 and 4 represent theta0 and k, respectively. hybrid_force_parameters = [0.0, 0.0, 0.0,torsion_parameters[4], torsion_parameters[5], torsion_parameters[6]] self._hybrid_system_forces['core_torsion_force'].addTorsion(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_index_list[3], hybrid_force_parameters) added_torsions.append(torsion_indices) #otherwise, just add the parameters to the regular force: else: #TODO: make considerations for environment-core valence interactions. THESE will be important in protein mutation studies... 
_logger.debug(f"\t\thandle_periodic_torsion_forces: torsion_index {torsion_index} is a core-unique_old or unique_old-unique_old (to standard torsion force).") self._hybrid_system_forces['standard_torsion_force'].addTorsion(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_index_list[3], torsion_parameters[4], torsion_parameters[5], torsion_parameters[6]) _logger.info("\thandle_periodic_torsion_forces: looping through new_system to add relevant terms...") for torsion_index in range(new_system_torsion_force.getNumTorsions()): _logger.debug(f"\t\thandle_harmonic_angles: new torsion_index: {torsion_index}") torsion_parameters = new_system_torsion_force.getTorsionParameters(torsion_index) #get the indices in the hybrid system: hybrid_index_list = [self._new_to_hybrid_map[new_index] for new_index in torsion_parameters[:4]] hybrid_index_set = set(hybrid_index_list) #if any are part of the unique new atoms, we will add them to the standard torsion force: if len(hybrid_index_set.intersection(self._atom_classes['unique_new_atoms'])) > 0: _logger.debug(f"\t\thandle_periodic_torsion_forces: torsion_index {torsion_index} is core-unique_new or unique_new-unique_new (to standard torsion force).") self._hybrid_system_forces['standard_torsion_force'].addTorsion(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_index_list[3], torsion_parameters[4], torsion_parameters[5], torsion_parameters[6]) #another consideration has to be made for when a core-core-core-core torsion force appears in the new_system but is not present in the old system; #this would not have been caught by the previous `for` loop over the old system core torsions if hybrid_index_set.issubset(self._atom_classes['core_atoms']): _logger.debug(f"\t\thandle_periodic_torsion_forces: torsion_index {torsion_index} is a core (to custom torsion force).") torsion_indices = torsion_parameters[:4] old_index_list = [self._hybrid_to_old_map[hybr_idx] for hybr_idx in hybrid_index_list] 
old_index_list_reversed = [i for i in reversed(old_index_list)] #if we've already added these indices (they may appear >once for high periodicities) #then just continue to the next torsion. if (old_index_list in added_torsions) or (old_index_list_reversed in added_torsions): continue new_torsion_parameters_list = self._find_torsion_parameters(new_system_torsion_force, torsion_indices) for torsion_parameters in new_torsion_parameters_list: #add to the hybrid force: #the parameters at indices 3 and 4 represent theta0 and k, respectively. hybrid_force_parameters = [0.0, 0.0, 0.0,torsion_parameters[4], torsion_parameters[5], torsion_parameters[6]] self._hybrid_system_forces['core_torsion_force'].addTorsion(hybrid_index_list[0], hybrid_index_list[1], hybrid_index_list[2], hybrid_index_list[3], hybrid_force_parameters) added_torsions.append(old_index_list) added_torsions.append(old_index_list_reversed) def handle_nonbonded(self): """ """ old_system_nonbonded_force = self._old_system_forces['NonbondedForce'] new_system_nonbonded_force = self._new_system_forces['NonbondedForce'] hybrid_to_old_map = self._hybrid_to_old_map hybrid_to_new_map = self._hybrid_to_new_map # Define new global parameters for NonbondedForce self._hybrid_system_forces['standard_nonbonded_force'].addGlobalParameter('lambda_electrostatics_core', 0.0) self._hybrid_system_forces['standard_nonbonded_force'].addGlobalParameter('lambda_sterics_core', 0.0) self._hybrid_system_forces['standard_nonbonded_force'].addGlobalParameter("lambda_electrostatics_delete", 0.0) self._hybrid_system_forces['standard_nonbonded_force'].addGlobalParameter("lambda_electrostatics_insert", 0.0) #We have to loop through the particles in the system, because nonbonded force does not accept index _logger.info("\thandle_nonbonded: looping through all particles in hybrid...") for particle_index in range(self._hybrid_system.getNumParticles()): if particle_index in self._atom_classes['unique_old_atoms']: 
_logger.debug(f"\t\thandle_nonbonded: particle {particle_index} is a unique_old") #get the parameters in the old system old_index = hybrid_to_old_map[particle_index] [charge, sigma, epsilon] = old_system_nonbonded_force.getParticleParameters(old_index) #add the particle to the hybrid custom sterics and electrostatics. check_index = self._hybrid_system_forces['core_sterics_force'].addParticle([sigma, epsilon, sigma, 0.0*epsilon, 1, 0]) #turning off sterics in forward direction assert (particle_index == check_index ), "Attempting to add incorrect particle to hybrid system" # Add particle to the regular nonbonded force, but Lennard-Jones will be handled by CustomNonbondedForce check_index = self._hybrid_system_forces['standard_nonbonded_force'].addParticle(charge, sigma, 0.0*epsilon) #add charge to standard_nonbonded force
prev_input_feed, reduced_output_weights, ) futures.append(fut) elif isinstance(model, transformer.TransformerModel) or isinstance( model, char_source_transformer_model.CharSourceTransformerModel ): encoder_output = inputs[i] # store cached states, use evaluation mode model.decoder._is_incremental_eval = True model.eval() states_per_layer = 4 state_inputs = [] for i, _ in enumerate(model.decoder.layers): # (prev_key, prev_value) for self- and encoder-attention if hasattr(model.decoder, "decoder_layers_to_keep") and ( i not in model.decoder.decoder_layers_to_keep.keys() ): continue state_inputs.extend( inputs[next_state_input : next_state_input + states_per_layer] ) next_state_input += states_per_layer encoder_out = (encoder_output, None, None) # TODO(jcross) reduced_output_weights = None reduced_output_weights_per_model.append(reduced_output_weights) def forked_section( input_tokens, encoder_out, state_inputs, possible_translation_tokens, timestep, ): decoder_output = model.decoder( input_tokens, encoder_out, incremental_state=state_inputs, possible_translation_tokens=possible_translation_tokens, timestep=timestep, ) logits, attn_scores, _, attention_states = decoder_output log_probs = F.log_softmax(logits, dim=2) return log_probs, attn_scores, tuple(attention_states) fut = torch.jit._fork( forked_section, input_tokens, encoder_out, state_inputs, possible_translation_tokens, timestep, ) futures.append(fut) elif isinstance(model, levenshtein_transformer.LevenshteinTransformerModel): encoder_output = inputs[i] # store cached states, use evaluation mode model.decoder._is_incremental_eval = True model.eval() states_per_layer = 4 state_inputs = [] for _ in model.decoder.layers: # (prev_key, prev_value) for self- and encoder-attention state_inputs.extend( inputs[next_state_input : next_state_input + states_per_layer] ) next_state_input += states_per_layer encoder_out = (encoder_output, None, None) # TODO(jcross) reduced_output_weights = None 
reduced_output_weights_per_model.append(reduced_output_weights) def forked_section( input_tokens, encoder_out, state_inputs, possible_translation_tokens, timestep, ): decoder_output = model.decoder( input_tokens, encoder_out, incremental_state=state_inputs, possible_translation_tokens=possible_translation_tokens, timestep=timestep, ) logits, attn_scores, attention_states = decoder_output log_probs = F.log_softmax(logits, dim=2) return log_probs, attn_scores, tuple(attention_states) fut = torch.jit._fork( forked_section, input_tokens, encoder_out, state_inputs, possible_translation_tokens, timestep, ) futures.append(fut) elif isinstance(model, latent_var_models.LatentVarModel): encoder_output = inputs[i] # store cached states, use evaluation mode model.decoder._is_incremental_eval = True model.eval() state_inputs = [] state_inputs.extend(inputs[next_state_input : next_state_input + 3]) next_state_input += 3 for _ in list(model.decoder.decoders.values())[0].layers: # (prev_key, prev_value) for self- and encoder-attention state_inputs.extend(inputs[next_state_input : next_state_input + 4]) next_state_input += 4 encoder_out = encoder_output # TODO(jcross) reduced_output_weights = None reduced_output_weights_per_model.append(reduced_output_weights) def forked_section( input_tokens, encoder_out, state_inputs, possible_translation_tokens, timestep, ): decoder_output = model.decoder( input_tokens, encoder_out, incremental_state=state_inputs ) logits, attn_scores, _, _, attention_states = decoder_output log_probs = F.log_softmax(logits, dim=2) return log_probs, attn_scores, tuple(attention_states) fut = torch.jit._fork( forked_section, input_tokens, encoder_out, state_inputs, possible_translation_tokens, timestep, ) futures.append(fut) elif isinstance( model, hybrid_transformer_rnn.HybridTransformerRNNModel ) or isinstance(model, char_source_hybrid.CharSourceHybridModel): encoder_output = inputs[i] # store cached states, use evaluation mode 
model.decoder._is_incremental_eval = True model.eval() encoder_out = (encoder_output, None, None) num_states = (1 + model.decoder.num_layers) * 2 state_inputs = inputs[next_state_input : next_state_input + num_states] next_state_input += num_states # TODO(jcross) reduced_output_weights = None reduced_output_weights_per_model.append(reduced_output_weights) def forked_section( input_tokens, encoder_out, state_inputs, possible_translation_tokens, timestep, ): incremental_state = {} utils.set_incremental_state( model.decoder, incremental_state, "cached_state", state_inputs ) decoder_output = model.decoder( input_tokens, encoder_out, incremental_state=incremental_state, possible_translation_tokens=possible_translation_tokens, timestep=timestep, ) logits, attn_scores, _ = decoder_output log_probs = F.log_softmax(logits, dim=2) next_states = utils.get_incremental_state( model.decoder, incremental_state, "cached_state" ) return log_probs, attn_scores, tuple(next_states) fut = torch.jit._fork( forked_section, input_tokens, encoder_out, state_inputs, possible_translation_tokens, timestep, ) futures.append(fut) else: raise RuntimeError(f"Not a supported model: {type(model)}") for i, (model, fut) in enumerate(zip(self.models, futures)): if ( isinstance(model, rnn.RNNModel) or isinstance(model, rnn.DummyPyTextRNNPointerModel) or isinstance(model, char_source_model.CharSourceModel) or isinstance(model, word_prediction_model.WordPredictionModel) ): ( log_probs, attn_scores, next_hiddens, next_cells, next_input_feed, ) = torch.jit._wait(fut) for h, c in zip(next_hiddens, next_cells): state_outputs.extend([h, c]) beam_axis_per_state.extend([0, 0]) state_outputs.append(next_input_feed) beam_axis_per_state.append(0) if reduced_output_weights_per_model[i] is not None: state_outputs.extend(reduced_output_weights_per_model[i]) beam_axis_per_state.extend( [None for _ in reduced_output_weights_per_model[i]] ) elif isinstance(model, transformer.TransformerModel) or isinstance( model, 
char_source_transformer_model.CharSourceTransformerModel ): log_probs, attn_scores, attention_states = torch.jit._wait(fut) log_probs_per_model.append(log_probs) attn_weights_per_model.append(attn_scores) state_outputs.extend(attention_states) beam_axis_per_state.extend([0 for _ in attention_states]) elif isinstance(model, levenshtein_transformer.LevenshteinTransformerModel): log_probs, attn_scores, attention_states = torch.jit._wait(fut) log_probs_per_model.append(log_probs) attn_weights_per_model.append(attn_scores) state_outputs.extend(attention_states) beam_axis_per_state.extend([None for _ in attention_states]) elif isinstance(model, latent_var_models.LatentVarModel): log_probs, attn_scores, attention_states = torch.jit._wait(fut) log_probs_per_model.append(log_probs) attn_weights_per_model.append(attn_scores) state_outputs.extend(attention_states) beam_axis_per_state.extend([0 for _ in attention_states]) elif isinstance( model, hybrid_transformer_rnn.HybridTransformerRNNModel ) or isinstance(model, char_source_hybrid.CharSourceHybridModel): log_probs, attn_scores, next_states = torch.jit._wait(fut) log_probs_per_model.append(log_probs) attn_weights_per_model.append(attn_scores) state_outputs.extend(next_states) # sequence RNN states have beam along axis 1 beam_axis_per_state.extend([1 for _ in next_states[:-2]]) # encoder input projections have beam along axis 0 beam_axis_per_state.extend([0, 0]) else: raise RuntimeError(f"Not a supported model: {type(model)}") return ( log_probs_per_model, attn_weights_per_model, state_outputs, beam_axis_per_state, possible_translation_tokens, ) @classmethod def build_from_checkpoints( cls, checkpoint_filenames, src_dict_filename, dst_dict_filename, beam_size, word_reward=0, unk_reward=0, lexical_dict_paths=None, ): models, _, tgt_dict = load_models_from_checkpoints( checkpoint_filenames, src_dict_filename, dst_dict_filename, lexical_dict_paths, ) return cls( models, tgt_dict, beam_size=beam_size, word_reward=word_reward, 
unk_reward=unk_reward, ) class FakeEncoderEnsemble(torch.jit.ScriptModule): @torch.jit.script_method def forward(self, src_tokens, src_lengths) -> None: raise RuntimeError( "Called EncoderEnsemble on a BeamSearch thats not word-source" ) class FakeCharSourceEncoderEnsemble(torch.jit.ScriptModule): @torch.jit.script_method def forward(self, src_tokens, src_lengths, char_inds, word_lengths) -> None: raise RuntimeError( "Called CharSourceEncoderEnsemble on a BeamSearch thats not char-source" ) class BeamSearch(torch.jit.ScriptModule): __constants__ = ["beam_size", "is_char_source"] def __init__( self, model_list, tgt_dict, src_tokens, src_lengths, beam_size=1, word_reward=0, unk_reward=0, quantize=False, # Tracing inputs for CharSourceModel char_inds=None, word_lengths=None, ): super().__init__() self.models = model_list self.tgt_dict = tgt_dict self.beam_size = beam_size self.word_reward = word_reward self.unk_reward = unk_reward if ( isinstance(self.models[0], char_source_model.CharSourceModel) or isinstance( self.models[0], char_source_transformer_model.CharSourceTransformerModel ) or isinstance(self.models[0], char_source_hybrid.CharSourceHybridModel) ): encoder_ens = CharSourceEncoderEnsemble(self.models) else: encoder_ens = EncoderEnsemble(self.models) encoder_ens.enable_precompute_reduced_weights = True if quantize: torch.quantization.quantize_dynamic( encoder_ens, {torch.nn.Linear}, dtype=torch.qint8, inplace=True ) encoder_ens = torch.jit.quantized.quantize_rnn_cell_modules(encoder_ens) if ( isinstance(self.models[0], char_source_model.CharSourceModel) or isinstance( self.models[0], char_source_transformer_model.CharSourceTransformerModel ) or isinstance(self.models[0], char_source_hybrid.CharSourceHybridModel) ): self.is_char_source = True enc_inputs = (src_tokens, src_lengths, char_inds, word_lengths) example_encoder_outs = encoder_ens(*enc_inputs) self.encoder_ens = FakeEncoderEnsemble() self.encoder_ens_char_source = torch.jit.trace( encoder_ens, 
enc_inputs, _force_outplace=True, check_trace=False ) else: self.is_char_source = False enc_inputs = (src_tokens, src_lengths) example_encoder_outs = encoder_ens(*enc_inputs) self.encoder_ens = torch.jit.trace( encoder_ens, enc_inputs, _force_outplace=True, check_trace=False ) self.encoder_ens_char_source = FakeCharSourceEncoderEnsemble() decoder_ens = DecoderBatchedStepEnsemble( self.models, tgt_dict, beam_size, word_reward, unk_reward, tile_internal=False, ) decoder_ens.enable_precompute_reduced_weights = True if quantize: torch.quantization.quantize_dynamic( decoder_ens, {torch.nn.Linear}, dtype=torch.qint8, inplace=True ) decoder_ens = torch.jit.quantized.quantize_rnn_cell_modules(decoder_ens) decoder_ens = torch.jit.quantized.quantize_rnn_modules(decoder_ens) decoder_ens_tile = DecoderBatchedStepEnsemble( self.models, tgt_dict, beam_size, word_reward, unk_reward, tile_internal=True, ) decoder_ens_tile.enable_precompute_reduced_weights = True if quantize: torch.quantization.quantize_dynamic( decoder_ens_tile, {torch.nn.Linear}, dtype=torch.qint8, inplace=True ) decoder_ens_tile = torch.jit.quantized.quantize_rnn_cell_modules( decoder_ens_tile ) decoder_ens_tile = torch.jit.quantized.quantize_rnn_modules( decoder_ens_tile ) prev_token = torch.LongTensor([0]) prev_scores = torch.FloatTensor([0.0]) ts = torch.LongTensor([0]) _, _, _, _, *tiled_states = decoder_ens_tile( prev_token, prev_scores, ts, *example_encoder_outs ) self.decoder_ens_tile = torch.jit.trace( decoder_ens_tile, (prev_token, prev_scores, ts, *example_encoder_outs), _force_outplace=True, check_trace=False, ) self.decoder_ens = torch.jit.trace( decoder_ens, ( prev_token.repeat(self.beam_size), prev_scores.repeat(self.beam_size), ts, *tiled_states, ), _force_outplace=True, check_trace=False, ) self.input_names = [ "src_tokens", "src_lengths", "prev_token", "prev_scores", "attn_weights", "prev_hypos_indices", "num_steps", ] self.output_names = [ "all_tokens", "all_scores", "all_weights", 
"all_prev_indices", ] @torch.jit.script_method def forward( self, src_tokens: torch.Tensor, src_lengths: torch.Tensor, prev_token: torch.Tensor, prev_scores: torch.Tensor, attn_weights: torch.Tensor, prev_hypos_indices: torch.Tensor, num_steps: int, char_inds: Optional[torch.Tensor] = None, word_lengths: Optional[torch.Tensor] = None, ): if self.is_char_source: if char_inds is None or word_lengths is None: raise RuntimeError( "char_inds and word_lengths must be specified " "for char-source models" ) char_inds = torch.jit._unwrap_optional(char_inds) word_lengths = torch.jit._unwrap_optional(word_lengths) enc_states = self.encoder_ens_char_source( src_tokens, src_lengths, char_inds, word_lengths ) else: enc_states = self.encoder_ens(src_tokens, src_lengths) # enc_states ends up being optional because of the above branch, one # side returns None. We should never take the path that returns None # so we unrap the optional type here. enc_states = torch.jit._unwrap_optional(enc_states) all_tokens = prev_token.repeat(repeats=[self.beam_size]).unsqueeze(dim=0) all_scores = prev_scores.repeat(repeats=[self.beam_size]).unsqueeze(dim=0) all_weights = ( attn_weights.unsqueeze(dim=0) .repeat(repeats=[self.beam_size, 1]) .unsqueeze(dim=0) ) all_prev_indices = prev_hypos_indices.unsqueeze(dim=0) prev_token, prev_scores, prev_hypos_indices, attn_weights, *states = self.decoder_ens_tile( prev_token, prev_scores, _to_tensor(0), *enc_states # noqa ) all_tokens = torch.cat((all_tokens, prev_token.unsqueeze(dim=0)), dim=0) all_scores = torch.cat((all_scores, prev_scores.unsqueeze(dim=0)), dim=0) all_weights = torch.cat((all_weights, attn_weights.unsqueeze(dim=0)), dim=0) all_prev_indices = torch.cat( (all_prev_indices, prev_hypos_indices.unsqueeze(dim=0)), dim=0 ) for i in range(num_steps - 1): ( prev_token, prev_scores, prev_hypos_indices, attn_weights, *states, ) = self.decoder_ens( prev_token, prev_scores, _to_tensor(i + 1), *states # noqa ) all_tokens = torch.cat((all_tokens, 
prev_token.unsqueeze(dim=0)), dim=0) all_scores = torch.cat((all_scores, prev_scores.unsqueeze(dim=0)), dim=0) all_weights = torch.cat((all_weights, attn_weights.unsqueeze(dim=0)), dim=0) all_prev_indices = torch.cat( (all_prev_indices, prev_hypos_indices.unsqueeze(dim=0)), dim=0 ) return all_tokens, all_scores, all_weights, all_prev_indices @classmethod def build_from_checkpoints( cls, checkpoint_filenames, src_dict_filename, dst_dict_filename, beam_size, word_reward=0, unk_reward=0, lexical_dict_paths=None, ): length = 10 models, _, tgt_dict = load_models_from_checkpoints( checkpoint_filenames, src_dict_filename, dst_dict_filename, lexical_dict_paths, ) src_tokens = torch.LongTensor(np.ones((length, 1), dtype="int64")) src_lengths = torch.IntTensor(np.array([length], dtype="int32")) if ( isinstance(models[0], char_source_model.CharSourceModel) or isinstance( models[0], char_source_transformer_model.CharSourceTransformerModel ) or isinstance(models[0], char_source_hybrid.CharSourceHybridModel) ): word_length = 3 char_inds = torch.LongTensor( np.ones((1, length, word_length), dtype="int64") ) word_lengths = torch.IntTensor( np.array([word_length] * length, dtype="int32") ).reshape((1, length)) else: char_inds = None word_lengths = None return cls( models, tgt_dict, src_tokens, src_lengths, beam_size=beam_size, word_reward=word_reward, unk_reward=unk_reward, quantize=True, char_inds=char_inds, word_lengths=word_lengths, ) def save_to_pytorch(self, output_path): def pack(s): if hasattr(s, "_pack"): s._pack() def unpack(s): if hasattr(s, "_unpack"): s._unpack() self.apply(pack) torch.jit.save(self, output_path) self.apply(unpack) class KnownOutputDecoderStepEnsemble(nn.Module): def __init__(self, models, tgt_dict, word_reward=0, unk_reward=0): super().__init__() self.models = models self.tgt_dict = tgt_dict for i, model in enumerate(self.models): model.prepare_for_onnx_export_() self._modules[f"model_{i}"] = model self.word_reward = word_reward self.unk_reward = 
unk_reward vocab_size = len(tgt_dict.indices) self.word_rewards = torch.FloatTensor(vocab_size).fill_(word_reward) self.word_rewards[tgt_dict.eos()] = 0 self.word_rewards[tgt_dict.unk()] = word_reward + unk_reward self.vocab_size = vocab_size self.unk_token = tgt_dict.unk() self.enable_precompute_reduced_weights = False
<reponame>peter-zyj/awsCLI #!/usr/bin/env python import os, sys import time import re import socket import fcntl import struct import pexpect import yaml, hashlib ########SSH logon stuff############ default_passwd = "<PASSWORD>" prompt_firstlogin = "Are you sure you want to continue connecting \(yes/no.." # update to regex when "(yes/no/[fingerprint])?" prompt_passwd_lnx = "root@.*'s password:" prompt_passwd_ftd = "[pP]assword:" prompt_logined_lnx = "\\x1b]0;root@.*?\]#" prompt_logined_lnx_bk = "\[root@.*\]#" prompt_logined_ftd = "> " prompt_logined_ftd_root_6_7 = "root@firepower:.*?\$" prompt_logined_ftd_root_6_8 = "root@firepower:.*?#" prompt_percentage = ".*100%.*" prompt_percentage_dir = prompt_logined_ftd_root_6_7 console_passwd_prompt = "[pP]assword:" console_user_prompt = "[uU]sername:" console_usr = console_pwd = "<PASSWORD>" console_prompt = "cluster-.*?#" # fxos_passwd = "<PASSWORD>!" fxos_passwd = "<PASSWORD>!@#" fxos_user = "admin" fxos_firstlogin = "Are you sure you want to continue connecting \(yes/no.." fxos_passwd_prompt = "[pP]assword:" fxos_shell_prompt = "firepowerfxos#" fxos_lina_prompt = ">" fxos_user_prompt = "firepowerfxos login:" fxos_more_prompt = "--More--" ftd_raw_prompt = "firepower>" ftd_en_prompt = "firepower#" ftd_conf_prompt = "firepower(.*?)#" ftd_more_prompt = "<--- More --->" tnt_console_prompt = "Escape character is '^]'." OceanPrompt = [] OceanPrompt.append(fxos_shell_prompt) OceanPrompt.append(fxos_lina_prompt) OceanPrompt.append(ftd_raw_prompt) OceanPrompt.append(ftd_en_prompt) OceanPrompt.append(ftd_conf_prompt) OceanPrompt.append(fxos_user_prompt) OceanPrompt.append(fxos_passwd_prompt) asa_firstlogin = "Are you sure you want to continue connecting \(yes/no.." asa_raw = "ciscoasa>" asa_passwd = "Password:" asa_enable = "ciscoasa#" asa_config = "ciscoasa(.*?)#" asa_more = "<--- More --->" asa_question = "help improve the product? \[Y\]es, \[N\]o, \[A\]sk later:" asa_reload = "System config has been modified. Save\? 
\[Y\]es\/\[N\]o:" # asa_reload_confirm = "Proceed with reload? \[confirm\]" # asa_overwritten_confirm = "Do you want to over write? \[confirm\]" asa_confirm = "\[confirm\]" asa_copy_confirm = "\[.*?\]\?" asav_geneve_prompt = [] asav_geneve_prompt.append(asa_raw) asav_geneve_prompt.append(asa_passwd) asav_geneve_prompt.append(asa_enable) asav_geneve_prompt.append(asa_config) asav_geneve_prompt.append(asa_more) asav_geneve_prompt.append(asa_question) asav_geneve_prompt.append(asa_firstlogin) asav_geneve_prompt.append(asa_reload) asav_geneve_prompt.append(asa_confirm) asav_geneve_prompt.append(asa_copy_confirm) # asav_geneve_prompt.append(asa_overwritten_confirm) fxos_firstlogin = "Are you sure you want to continue connecting \(yes/no.." fxos_passwd_creation = "Enter new password:" fxos_passwd_confirm = "Confirm new password:" fxos_lina_prompt = "[^\w-]>" ftd_raw_prompt = "firepower>" ftd_passwd_prompt = "Password:" ftd_en_prompt = "firepower#" ftd_conf_prompt = "firepower\(.*?\)#" ftd_more_prompt = "<--- More --->" ftd_exp_admin = "admin@firepower:~\$" ftd_exp_root = "root@firepower:~#" ftdv_geneve_prompt = [] ftdv_geneve_prompt.append(fxos_lina_prompt) ftdv_geneve_prompt.append(ftd_raw_prompt) ftdv_geneve_prompt.append(ftd_passwd_prompt) ftdv_geneve_prompt.append(ftd_en_prompt) ftdv_geneve_prompt.append(ftd_conf_prompt) ftdv_geneve_prompt.append(ftd_more_prompt) ftdv_geneve_prompt.append(fxos_firstlogin) ftdv_geneve_prompt.append(fxos_passwd_creation) ftdv_geneve_prompt.append(fxos_passwd_confirm) ftdv_geneve_prompt.append(ftd_exp_admin) ftdv_geneve_prompt.append(ftd_exp_root) def force2fxos(ip, port): pass def force2asa(ip, port): pass def force2lina(ip, port): pass def go2fxos_telnet(tnt, res=None): while not res or "fxos_user_prompt" not in res: tnt.sendline("exit") tnt, res = Ocean_reply(tnt) tnt.sendline(fxos_user) tnt, res = Ocean_reply(tnt) tnt.sendline(fxos_passwd) tnt, res = Ocean_reply(tnt) if "fxos_shell_prompt" not in res: print("Error: Failed to Enter 
FXOS") return tnt, res def go2fxos(tnt, res=None, debug=False): if debug: print("enter fxos") tnt.sendline("") tnt, res, _ = Ocean_reply(tnt, debug=debug) num = 0 while "fxos_lina_prompt" not in res and "timeout" not in res: tnt.sendline("exit") tnt, res, _ = Ocean_reply(tnt, debug=debug) if num > 10: print_color("[ERROR][go2fxos]: 'EXIT' exceed the limit","red") sys.exit(1) num += 1 #weird behavor to avoid unnecessary exit for go2expert tnt.sendline("") tnt, res, _ = Ocean_reply(tnt, debug=debug) if debug: print("enter fxos done") return tnt, res def go2ftd(tnt, res=None, debug=False): if debug: print("enter ftd") tnt, res = go2fxos(tnt, debug=debug) tnt.sendline("system support diagnostic-cli") tnt, result, _ = Ocean_reply(tnt, debug=debug) tnt.sendline("") Ocean_reply(tnt, debug=debug) if debug: print("enter ftd done") return tnt, res def go2expert(tnt, res=None, debug=False): if debug: print("enter expert") tnt, res = go2fxos(tnt, debug=debug) tnt.sendline("expert") tnt, result, _ = Ocean_reply(tnt, debug=debug) tnt.sendline("") Ocean_reply(tnt, debug=debug) tnt.sendline("sudo su -") tnt, result, _ = Ocean_reply(tnt, debug=debug) tnt.sendline("") Ocean_reply(tnt, debug=debug) if debug: print("enter expert done") return tnt, res def go2lina(tnt, res=None): # exit to fxos # enter lina tnt, res = go2fxos(tnt, res) tnt.sendline("connect ftd") tnt, res = Ocean_reply(tnt) if "fxos_lina_prompt" not in res: print("Error: Failed to Enter Lina") return tnt, res def go2asa(tnt, res=None): # exit to fxos # enter lina # enter asa go2lina(tnt, res) tnt.sendline("system support diagnostic-cli") tnt, res = Ocean_reply(tnt) if "ftd_raw_prompt" not in res: print("Error: Failed to Enter ASA:ftd_raw_prompt") tnt.sendline("en") tnt, res = Ocean_reply(tnt) tnt.sendline("") tnt, res = Ocean_reply(tnt) if "ftd_en_prompt" not in res: print("Error: Failed to Enter ASA:ftd_en_prompt") return tnt, res def console_clear(ip, port): tnt = pexpect.spawn('telnet {ip} {port}'.format(ip=ip, 
port=port)) tnt, result = Console_reply(tnt) # print(result) tnt.sendline("clear line 27") tnt.expect(["\[confirm\]"], timeout=5) tnt.sendline("") tnt.expect(["\[OK\]"], timeout=5) tnt.close() def Conn_reply(tnt): try: result = tnt.expect([pexpect.TIMEOUT, tnt_console_prompt], timeout=5) tnt.logfile = None if result == 0: res = "0:Conn_reply: failure(timeout)!" return tnt, res elif result == 1: tnt.sendline("\n") return Ocean_reply(tnt) except Exception as e: print(e) res = "Error:Conn_reply: failure(Exception)" return None, res def Geneve_load(tnt, fileName, timeout=5, debug=False): if not os.path.exists(fileName): return None with open(fileName, "r") as f: for line in f: line = line.strip() # print("debug::", line) tnt.sendline(line) tnt, result, cont = Geneve_reply(tnt, debug=debug, timeout=timeout) def Ocean_load(tnt, fileName, timeout=5, debug=False): if not os.path.exists(fileName): return None with open(fileName, "r") as f: for line in f: line = line.strip() # print("debug::", line) tnt.sendline(line) tnt, result, cont = Ocean_reply(tnt, debug=debug, timeout=timeout) def Geneve_reply(tnt, timeout=120, debug=False): try: start_time = time.time() result = tnt.expect([pexpect.TIMEOUT] + asav_geneve_prompt, timeout=timeout) tnt.logfile = None if result == 0: res = "0:Geneve_reply: failure(timeout)!" content = tnt.before.decode() #content = str(tnt.before)+str(tnt.after) if debug: print(res); print(content) elif result == 1: res = "1:Geneve_reply: success(asa_raw)!" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() #content = str(tnt.before)+str(tnt.after) elif result == 2: res = "2:Geneve_reply: success(asa_passwd)!" 
end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() #content = str(tnt.before)+str(tnt.after) tnt.sendline("cisco") _, _, tmp_content = Geneve_reply(tnt, timeout=timeout, debug=debug) content += "\n" + tmp_content elif result == 3: res = "3:Geneve_reply: success(asa_enable)!" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() #content = str(tnt.before)+str(tnt.after) elif result == 4: res = "4:Geneve_reply: success(asa_config)!" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() #content = str(tnt.before)+str(tnt.after) elif result == 5: res = "5:Geneve_reply: success(asa_more)!" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() #content = str(tnt.before)+str(tnt.after) tnt.sendline(" ") _, _, tmp_content = Geneve_reply(tnt, timeout=timeout, debug=debug) content += "\n" + tmp_content elif result == 6: res = "6:Geneve_reply: success(asa_question)!" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() #content = str(tnt.before)+str(tnt.after) tnt.sendline("n") _, _, tmp_content = Geneve_reply(tnt, timeout=timeout, debug=debug) content += "\n" + tmp_content elif result == 7: res = "7:Geneve_reply: success(asa_firstlogin)!" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() #content = str(tnt.before)+str(tnt.after) tnt.sendline("yes") _, _, tmp_content = Geneve_reply(tnt, timeout=timeout, debug=debug) content += "\n" + tmp_content elif result == 8: res = "8:Geneve_reply: success(asa_reload)!" 
end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() #content = str(tnt.before)+str(tnt.after) tnt.sendline("Y") _, _, tmp_content = Geneve_reply(tnt, timeout=timeout, debug=debug) content += "\n" + tmp_content elif result == 9: res = "9:Geneve_reply: success(asa_confirm)!" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() #content = str(tnt.before)+str(tnt.after) tnt.sendline("") _, _, tmp_content = Geneve_reply(tnt, timeout=timeout, debug=debug) content += "\n" + tmp_content elif result == 10: res = "10:Geneve_reply: success(asa_copy_confirm)!" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() #content = str(tnt.before)+str(tnt.after) tnt.sendline("") _, _, tmp_content = Geneve_reply(tnt, timeout=timeout, debug=debug) content += "\n" + tmp_content else: res = "{result}:Geneve_reply: failure(unknown)".format(result=result) content = None end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") return tnt, res, content except Exception as e: print(e) res = "Error:Geneve_reply: failure(Exception)" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") return None, res, None def Ocean_reply(tnt, timeout=30, debug=False): try: start_time = time.time() result = tnt.expect([pexpect.TIMEOUT] + ftdv_geneve_prompt, timeout=timeout) tnt.logfile = None if result == 0: res = "0:Ocean_reply: failure(timeout)!" content = tnt.before.decode() if debug: print(res);print(content) elif result == 1: res = "1:Ocean_reply: success(fxos_lina_prompt)!" 
end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() elif result == 2: res = "2:Ocean_reply: success(ftd_raw_prompt)!" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() elif result == 3: res = "3:Ocean_reply: success(ftd_passwd_prompt)!" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() if "Sorry, try again" in content: tnt.sendline("Cisco123!@#") #expert mode else: tnt.sendline("") #ftd en mode _, _, tmp_content = Ocean_reply(tnt, timeout=timeout, debug=debug) content += "\n" + tmp_content elif result == 4: res = "4:Ocean_reply: success(ftd_en_prompt)!" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() elif result == 5: res = "5:Ocean_reply: success(ftd_conf_prompt)!" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() elif result == 6: res = "6:Ocean_reply: success(ftd_more_prompt)!" end_time = time.time() gap = round(end_time - start_time,2) if debug: print(f"{res} # cost {gap}s") content = tnt.before.decode()+tnt.after.decode() tnt.sendline(" ") _, _,
clustal, stockholm, phylip and many others. The full list of supported fileformat arguments is `provided here <https://biopython.org/wiki/AlignIO>`_). Parameters ---------- fileformat : str, default='fasta' text format requested to_file : str | TextIO, optional filename or buffer to write into. If not specified, the output is returned instead Returns ------- str String representation of alignment in the requested format Examples -------- >>> ali=Alignment([ ('seq1 this is first', 'ATTCG-'), ('seq2 this is 2nd' , '--TTGG'), ('seq3', 'ATTCG-')]) >>> print(ali.write('phylip')) 3 6 seq1 ATTCG- seq2 --TTGG seq3 ATTCG- <BLANKLINE> """ if not to_file is None: with (to_file if not isinstance(to_file, str) else open(to_file, 'w')) as fh: fh.write( self.write(fileformat=fileformat) ) else: if fileformat=='fasta': return self.fasta() else: return format(self.to_biopython(), fileformat) def __add__(self, other): return self.concatenate(other) def concatenate(self, other): """Concatenate two alignments, i.e., add their sequences one next to the other The two alignments must have the same names in the same order or an AlignmentError exception is raised. The sequence descriptions in returned alignment are taken from self Parameters ---------- other : Alignment or str alignment that will be concatenated to the right of the self in the returned Alignment. 
If a string is provided, this same sequence is added to each sequence in self Returns ------- Alignment alignment with same names as inputs, and sequences resulting from their concatenation Examples -------- >>> ali= Alignment([ ('seq1 first', 'ATTCG-'), ('seq2 this is 2nd' , '--TTGG'), ('seq3', 'ATTCG-')]) >>> ali2=Alignment([ ('seq1 first', 'TTGC-TAG'), ('seq2 this is 2nd' , '-ATGGGGC'), ('seq3', 'AATCGGCC')]) >>> ali.concatenate(ali2) # Alignment of 3 sequences and 14 positions ATTCG-TTGC-TAG seq1 --TTGG-ATGGGGC seq2 ATTCG-AATCGGCC seq3 <BLANKLINE> Note that descriptions in the second alignment are ignored: >>> ali3= Alignment([ ('seq1 this desc is ignored', 'TTGC-TAG'), ('seq2' , '-ATGGGGC'), ('seq3 this also', 'AATCGGCC')]) >>> print( ali.concatenate(ali3).fasta() ) >seq1 first ATTCG-TTGC-TAG >seq2 this is 2nd --TTGG-ATGGGGC >seq3 ATTCG-AATCGGCC <BLANKLINE> """ if type(other) is str: a=Alignment() for name, seq in self: a.add_seq(name, seq + other, desc=self.get_desc(name)) return a else: if self.names() != other.names(): raise AlignmentError(f'concatenate ERROR the two alignments must have the same sequence names!') a=Alignment() for name, seq in self: a.add_seq(name, seq + other.get_seq(name), desc=self.get_desc(name)) return a def copy(self): """Returns a copy of the alignment Returns ------- Alignment copy of the self alignment """ return self.__class__(self) def remove_by_name(self, *names): """Remove one or more sequences in the alignment by name in-place. Note that the modification is done in place. To obtain a new object instead, see examples below. 
Parameters ---------- *names : tuple name or names of sequences to be removed from the alignment Returns ------- None None Examples -------- >>> ali= Alignment([ ('seq1 first', 'ATTCG-'), ('seq2 this is 2nd' , '--TTGG'), ('seq3', 'ATTCG-')]) >>> ali.remove_by_name('seq1') >>> ali # Alignment of 2 sequences and 6 positions --TTGG seq2 ATTCG- seq3 <BLANKLINE> >>> ali.remove_by_name('seq2', 'seq3') >>> ali # Empty alignment To return a new alignment without certain names, do not use this function. Instead, use indexing by rows: >>> ali= Alignment([ ('seq1 first', 'ATTCG-'), ('seq2 this is 2nd' , '--TTGG'), ('seq3', 'ATTCG-')]) >>> names_to_omit=set( ['seq2'] ) >>> ali[ [n for n in ali.names() if not n in names_to_omit], :] # Alignment of 2 sequences and 6 positions ATTCG- seq1 ATTCG- seq3 <BLANKLINE> See Also -------- remove_by_index """ for name in names: if not name in self._seqs: raise AlignmentError(f'remove_by_seq ERROR alignment does not have {name}') del self._seqs[name] del self._desc[name] s=set(names) for i in range(len( self._ord )-1, -1, -1): if self._ord[i] in s: self._ord.pop(i) def remove_by_index(self, *seqindices): """Remove one or more sequences in the alignment by their index, in-place. The input indices refer to the position of the sequence in the alignment, i.e. their row number. Note that the modification is done in place. To obtain a new object instead, see examples below. Parameters ---------- *seqindices : tuple index or indices of sequences to be removed from the alignment Returns ------- None None Examples -------- >>> ali= Alignment([ ('seq1 first', 'ATTCG-'), ('seq2 this is 2nd' , '--TTGG'), ('seq3', 'ATTCG-')]) >>> ali.remove_by_index(0) >>> ali # Alignment of 2 sequences and 6 positions --TTGG seq2 ATTCG- seq3 <BLANKLINE> To return a new alignment without certain sequences, do not use this function. 
Instead, use indexing by rows: >>> ali= Alignment([ ('seq1 first', 'ATTCG-'), ('seq2 this is 2nd' , '--TTGG'), ('seq3', 'ATTCG-')]) >>> indices_to_omit=set( [0, 2] ) >>> ali[ [n for i,n in enumerate(ali.names()) if not i in indices_to_omit], :] # Alignment of 1 sequences and 6 positions --TTGG seq2 <BLANKLINE> See Also -------- remove_by_name """ for i in sorted(seqindices, reverse=True): name=self._ord.pop(i) del self._seqs[name] del self._desc[name] def remove_empty_seqs(self, inplace=True): """Remove all sequences which are entirely made of gaps or that are empty. By default, removal is done in place. Parameters ---------- inplace : bool, default:True whether the removal should be done in place. If not, a new Alignment is returned instead Returns ------- None or Alignment If inplace==True, None is returned; otherwise, a new Alignment without empty sequences is returned Examples -------- >>> ali= Alignment([ ('seq1 first', 'ATTCG-'), ('seq2 this is 2nd' , '--TTGG'), ('seq3', '------')]) >>> ali.remove_empty_seqs() >>> ali # Alignment of 2 sequences and 6 positions ATTCG- seq1 --TTGG seq2 <BLANKLINE> See Also -------- trim_gaps, remove_by_name, remove_by_index """ # boolean array, True when all gaps selector=np.char.equal( np.array( [np.array(seq, dtype=np.str_) for name, seq in self], dtype=np.str_ ), '-'*self.ali_length()) if inplace: empty_seq_names=np.array(self.names())[ selector ] self.remove_by_name(*empty_seq_names) else: return self[np.array(self.names())[~selector],:] # def pop(self, index): # name=self._ord.pop(i) # seq=self._seqs[name] # desc=self._desc[name] # return( name, seq, desc ) def to_biopython(self): """Returns a copy of the alignment as a Bio.Align.MultipleSeqAlignment object The SeqRecord instances in the returned MultipleSeqAlignment has their id and name attributes set to sequence names, and also possess the description attribute. 
Returns ------- MultipleSeqAlignment Alignment in biopython format (Bio.Align.MultipleSeqAlignment) See also -------- to_numpy, to_pandas """ return MultipleSeqAlignment( [SeqRecord( Seq(seq), id=name, name=name, description=self.get_desc(name) ) for name, seq in self]) @classmethod def from_numpy(cls, nparray, names, descriptions=None): """Class method to instance an Alignment object from a numpy array. Parameters ---------- nparray : np.ndarray analogous to object returned by Alignment.to_numpy(), it must have one row per sequence, and one column per alignment position. Its dtype must be is np.str\_ names : list of str ordered list of sequence names (i.e. identifiers) descriptions : list of str, optional ordered list of sequence description. If not provided, all descriptions are set to '' Returns ------- Alignment new alignment object See also -------- to_numpy """ sequences=np.apply_along_axis( ''.join, 1, nparray) out=Alignment() for i, n in enumerate(names): out.add_seq(n, sequences[i], desc='' if descriptions is None else descriptions[i]) return out @lru_cache(maxsize=max_cache_size) def to_numpy(self): """Returns a numpy 2-D array representation of the alignment, useful for vectorized sequence methods Returns ------- np.ndarray The returned array has one row per sequence and one column per alignment position. Each value is a single character. The dtype is np.str\_ Note that rows are not indexed by sequence names, just by their order index Examples -------- >>> ali= Alignment([ ('seq1 first', 'ATTCG-'), ('seq2 this is 2nd' , '--TTGG'), ('seq3', '--TT--')]) >>> print(ali.to_numpy()) [['A' 'T' 'T' 'C' 'G' '-'] ['-' '-' 'T' 'T' 'G' 'G'] ['-' '-' 'T' 'T' '-' '-']] See Also -------- to_biopython, to_pandas Warning ------ This function is cached for best performance. Thus, do not directly modify the returned object. The hash key for caching is derived from sequences only: names are not considered. 
""" return np.array( [np.array(list(seq), dtype=np.str_) for name, seq in self], dtype=np.str_ ) def to_pandas(self, use_names=False): """Returns a pandas DataFrame representation of the alignment Parameters ---------- use_names : bool, optional Normally, the returned DataFrame has a simply RangeIndex as index. Specify this to instead use sequence names as the index. Returns ------- pd.DataFrame The returned
would mean # that this filter would do nothing, so assume that this # is really a configuration error. assert items_matching, 'rank_features: missing or empty item match dict' assert rank_key, 'rank_features: missing or empty rank key' if zoom < start_zoom: return None layer = _find_layer(feature_layers, source_layer) if layer is None: return None count = 0 for shape, props, fid in layer['features']: if (_match_props(props, items_matching) and unpadded_bounds_shp.intersects(shape)): count += 1 props[rank_key] = count return layer def normalize_aerialways(shape, props, fid, zoom): aerialway = props.get('aerialway') # normalise cableway, apparently a deprecated # value. if aerialway == 'cableway': props['aerialway'] = 'zip_line' # 'yes' is a pretty unhelpful value, so normalise # to a slightly more meaningful 'unknown', which # is also a commonly-used value. if aerialway == 'yes': props['aerialway'] = 'unknown' return shape, props, fid def numeric_min_filter(ctx): """ Keep only features which have properties equal or greater than the configured minima. These are in a dict per zoom like this: { 15: { 'area': 1000 }, 16: { 'area': 2000 } } This would mean that at zooms 15 and 16, the filter was active. At other zooms it would do nothing. Multiple filters can be given for a single zoom. The `mode` parameter can be set to 'any' to require that only one of the filters needs to match, or any other value to use the default 'all', which requires all filters to match. """ feature_layers = ctx.feature_layers zoom = ctx.nominal_zoom source_layer = ctx.params.get('source_layer') assert source_layer, 'rank_features: missing source layer' filters = ctx.params.get('filters') mode = ctx.params.get('mode') # assume missing filter is a config error. assert filters, 'numeric_min_filter: missing or empty filters dict' # get the minimum filters for this zoom, and return if # there are none to apply. 
minima = filters.get(zoom) if not minima: return None layer = _find_layer(feature_layers, source_layer) if layer is None: return None # choose whether all minima have to be met, or just # one of them. aggregate_func = all if mode == 'any': aggregate_func = any new_features = [] for shape, props, fid in layer['features']: keep = [] for prop, min_val in minima.iteritems(): val = props.get(prop) keep.append(val >= min_val) if aggregate_func(keep): new_features.append((shape, props, fid)) layer['features'] = new_features return layer def copy_features(ctx): """ Copy features matching _both_ the `where` selection and the `geometry_types` list to another layer. If the target layer doesn't exist, it is created. """ feature_layers = ctx.feature_layers source_layer = ctx.params.get('source_layer') target_layer = ctx.params.get('target_layer') where = ctx.params.get('where') geometry_types = ctx.params.get('geometry_types') assert source_layer, 'copy_features: source layer not configured' assert target_layer, 'copy_features: target layer not configured' assert where, \ ('copy_features: you must specify how to match features in the where ' 'parameter') assert geometry_types, \ ('copy_features: you must specify at least one type of geometry in ' 'geometry_types') src_layer = _find_layer(feature_layers, source_layer) if src_layer is None: return None tgt_layer = _find_layer(feature_layers, target_layer) if tgt_layer is None: # create target layer if it doesn't already exist. tgt_layer_datum = src_layer['layer_datum'].copy() tgt_layer_datum['name'] = target_layer tgt_layer = src_layer.copy() tgt_layer['name'] = target_layer tgt_layer['features'] = [] tgt_layer['layer_datum'] = tgt_layer_datum new_features = [] for feature in src_layer['features']: shape, props, fid = feature if _match_props(props, where): # need to deep copy, otherwise we could have some # unintended side effects if either layer is # mutated later on. 
            shape_copy = shape.__class__(shape)
            new_features.append((shape_copy, props.copy(), fid))

    tgt_layer['features'].extend(new_features)
    return tgt_layer


def make_representative_point(shape, properties, fid, zoom):
    """
    Replaces the geometry of each feature with its representative point.

    This is a point which should be within the interior of the geometry,
    which can be important for labelling concave or doughnut-shaped
    polygons.
    """
    # prefer a pre-computed label placement point when one is present on
    # the feature.
    label_placement_wkb = properties.get('mz_label_placement', None)
    if label_placement_wkb:
        shape = shapely.wkb.loads(label_placement_wkb)
    else:
        shape = shape.representative_point()

    return shape, properties, fid


def add_iata_code_to_airports(shape, properties, fid, zoom):
    """
    If the feature is an airport, and it has a 3-character IATA code in
    its tags, then move that code to its properties.
    """
    kind = properties.get('kind')
    if kind not in ('aerodrome', 'airport'):
        return shape, properties, fid

    tags = properties.get('tags')
    if not tags:
        return shape, properties, fid

    iata_code = tags.get('iata')
    if not iata_code:
        return shape, properties, fid

    # IATA codes should be uppercase, and most are, but there
    # might be some in lowercase, so just normalise to upper
    # here.
    iata_code = iata_code.upper()
    # only copy codes matching the module-level validity pattern.
    if iata_short_code_pattern.match(iata_code):
        properties['iata'] = iata_code

    return shape, properties, fid


def add_uic_ref(shape, properties, fid, zoom):
    """
    If the feature has a valid uic_ref tag (7 integers), then move it
    to its properties.
    """
    tags = properties.get('tags')
    if not tags:
        return shape, properties, fid

    uic_ref = tags.get('uic_ref')
    if not uic_ref:
        return shape, properties, fid

    # a valid uic_ref is exactly seven characters which parse as an int.
    uic_ref = uic_ref.strip()
    if len(uic_ref) != 7:
        return shape, properties, fid

    try:
        uic_ref_int = int(uic_ref)
    except ValueError:
        return shape, properties, fid
    else:
        properties['uic_ref'] = uic_ref_int

    return shape, properties, fid


def _freeze(thing):
    """
    Freezes something to a hashable item.
""" if isinstance(thing, dict): return frozenset([(_freeze(k), _freeze(v)) for k, v in thing.items()]) elif isinstance(thing, list): return tuple([_freeze(i) for i in thing]) return thing def _thaw(thing): """ Reverse of the freeze operation. """ if isinstance(thing, frozenset): return dict([_thaw(i) for i in thing]) elif isinstance(thing, tuple): return list([_thaw(i) for i in thing]) return thing def quantize_val(val, step): # special case: if val is very small, we don't want it rounding to zero, so # round the smallest values up to the first step. if val < step: return int(step) result = int(step * round(val / float(step))) return result def quantize_height_round_nearest_5_meters(height): return quantize_val(height, 5) def quantize_height_round_nearest_10_meters(height): return quantize_val(height, 10) def quantize_height_round_nearest_20_meters(height): return quantize_val(height, 20) def quantize_height_round_nearest_meter(height): return round(height) def _merge_lines(linestring_shapes, _unused_tolerance): list_of_linestrings = [] for shape in linestring_shapes: list_of_linestrings.extend(_flatten_geoms(shape)) # if the list of linestrings is empty, return None. this avoids generating # an empty GeometryCollection, which causes problems further down the line, # usually while formatting the tile. if not list_of_linestrings: return [] multi = MultiLineString(list_of_linestrings) result = _linemerge(multi) return [result] def _drop_small_inners_multi(shape, area_tolerance): """ Drop inner rings (holes) of the given shape which are smaller than the area tolerance. The shape must be either a Polygon or MultiPolygon. Returns a shape which may be empty. 
""" from shapely.geometry import MultiPolygon if shape.geom_type == 'Polygon': shape = _drop_small_inners(shape, area_tolerance) elif shape.geom_type == 'MultiPolygon': multi = [] for poly in shape: new_poly = _drop_small_inners(poly, area_tolerance) if not new_poly.is_empty: multi.append(new_poly) shape = MultiPolygon(multi) else: shape = MultiPolygon([]) return shape def _drop_small_outers_multi(shape, area_tolerance): """ Drop individual polygons which are smaller than the area tolerance. Input can be a single Polygon or MultiPolygon, in which case each Polygon within the MultiPolygon will be compared to the area tolerance individually. Returns a shape, which may be empty. """ from shapely.geometry import MultiPolygon if shape.geom_type == 'Polygon': if shape.area < area_tolerance: shape = MultiPolygon([]) elif shape.geom_type == 'MultiPolygon': multi = [] for poly in shape: if poly.area >= area_tolerance: multi.append(poly) shape = MultiPolygon(multi) else: shape = MultiPolygon([]) return shape def _merge_polygons(polygon_shapes, tolerance): """ Merge a list of polygons together into a single shape. Returns list of shapes, which might be empty. """ list_of_polys = [] for shape in polygon_shapes: list_of_polys.extend(_flatten_geoms(shape)) # if the list of polygons is empty, return None. this avoids generating an # empty GeometryCollection, which causes problems further down the line, # usually while formatting the tile. if not list_of_polys: return [] # first, try to merge the polygons as they are. try: result = shapely.ops.unary_union(list_of_polys) return [result] except ValueError: pass # however, this can lead to numerical instability where polygons _almost_ # touch, so sometimes buffering them outwards a little bit can help. try: from shapely.geometry import JOIN_STYLE # don't buffer by the full pixel, instead choose a smaller value that # shouldn't be noticable. 
buffer_size = tolerance / 16.0 list_of_buffered = [ p.buffer(buffer_size, join_style=JOIN_STYLE.mitre, mitre_limit=1.5) for p in list_of_polys ] result = shapely.ops.unary_union(list_of_buffered) return [result] except ValueError: pass # ultimately, if it's not possible to merge them then bail. # TODO: when we get a logger in here, let's log a big FAIL message. return [] def _merge_polygons_with_buffer(polygon_shapes, tolerance): """ Merges polygons together
(1 + w * 1j * t_values[1])) + (R3 / (1 + w * 1j * t_values[2])) + (R4 / (1 + w * 1j * t_values[3])) + (R5 / (1 + w * 1j * t_values[4])) + (R6 / (1 + w * 1j * t_values[5])) + (R7 / (1 + w * 1j * t_values[6])) + (R8 / (1 + w * 1j * t_values[7])) + (R9 / (1 + w * 1j * t_values[8])) + (R10 / (1 + w * 1j * t_values[9])) + (R11 / (1 + w * 1j * t_values[10])) + (R12 / (1 + w * 1j * t_values[11])) + (R13 / (1 + w * 1j * t_values[12])) + (R14 / (1 + w * 1j * t_values[13])) + (R15 / (1 + w * 1j * t_values[14])) + (R16 / (1 + w * 1j * t_values[15])) + (R17 / (1 + w * 1j * t_values[16])) + (R18 / (1 + w * 1j * t_values[17])) + (R19 / (1 + w * 1j * t_values[18])) + (R20 / (1 + w * 1j * t_values[19])) + (R21 / (1 + w * 1j * t_values[20])) + (R22 / (1 + w * 1j * t_values[21])) + (R23 / (1 + w * 1j * t_values[22])) + (R24 / (1 + w * 1j * t_values[23])) + (R25 / (1 + w * 1j * t_values[24])) + (R26 / (1 + w * 1j * t_values[25])) + (R27 / (1 + w * 1j * t_values[26])) + (R28 / (1 + w * 1j * t_values[27])) + (R29 / (1 + w * 1j * t_values[28])) + (R30 / (1 + w * 1j * t_values[29])) + (R31 / (1 + w * 1j * t_values[30])) + (R32 / (1 + w * 1j * t_values[31])) + (R33 / (1 + w * 1j * t_values[32])) + (R34 / (1 + w * 1j * t_values[33])) + (R35 / (1 + w * 1j * t_values[34])) + (R36 / (1 + w * 1j * t_values[35])) + (R37 / (1 + w * 1j * t_values[36])) + (R38 / (1 + w * 1j * t_values[37])) + (R39 / (1 + w * 1j * t_values[38])) + (R40 / (1 + w * 1j * t_values[39])) + (R41 / (1 + w * 1j * t_values[40])) + (R42 / (1 + w * 1j * t_values[41])) + (R43 / (1 + w * 1j * t_values[42])) + (R44 / (1 + w * 1j * t_values[43])) + (R45 / (1 + w * 1j * t_values[44])) + (R46 / (1 + w * 1j * t_values[45])) + (R47 / (1 + w * 1j * t_values[46])) + (R48 / (1 + w * 1j * t_values[47])) + (R49 / (1 + w * 1j * t_values[48])) + (R50 / (1 + w * 1j * t_values[49])) + (R51 / (1 + w * 1j * t_values[50])) + (R52 / (1 + w * 1j * t_values[51])) + (R53 / (1 + w * 1j * t_values[52])) + (R54 / (1 + w * 1j * t_values[53])) + (R55 / 
(1 + w * 1j * t_values[54])) + (R56 / (1 + w * 1j * t_values[55])) + (R57 / (1 + w * 1j * t_values[56])) + (R58 / (1 + w * 1j * t_values[57])) + (R59 / (1 + w * 1j * t_values[58])) + (R60 / (1 + w * 1j * t_values[59])) + (R61 / (1 + w * 1j * t_values[60])) + (R62 / (1 + w * 1j * t_values[61])) + (R63 / (1 + w * 1j * t_values[62])) + (R64 / (1 + w * 1j * t_values[63])) + (R65 / (1 + w * 1j * t_values[64])) + (R66 / (1 + w * 1j * t_values[65])) + (R67 / (1 + w * 1j * t_values[66])) + (R68 / (1 + w * 1j * t_values[67])) + (R69 / (1 + w * 1j * t_values[68])) + (R70 / (1 + w * 1j * t_values[69])) + (R71 / (1 + w * 1j * t_values[70])) + (R72 / (1 + w * 1j * t_values[71])) + (R73 / (1 + w * 1j * t_values[72])) + (R74 / (1 + w * 1j * t_values[73])) + (R75 / (1 + w * 1j * t_values[74])) ) def KK_RC76_fit(params, w, t_values): """ Kramers-Kronig Function: -RC- <NAME> (<EMAIL> / <EMAIL>) """ Rs = params["Rs"] R1 = params["R1"] R2 = params["R2"] R3 = params["R3"] R4 = params["R4"] R5 = params["R5"] R6 = params["R6"] R7 = params["R7"] R8 = params["R8"] R9 = params["R9"] R10 = params["R10"] R11 = params["R11"] R12 = params["R12"] R13 = params["R13"] R14 = params["R14"] R15 = params["R15"] R16 = params["R16"] R17 = params["R17"] R18 = params["R18"] R19 = params["R19"] R20 = params["R20"] R21 = params["R21"] R22 = params["R22"] R23 = params["R23"] R24 = params["R24"] R25 = params["R25"] R26 = params["R26"] R27 = params["R27"] R28 = params["R28"] R29 = params["R29"] R30 = params["R30"] R31 = params["R31"] R32 = params["R32"] R33 = params["R33"] R34 = params["R34"] R35 = params["R35"] R36 = params["R36"] R37 = params["R37"] R38 = params["R38"] R39 = params["R39"] R40 = params["R40"] R41 = params["R41"] R42 = params["R42"] R43 = params["R43"] R44 = params["R44"] R45 = params["R45"] R46 = params["R46"] R47 = params["R47"] R48 = params["R48"] R49 = params["R49"] R50 = params["R50"] R51 = params["R51"] R52 = params["R52"] R53 = params["R53"] R54 = params["R54"] R55 = params["R55"] R56 = 
params["R56"] R57 = params["R57"] R58 = params["R58"] R59 = params["R59"] R60 = params["R60"] R61 = params["R61"] R62 = params["R62"] R63 = params["R63"] R64 = params["R64"] R65 = params["R65"] R66 = params["R66"] R67 = params["R67"] R68 = params["R68"] R69 = params["R69"] R70 = params["R70"] R71 = params["R71"] R72 = params["R72"] R73 = params["R73"] R74 = params["R74"] R75 = params["R75"] R76 = params["R76"] return ( Rs + (R1 / (1 + w * 1j * t_values[0])) + (R2 / (1 + w * 1j * t_values[1])) + (R3 / (1 + w * 1j * t_values[2])) + (R4 / (1 + w * 1j * t_values[3])) + (R5 / (1 + w * 1j * t_values[4])) + (R6 / (1 + w * 1j * t_values[5])) + (R7 / (1 + w * 1j * t_values[6])) + (R8 / (1 + w * 1j * t_values[7])) + (R9 / (1 + w * 1j * t_values[8])) + (R10 / (1 + w * 1j * t_values[9])) + (R11 / (1 + w * 1j * t_values[10])) + (R12 / (1 + w * 1j * t_values[11])) + (R13 / (1 + w * 1j * t_values[12])) + (R14 / (1 + w * 1j * t_values[13])) + (R15 / (1 + w * 1j * t_values[14])) + (R16 / (1 + w * 1j * t_values[15])) + (R17 / (1 + w * 1j * t_values[16])) + (R18 / (1 + w * 1j * t_values[17])) + (R19 / (1 + w * 1j * t_values[18])) + (R20 / (1 + w * 1j * t_values[19])) + (R21 / (1 + w * 1j * t_values[20])) + (R22 / (1 + w * 1j * t_values[21])) + (R23 / (1 + w * 1j * t_values[22])) + (R24 / (1 + w * 1j * t_values[23])) + (R25 / (1 + w * 1j * t_values[24])) + (R26 / (1 + w * 1j * t_values[25])) + (R27 / (1 + w * 1j * t_values[26])) + (R28 / (1 + w * 1j * t_values[27])) + (R29 / (1 + w * 1j * t_values[28])) + (R30 / (1 + w
+ m.b214 <= 1) m.c159 = Constraint(expr= - 0.9*m.x71 + m.x95 + m.b215 <= 1) m.c160 = Constraint(expr= - 0.9*m.x72 + m.x96 + m.b216 <= 1) m.c161 = Constraint(expr= - 0.9*m.x73 + m.x97 + m.b217 <= 1) m.c162 = Constraint(expr= - 0.9*m.x70 + m.x94 - m.b214 >= -1) m.c163 = Constraint(expr= - 0.9*m.x71 + m.x95 - m.b215 >= -1) m.c164 = Constraint(expr= - 0.9*m.x72 + m.x96 - m.b216 >= -1) m.c165 = Constraint(expr= - 0.9*m.x73 + m.x97 - m.b217 >= -1) m.c166 = Constraint(expr= m.x70 - 15*m.b214 <= 0) m.c167 = Constraint(expr= m.x71 - 15*m.b215 <= 0) m.c168 = Constraint(expr= m.x72 - 15*m.b216 <= 0) m.c169 = Constraint(expr= m.x73 - 15*m.b217 <= 0) m.c170 = Constraint(expr= m.x94 - 13.5*m.b214 <= 0) m.c171 = Constraint(expr= m.x95 - 13.5*m.b215 <= 0) m.c172 = Constraint(expr= m.x96 - 13.5*m.b216 <= 0) m.c173 = Constraint(expr= m.x97 - 13.5*m.b217 <= 0) m.c174 = Constraint(expr= - 0.6*m.x74 + m.x98 + m.b218 <= 1) m.c175 = Constraint(expr= - 0.6*m.x75 + m.x99 + m.b219 <= 1) m.c176 = Constraint(expr= - 0.6*m.x76 + m.x100 + m.b220 <= 1) m.c177 = Constraint(expr= - 0.6*m.x77 + m.x101 + m.b221 <= 1) m.c178 = Constraint(expr= - 0.6*m.x74 + m.x98 - m.b218 >= -1) m.c179 = Constraint(expr= - 0.6*m.x75 + m.x99 - m.b219 >= -1) m.c180 = Constraint(expr= - 0.6*m.x76 + m.x100 - m.b220 >= -1) m.c181 = Constraint(expr= - 0.6*m.x77 + m.x101 - m.b221 >= -1) m.c182 = Constraint(expr= m.x74 - 15*m.b218 <= 0) m.c183 = Constraint(expr= m.x75 - 15*m.b219 <= 0) m.c184 = Constraint(expr= m.x76 - 15*m.b220 <= 0) m.c185 = Constraint(expr= m.x77 - 15*m.b221 <= 0) m.c186 = Constraint(expr= m.x98 - 9*m.b218 <= 0) m.c187 = Constraint(expr= m.x99 - 9*m.b219 <= 0) m.c188 = Constraint(expr= m.x100 - 9*m.b220 <= 0) m.c189 = Constraint(expr= m.x101 - 9*m.b221 <= 0) m.c190 = Constraint(expr=-1.1*log(1 + m.x78) + m.x102 + m.b222 <= 1) m.c191 = Constraint(expr=-1.1*log(1 + m.x79) + m.x103 + m.b223 <= 1) m.c192 = Constraint(expr=-1.1*log(1 + m.x80) + m.x104 + m.b224 <= 1) m.c193 = Constraint(expr=-1.1*log(1 + m.x81) 
+ m.x105 + m.b225 <= 1) m.c194 = Constraint(expr= m.x78 - 15*m.b222 <= 0) m.c195 = Constraint(expr= m.x79 - 15*m.b223 <= 0) m.c196 = Constraint(expr= m.x80 - 15*m.b224 <= 0) m.c197 = Constraint(expr= m.x81 - 15*m.b225 <= 0) m.c198 = Constraint(expr= m.x102 - 3.04984759446376*m.b222 <= 0) m.c199 = Constraint(expr= m.x103 - 3.04984759446376*m.b223 <= 0) m.c200 = Constraint(expr= m.x104 - 3.04984759446376*m.b224 <= 0) m.c201 = Constraint(expr= m.x105 - 3.04984759446376*m.b225 <= 0) m.c202 = Constraint(expr= - 0.9*m.x82 + m.x146 + m.b226 <= 1) m.c203 = Constraint(expr= - 0.9*m.x83 + m.x147 + m.b227 <= 1) m.c204 = Constraint(expr= - 0.9*m.x84 + m.x148 + m.b228 <= 1) m.c205 = Constraint(expr= - 0.9*m.x85 + m.x149 + m.b229 <= 1) m.c206 = Constraint(expr= - 0.9*m.x82 + m.x146 - m.b226 >= -1) m.c207 = Constraint(expr= - 0.9*m.x83 + m.x147 - m.b227 >= -1) m.c208 = Constraint(expr= - 0.9*m.x84 + m.x148 - m.b228 >= -1) m.c209 = Constraint(expr= - 0.9*m.x85 + m.x149 - m.b229 >= -1) m.c210 = Constraint(expr= - m.x114 + m.x146 + m.b226 <= 1) m.c211 = Constraint(expr= - m.x115 + m.x147 + m.b227 <= 1) m.c212 = Constraint(expr= - m.x116 + m.x148 + m.b228 <= 1) m.c213 = Constraint(expr= - m.x117 + m.x149 + m.b229 <= 1) m.c214 = Constraint(expr= - m.x114 + m.x146 - m.b226 >= -1) m.c215 = Constraint(expr= - m.x115 + m.x147 - m.b227 >= -1) m.c216 = Constraint(expr= - m.x116 + m.x148 - m.b228 >= -1) m.c217 = Constraint(expr= - m.x117 + m.x149 - m.b229 >= -1) m.c218 = Constraint(expr= m.x82 - 1.83548069293539*m.b226 <= 0) m.c219 = Constraint(expr= m.x83 - 1.83548069293539*m.b227 <= 0) m.c220 = Constraint(expr= m.x84 - 1.83548069293539*m.b228 <= 0) m.c221 = Constraint(expr= m.x85 - 1.83548069293539*m.b229 <= 0) m.c222 = Constraint(expr= m.x114 - 20*m.b226 <= 0) m.c223 = Constraint(expr= m.x115 - 20*m.b227 <= 0) m.c224 = Constraint(expr= m.x116 - 20*m.b228 <= 0) m.c225 = Constraint(expr= m.x117 - 20*m.b229 <= 0) m.c226 = Constraint(expr= m.x146 - 20*m.b226 <= 0) m.c227 = Constraint(expr= 
m.x147 - 20*m.b227 <= 0) m.c228 = Constraint(expr= m.x148 - 20*m.b228 <= 0) m.c229 = Constraint(expr= m.x149 - 20*m.b229 <= 0) m.c230 = Constraint(expr=-log(1 + m.x86) + m.x150 + m.b230 <= 1) m.c231 = Constraint(expr=-log(1 + m.x87) + m.x151 + m.b231 <= 1) m.c232 = Constraint(expr=-log(1 + m.x88) + m.x152 + m.b232 <= 1) m.c233 = Constraint(expr=-log(1 + m.x89) + m.x153 + m.b233 <= 1) m.c234 = Constraint(expr= m.x86 - 1.32154609891348*m.b230 <= 0) m.c235 = Constraint(expr= m.x87 - 1.32154609891348*m.b231 <= 0) m.c236 = Constraint(expr= m.x88 - 1.32154609891348*m.b232 <= 0) m.c237 = Constraint(expr= m.x89 - 1.32154609891348*m.b233 <= 0) m.c238 = Constraint(expr= m.x150 - 0.842233385663186*m.b230 <= 0) m.c239 = Constraint(expr= m.x151 - 0.842233385663186*m.b231 <= 0) m.c240 = Constraint(expr= m.x152 - 0.842233385663186*m.b232 <= 0) m.c241 = Constraint(expr= m.x153 - 0.842233385663186*m.b233 <= 0) m.c242 = Constraint(expr=-0.7*log(1 + m.x106) + m.x154 + m.b234 <= 1) m.c243 = Constraint(expr=-0.7*log(1 + m.x107) + m.x155 + m.b235 <= 1) m.c244 = Constraint(expr=-0.7*log(1 + m.x108) + m.x156 + m.b236 <= 1) m.c245 = Constraint(expr=-0.7*log(1 + m.x109) + m.x157 + m.b237 <= 1) m.c246 = Constraint(expr= m.x106 - 1.26558121681553*m.b234 <= 0) m.c247 = Constraint(expr= m.x107 - 1.26558121681553*m.b235 <= 0) m.c248 = Constraint(expr= m.x108 - 1.26558121681553*m.b236 <= 0) m.c249 = Constraint(expr= m.x109 - 1.26558121681553*m.b237 <= 0) m.c250 = Constraint(expr= m.x154 - 0.572481933717686*m.b234 <= 0) m.c251 = Constraint(expr= m.x155 - 0.572481933717686*m.b235 <= 0) m.c252 = Constraint(expr= m.x156 - 0.572481933717686*m.b236 <= 0) m.c253 = Constraint(expr= m.x157 - 0.572481933717686*m.b237 <= 0) m.c254 = Constraint(expr=-0.65*log(1 + m.x110) + m.x158 + m.b238 <= 1) m.c255 = Constraint(expr=-0.65*log(1 + m.x111) + m.x159 + m.b239 <= 1) m.c256 = Constraint(expr=-0.65*log(1 + m.x112) + m.x160 + m.b240 <= 1) m.c257 = Constraint(expr=-0.65*log(1 + m.x113) + m.x161 + m.b241 <= 1) 
m.c258 = Constraint(expr=-0.65*log(1 + m.x122) + m.x158 + m.b238 <= 1) m.c259 = Constraint(expr=-0.65*log(1 + m.x123) + m.x159 + m.b239 <= 1) m.c260 = Constraint(expr=-0.65*log(1 + m.x124) + m.x160 + m.b240 <= 1) m.c261 = Constraint(expr=-0.65*log(1 + m.x125) + m.x161 + m.b241 <= 1) m.c262 = Constraint(expr= m.x110 - 1.26558121681553*m.b238 <= 0) m.c263 = Constraint(expr= m.x111 - 1.26558121681553*m.b239 <= 0) m.c264 = Constraint(expr= m.x112 - 1.26558121681553*m.b240 <= 0) m.c265 = Constraint(expr= m.x113 - 1.26558121681553*m.b241 <= 0) m.c266 = Constraint(expr= m.x122 - 33.5*m.b238 <= 0) m.c267 = Constraint(expr= m.x123 - 33.5*m.b239 <= 0) m.c268 = Constraint(expr= m.x124 - 33.5*m.b240 <= 0) m.c269 = Constraint(expr= m.x125 - 33.5*m.b241 <= 0) m.c270 = Constraint(expr= m.x158 - 2.30162356062425*m.b238 <= 0) m.c271 = Constraint(expr= m.x159 - 2.30162356062425*m.b239 <= 0) m.c272 = Constraint(expr= m.x160 - 2.30162356062425*m.b240 <= 0) m.c273 = Constraint(expr= m.x161 - 2.30162356062425*m.b241 <= 0) m.c274 = Constraint(expr= - m.x126 + m.x162 + m.b242 <= 1) m.c275 = Constraint(expr= - m.x127 + m.x163 + m.b243 <= 1) m.c276 = Constraint(expr= - m.x128 + m.x164 + m.b244 <= 1) m.c277 = Constraint(expr= - m.x129 + m.x165 + m.b245 <= 1) m.c278 = Constraint(expr= - m.x126 + m.x162 - m.b242 >= -1) m.c279 = Constraint(expr= - m.x127 + m.x163 - m.b243 >= -1) m.c280 = Constraint(expr= - m.x128 + m.x164 - m.b244 >= -1) m.c281 = Constraint(expr= - m.x129 + m.x165 - m.b245 >= -1) m.c282 = Constraint(expr= m.x126 - 9*m.b242 <= 0) m.c283 = Constraint(expr= m.x127 - 9*m.b243 <= 0) m.c284 = Constraint(expr= m.x128 - 9*m.b244 <= 0) m.c285 = Constraint(expr= m.x129 - 9*m.b245 <= 0) m.c286 = Constraint(expr= m.x162 - 9*m.b242 <= 0) m.c287 = Constraint(expr= m.x163 - 9*m.b243 <= 0) m.c288 = Constraint(expr= m.x164 - 9*m.b244 <= 0) m.c289 = Constraint(expr= m.x165 - 9*m.b245 <= 0) m.c290 = Constraint(expr= - m.x130 + m.x166 + m.b246 <= 1) m.c291 = Constraint(expr= - m.x131 + m.x167 + 
m.b247 <= 1) m.c292 = Constraint(expr= - m.x132 + m.x168 + m.b248 <= 1) m.c293 = Constraint(expr= - m.x133 + m.x169 + m.b249 <= 1) m.c294 = Constraint(expr= - m.x130 + m.x166 - m.b246 >= -1) m.c295 = Constraint(expr= - m.x131 + m.x167 - m.b247 >= -1) m.c296 = Constraint(expr= - m.x132 + m.x168 - m.b248 >= -1) m.c297 = Constraint(expr= - m.x133 + m.x169 - m.b249 >= -1) m.c298 = Constraint(expr= m.x130 - 9*m.b246 <= 0) m.c299 = Constraint(expr= m.x131 - 9*m.b247 <= 0) m.c300 = Constraint(expr= m.x132 - 9*m.b248 <= 0) m.c301 = Constraint(expr= m.x133 - 9*m.b249 <= 0) m.c302 = Constraint(expr= m.x166 - 9*m.b246 <= 0) m.c303 = Constraint(expr= m.x167 - 9*m.b247 <= 0) m.c304 = Constraint(expr= m.x168 - 9*m.b248 <= 0) m.c305 = Constraint(expr= m.x169 - 9*m.b249 <= 0) m.c306 = Constraint(expr=-0.75*log(1 + m.x134) + m.x170 + m.b250 <= 1) m.c307 = Constraint(expr=-0.75*log(1 + m.x135) + m.x171 + m.b251 <= 1) m.c308 = Constraint(expr=-0.75*log(1 + m.x136) + m.x172 + m.b252 <= 1) m.c309 = Constraint(expr=-0.75*log(1 + m.x137) + m.x173 + m.b253 <= 1) m.c310 = Constraint(expr= m.x134 - 3.04984759446376*m.b250 <=
""" This module contains functions for building and loading NMODL mechanisms Author: <NAME> (<EMAIL>) Copyright: 2012-2014 <NAME>. License: This file is part of the "NineLine" package, which is released under the MIT Licence, see LICENSE for details. """ from __future__ import absolute_import from __future__ import unicode_literals from builtins import next, str import os.path import tempfile import platform import re import uuid from itertools import chain import subprocess as sp from collections import defaultdict import sympy import neuron import nineml.units as un from nineml.abstraction import Alias, AnalogSendPort, Dynamics from neuron import load_mechanisms from pype9.simulate.common.code_gen import BaseCodeGenerator from pype9.simulate.common.cells import ( WithSynapses, DynamicsWithSynapses) from pype9.exceptions import ( Pype9BuildError, Pype9RuntimeError, Pype9CommandNotFoundError) import pype9 from datetime import datetime from nineml.abstraction import (StateAssignment, Parameter, StateVariable, Constant, Expression) from nineml.abstraction.dynamics.visitors.queriers import ( DynamicsInterfaceInferer) from sympy.printing import ccode from pype9.utils.mpi import is_mpi_master, mpi_comm from pype9.simulate.neuron.units import UnitHandler try: from nineml.extensions.kinetics import Kinetics # @UnusedImport except ImportError: KineticsClass = type(None) from pype9.annotations import ( PYPE9_NS, ION_SPECIES, MEMBRANE_VOLTAGE, MEMBRANE_CAPACITANCE, TRANSFORM_SRC, NONSPECIFIC_CURRENT, BUILD_TRANS, EXTERNAL_CURRENTS, NO_TIME_DERIVS, NUM_TIME_DERIVS, MECH_TYPE, FULL_CELL_MECH, ARTIFICIAL_CELL_MECH) import logging TRANSFORM_NS = 'NeuronBuildTransform' logger = logging.getLogger("pype9") class CodeGenerator(BaseCodeGenerator): SIMULATOR_NAME = 'neuron' SIMULATOR_VERSION = neuron.h.nrnversion(0) ODE_SOLVER_DEFAULT = 'derivimplicit' REGIME_VARNAME = 'regime_' SEED_VARNAME = 'seed_' BASE_TMPL_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 
'templates')) UnitHandler = UnitHandler _neuron_units = {un.mV: 'millivolt', un.S: 'siemens', un.mA: 'milliamp'} _inbuilt_ions = ['na', 'k', 'ca'] def __init__(self, gsl_path=None, **kwargs): super(CodeGenerator, self).__init__(**kwargs) self.nrnivmodl_path = self.get_neuron_util_path('nrnivmodl') self.modlunit_path = self.get_neuron_util_path('modlunit', default=None) # Compile wrappers around GSL random distribution functions if is_mpi_master(): if not os.path.exists(self.libninemlnrn_so): self.compile_libninemlnrn() mpi_comm.barrier() self.nrnivmodl_flags = [ '-L' + self.libninemlnrn_dir, '-Wl,-rpath,' + self.libninemlnrn_dir, '-lninemlnrn', '-lgsl', '-lgslcblas'] if gsl_path is not None: self.nrnivmodl_path.append('-L' + gsl_path) else: self.nrnivmodl_flags.extend(self.get_gsl_prefixes()) # Work out the name of the installation directory for the compiled # NMODL files on the current platform self.specials_dir = self._get_specials_dir() def generate_source_files(self, component_class, src_dir, name=None, **kwargs): """ Generates main NMODL file for cell (and synapse) class Parameters ---------- membrane_voltage : str Specifies the state that represents membrane voltage. membrane_capacitance : str Specifies the state that represents membrane capacitance. default_capacitance : float Specifies the quantity assigned to the membrane capacitance by default v_threshold: float The threshold for the neuron to emit a spike. external_ports : list(str) Analog ports to strip from expressions as they represent synapses or injected currents, which can be inserted manually by NEURON objects. is_subcomponent : bool Whether to use the 'SUFFIX' tag or not. 
ode_solver : str specifies the ODE solver to use """ if name is None: name = component_class.name template = 'main.tmpl' self.generate_mod_file(template, component_class, src_dir, name, kwargs) def generate_mod_file(self, template, component_class, src_dir, name, template_args): # Get list of all unique triggers within the component class so they # can be referred to by an index (i.e. their index in the list). all_triggers = [] for regime in component_class.regimes: for on_condition in regime.on_conditions: if on_condition.trigger.rhs not in all_triggers: all_triggers.append(on_condition.trigger.rhs) tmpl_args = { 'code_gen': self, 'component_name': name, 'component_class': component_class, 'all_triggers': all_triggers, 'version': pype9.__version__, 'src_dir': src_dir, 'timestamp': datetime.now().strftime('%a %d %b %y %I:%M:%S%p'), 'unit_handler': UnitHandler(component_class), 'ode_solver': self.ODE_SOLVER_DEFAULT, 'external_ports': [], 'is_subcomponent': True, 'regime_varname': self.REGIME_VARNAME, 'seed_varname': self.SEED_VARNAME} # # FIXME: weight_vars needs to be removed or implemented properly # 'weight_variables': []} tmpl_args.update(template_args) # Render mod file self.render_to_file( template, tmpl_args, component_class.name + '.mod', src_dir) def transform_for_build(self, name, component_class, **kwargs): """ Copies and transforms the component class to match the format of the simulator (overridden in derived class) Parameters ---------- name : str The name of the transformed component class component_class : nineml.Dynamics The component class to be transformed """ self._set_build_props(component_class, **kwargs) if not isinstance(component_class, WithSynapses): raise Pype9RuntimeError( "'component_class' must be a DynamicsWithSynapses object") # --------------------------------------------------------------------- # Clone original component class # --------------------------------------------------------------------- trfrm = 
component_class.dynamics.flatten() # --------------------------------------------------------------------- # Get the membrane voltage and convert it to 'v' # --------------------------------------------------------------------- try: name = kwargs['membrane_voltage'] try: orig_v = component_class.element( name, nineml_children=Dynamics.nineml_children) except KeyError: raise Pype9BuildError( "Could not find specified membrane voltage '{}'" .format(name)) except KeyError: # Guess voltage from its dimension if not supplied candidate_vs = [cv for cv in component_class.state_variables if cv.dimension == un.voltage] if len(candidate_vs) == 0: candidate_vs = [ cv for cv in component_class.analog_receive_ports if cv.dimension == un.voltage] if len(candidate_vs) == 1: orig_v = candidate_vs[0] logger.info("Guessing that '{}' is the membrane voltage" .format(orig_v)) elif len(candidate_vs) > 1: try: orig_v = next(c for c in candidate_vs if c.name == 'v') logger.info("Guessing that '{}' is the membrane voltage" .format(orig_v)) except StopIteration: raise Pype9BuildError( "Could not guess the membrane voltage, candidates: " "'{}'" .format("', '".join(v.name for v in candidate_vs))) else: orig_v = None logger.info( "Can't find candidate for the membrane voltage in " "state_variables '{}' or analog_receive_ports '{}', " "treating '{}' as an \"artificial cell\"".format( "', '".join( sv.name for sv in component_class.state_variables), "', '".join( p.name for p in component_class.analog_receive_ports), component_class.name)) if orig_v is not None: # Map voltage to hard-coded 'v' symbol if orig_v.name != 'v': trfrm.rename_symbol(orig_v.name, 'v') v = trfrm.state_variable('v') v.annotations.set((BUILD_TRANS, PYPE9_NS), TRANSFORM_SRC, orig_v) else: v = trfrm.state_variable('v') # Add annotations to the original and build models component_class.annotations.set((BUILD_TRANS, PYPE9_NS), MEMBRANE_VOLTAGE, orig_v.name) # @IgnorePep8 trfrm.annotations.set((BUILD_TRANS, PYPE9_NS), 
MEMBRANE_VOLTAGE, 'v') # Remove associated analog send port if present try: trfrm.remove(trfrm.analog_send_port('v')) except KeyError: pass # Need to convert to AnalogReceivePort if v is a StateVariable if isinstance(v, StateVariable): self._transform_full_component(trfrm, component_class, v, **kwargs) trfrm.annotations.set((BUILD_TRANS, PYPE9_NS), MECH_TYPE, FULL_CELL_MECH) else: raise NotImplementedError( "Build sub-components is not supported in PyPe9 v0.1") else: trfrm.annotations.set((BUILD_TRANS, PYPE9_NS), MECH_TYPE, ARTIFICIAL_CELL_MECH) # ----------------------------------------------------------------- # Insert dummy aliases for parameters (such as capacitance) that # now do not show up in the inferred interface for the transformed # class (i.e. that were only # present in the voltage time derivative) # ----------------------------------------------------------------- # Infer required parameters inferred = DynamicsInterfaceInferer(trfrm) for parameter in list(trfrm.parameters): if parameter.name not in inferred.parameter_names: trfrm.add(Alias(parameter.name + '___dummy', parameter.name)) # ----------------------------------------------------------------- # Validate the transformed component class and construct prototype # ----------------------------------------------------------------- trfrm.validate() trfrm_with_syn = DynamicsWithSynapses( name, trfrm, component_class.synapses, component_class.connection_parameter_sets) # Retun a prototype of the transformed class return trfrm_with_syn def _transform_full_component(self, trfrm, component_class, v, **kwargs): # ----------------------------------------------------------------- # Remove all analog send ports with 'current' dimension so they # don't get confused with the converted voltage time derivative # expression # ----------------------------------------------------------------- for port in list(trfrm.analog_send_ports): if port.dimension == un.current: trfrm.remove(port) # 
----------------------------------------------------------------- # Insert membrane capacitance if not present # ----------------------------------------------------------------- # Get or guess the location of the membrane capacitance try: name = kwargs['membrane_capacitance'] try: orig_cm = component_class.parameter(name) except KeyError: raise Pype9BuildError( "Could not find specified membrane capacitance '{}'" .format(name)) cm = trfrm.parameter(orig_cm.name) except KeyError: # 'membrane_capacitance' was not specified candidate_cms = [ccm for ccm in component_class.parameters if ccm.dimension == un.capacitance] if len(candidate_cms) == 1: orig_cm = candidate_cms[0] cm = trfrm.parameter(orig_cm.name) logger.info("Guessing that '{}' is the membrane capacitance" .format(orig_cm)) elif len(candidate_cms) > 1: raise Pype9BuildError( "Could not guess the membrane capacitance, candidates:" " '{}'".format("', '".join(candidate_cms))) else: cm = Parameter("cm___pype9", dimension=un.capacitance) trfrm.add(cm) cm.annotations.set((BUILD_TRANS, PYPE9_NS), TRANSFORM_SRC, None) trfrm.annotations.set((BUILD_TRANS, PYPE9_NS), MEMBRANE_CAPACITANCE, cm.name) # ----------------------------------------------------------------- # Replace membrane voltage equation with membrane current # ----------------------------------------------------------------- # Determine the regimes in which each state variables has a time # derivative in has_td = defaultdict(list) # List which regimes need to be clamped to their last voltage # (as it has no time derivative) clamped_regimes = [] # The voltage clamp equation where v_clamp is the last voltage # value and g_clamp_ is a large conductance clamp_i = sympy.sympify('g_clamp___pype9 * (v - v_clamp___pype9)') memb_is = [] for regime in trfrm.regimes: # Add an appropriate membrane current try: # Convert the voltage time derivative into a membrane # current dvdt = regime.time_derivative(v.name) regime.remove(dvdt) i = -dvdt.rhs * cm memb_is.append(i) 
except KeyError: i = clamp_i clamped_regimes.append(regime) regime.add(Alias('i___pype9', i)) # Record state vars that have a time deriv. in this regime for var in regime.time_derivative_variables: if var != 'v': has_td[var].append(regime) # Pick the most popular membrane current to be the alias in # the global scope assert memb_is, "No regimes contain voltage time derivatives" memb_i = Alias('i___pype9', max(memb_is, key=memb_is.count)) # Add membrane current along with a analog send port trfrm.add(memb_i) i_port = AnalogSendPort('i___pype9', dimension=un.current) i_port.annotations.set((BUILD_TRANS, PYPE9_NS), ION_SPECIES, NONSPECIFIC_CURRENT) trfrm.add(i_port) # Remove membrane currents that match the membrane current in the # outer scope for regime in trfrm.regimes: if regime.alias('i___pype9') ==
below :param bool bucket_key_enabled: Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS. """ if apply_server_side_encryption_by_default is not None: pulumi.set(__self__, "apply_server_side_encryption_by_default", apply_server_side_encryption_by_default) if bucket_key_enabled is not None: pulumi.set(__self__, "bucket_key_enabled", bucket_key_enabled) @property @pulumi.getter(name="applyServerSideEncryptionByDefault") def apply_server_side_encryption_by_default(self) -> Optional['outputs.BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefault']: """ A single object for setting server-side encryption by default documented below """ return pulumi.get(self, "apply_server_side_encryption_by_default") @property @pulumi.getter(name="bucketKeyEnabled") def bucket_key_enabled(self) -> Optional[bool]: """ Whether or not to use [Amazon S3 Bucket Keys](https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) for SSE-KMS. """ return pulumi.get(self, "bucket_key_enabled") @pulumi.output_type class BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefault(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "sseAlgorithm": suggest = "sse_algorithm" elif key == "kmsMasterKeyId": suggest = "kms_master_key_id" if suggest: pulumi.log.warn(f"Key '{key}' not found in BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefault. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefault.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: BucketServerSideEncryptionConfigurationV2RuleApplyServerSideEncryptionByDefault.__key_warning(key) return super().get(key, default) def __init__(__self__, *, sse_algorithm: str, kms_master_key_id: Optional[str] = None): """ :param str sse_algorithm: The server-side encryption algorithm to use. Valid values are `AES256` and `aws:kms` :param str kms_master_key_id: The AWS KMS master key ID used for the SSE-KMS encryption. This can only be used when you set the value of `sse_algorithm` as `aws:kms`. The default `aws/s3` AWS KMS master key is used if this element is absent while the `sse_algorithm` is `aws:kms`. """ pulumi.set(__self__, "sse_algorithm", sse_algorithm) if kms_master_key_id is not None: pulumi.set(__self__, "kms_master_key_id", kms_master_key_id) @property @pulumi.getter(name="sseAlgorithm") def sse_algorithm(self) -> str: """ The server-side encryption algorithm to use. Valid values are `AES256` and `aws:kms` """ return pulumi.get(self, "sse_algorithm") @property @pulumi.getter(name="kmsMasterKeyId") def kms_master_key_id(self) -> Optional[str]: """ The AWS KMS master key ID used for the SSE-KMS encryption. This can only be used when you set the value of `sse_algorithm` as `aws:kms`. The default `aws/s3` AWS KMS master key is used if this element is absent while the `sse_algorithm` is `aws:kms`. 
""" return pulumi.get(self, "kms_master_key_id") @pulumi.output_type class BucketV2CorsRule(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "allowedHeaders": suggest = "allowed_headers" elif key == "allowedMethods": suggest = "allowed_methods" elif key == "allowedOrigins": suggest = "allowed_origins" elif key == "exposeHeaders": suggest = "expose_headers" elif key == "maxAgeSeconds": suggest = "max_age_seconds" if suggest: pulumi.log.warn(f"Key '{key}' not found in BucketV2CorsRule. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: BucketV2CorsRule.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: BucketV2CorsRule.__key_warning(key) return super().get(key, default) def __init__(__self__, *, allowed_headers: Optional[Sequence[str]] = None, allowed_methods: Optional[Sequence[str]] = None, allowed_origins: Optional[Sequence[str]] = None, expose_headers: Optional[Sequence[str]] = None, max_age_seconds: Optional[int] = None): """ :param Sequence[str] allowed_headers: Set of headers that are specified in the Access-Control-Request-Headers header. :param Sequence[str] allowed_methods: Set of HTTP methods that the origin is allowed to execute. :param Sequence[str] allowed_origins: Set of origins customers are able to access the bucket from. :param Sequence[str] expose_headers: Set of headers in the response that customers are able to access from their applications. * `max_age_seconds` The time in seconds that browser can cache the response for a preflight request. 
""" if allowed_headers is not None: pulumi.set(__self__, "allowed_headers", allowed_headers) if allowed_methods is not None: pulumi.set(__self__, "allowed_methods", allowed_methods) if allowed_origins is not None: pulumi.set(__self__, "allowed_origins", allowed_origins) if expose_headers is not None: pulumi.set(__self__, "expose_headers", expose_headers) if max_age_seconds is not None: pulumi.set(__self__, "max_age_seconds", max_age_seconds) @property @pulumi.getter(name="allowedHeaders") def allowed_headers(self) -> Optional[Sequence[str]]: """ Set of headers that are specified in the Access-Control-Request-Headers header. """ return pulumi.get(self, "allowed_headers") @property @pulumi.getter(name="allowedMethods") def allowed_methods(self) -> Optional[Sequence[str]]: """ Set of HTTP methods that the origin is allowed to execute. """ return pulumi.get(self, "allowed_methods") @property @pulumi.getter(name="allowedOrigins") def allowed_origins(self) -> Optional[Sequence[str]]: """ Set of origins customers are able to access the bucket from. """ return pulumi.get(self, "allowed_origins") @property @pulumi.getter(name="exposeHeaders") def expose_headers(self) -> Optional[Sequence[str]]: """ Set of headers in the response that customers are able to access from their applications. * `max_age_seconds` The time in seconds that browser can cache the response for a preflight request. """ return pulumi.get(self, "expose_headers") @property @pulumi.getter(name="maxAgeSeconds") def max_age_seconds(self) -> Optional[int]: return pulumi.get(self, "max_age_seconds") @pulumi.output_type class BucketV2Grant(dict): def __init__(__self__, *, id: Optional[str] = None, permissions: Optional[Sequence[str]] = None, type: Optional[str] = None, uri: Optional[str] = None): """ :param str id: Unique identifier for the rule. :param Sequence[str] permissions: List of permissions given to the grantee. :param str type: Type of grantee. :param str uri: URI of the grantee group. 
""" if id is not None: pulumi.set(__self__, "id", id) if permissions is not None: pulumi.set(__self__, "permissions", permissions) if type is not None: pulumi.set(__self__, "type", type) if uri is not None: pulumi.set(__self__, "uri", uri) @property @pulumi.getter def id(self) -> Optional[str]: """ Unique identifier for the rule. """ return pulumi.get(self, "id") @property @pulumi.getter def permissions(self) -> Optional[Sequence[str]]: """ List of permissions given to the grantee. """ return pulumi.get(self, "permissions") @property @pulumi.getter def type(self) -> Optional[str]: """ Type of grantee. """ return pulumi.get(self, "type") @property @pulumi.getter def uri(self) -> Optional[str]: """ URI of the grantee group. """ return pulumi.get(self, "uri") @pulumi.output_type class BucketV2LifecycleRule(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "abortIncompleteMultipartUploadDays": suggest = "abort_incomplete_multipart_upload_days" elif key == "noncurrentVersionExpirations": suggest = "noncurrent_version_expirations" elif key == "noncurrentVersionTransitions": suggest = "noncurrent_version_transitions" if suggest: pulumi.log.warn(f"Key '{key}' not found in BucketV2LifecycleRule. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: BucketV2LifecycleRule.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: BucketV2LifecycleRule.__key_warning(key) return super().get(key, default) def __init__(__self__, *, abort_incomplete_multipart_upload_days: Optional[int] = None, enabled: Optional[bool] = None, expirations: Optional[Sequence['outputs.BucketV2LifecycleRuleExpiration']] = None, id: Optional[str] = None, noncurrent_version_expirations: Optional[Sequence['outputs.BucketV2LifecycleRuleNoncurrentVersionExpiration']] = None, noncurrent_version_transitions: Optional[Sequence['outputs.BucketV2LifecycleRuleNoncurrentVersionTransition']] = None, prefix: Optional[str] = None, tags: Optional[Mapping[str, str]] = None, transitions: Optional[Sequence['outputs.BucketV2LifecycleRuleTransition']] = None): """ :param int abort_incomplete_multipart_upload_days: Number of days after initiating a multipart upload when the multipart upload must be completed. :param bool enabled: Whether versioning is enabled. :param Sequence['BucketV2LifecycleRuleExpirationArgs'] expirations: The expiration for the lifecycle of the object in the form of date, days and, whether the object has a delete marker. :param str id: Unique identifier for the rule. :param Sequence['BucketV2LifecycleRuleNoncurrentVersionExpirationArgs'] noncurrent_version_expirations: When noncurrent object versions expire. :param Sequence['BucketV2LifecycleRuleNoncurrentVersionTransitionArgs'] noncurrent_version_transitions: When noncurrent object versions transition. :param str prefix: Object keyname prefix identifying one or more objects to which the rule applies :param Mapping[str, str] tags: A map of tags to assign to the bucket. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level. 
:param Sequence['BucketV2LifecycleRuleTransitionArgs'] transitions: Specifies when an Amazon S3 object transitions to a specified storage class. """ if abort_incomplete_multipart_upload_days is not None: pulumi.set(__self__, "abort_incomplete_multipart_upload_days", abort_incomplete_multipart_upload_days) if enabled is not None: pulumi.set(__self__, "enabled", enabled) if expirations is not None: pulumi.set(__self__, "expirations", expirations) if id is not None: pulumi.set(__self__, "id", id) if noncurrent_version_expirations is not None: pulumi.set(__self__, "noncurrent_version_expirations", noncurrent_version_expirations) if noncurrent_version_transitions is not None: pulumi.set(__self__, "noncurrent_version_transitions", noncurrent_version_transitions) if prefix is not None: pulumi.set(__self__, "prefix", prefix) if tags is not None: pulumi.set(__self__, "tags", tags) if transitions is not None: pulumi.set(__self__, "transitions", transitions) @property @pulumi.getter(name="abortIncompleteMultipartUploadDays") def abort_incomplete_multipart_upload_days(self) -> Optional[int]: """ Number of days after initiating a multipart upload when the multipart upload must be completed. """ return pulumi.get(self, "abort_incomplete_multipart_upload_days") @property @pulumi.getter def enabled(self) -> Optional[bool]: """ Whether versioning is enabled. """ return pulumi.get(self, "enabled") @property @pulumi.getter def expirations(self) -> Optional[Sequence['outputs.BucketV2LifecycleRuleExpiration']]: """ The expiration for the lifecycle of the object in the form of date, days and, whether the object has a delete marker. """ return pulumi.get(self, "expirations") @property @pulumi.getter def id(self) -> Optional[str]: """ Unique identifier for the rule. 
""" return pulumi.get(self, "id") @property @pulumi.getter(name="noncurrentVersionExpirations") def noncurrent_version_expirations(self) -> Optional[Sequence['outputs.BucketV2LifecycleRuleNoncurrentVersionExpiration']]: """ When noncurrent object versions expire. """ return pulumi.get(self, "noncurrent_version_expirations") @property @pulumi.getter(name="noncurrentVersionTransitions") def noncurrent_version_transitions(self) -> Optional[Sequence['outputs.BucketV2LifecycleRuleNoncurrentVersionTransition']]: """ When noncurrent object versions transition. """ return pulumi.get(self, "noncurrent_version_transitions") @property @pulumi.getter def prefix(self) -> Optional[str]: """ Object keyname prefix identifying one or more objects to which the rule applies """ return pulumi.get(self, "prefix") @property @pulumi.getter def tags(self) -> Optional[Mapping[str, str]]: """ A map of tags to assign to the bucket. If configured with a provider `default_tags` configuration block present, tags
<reponame>BishoyAbdelmalik/discordbot<filename>poopiBot.py # poopiBot.py import discord import threading import asyncio from discord import channel import substring import random import requests import os import subprocess from discord.ext import commands import aiohttp from io import BytesIO from requests.sessions import session import youtube_dl from youtube_search import YoutubeSearch import validators import json from collections import deque ydl_opts = { 'format': 'bestaudio/best', 'outtmpl': '%(id)s.mp3', 'postprocessors': [{ 'key': 'FFmpegExtractAudio', 'preferredcodec': 'mp3', 'preferredquality': '192', }], } TOKEN = open("token.txt").read() # api-endpoint meme = "http://localhost:956/moderate" meme2 = "http://localhost:956/sbubby" meme3 = "http://localhost:956/dank" meme4 = "http://localhost:956/light" meme5 = "http://localhost:956/programing" meme6 = "http://localhost:956/meme" print(os.system("node /bot/memeAPI/server.js &")) emojiThumbsUp = '\N{THUMBS UP SIGN}' servers = {} # client = discord.Client() def endSong(guild: str, path: str): if not servers[guild]["voice_client"].is_connected(): servers[guild] = {} else: servers[guild]["song_count"][path] = servers[guild]["song_count"][path]-1 if servers[guild]["song_count"][path] == 0: os.remove(path) servers[guild]["song_count"].pop(path) if len(servers[guild]["music_queue"]) != 0: playMusic(guild) def download_song(v_id: str): with youtube_dl.YoutubeDL(ydl_opts) as ydl: ydl.extract_info(get_yt_url(v_id), download=True) def playMusic(guild: str): if servers[guild]["voice_client"].is_playing(): return if len(servers[guild]["music_queue"]) != 0: path = servers[guild]["music_queue"].pop() servers[guild]["current_playing"] = path[:-4] download_song(path[:-4]) servers[guild]["voice_client"].play(discord.FFmpegPCMAudio( path), after=lambda x: endSong(guild, path)) servers[guild]["voice_client"].source = discord.PCMVolumeTransformer( servers[guild]["voice_client"].source, 1) if len(servers[guild]["music_queue"]) 
!= 0: download_song(servers[guild]["music_queue"][0][:-4]) def get_url(url: str): if not validators.url(url): youtube_search = YoutubeSearch(url, max_results=1).to_json() youtube_search = json.loads(youtube_search) v_id = youtube_search["videos"][0]["id"] url = get_yt_url(v_id) elif "list" in url or "playlist" in url: pass # with youtube_dl.YoutubeDL({}) as ydl: # result=ydl.extract_info(url, download=False) # if 'entries' in result: # # Can be a playlist or a list of videos # video = result['entries'] # #loops entries to grab each video_url # for j, item in enumerate(video): # video = result['entries'][j]["webpage_url"] # url=video return url def get_yt_url(v_id: str): return "https://www.youtube.com/watch?v="+v_id class MyClient(discord.Client): async def music_skip(self, message): global servers guild = message.guild if len(servers[guild]["music_queue"]) == 0: await message.channel.send("No more files in queue") servers[guild]["voice_client"].stop() return async def music_now_playing(self, message): global servers guild = message.guild if not servers[guild]["voice_client"].is_playing(): await message.channel.send("Nothing is playing") else: await message.channel.send(get_yt_url(servers[guild]["current_playing"])) return async def music_player(self, message, location): if not message.author.voice: await message.channel.send("join vc first") else: global servers guild = message.guild channel = message.author.voice.channel if not guild in servers: servers[guild] = {} if not "music_queue" in servers[guild]: music_queue = deque() servers[guild]["music_queue"] = music_queue try: servers[guild]["voice_client"] = await channel.connect() except: print("already in vc add music") if(servers[guild]["voice_client"] == None): await message.channel.send("error") return msg = message.content for attachment in message.attachments: m3u_url = attachment.url print(m3u_url) if m3u_url.endswith(".m3u") or m3u_url.endswith(".txt"): response = requests.get(m3u_url) links = 
response.text.split() print(links) for link in links: if validators.url(link): msg += " " + link possible_urls = msg.split()[1:] a_url = "" urls = [] for url in possible_urls: if validators.url(url): if len(a_url.strip()) > 0: urls.append(get_url(a_url.strip())) a_url = "" urls.append(get_url(url)) else: a_url += " "+url if len(a_url.strip()) > 0: urls.append(get_url(a_url.strip())) a_url = "" print(urls) for url in urls: print(url) path = url[url.index("v=")+2:] + ".mp3" if location == 0: servers[guild]["music_queue"].appendleft(path) else: servers[guild]["music_queue"].append(path) if not "song_count" in servers[guild]: servers[guild]["song_count"] = {} if not path in servers[guild]["song_count"]: servers[guild]["song_count"][path] = 0 servers[guild]["song_count"][path] = servers[guild]["song_count"][path]+1 if len(urls) < 2: await message.channel.send(url+"\nAdded to the queue") if len(urls) > 1: await message.channel.send(str(len(urls))+" songs added to the queue") playMusic(guild) await message.delete() return async def music_play(self, message): await self.music_player(message, 0) async def music_play_top(self, message): await self.music_player(message, 1) async def on_ready(self): print(f'{self.user} has connected to Discord!') print('Servers connected to:') for server in client.guilds: print(server) async def add_student_role(self, member): role = discord.utils.get(member.guild.roles, id=805898946434170900) print(role) await member.add_roles(role) async def add_108_role(self, member): role = discord.utils.get(member.guild.roles, id=809577813354872883) print(role) await member.add_roles(role) async def add_110_role(self, member): role = discord.utils.get(member.guild.roles, id=809577623126409240) print(role) await member.add_roles(role) async def add_182_role(self, member): role = discord.utils.get(member.guild.roles, id=809577657398198322) print(role) await member.add_roles(role) async def add_122_role(self, member): role = 
discord.utils.get(member.guild.roles, id=809577695586549793)
        print(role)
        await member.add_roles(role)

    async def add_282_role(self, member):
        # Assigns the hard-coded CS 282 course role to *member*.
        role = discord.utils.get(member.guild.roles, id=809577733020581898)
        print(role)
        await member.add_roles(role)

    async def add_160_role(self, member):
        # Assigns the hard-coded CS 160 course role to *member*.
        role = discord.utils.get(member.guild.roles, id=809577851292090368)
        print(role)
        await member.add_roles(role)

    async def on_member_join(self, member):
        # NOTE(review): "hello "+member raises TypeError at runtime —
        # discord.Member cannot be concatenated to str; should be
        # f"hello {member}" or "hello " + str(member).
        print("hello "+member)
        # Auto-assign the student role on the tutoring server, then DM a
        # welcome message.
        if str(member.guild) == "CS/CIT Tutoring":
            await self.add_student_role(member)
        await member.create_dm()
        await member.dm_channel.send(
            f'Hi {member.name}, poopi welcomes you!'
        )

    async def on_message(self, message):
        # Ignore the bot's own messages to avoid reply loops.
        if message.author == self.user:
            return
        user = ""
        isAdmin = message.channel.permissions_for(message.author).administrator
        print(message.guild)
        print(message.content)
        msgs = ["aha", "sure", "why not", "why", "mmm why", "tell me more",
                "yeah", "I see", "I guess", "IDK", "lamo", "lol", "ok boomer",
                "good point", "no", "yes", "Awwww"]
        # DMs have no guild; reply with a canned phrase.
        # NOTE(review): should be `message.guild is None`; also execution
        # falls through to `message.guild.id` below, which raises
        # AttributeError for DMs — an early return is probably missing.
        if message.guild == None:
            await message.channel.send(random.choice(msgs))
        # Commands below only apply outside the bot's home guild.
        if message.guild.id != 690308124808183859:
            if str(message.guild) == "CS/CIT Tutoring":
                await self.add_student_role(message.author)
                themsg = message.content.lower()
                # Auto-answer "is a tutor online right now?"-style questions.
                if "tutor" in themsg and ("today" in themsg or
                                          "right now" in themsg or
                                          "now" in themsg or
                                          "online" in themsg) and "?"
in themsg: await message.channel.send("https://cdn.discordapp.com/attachments/805908246224568360/828794865109434368/unknown.png") if themsg.startswith("-join"): join = int(message.content[5:]) if join == 110: await self.add_110_role(message.author) await message.add_reaction(emojiThumbsUp) elif join == 108: await self.add_108_role(message.author) await message.add_reaction(emojiThumbsUp) elif join == 182: await self.add_182_role(message.author) await message.add_reaction(emojiThumbsUp) elif join == 282: await self.add_282_role(message.author) await message.add_reaction(emojiThumbsUp) elif join == 160: await self.add_160_role(message.author) await message.add_reaction(emojiThumbsUp) elif join == 122: await self.add_122_role(message.author) await message.add_reaction(emojiThumbsUp) pass return if '!setPin' in message.content: if isAdmin: try: f = open(str(message.guild.id)+"_Pins", "x") f.write(str(message.channel.id)) f.close() await message.add_reaction(emojiThumbsUp) except: await message.channel.send("unset pin channel first") else: await message.channel.send("Only admins can set pins channel") return if '!unsetPin' in message.content: if isAdmin: try: os.remove(str(message.guild.id)+"_Pins") except: print() else: await message.channel.send("Only admins can unset pins channel") return try: if message.channel.id == int(open(str(message.guild.id)+"_Pins").read()): await message.delete() user = message.author await user.create_dm() await user.dm_channel.send("Hey, don't send messages in pins channel 😡 😡 😡") return except: print("no pins channel") pick = random.randint(1, 100) if pick == 5: await message.channel.send('mmmm'+" <@" + str(message.author.id)+">") if '!pin' in message.content.lower(): # print(open(str(message.guild)+"_Pins").read()) try: channelID = int(open(str(message.guild.id)+"_Pins").read()) except: channelID = -1 if channelID == -1: await message.channel.send("set pin channel first") else: pinChannel = client.get_channel(channelID) await 
pinChannel.send(message.content.replace('!pin', '').strip()+"\n\nPinned by"+" <@" + str(message.author.id)+">") await message.channel.send('Pinned ```'+message.content.replace('!pin', '').strip()+'```') await message.delete() return if '!!pt' in message.content.lower(): await self.music_play_top(message) return if '!!p' in message.content.lower(): await self.music_play(message) return if '!!skip' in message.content.lower() or '!!s' in message.content.lower(): await self.music_skip(message) return if '!!np' in message.content.lower(): await self.music_now_playing(message) return if '!bugs' in message.content.lower(): await message.channel.send("https://media.discordapp.net/attachments/538955632951296010/771989679713157140/db1.png") return if message.mention_everyone: await message.channel.send('mmmm maybe'+" <@" + str(message.author.id)+">") return if '!start 380' in message.content: subprocess.Popen(["sh", "/380/server/start.sh"], shell=False, stdin=None, stdout=None, stderr=None, close_fds=True) return if '!stop 380' in message.content: subprocess.Popen(["pkill", "-9", "-f", "server.py"], shell=False, stdin=None, stdout=None, stderr=None, close_fds=True) return if '@' in message.content: user = substring.substringByChar( message.content, startChar="<", endChar=">") if 'gWhy' in message.content: await message.channel.send('CANBAS'+" " + user) return if 'CANBAS'.lower() in message.content.lower(): await message.channel.send('gWhy'+" " + user) return if 'why not' in message.content.lower(): if bool(random.getrandbits(1)): await message.channel.send('Gwacause not'+" <@" + str(message.author.id)+">") return if 'why' in message.content.lower(): if bool(random.getrandbits(1)): await message.channel.send('Gwacause'+" <@" + str(message.author.id)+">") return if 'user' in message.content.lower(): if bool(random.getrandbits(1)): await message.channel.send('Jyuicerr'+" <@" + str(message.author.id)+">") return if '!pathetic' in message.content.lower(): pathetic_arr = 
["https://media.discordapp.net/attachments/699127599817031700/724169633262862356/pathetic-56074431.png", "https://media.discordapp.net/attachments/699127599817031700/724168345154224140/unknown.png", "https://media.discordapp.net/attachments/699127599817031700/724168083782238208/aexeoj2Y_700w_0.png", "https://media.discordapp.net/attachments/699127599817031700/724168065666908190/D-WdtJ9UwAAGkY3.png", "https://media.discordapp.net/attachments/699127599817031700/724161695487885322/447.png", "https://media.discordapp.net/attachments/699127599817031700/724711838813651014/unknown.png", "https://media.discordapp.net/attachments/699127599817031700/724713507039215646/pathetic.jpeg", "https://cdn.discordapp.com/attachments/678381326533132324/739654715268137071/unknown_3.png"] check = random.randint(0, 1) # whatever percentage if not check: query = "pathetic meme" r = requests.get("https://api.qwant.com/api/search/images", params={ 'count': 50, 'q': query, 't': 'images', 'locale': 'en_US', 'uiv': 4 }, headers={ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36' } ) response = r.json().get('data').get('result').get('items') urls = [r.get('media') for r in response] url = random.choice(urls) else: check = random.randint(0, 1) if not check: url = pathetic_arr[7] else: url = random.choice(pathetic_arr) response = requests.get(url, stream=True) # filename=data["meme"][data["meme"].index("m/")+2:] # print(filename[filename.index("."):]) header = response.headers print(header) if int(response.headers["Content-Length"]) < 8000000: # extenstion=filename[filename.index("."):] extenstion = "."+header["Content-Type"].split("/")[1] filename = "pathetic"+extenstion with open(filename, 'wb') as f: f.write(response.content) await message.channel.send(file=discord.File(filename)) os.remove(filename) else: await message.channel.send(patheticIMGS[pick]) return if '!meme' in message.content.lower(): pick = 
random.randint(1, 7) link = meme if pick == 1: link = meme if pick == 2: link = meme2 if pick == 3: link = meme3 if pick == 4: link = meme4 if pick == 5: link = meme5 if pick == 6: link = meme6 r = requests.get(url=link) # extracting data in json format data = r.json() response = requests.get(data["meme"], stream=True) filename = data["meme"][data["meme"].index("m/")+2:] # print(filename[filename.index("."):]) print(response.headers) if int(response.headers["Content-Length"]) < 8000000: extenstion = filename[filename.index("."):] filename = "meme"+extenstion with open(filename, 'wb') as f: f.write(response.content) await message.channel.send(file=discord.File(filename)) os.remove(filename) else: await message.channel.send(data["meme"]) return if '!joke' in message.content.lower(): headers = {'Accept': 'application/json'} joke = "https://icanhazdadjoke.com/" r = requests.get(url=joke, headers=headers) data = r.json() await message.channel.send(data["joke"]) return if '!potato' in message.content.lower(): pick = random.randint(1, 7) link = "http://localhost:956/potato" r = requests.get(url=link) # extracting data in json format data = r.json() response = requests.get(data["meme"], stream=True) filename = data["meme"][data["meme"].index("m/")+2:] # print(filename[filename.index("."):]) print(response.headers) if int(response.headers["Content-Length"]) < 8000000:
# <gh_stars>1-10  -- NOTE(review): scraping artifact, not valid Python; commented out so the module parses.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import calendar
import datetime
import os
import shutil
import stat
import sys
import tempfile
import time
import uuid

import fixtures
import httpretty
import iso8601
import mock
import testresources
import testtools
import webob

from keystoneclient.common import cms
from keystoneclient import exceptions
from keystoneclient.middleware import auth_token
from keystoneclient.openstack.common import jsonutils
from keystoneclient.openstack.common import memorycache
from keystoneclient.openstack.common import timeutils
from keystoneclient.tests import client_fixtures


# WSGI environ headers that auth_token is expected to inject for a default
# (v2) token; FakeApp asserts against these.
EXPECTED_V2_DEFAULT_ENV_RESPONSE = {
    'HTTP_X_IDENTITY_STATUS': 'Confirmed',
    'HTTP_X_TENANT_ID': 'tenant_id1',
    'HTTP_X_TENANT_NAME': 'tenant_name1',
    'HTTP_X_USER_ID': 'user_id1',
    'HTTP_X_USER_NAME': 'user_name1',
    'HTTP_X_ROLES': 'role1,role2',
    'HTTP_X_USER': 'user_name1',  # deprecated (diablo-compat)
    'HTTP_X_TENANT': 'tenant_name1',  # deprecated (diablo-compat)
    'HTTP_X_ROLE': 'role1,role2',  # deprecated (diablo-compat)
}

BASE_HOST = 'https://keystone.example.com:1234'
BASE_URI = '%s/testadmin' % BASE_HOST
FAKE_ADMIN_TOKEN_ID = '<PASSWORD>'
FAKE_ADMIN_TOKEN = jsonutils.dumps(
    {'access': {'token': {'id': FAKE_ADMIN_TOKEN_ID,
                          'expires': '2022-10-03T16:58:01Z'}}})

# Canned version-discovery responses served by httpretty in the tests below.
VERSION_LIST_v3 = jsonutils.dumps({
    "versions": {
        "values": [
            {
                "id": "v3.0",
                "status": "stable",
                "updated": "2013-03-06T00:00:00Z",
                "links": [{'href': '%s/v3' % BASE_URI, 'rel': 'self'}]
            },
            {
                "id": "v2.0",
                "status": "stable",
                "updated": "2011-11-19T00:00:00Z",
                "links": [{'href': '%s/v2.0' % BASE_URI, 'rel': 'self'}]
            }
        ]
    }
})

VERSION_LIST_v2 = jsonutils.dumps({
    "versions": {
        "values": [
            {
                "id": "v2.0",
                "status": "stable",
                "updated": "2011-11-19T00:00:00Z",
                "links": []
            }
        ]
    }
})

ERROR_TOKEN = '<PASSWORD>'
MEMCACHED_SERVERS = ['localhost:11211']
MEMCACHED_AVAILABLE = None  # memoized result of memcached_available()


def memcached_available():
    """Do a sanity check against memcached.

    Returns ``True`` if the following conditions are met (otherwise, returns
    ``False``):

    - ``python-memcached`` is installed
    - a usable ``memcached`` instance is available via ``MEMCACHED_SERVERS``
    - the client is able to set and get a key/value pair

    The result is memoized in the module-level MEMCACHED_AVAILABLE so the
    probe only runs once per test process.
    """
    global MEMCACHED_AVAILABLE
    if MEMCACHED_AVAILABLE is None:
        try:
            import memcache
            c = memcache.Client(MEMCACHED_SERVERS)
            c.set('ping', 'pong', time=1)
            MEMCACHED_AVAILABLE = c.get('ping') == 'pong'
        except ImportError:
            MEMCACHED_AVAILABLE = False
    return MEMCACHED_AVAILABLE


class NoModuleFinder(object):
    """Disallow further imports of 'module'."""

    def __init__(self, module):
        self.module = module

    def find_module(self, fullname, path):
        # Raising ImportError from a meta_path finder blocks the import.
        if fullname == self.module or fullname.startswith(self.module + '.'):
            raise ImportError


def cleanup_revoked_file(filename):
    """Best-effort removal of the temporary revocation-list file."""
    try:
        os.remove(filename)
    except OSError:
        pass


class DisableModuleFixture(fixtures.Fixture):
    """A fixture to provide support for unloading/disabling modules."""

    def __init__(self, module, *args, **kw):
        super(DisableModuleFixture, self).__init__(*args, **kw)
        self.module = module
        self._finders = []
        self._cleared_modules = {}

    def tearDown(self):
        # Restore the import machinery and any modules we popped in setUp.
        super(DisableModuleFixture, self).tearDown()
        for finder in self._finders:
            sys.meta_path.remove(finder)
        sys.modules.update(self._cleared_modules)

    def clear_module(self):
        # Pop the target module (and submodules) out of sys.modules so the
        # next import attempt actually hits our NoModuleFinder.
        cleared_modules = {}
        for fullname in sys.modules.keys():
            if (fullname == self.module or
                    fullname.startswith(self.module + '.')):
                cleared_modules[fullname] = sys.modules.pop(fullname)
        return cleared_modules

    def setUp(self):
        """Ensure ImportError for the specified module."""
        super(DisableModuleFixture, self).setUp()

        # Clear 'module' references in sys.modules
        self._cleared_modules.update(self.clear_module())

        finder = NoModuleFinder(self.module)
        self._finders.append(finder)
        sys.meta_path.insert(0, finder)


class TimezoneFixture(fixtures.Fixture):
    """Temporarily override the process timezone via the TZ env var."""

    @staticmethod
    def supported():
        # tzset is only supported on Unix.
        return hasattr(time, 'tzset')

    def __init__(self, new_tz):
        super(TimezoneFixture, self).__init__()
        self.tz = new_tz
        self.old_tz = os.environ.get('TZ')

    def setUp(self):
        super(TimezoneFixture, self).setUp()
        if not self.supported():
            raise NotImplementedError('timezone override is not supported.')
        os.environ['TZ'] = self.tz
        time.tzset()
        self.addCleanup(self.cleanup)

    def cleanup(self):
        # Restore the previous TZ (or unset it) and re-apply.
        if self.old_tz is not None:
            os.environ['TZ'] = self.old_tz
        elif 'TZ' in os.environ:
            del os.environ['TZ']
        time.tzset()


class FakeApp(object):
    """This represents a WSGI app protected by the auth_token middleware."""

    SUCCESS = b'SUCCESS'

    def __init__(self, expected_env=None):
        self.expected_env = dict(EXPECTED_V2_DEFAULT_ENV_RESPONSE)

        if expected_env:
            self.expected_env.update(expected_env)

    def __call__(self, env, start_response):
        # Assert that the middleware injected exactly the headers we expect.
        for k, v in self.expected_env.items():
            assert env[k] == v, '%s != %s' % (env[k], v)

        resp = webob.Response()
        resp.body = FakeApp.SUCCESS
        return resp(env, start_response)


class v3FakeApp(FakeApp):
    """This represents a v3 WSGI app protected by the auth_token middleware."""

    def __init__(self, expected_env=None):

        # with v3 additions, these are for the DEFAULT TOKEN
        v3_default_env_additions = {
            'HTTP_X_PROJECT_ID': 'tenant_id1',
            'HTTP_X_PROJECT_NAME': 'tenant_name1',
            'HTTP_X_PROJECT_DOMAIN_ID': 'domain_id1',
            'HTTP_X_PROJECT_DOMAIN_NAME': 'domain_name1',
            'HTTP_X_USER_DOMAIN_ID': 'domain_id1',
            'HTTP_X_USER_DOMAIN_NAME': 'domain_name1'
        }

        if expected_env:
            v3_default_env_additions.update(expected_env)

        super(v3FakeApp, self).__init__(v3_default_env_additions)
class BaseAuthTokenMiddlewareTest(testtools.TestCase):
    """Base test class for auth_token middleware.

    All the tests allow for running with auth_token
    configured for receiving v2 or v3 tokens, with the
    choice being made by passing configuration data into
    Setup().

    The base class will, by default, run all the tests
    expecting v2 token formats.  Child classes can override
    this to specify, for instance, v3 format.

    """
    def setUp(self, expected_env=None, auth_version=None, fake_app=None):
        testtools.TestCase.setUp(self)

        self.expected_env = expected_env or dict()
        self.fake_app = fake_app or FakeApp
        self.middleware = None

        # Default middleware configuration; individual tests override via
        # set_middleware(conf=...).
        self.conf = {
            'auth_host': 'keystone.example.com',
            'auth_port': 1234,
            'auth_protocol': 'https',
            'auth_admin_prefix': '/testadmin',
            'signing_dir': client_fixtures.CERTDIR,
            'auth_version': auth_version,
            'auth_uri': 'https://keystone.example.com:1234',
        }

        self.auth_version = auth_version
        # Captured by start_fake_response() when the middleware responds.
        self.response_status = None
        self.response_headers = None

    def set_middleware(self, fake_app=None, expected_env=None, conf=None):
        """Configure the class ready to call the auth_token middleware.

        Set up the various fake items needed to run the middleware.
        Individual tests that need to further refine these can call this
        function to override the class defaults.

        """
        if conf:
            self.conf.update(conf)

        if not fake_app:
            fake_app = self.fake_app

        if expected_env:
            self.expected_env.update(expected_env)

        self.middleware = auth_token.AuthProtocol(fake_app(self.expected_env),
                                                  self.conf)
        self.middleware._iso8601 = iso8601

        # Create an (empty) revocation-list file; delete=False so it survives
        # the with-block, removed again by cleanup_revoked_file on cleanup.
        with tempfile.NamedTemporaryFile(dir=self.middleware.signing_dirname,
                                         delete=False) as f:
            pass
        self.middleware.revoked_file_name = f.name
        self.addCleanup(cleanup_revoked_file,
                        self.middleware.revoked_file_name)

        self.middleware.token_revocation_list = jsonutils.dumps(
            {"revoked": [], "extra": "success"})

    def start_fake_response(self, status, headers):
        # WSGI start_response stand-in: record status code and headers.
        self.response_status = int(status.split(' ', 1)[0])
        self.response_headers = dict(headers)

    def assertLastPath(self, path):
        # path=None asserts that NO http request was made at all.
        if path:
            self.assertEqual(path, httpretty.last_request().path)
        else:
            self.assertIsInstance(httpretty.last_request(),
                                  httpretty.core.HTTPrettyRequestEmpty)

if tuple(sys.version_info)[0:2] < (2, 7):

    # 2.6 doesn't have the assert dict equals so make sure that it exists
    class AdjustedBaseAuthTokenMiddlewareTest(BaseAuthTokenMiddlewareTest):
        def assertIsInstance(self, obj, cls, msg=None):
            """Same as self.assertTrue(isinstance(obj, cls)), with a nicer
            default message.

            """
            if not isinstance(obj, cls):
                standardMsg = '%s is not an instance of %r' % (obj, cls)
                self.fail(self._formatMessage(msg, standardMsg))

        def assertDictEqual(self, d1, d2, msg=None):
            # Simple version taken from 2.7
            self.assertIsInstance(d1, dict,
                                  'First argument is not a dictionary')
            self.assertIsInstance(d2, dict,
                                  'Second argument is not a dictionary')
            if d1 != d2:
                if msg:
                    self.fail(msg)
                else:
                    standardMsg = '%r != %r' % (d1, d2)
                    self.fail(standardMsg)

    # On 2.6, replace the base class with the augmented one for all
    # subclasses defined below.
    BaseAuthTokenMiddlewareTest = AdjustedBaseAuthTokenMiddlewareTest


class MultiStepAuthTokenMiddlewareTest(BaseAuthTokenMiddlewareTest,
                                       testresources.ResourcedTestCase):
    """Tests exercising multi-request interactions with keystone."""

    resources = [('examples', client_fixtures.EXAMPLES_RESOURCE)]

    @httpretty.activate
    def test_fetch_revocation_list_with_expire(self):
        self.set_middleware()

        # Get a token, then try to retrieve revocation list and get a 401.
        # Get a new token, try to retrieve revocation list and return 200.
        httpretty.register_uri(httpretty.POST, "%s/v2.0/tokens" % BASE_URI,
                               body=FAKE_ADMIN_TOKEN)

        responses = [httpretty.Response(body='', status=401),
                     httpretty.Response(
                         body=self.examples.SIGNED_REVOCATION_LIST)]

        httpretty.register_uri(httpretty.GET,
                               "%s/v2.0/tokens/revoked" % BASE_URI,
                               responses=responses)

        fetched_list = jsonutils.loads(self.middleware.fetch_revocation_list())
        self.assertEqual(fetched_list, self.examples.REVOCATION_LIST)

        # Check that 4 requests have been made
        self.assertEqual(len(httpretty.httpretty.latest_requests), 4)


class DiabloAuthTokenMiddlewareTest(BaseAuthTokenMiddlewareTest,
                                    testresources.ResourcedTestCase):

    resources = [('examples', client_fixtures.EXAMPLES_RESOURCE)]

    # NOTE(review): the string below is a bare string statement, not the class
    # docstring (it follows the `resources` assignment); kept as-is.
    """Auth Token middleware should understand Diablo keystone responses."""
    def setUp(self):
        # pre-diablo only had Tenant ID, which was also the Name
        expected_env = {
            'HTTP_X_TENANT_ID': 'tenant_id1',
            'HTTP_X_TENANT_NAME': 'tenant_id1',
            # now deprecated (diablo-compat)
            'HTTP_X_TENANT': 'tenant_id1',
        }
        super(DiabloAuthTokenMiddlewareTest, self).setUp(
            expected_env=expected_env)

        httpretty.reset()
        httpretty.enable()
        self.addCleanup(httpretty.disable)

        # Version discovery endpoint (300 Multiple Choices with v2 list).
        httpretty.register_uri(httpretty.GET,
                               "%s/" % BASE_URI,
                               body=VERSION_LIST_v2,
                               status=300)

        httpretty.register_uri(httpretty.POST,
                               "%s/v2.0/tokens" % BASE_URI,
                               body=FAKE_ADMIN_TOKEN)

        self.token_id = self.examples.VALID_DIABLO_TOKEN
        token_response = self.examples.JSON_TOKEN_RESPONSES[self.token_id]

        httpretty.register_uri(httpretty.GET,
                               "%s/v2.0/tokens/%s" % (BASE_URI, self.token_id),
                               body=token_response)

        self.set_middleware()

    def test_valid_diablo_response(self):
        req = webob.Request.blank('/')
        req.headers['X-Auth-Token'] = self.token_id
        self.middleware(req.environ, self.start_fake_response)
        self.assertEqual(self.response_status, 200)
        self.assertIn('keystone.token_info', req.environ)


class NoMemcacheAuthToken(BaseAuthTokenMiddlewareTest):
    """Tests for the middleware cache pool when memcache is unavailable."""

    def setUp(self):
        super(NoMemcacheAuthToken, self).setUp()
        # Make `import memcache` raise ImportError inside the middleware.
        self.useFixture(DisableModuleFixture('memcache'))

    def test_nomemcache(self):
        conf = {
            'admin_token': '<PASSWORD>',
            'auth_host': 'keystone.example.com',
            'auth_port': 1234,
            'memcached_servers': MEMCACHED_SERVERS,
            'auth_uri': 'https://keystone.example.com:1234',
        }

        auth_token.AuthProtocol(FakeApp(), conf)

    def test_not_use_cache_from_env(self):
        # When memcached_servers is configured, the swift env cache must NOT
        # be used.
        env = {'swift.cache': 'CACHE_TEST'}
        conf = {
            'memcached_servers': MEMCACHED_SERVERS
        }
        self.set_middleware(conf=conf)
        self.middleware._init_cache(env)
        with self.middleware._cache_pool.reserve() as cache:
            self.assertNotEqual(cache, 'CACHE_TEST')

    def test_multiple_context_managers_share_single_client(self):
        env = {}
        conf = {
            'memcached_servers': MEMCACHED_SERVERS
        }
        self.set_middleware(conf=conf)
        self.middleware._init_cache(env)

        caches = []

        with self.middleware._cache_pool.reserve() as cache:
            caches.append(cache)

        with self.middleware._cache_pool.reserve() as cache:
            caches.append(cache)

        # Sequential reservations reuse the same pooled client.
        self.assertIs(caches[0], caches[1])
        self.assertEqual(set(caches), set(self.middleware._cache_pool))

    def test_nested_context_managers_create_multiple_clients(self):
        env = {}
        conf = {
            'memcached_servers': MEMCACHED_SERVERS
        }
        self.set_middleware(conf=conf)
        self.middleware._init_cache(env)

        with self.middleware._cache_pool.reserve() as outer_cache:
            with self.middleware._cache_pool.reserve() as inner_cache:
                # Overlapping reservations must get distinct clients.
                self.assertNotEqual(outer_cache, inner_cache)

        self.assertEqual(
            set([inner_cache, outer_cache]),
            set(self.middleware._cache_pool))


class CommonAuthTokenMiddlewareTest(object):
    """Tests shared by the v2/v3 middleware test cases (mixin).

    Subclasses are expected to provide self.token_dict, self.middleware and
    the assert_valid_last_url helper (defined outside this excerpt).
    """

    def test_init_does_not_call_http(self):
        conf = {
            'revocation_cache_time': 1
        }
        self.set_middleware(conf=conf)
        self.assertLastPath(None)

    def test_init_by_ipv6Addr_auth_host(self):
        conf = {
            'auth_host': 'fdf8:f53e:61e4::18',
            'auth_port': 1234,
            'auth_protocol': 'http',
            'auth_uri': None,
        }
        self.set_middleware(conf=conf)
        # IPv6 hosts must be bracketed in the derived auth_uri.
        expected_auth_uri = 'http://[fdf8:f53e:61e4::18]:1234'
        self.assertEqual(expected_auth_uri, self.middleware.auth_uri)

    def assert_valid_request_200(self, token, with_catalog=True):
        req = webob.Request.blank('/')
        req.headers['X-Auth-Token'] = token
        body = self.middleware(req.environ, self.start_fake_response)
        self.assertEqual(self.response_status, 200)
        if with_catalog:
            self.assertTrue(req.headers.get('X-Service-Catalog'))
        else:
            self.assertNotIn('X-Service-Catalog', req.headers)
        self.assertEqual(body, [FakeApp.SUCCESS])
        self.assertIn('keystone.token_info', req.environ)

    def test_valid_uuid_request(self):
        self.assert_valid_request_200(self.token_dict['uuid_token_default'])
        self.assert_valid_last_url(self.token_dict['uuid_token_default'])

    def test_valid_signed_request(self):
        self.assert_valid_request_200(
            self.token_dict['signed_token_scoped'])
        self.assertEqual(self.middleware.conf['auth_admin_prefix'],
                         "/testadmin")
        #ensure that signed requests do not generate HTTP traffic
        self.assertLastPath(None)

    def test_revoked_token_receives_401(self):
        self.middleware.token_revocation_list = self.get_revocation_list_json()
        req = webob.Request.blank('/')
        req.headers['X-Auth-Token'] = self.token_dict['revoked_token']
        self.middleware(req.environ, self.start_fake_response)
        self.assertEqual(self.response_status, 401)

    def get_revocation_list_json(self, token_ids=None):
        # Build a revocation list containing the given ids (default: the
        # canned revoked token hash from token_dict).
        if token_ids is None:
            token_ids = [self.token_dict['revoked_token_hash']]
        revocation_list = {'revoked': [{'id': x, 'expires': timeutils.utcnow()}
                                       for x in token_ids]}
        return jsonutils.dumps(revocation_list)

    def test_is_signed_token_revoked_returns_false(self):
        #explicitly setting an empty revocation list here to document intent
        self.middleware.token_revocation_list = jsonutils.dumps(
            {"revoked": [], "extra": "success"})
        result = self.middleware.is_signed_token_revoked(
            self.token_dict['revoked_token'])
        self.assertFalse(result)

    def test_is_signed_token_revoked_returns_true(self):
        self.middleware.token_revocation_list = self.get_revocation_list_json()
        result = self.middleware.is_signed_token_revoked(
            self.token_dict['revoked_token'])
        self.assertTrue(result)

    # NOTE(review): source is truncated here mid-definition; the body of this
    # test continues beyond the visible excerpt.
    def test_verify_signed_token_raises_exception_for_revoked_token(self):
#!/usr/bin/env python # encoding: utf-8 """ analyze.py Command line tool for analyzing variants that are annotated with genmod. Created by <NAME> on 2014-09-03. Copyright (c) 2014 __MoonsoInc__. All rights reserved. """ from __future__ import print_function import sys import os import click import inspect try: import cPickle as pickle except: import pickle from codecs import open from datetime import datetime from tempfile import NamedTemporaryFile from pprint import pprint as pp from configobj import ConfigObj import pkg_resources from vcf_parser import VCFParser import genmod from genmod.utils import print_headers from genmod.errors import warning # This is an ad hoc solution to remove huge mostly uninteresting genes. # Please modify this set for your own needs PROBLEMATIC_GENES = set(['MIR6077-1', 'MIR6077-2', 'MIR4315-1', 'MIR4315-2', 'LINC00623', 'LINC00869', 'NBPF8', 'NBPF9', 'NBPF20', 'PPIAL4A', 'PPIAL4B', 'PPIAL4C', 'PDE4DIP', 'LOC100132057', 'LOC100288162', 'SRGAP2D', 'FAM272C', 'SNAR-A3', 'SNAR-A4', 'SNAR-A5', 'SNAR-A6', 'SNAR-A7', 'SNAR-A8', 'SNAR-A9', 'SNAR-A10', 'SNAR-A11', 'SNAR-A14', 'GLUD1P7', ]) def check_families(variant_file): """Loop through the vcf file and check which families that are found.""" families = set([]) if variant_file == '-': variant_parser = VCFParser(fsock = sys.stdin) else: variant_parser = VCFParser(infile = variant_file) for variant in variant_parser: genetic_models = variant['info_dict'].get('GeneticModels', None) if genetic_models: for family_models in genetic_models: family = family_models.split(':')[0] families.add(family) return families def print_results(variant_dict, outfile, vcf_header, family_id, score_key='CADD', freq_key='1000G_freq', mode = 'homozygote', silent=False): """Print the variants to a results file or stdout.""" score_dict = {} # A dictionary with {variant_id: score}. 
Score is usually cadd score or rank score # for variant_id, variant in sorted(variant_dict.items(), key = lambda sort_key: float(sort_key[1]['info_dict'].get('CADD', '0')), reverse=True): column_width = 12 length_of_output = 20 for variant_id in variant_dict: # Get the score for each variant: max_score = max( [ float(score) for score in variant_dict[variant_id]['info_dict'].get( score_key, '0') ] ) if mode == 'compound': # If we look at compounds we want to consider the combined score family_compounds = compound_dict[variant_id]['info_dict'].get('Compounds', None) if compounds: for family in family_compounds: splitted_compounds = family.split(':') if splitted_compounds[0] == family_id: compounds = splitted_compounds[1].split('|') for variant_2_id in compounds: if variant_2_id in variant_dict: max_score_2 = max( [ float(score) for score in variant_dict[variant_2_id]['info_dict'].get( score_key, '0') ] ) if max_score_2 > 10: # print(variant_dict[variant_2_id]) variant_pair = (variant_id, variant_2_id) max_score = (max_score + max_score_2)/2 already_scored = [set(var_pair) for var_pair in list(score_dict.keys())] if set(variant_pair) not in already_scored: score_dict[variant_pair] = max_score else: score_dict[variant_id] = max_score if mode == 'compound': print('\nCompound analysis:\n') if mode == 'dominant': print('\nDominant analysis:\n') if mode == 'homozygote': print('\nHomozygote analysis:\n') if mode == 'denovo': print('\nDe novo analysis:\n') if mode == 'xlinked': print('\nX-linked analysis:\n') header = ['Chrom', 'Position', 'Reference', 'Alternative', 'Cadd score', '1000GMAF', 'Annotation' ] print(''.join(word.ljust(column_width) for word in header)) i = 0 with open(outfile , mode='a', encoding='utf-8') as f: for variant_id in sorted(score_dict, key=score_dict.get, reverse=True): if mode == 'compound': if i < length_of_output: print('Pair %s' % (i+1)) for compound_id in variant_id: print_line = [variant_dict[compound_id]['CHROM'], 
variant_dict[compound_id]['POS'], variant_dict[compound_id]['REF'], variant_dict[compound_id]['ALT'], variant_dict[compound_id]['info_dict'].get(score_key, '-'), variant_dict[compound_id]['info_dict'].get(freq_key, '-'), variant_dict[compound_id]['info_dict'].get('Annotation', '-') ] if i < length_of_output: print(''.join(word.ljust(column_width) for word in print_line)) print_line = [variant_dict[compound_id].get(entry, '-') for entry in vcf_header] f.write('\t'.join(print_line)+'\n') else: print_line = [variant_dict[variant_id]['CHROM'], variant_dict[variant_id]['POS'], variant_dict[variant_id]['REF'], variant_dict[variant_id]['ALT'], variant_dict[variant_id]['info_dict'].get(score_key, ['-'])[0], variant_dict[variant_id]['info_dict'].get(freq_key, ['-'])[0], variant_dict[variant_id]['info_dict'].get('Annotation', ['-'])[0] ] # Print the highest ranked variants to screen: if i < length_of_output: print(''.join(word.ljust(column_width) for word in print_line)) print_line = [variant_dict[variant_id].get(entry, '-') for entry in vcf_header] f.write('\t'.join(print_line)+'\n') i += 1 return ### This is for analyzing the variants ### def make_models(list_of_models): """Make a dictionary of the prefered models. 
If no models are specified all are considered interesting.""" model_set = set() # If no models are specified we allow all models if len(list_of_models) == 0: list_of_models = ['AR', 'AD', 'X'] for model in list_of_models: if 'AR' in model: model_set.add('AR_hom') model_set.add('AR_hom_dn') model_set.add('AR_comp') model_set.add('AR_comp_dn') if 'AD' in model: model_set.add('AD') model_set.add('AD_dn') if 'X' in model: model_set.add('XR') model_set.add('XR_dn') model_set.add('XD') model_set.add('XD_dn') return model_set def remove_inacurate_compounds(compound_dict, family_id): """If the second variant in a compound pair does not meet the requirements they should not be considered.""" for variant_id in list(compound_dict.keys()): # Get the compounds for the variant family_compounds = compound_dict[variant_id]['info_dict'].get('Compounds', None) if compounds: for family in family_compounds.split(','): splitted_compounds = family.split(':') if splitted_compounds[0] == family_id: compounds = splitted_compounds[1].split('|') compound_set = set(compounds) for compound in compounds: # If requrements are not met it has never been placed in compound dict if compound not in compound_dict: compound_set.discard(compound) # If no compounds in the pair upfills the requirements we remove the pair if len(compound_set) == 0: compound_dict.pop(variant_id) return def covered_in_all(variant, coverage_treshold = 7): """Check if the variant is covered in all individuals.""" for individual in variant['genotypes']: if variant['genotypes'][individual].quality_depth < coverage_treshold: return False return True def get_interesting_variants(variant_parser, family_id, dominant_dict, homozygote_dict, compound_dict, x_linked_dict, dominant_dn_dict, freq_treshold, freq_keyword, cadd_treshold, cadd_keyword, gq_treshold, coverage, exclude_problematic): """Collect the interesting variants in their dictionarys. 
add RankScore.""" inheritance_keyword = 'GeneticModels' de_novo_set = set(['AD_dn', 'AR_hom_dn', 'AR_comp_dn', 'XD_dn', 'XR_dn']) dominant_set = set(['AD']) homozygote_set = set(['AR_hom']) compound_set = set(['AR_comp']) x_linked_set = set(['XD', 'XR']) dominant_dn_set = set(['AD_dn']) for variant in variant_parser: annotation = set(variant['info_dict'].get('Annotation', '')) models_found = set([]) family_models = variant['info_dict'].get(inheritance_keyword, None) if family_models: #This is a string on the form 'fam_1:AR_hom,fam_2:AR_hom|AR_hom_dn for family_info in family_models: splitted_family = family_info.split(':') if splitted_family[0] == family_id: models_found = set(splitted_family[1].split('|')) maf = min( [ float(frequency) for frequency in variant['info_dict'].get( freq_keyword, '0' ) ] ) cadd_score = max( [ float(cscore) for cscore in variant['info_dict'].get( cadd_keyword, '0' ) ] ) variant_id = variant['variant_id'] # There is a list of huge genes that becomes problematic when analysing single individuals interesting = True if not models_found: interesting = False if exclude_problematic: if annotation.intersection(PROBLEMATIC_GENES): interesting = False # if not covered_in_all(variant, coverage): # interesting = False if not variant['FILTER'] == 'PASS': interesting = False if not float(variant['QUAL']) > gq_treshold: interesting = False if interesting: # Check if cadd score is available: if cadd_score > cadd_treshold: # Check if MAF is below treshold: if maf < freq_treshold: # First we look at the variants that are not dn: if models_found.intersection(dominant_set): dominant_dict[variant_id] = variant if models_found.intersection(homozygote_set): homozygote_dict[variant_id] = variant if models_found.intersection(compound_set): compound_dict[variant_id] = variant if models_found.intersection(x_linked_set): x_linked_dict[variant_id] = variant if models_found.intersection(dominant_dn_set): dominant_dn_dict[variant_id] = variant return @click.command() 
# Command line interface for the analyze entry point (decorated by the
# @click.command() immediately above).
@click.argument('variant_file',
                nargs=1,
                type=click.Path(exists=True),
                metavar='<vcf_file> or "-"'
)
@click.option('-t', '--family_type',
              type=click.Choice(['ped', 'alt', 'cmms', 'mip']),
              default='ped',
              help="""If the analysis use one of the known setups, please specify which one."""
)
# @click.option('-c', '--config_file',
#               type=click.Path(exists=True),
#               help="""Specify the path to a config file."""
# )
@click.option('--frequency_treshold', '-freq',
              default=0.02,
              nargs=1,
              help="""Specify maf treshold for variants to be considered. Default 0.02"""
)
@click.option('--frequency_keyword', '-freqkey',
              default='1000G_freq',
              nargs=1,
              help="""Specify keyword for frequency in vcf. Default 1000G_freq"""
)
@click.option('--cadd_treshold', '-cadd',
              default=12.0,
              nargs=1,
              help="""Specify the cadd treshold for variants to be considered. Default 12.0"""
)
@click.option('--cadd_keyword', '-caddkey',
              default='CADD',
              nargs=1,
              help="""Specify keyword for CADD scores in vcf. Default CADD"""
)
@click.option('--coverage', '-cov',
              default=7,
              nargs=1,
              help="""Specify minimum read depth in all individuals for variant to be considered. Default 7"""
)
@click.option('--gq_treshold', '-gq',
              default=20,
              nargs=1,
              help="""Specify genotype quality treshold for variants to be considered. Default 20."""
)
# @click.option('-p', '--patterns',
#               type=click.Choice(['AR', 'AD', 'X']),
#               multiple=True,
#               help='Specify the inheritance patterns. Default is all patterns'
# )
@click.option('-o', '--outdir',
              type=click.Path(exists=True),
              default=os.getcwd(),
              help="""Specify the path to a directory where results should be stored. Default is ./"""
)
@click.option('-s', '--silent',
              is_flag=True,
              help='Do not output variants.'
)
@click.option('-exclude', '--exclude_problematic',
              is_flag=True,
              help="""Exclude problematic genes. This flag is preferable if analysis of only one individual."""
)
@click.option('-v', '--verbose',
              is_flag=True,
              help='Increase output verbosity.'
)
# NOTE(review): the function below is truncated mid-docstring in this excerpt;
# its body is not visible here.
def analyze(variant_file, family_type, frequency_treshold, frequency_keyword,
            cadd_treshold, cadd_keyword, coverage, gq_treshold, outdir,
            silent, exclude_problematic, verbose):
    """Analyze the annotated variants in a VCF file.

    If there are multiple families in the ped one analysis per family will
    be done. The variants are analyzed in five different categories based
    on what inheritance patterns that are followed.

    The differen analysies are:

    AR compound\n
    AR homozygote\n
    Dominant\n
    X linked\n
    Dominant dn\n

    Which variants to be considered are specified in the command line.
    Defaults are (based on a rare disease assumption):

    MAF < 0.02\n
    CADD score > 12\n
    Coverage in all individuals > 7\n
    Call quality > 20\n

    The highest
# <gh_stars>1-10  -- NOTE(review): scraping artifact, not valid Python; commented out so the module parses.
import argparse
import korbinian
import pandas as pd
import numpy as np
import random
import sys
# import debugging tools
from korbinian.utils import pr, pc, pn, aaa


def _read_series_csv(csv_in, sep="\t"):
    """Read a two-column (index, value) csv file into a pd.Series.

    Compatibility helper: ``pd.Series.from_csv`` was removed in pandas 1.0,
    so fall back to ``pd.read_csv`` on modern pandas.
    """
    try:
        return pd.Series.from_csv(csv_in, sep=sep)
    except AttributeError:
        # pandas >= 1.0: first column is the index, second the values.
        df = pd.read_csv(csv_in, sep=sep, header=None, index_col=0)
        return df.iloc[:, 0]


def calc_aa_propensity_from_csv_col(seq_list_csv_in, aa_prop_csv_out, col_name, sep=","):
    """Calculation of amino acid propensity for TM and non-TM region in dataset.

    Parameters
    ----------
    seq_list_csv_in : csv
        input csv file which contains sequences from region of interest
        (e.g. TM and nonTM regions), normally as comma separated values
    aa_prop_csv_out : csv
        output csv file which contains the aa propensity for the region of interest
    col_name : str
        specify which column should be used (e.g. TM01_seq or nonTM_seq).
        This should contain sequences from the interested protein region.
    sep : str
        data format. Default: comma separated file
    """
    # open csv
    df = pd.read_csv(seq_list_csv_in, sep=sep)
    # extract column of interest, and drop empty rows
    ser = df[col_name].dropna()
    # create a string to hold segments from all proteins in list
    massive_string_all_prot = ""
    for seq in ser:
        if type(seq) == str:
            massive_string_all_prot += seq
    # calculate aa propensity in region of interest
    aa_propensity_ser = calc_aa_propensity(massive_string_all_prot)
    # save aa propensity series to output csv file
    aa_propensity_ser.to_csv(aa_prop_csv_out, sep="\t")


def calc_aa_propensity_TM_nonTM(df, TM_col='TM01_seq', nonTM_col='nonTMD_seq'):
    """Calculation of amino acid propensity for TM and non-TM region in dataset.

    Parameters
    ----------
    df : pd.DataFrame
        dataframe which contains the TM and non-TM sequences for each protein
    TM_col : str
        column that contains TM sequences
    nonTM_col : str
        column that contains non-TM sequences

    Returns
    -------
    aa_propensity_TM_nonTM_df : pd.DataFrame
        shows the aa propensity in TM and non-TM region, respectively.
        index is the AA; columns are the input columns plus "_aap"
        (e.g. "TM01_seq" + "_aap")
    """
    # create a string to hold all TM segments from all proteins
    massive_string_TM = ""
    for seq in df[TM_col]:
        if type(seq) == str:
            massive_string_TM += seq

    # create a string to hold all non-TM segments from all proteins
    massive_string_nonTM = ""
    for seq in df[nonTM_col]:
        if type(seq) == str:
            massive_string_nonTM += seq

    # calculate aa propensity in TM region
    TM_aa_propensity_ser = calc_aa_propensity(massive_string_TM)
    # calculate aa propensity in non-TM region
    nonTM_aa_propensity_ser = calc_aa_propensity(massive_string_nonTM)
    # merge the two table into one dataframe
    aa_propensity_TM_nonTM_df = pd.concat([TM_aa_propensity_ser, nonTM_aa_propensity_ser], axis=1)
    # rename the columns to match the content, with the orig name plus "amino acid propensity"
    aa_propensity_TM_nonTM_df.columns = [TM_col + "_aap", nonTM_col + "_aap"]

    return aa_propensity_TM_nonTM_df


def calc_aa_propensity(seq):
    """calculate aa propensity for each residue in a particular sequence.

    Parameters
    ----------
    seq : string
        TM or non-TM sequence

    Returns
    -------
    aa_prop_norm_ser : pd.Series
        Series containing corresponding aa propensity
    """
    # count absolute number of each residue in the input string
    number_each_aa_dict = {}
    all_aa = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N',
              'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
    # create an dictionary of the numbers {"A" : 57, "C" : 5, ...} etc
    for aa in all_aa:
        number_each_aa_dict[aa] = seq.count(aa)

    # create a dictionary to hold the propensity of each residue
    aa_propensity_dict = {}
    length = len(seq)
    for aa in number_each_aa_dict:
        aa_propensity_dict[aa] = number_each_aa_dict[aa] / length

    # turn the dictionary into a pd.Series
    aa_prop_ser = pd.Series(aa_propensity_dict)
    # normalise so that all the aa propensities add up to 1.0
    # this is important if "X" or "U" is in the sequences
    aa_prop_norm_ser = aa_prop_ser / aa_prop_ser.sum()
    # name the index column
    aa_prop_norm_ser.index.name = "freq"
    return aa_prop_norm_ser


def calc_random_aa_ident_from_ser(aa_prop_ser):
    """ Calculates the random AA identity from an input series of AA propensity values.

    Parameters
    ----------
    aa_prop_ser : pd.Series
        Series of AA propensity values
        index : A, C, D, E etc
        values : 0.04, 0.06, .20 etc

    Returns
    -------
    calc_rand_ident : float
        Calculated random identity value, e.g. 0.092.
    """
    all_aa = "ACDEFGHIKLMNPQRSTVWY"
    # check that the index has 20 aa
    if not sorted(list(aa_prop_ser.index)) == sorted(list(all_aa)):
        raise ValueError("Index must be comprised of 20 amino acids, in capitals, single-letter notation.")
    # multiply the probabilities by themselves
    # if 25% of the residues are leucines, and the probability of them staying leucines is 25%
    # that means that 0.25 * 0.25 is the random identity of leucine
    # calculate this for all AA and simply sum!
    # this gives the random identity overall!
    aa_prop_ser = aa_prop_ser**2
    calc_rand_ident = aa_prop_ser.sum()
    return calc_rand_ident


def calc_random_aa_ident(aa_prop_csv_in, rand_seq_ident_csv_out):
    """Mathematical calculation of random amino acid identity based on a particular amino acid propensity.

    Protein regions with a limited aa propensity (e.g. transmembrane regions) have a measurable
    amino acid identity EVEN IN NON-HOMOLOGUES. This is referred to here as the random amino
    acid identity. This formula takes the aa propensity of a sequence or dataset as an input,
    and calculates the random aa identity.

    Parameters
    ----------
    aa_prop_csv_in : csv
        input csv file containing aa propensity for a particular sequence or dataset.
        Typically obtained from the function calc_aa_propensity_from_csv_col
    rand_seq_ident_csv_out : csv
        Output csv file contaning calculated random aa identity
        (due to limited aa propensity), and all the input values
    """
    # open csv into a pandas series, normally with all 20 aa as the index,
    # and a proportion (0.08, 0.09 etc) as the data.
    # FIX: pd.Series.from_csv was removed in pandas 1.0; use the compat helper.
    aa_prop_ser = _read_series_csv(aa_prop_csv_in, sep="\t")
    # calculate the random identity mathematically
    random_aa_identity = calc_random_aa_ident_from_ser(aa_prop_ser)
    # create a output series to contain all the output information
    output_ser = pd.Series()
    output_ser["random_sequence_identity_output"] = random_aa_identity
    aa_prop_ser.index = aa_prop_ser.index + "_input"
    output_ser = pd.concat([output_ser, aa_prop_ser])
    # save the setries as csv file
    output_ser.to_csv(rand_seq_ident_csv_out, sep="\t")
    sys.stdout.write("calc_random_aa_ident is finished\n")


def calc_random_aa_ident_via_randomisation(aa_prop_csv_in, rand_seq_ident_csv_out,
                                           seq_len=1000, number_seq=1000, ident=0.0,
                                           multiprocessing_mode=False):
    """Calculation of random amino acid identity based on a particular amino acid propensity.

    Protein regions with a limited aa propensity (e.g. transmembrane regions) have a measurable
    amino acid identity EVEN IN NON-HOMOLOGUES. This is referred to here as the random amino
    acid identity. This formula takes the aa propensity of a sequence or dataset as an input,
    and calculates the random aa identity.

    Parameters
    ----------
    aa_prop_csv_in : csv
        input csv file containing aa propensity for a particular sequence or dataset.
        Typically obtained from the function calc_aa_propensity_from_csv_col
    rand_seq_ident_csv_out : csv
        outout csv file contaning calculated random aa identity
        (due to limited aa propensity), and all the input values
    seq_len : int
        length of randomly created sequences. To achieve a more plausible result using
        randomisation method, greater values (> 5000) are recommended. Defalut value: 1000
    number_seq : int
        number of aligned sequences. Larger values are recommended. Default value: 1000
    ident : float
        desired overall identity of randomly created sequence matrix. This will not affect
        the random aa identity, but smaller values might increase the precision of the calculation.
        Default value: 0.0 (completely randomised)

    Returns
    -------
    Only returns values when multiprocessing_mode = True (no file is saved)

    random_aa_identity : float
        Random amino acid identity, based on the limited AA propensity in the dataset.
    output_ser = pd.Series
        Output pandas series consisting of the random_aa_identity, and the input
        aa_prop_ser derived from aa_prop_csv_in.
    """
    # open csv into a pandas series, normally with all 20 aa as the index,
    # and a proportion (0.08, 0.09 etc) as the data.
    # FIX: pd.Series.from_csv was removed in pandas 1.0; use the compat helper.
    aa_prop_ser = _read_series_csv(aa_prop_csv_in, sep="\t")
    # extract aa array and propensity array
    aa_propensities = np.array(aa_prop_ser)
    aa_arr = np.array(aa_prop_ser.index)
    # calculate number of residues that need to be replaced based on the desired percentage identity.
    number_mutations = int(np.round(seq_len*(1 - ident)))
    # generate random sequences, extract the original reference sequence and the sequence cluster
    orig_and_mut_seqs = generate_random_seq(seq_len, number_seq, number_mutations, aa_arr, aa_propensities)
    # sys.stdout.write("\n")
    # sys.stdout.flush()
    # extract the original sequence, of which the matrix are variants
    orig_seq = orig_and_mut_seqs[0]
    # calculate aa propensity and find all used aa in the orig_seq
    aa_prop_orig_seq = calc_aa_propensity(orig_seq)
    aa_in_orig_seq_list = list(aa_prop_orig_seq.loc[aa_prop_orig_seq > 0].index)
    # extract the matrix of mutated sequences, slightly different from the orig_seq
    mut_seqs_matrix = orig_and_mut_seqs[1]
    # make a list of residues in each position (each column in MSA)
    list_of_columnwise_strings = []
    for i in range(mut_seqs_matrix.shape[1]):
        """ joins up everything in column
        orig seq  : G  I  L  I
        mut1        G  I  L  I
        mut2        G  V  L  I
        mut3        G  I  L  P

        G : GGG
        I : IVI
        L : LLL
        etc.
        """
        # takes one column, joins all aa into a single
<reponame>olvrou/CCF # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the Apache 2.0 License. import os import time from enum import Enum import paramiko import logging import subprocess import getpass from contextlib import contextmanager import infra.path import json import uuid import ctypes import signal from loguru import logger as LOG DBG = os.getenv("DBG", "cgdb") _libc = ctypes.CDLL("libc.so.6") def _term_on_pdeathsig(): # usr/include/linux/prctl.h: #define PR_SET_PDEATHSIG 1 _libc.prctl(1, signal.SIGTERM) def popen(*args, **kwargs): kwargs["preexec_fn"] = _term_on_pdeathsig return subprocess.Popen(*args, **kwargs) def coverage_enabled(bin): return ( subprocess.run( f"nm -C {bin} | grep __llvm_coverage_mapping", shell=True ).returncode == 0 ) @contextmanager def sftp_session(hostname): client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect(hostname) try: session = client.open_sftp() try: yield session finally: session.close() finally: client.close() def log_errors(out_path, err_path): error_filter = ["[fail]", "[fatal]"] try: errors = 0 with open(out_path, "r") as lines: for line in lines: if any(x in line for x in error_filter): LOG.error("{}: {}".format(out_path, line.rstrip())) errors += 1 if errors: try: with open(err_path, "r") as lines: LOG.error("{} contents:".format(err_path)) LOG.error(lines.read()) except IOError: LOG.exception("Could not read err output {}".format(err_path)) except IOError: LOG.exception("Could not check output {} for errors".format(out_path)) class CmdMixin(object): def set_recovery(self): self.cmd.append("--start=recover") self.cmd = list(dict.fromkeys(self.cmd)) def set_perf(self): self.cmd = [ "perf", "record", "--freq=1000", "--call-graph=dwarf", "-s", ] + self.cmd class SSHRemote(CmdMixin): def __init__( self, name, hostname, exe_files, data_files, cmd, workspace, label, env=None ): """ Runs a command on a remote host, through an SSH connection. 
A temporary directory is created, and some files can be shipped over. The command is run out of that directory. Note that the name matters, since the temporary directory that will be first deleted, then created and populated is workspace/label_name. There is deliberately no cleanup on shutdown, to make debugging/inspection possible. setup() connects, creates the directory and ships over the files start() runs the specified command stop() disconnects, which shuts down the command via SIGHUP restart() reconnects and reruns the specified command """ self.hostname = hostname # For SSHRemote, both executable files (host and enclave) and data # files (ledger, secrets) are copied to the remote self.files = exe_files self.files += data_files self.cmd = cmd self.client = paramiko.SSHClient() self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) self.root = os.path.join(workspace, label + "_" + name) self.name = name self.env = env or {} def _rc(self, cmd): LOG.info("[{}] {}".format(self.hostname, cmd)) _, stdout, _ = self.client.exec_command(cmd) return stdout.channel.recv_exit_status() def _connect(self): LOG.debug("[{}] connect".format(self.hostname)) self.client.connect(self.hostname) def _setup_files(self): assert self._rc("rm -rf {}".format(self.root)) == 0 assert self._rc("mkdir -p {}".format(self.root)) == 0 session = self.client.open_sftp() for path in self.files: tgt_path = os.path.join(self.root, os.path.basename(path)) LOG.info("[{}] copy {} from {}".format(self.hostname, tgt_path, path)) session.put(path, tgt_path) session.close() executable = self.cmd[0] if executable.startswith("./"): executable = executable[2:] assert self._rc("chmod +x {}".format(os.path.join(self.root, executable))) == 0 def get(self, filename, timeout=60, targetname=None): """ Get file called `filename` under the root of the remote. If the file is missing, wait for timeout, and raise an exception. 
If the file is present, it is copied to the CWD on the caller's host, as `targetname` if it is set. This call spins up a separate client because we don't want to interrupt the main cmd that may be running. """ with sftp_session(self.hostname) as session: for seconds in range(timeout): try: targetname = targetname or filename session.get(os.path.join(self.root, filename), targetname) LOG.debug( "[{}] found {} after {}s".format( self.hostname, filename, seconds ) ) break except Exception: time.sleep(1) else: raise ValueError(filename) def list_files(self, timeout=60): files = [] with sftp_session(self.hostname) as session: for seconds in range(timeout): try: files = session.listdir(self.root) break except Exception: time.sleep(1) else: raise ValueError(self.root) return files def get_logs(self): with sftp_session(self.hostname) as session: for filename in ("err", "out"): try: filepath = os.path.join(self.root, filename) local_filepath = "{}_{}_{}".format( self.hostname, filename, self.name ) session.get(filepath, local_filepath) LOG.info("Downloaded {}".format(local_filepath)) except Exception: LOG.warning( "Failed to download {} from {}".format(filepath, self.hostname) ) def _wait_for_termination(self, stdout, timeout=10): chan = stdout.channel for _ in range(timeout): if chan.exit_status_ready(): if chan.recv_exit_status() is not 0: raise RuntimeError("SSHRemote did not terminate gracefully") else: LOG.success("Command finished") return else: LOG.error("Command not ready") time.sleep(1) raise TimeoutError("Timed out waiting for SSHRemote to terminate") def start(self, wait_for_termination=False): """ Start cmd on the remote host. stdout and err are captured to file locally. We create a pty on the remote host under which to run the command, so as to get a SIGHUP on disconnection. 
""" cmd = self._cmd() LOG.info("[{}] {}".format(self.hostname, cmd)) stdin, stdout, stderr = self.client.exec_command(cmd, get_pty=True) if wait_for_termination: self._wait_for_termination(stdout) def stop(self): """ Disconnect the client, and therefore shut down the command as well. """ LOG.info("[{}] closing".format(self.hostname)) self.get_logs() log_errors( "{}_out_{}".format(self.hostname, self.name), "{}_err_{}".format(self.hostname, self.name), ) self.client.close() def restart(self): self._connect() self.start() def setup(self): """ Connect to the remote host, empty the temporary directory if it exsits, and populate it with the initial set of files. """ self._connect() self._setup_files() def _cmd(self): env = " ".join(f"{key}={value}" for key, value in self.env.items()) cmd = " ".join(self.cmd) return f"cd {self.root} && {env} ./{cmd} 1>out 2>err 0</dev/null" def _dbg(self): return "cd {} && {} --args ./{}".format(self.root, DBG, " ".join(self.cmd)) def _connect_new(self): client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) client.connect(self.hostname) return client def wait_for_stdout_line(self, line, timeout): client = self._connect_new() try: for _ in range(timeout): _, stdout, _ = client.exec_command(f"grep -F '{line}' {self.root}/out") if stdout.channel.recv_exit_status() == 0: return time.sleep(1) raise ValueError( "{} not found in stdout after {} seconds".format(line, timeout) ) finally: client.close() def print_result(self, lines): client = self._connect_new() try: _, stdout, _ = client.exec_command(f"tail -{lines} {self.root}/out") if stdout.channel.recv_exit_status() == 0: LOG.success(f"Result for {self.name}:") for line in stdout.read().splitlines(): LOG.debug(line.decode()) return finally: client.close() @contextmanager def ssh_remote(name, hostname, files, cmd): """ Context Manager wrapper for SSHRemote """ remote = SSHRemote(name, hostname, files, cmd) try: remote.setup() remote.start() yield remote 
finally: remote.stop() class LocalRemote(CmdMixin): def __init__( self, name, hostname, exe_files, data_files, cmd, workspace, label, env=None ): """ Local Equivalent to the SSHRemote """ self.hostname = hostname self.exe_files = exe_files self.data_files = data_files self.cmd = cmd self.root = os.path.join(workspace, label + "_" + name) self.proc = None self.stdout = None self.stderr = None self.env = env self.name = name def _rc(self, cmd): LOG.info("[{}] {}".format(self.hostname, cmd)) return subprocess.call(cmd, shell=True) def _setup_files(self): assert self._rc("rm -rf {}".format(self.root)) == 0 assert self._rc("mkdir -p {}".format(self.root)) == 0 for path in self.exe_files: dst_path = os.path.join(self.root, os.path.basename(path)) src_path = os.path.join(os.getcwd(), path) assert self._rc("ln -s {} {}".format(src_path, dst_path)) == 0 for path in self.data_files: dst_path = os.path.join(self.root, os.path.basename(path)) src_path = os.path.join(os.getcwd(), path) assert self._rc("cp {} {}".format(src_path, dst_path)) == 0 # Make sure relative paths include current directory. Absolute paths will be unaffected self.cmd[0] = os.path.join(".", os.path.normpath(self.cmd[0])) def get(self, filename, timeout=60, targetname=None): path = os.path.join(self.root, filename) for _ in range(timeout): if os.path.exists(path): break time.sleep(1) else: raise ValueError(path) targetname = targetname or filename assert self._rc("cp {} {}".format(path, targetname)) == 0 def list_files(self): return os.listdir(self.root) def _wait_for_termination(self, timeout=10): try: self.proc.wait(timeout) except subprocess.TimeoutExpired: raise TimeoutError("Timed out waiting for LocalRemote to terminate") if self.proc.returncode is not 0: raise RuntimeError("LocalRemote did not terminate gracefully") def start(self, wait_for_termination=False, timeout=10): """ Start cmd. stdout and err are captured to file locally. 
""" cmd = self._cmd() LOG.info(f"[{self.hostname}] {cmd} (env: {self.env})") self.stdout = open(os.path.join(self.root, "out"), "wb") self.stderr = open(os.path.join(self.root, "err"), "wb") self.proc = popen( self.cmd, cwd=self.root, stdout=self.stdout, stderr=self.stderr, env=self.env, ) if wait_for_termination: self._wait_for_termination() def stop(self): """ Disconnect the client, and therefore shut down the command as well. """ LOG.info("[{}] closing".format(self.hostname)) if self.proc: self.proc.terminate() self.proc.wait() if self.stdout: self.stdout.close() if self.stderr: self.stderr.close() log_errors(os.path.join(self.root, "out"), os.path.join(self.root, "err")) def restart(self): self.start() def setup(self): """ Empty the temporary directory if it exists, and populate it with the initial set of files. """ self._setup_files() def _cmd(self): return "cd {} && {} 1>out 2>err".format(self.root, " ".join(self.cmd)) def _dbg(self): return "cd {} && {} --args {}".format(self.root, DBG, " ".join(self.cmd)) def wait_for_stdout_line(self, line, timeout): for _ in range(timeout): with open(os.path.join(self.root, "out"), "rb") as out: for out_line in out: if line.strip() in out_line.strip().decode(): return time.sleep(1) raise ValueError( "{} not found in stdout after {} seconds".format(line, timeout) ) def print_result(self, line): with open(os.path.join(self.root, "out"), "rb") as out: lines = out.read().splitlines() result = lines[-line:] LOG.success(f"Result for {self.name}:") for line in result: LOG.debug(line.decode()) CCF_TO_OE_LOG_LEVEL = { "trace": "VERBOSE", "debug": "INFO", "info": "WARNING", "fail": "ERROR", "fatal": "FATAL", } class CCFRemote(object): BIN = "cchost" DEPS = [] def __init__( self, lib_path, local_node_id, host, pubhost, node_port, rpc_port, remote_class, enclave_type, verify_quote, workspace, label, other_quote=None, other_quoted_data=None, log_level="info", ignore_quote=False, sig_max_tx=1000, sig_max_ms=1000, node_status="pending", 
election_timeout=1000, memory_reserve_startup=0, notify_server=None,
# Hangman Game # ----------------------------------- # Helper code # You don't need to understand this helper code, # but you will have to know how to use the functions # (so be sure to read the docstrings!) import random import string import re WORDLIST_FILENAME = "words.txt" def load_words(): """ Returns a list of valid words. Words are strings of lowercase letters. Depending on the size of the word list, this function may take a while to finish. """ print("Loading word list from file...") # inFile: file inFile = open(WORDLIST_FILENAME, 'r') # line: string line = inFile.readline() # wordlist: list of strings wordlist = line.split() print(" ", len(wordlist), "words loaded.") return wordlist def choose_word(wordlist): """ wordlist (list): list of words (strings) Returns a word from wordlist at random """ return random.choice(wordlist) # end of helper code # ----------------------------------- # Load the list of words into the variable wordlist # so that it can be accessed from anywhere in the program wordlist = load_words() letters_guessed = [] def is_word_guessed(secret_word, letters_guessed): """ secret_word: string, the word the user is guessing; assumes all letters are lowercase letters_guessed: list (of letters), which letters have been guessed so far; assumes that all letters are lowercase returns: boolean, True if all the letters of secret_word are in letters_guessed; False otherwise """ return all(letter in letters_guessed for letter in set(secret_word)) def get_guessed_word(secret_word, letters_guessed): """ secret_word: string, the word the user is guessing letters_guessed: list (of letters), which letters have been guessed so far returns: string, comprised of letters, underscores (_), and spaces that represents which letters in secret_word have been guessed so far. 
""" word = [l if l in letters_guessed else '_ ' for l in secret_word] return ''.join(word) def get_available_letters(letters_guessed): """ letters_guessed: list (of letters), which letters have been guessed so far returns: string (of letters), comprised of letters that represents which letters have not yet been guessed. """ letters = [l for l in string.ascii_lowercase if l not in letters_guessed] return ''.join(letters) def hangman(secret_word): """ secret_word: string, the secret word to guess. Starts up an interactive game of Hangman. * At the start of the game, let the user know how many letters the secret_word contains and how many guesses s/he starts with. * The user should start with 6 guesses * Before each round, you should display to the user how many guesses s/he has left and the letters that the user has not yet guessed. * Ask the user to supply one guess per round. Remember to make sure that the user puts in a letter! * The user should receive feedback immediately after each guess about whether their guess appears in the computer's word. * After each guess, you should display to the user the partially guessed word so far. Follows the other limitations detailed in the problem write-up. """ start_round(secret_word, 6, 3, False, True) # When you've completed your hangman function, scroll down to the bottom # of the file and uncomment the first two lines to test # (hint: you might want to pick your own # secret_word while you're doing your own testing) # ----------------------------------- def start_round(secret_word, guesses_remaining, warnings_remaining, hints_enabled, is_first_round=False): """ Starts a round of Hangman. :param secret_word: The secret word that the user needs to guess. :type secret_word: str :param guesses_remaining: The amount of guesses remaining. :type guesses_remaining: int :param warnings_remaining: The amount of warnings remaining. :type warnings_remaining: int :param hints_enabled: Whether or not hint requests are enabled. 
:type warnings_remaining: bool :param is_first_round: Whether or not the player just started the game. :type warnings_remaining: bool :rtype: None """ # Greet user for new game if is_first_round: print('Welcome to the game Hangman!') print('I am thinking of a word that is {} letters long.' .format(len(secret_word))) print('You have 3 warnings left.') # Prompt the user for a guess guesses = pluralize('guess', guesses_remaining) print('-------------\nYou have {} left.'.format(guesses)) letters = get_available_letters(letters_guessed) print('Available letters:', letters) guess = input('Please guess a letter: ') # Handle hint requests if hints_enabled and guess == '*': guessed_word = get_guessed_word(secret_word, letters_guessed) print('Possible matches are:') show_possible_matches(guessed_word) start_round(secret_word, guesses_remaining, warnings_remaining, hints_enabled) return # Handle invalid guesses if guess in letters_guessed or not re.match('[a-z]', guess, re.I): warning = 'Oops! That is not a valid letter.' if guess in letters_guessed: warning = 'Oops! You already guessed that letter.' if warnings_remaining < 1: # Subtract a guess warnings_remaining = 3 guesses_remaining -= 1 if guesses_remaining < 1: lose_game(secret_word) warning += ' You have no warnings left so you lose one guess: ' else: # Subtract a warning warnings_remaining -= 1 warnings = pluralize('warning', warnings_remaining) warning += ' You have {} left: '.format(warnings) warning += get_guessed_word(secret_word, letters_guessed) print(warning) start_round(secret_word, guesses_remaining, warnings_remaining, hints_enabled) return # Update guesses letters_guessed.append(guess) lost_guesses = get_lost_guesses(guess, secret_word) guesses_remaining -= lost_guesses # Output guess success or failure guessed_word = get_guessed_word(secret_word, letters_guessed) if lost_guesses == 0: print('Good guess:', guessed_word) else: print('Oops! 
That letter is not in my word:', guessed_word) # Player wins game when all letters are guessed if is_word_guessed(secret_word, letters_guessed): win_game(secret_word, guesses_remaining) if guesses_remaining < 1: # Player loses game if no guesses remain lose_game(secret_word) else: # Start a new round if player has guesses remaining start_round(secret_word, guesses_remaining, warnings_remaining, hints_enabled) def pluralize(noun, quantity): """ Returns a pluralization of a noun preceded by its quantity by suffixing a morpheme (limited to the regular plural morphemes 's' and 'es'). :param noun: The noun to be pluralized. :type noun: str :param quantity: The quantity of the noun. :type quantity: int :returns: The pluralization of the noun. :rtype: str """ unpluralized_noun = ' '.join([str(quantity), noun]) if (quantity == 1): return unpluralized_noun morpheme = 's' if (noun.endswith('s')): morpheme = 'es' return ''.join([unpluralized_noun, morpheme]) def get_lost_guesses(guessed_letter, secret_word): """ Calculates the number of guesses lost from a guess according to the secret_word. :param guessed_letter: The letter the player guessed. :type guessed_letter: str :param secret_word: The secret word that the player needs to guess. :type secret_word: str :returns: The number of guesses the player lost. :rtype: int """ if guessed_letter in secret_word: return 0 else: if guessed_letter in 'aeiou': return 2 return 1 def win_game(secret_word, guesses_remaining): """ Alerts the player that they won the game before terminating the game. :param secret_word: The secret word that the player guessed. :type secret_word: str :param guesses_remaining: The amount of guesses remaining. :type guesses_remaining: int :rtype: None """ score = len(set(secret_word)) * guesses_remaining print('------------\nCongratulations, you won!') print('Your total score for this game is:', score) exit() def lose_game(secret_word): """ Alerts the player that they lost the game before terminating the game. 
:param secret_word: The secret word that the player failed to guess. :type secret_word: str :rtype: None """ print('Sorry, you ran out of guesses. The word was', secret_word) exit() def match_with_gaps(my_word, other_word): """ my_word: string with _ characters, current guess of secret word other_word: string, regular English word returns: boolean, True if all the actual letters of my_word match the corresponding letters of other_word, or the letter is the special symbol _ , and my_word and other_word are of the same length; False otherwise: """ my_word = my_word.replace('_ ', '_') other_word_letters = [] non_other_word_letters = [] if len(my_word) != len(other_word): return False for index, letter in enumerate(my_word): other_letter = other_word[index] if letter == '_': non_other_word_letters.append(other_letter) if other_letter in other_word_letters: return False else: other_word_letters.append(other_letter) if letter != other_letter or letter in non_other_word_letters: return False return True def show_possible_matches(my_word): """ my_word: string with _ characters, current guess of secret word returns: nothing, but should print out every word in wordlist that matches my_word. Keep in mind that in hangman when a letter is guessed, all the positions at which that letter occurs in the secret word are revealed. Therefore, the hidden letter(_ ) cannot be one of the letters in the word that has already been revealed. """ matches = [word for word in wordlist if match_with_gaps(my_word, word)] if len(matches) == 0: print('No matches found') print(' '.join(matches)) def hangman_with_hints(secret_word): """ Starts an interactive game of Hangman with hints. :param secret_word: The secret word that the player guessed. :type secret_word: str
<filename>pyhdb/cursor.py # Copyright 2014, 2015 SAP SE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections ### from pyhdb.protocol.message import RequestMessage from pyhdb.protocol.segments import RequestSegment from pyhdb.protocol.types import escape_values, by_type_code from pyhdb.protocol.parts import Command, FetchSize, ResultSetId, StatementId, Parameters, WriteLobRequest from pyhdb.protocol.constants import message_types, function_codes, part_kinds from pyhdb.exceptions import ProgrammingError, InterfaceError, DatabaseError from pyhdb.compat import izip FORMAT_OPERATION_ERRORS = [ 'not enough arguments for format string', 'not all arguments converted during string formatting' ] def format_operation(operation, parameters=None): if parameters is not None: e_values = escape_values(parameters) try: operation = operation % e_values except TypeError as msg: if str(msg) in FORMAT_OPERATION_ERRORS: # Python DBAPI expects a ProgrammingError in this case raise ProgrammingError(str(msg)) else: # some other error message appeared, so just reraise exception: raise return operation class PreparedStatement(object): """Reference object to a prepared statement including parameter (meta) data""" ParamTuple = collections.namedtuple('Parameter', 'id type_code length value') def __init__(self, connection, statement_id, params_metadata, result_metadata_part): """Initialize PreparedStatement part object :param connection: connection object :param statement_id: 8-byte statement 
identifier :param params_metadata: A tuple of named-tuple instances containing parameter meta data: Example: (ParameterMetadata(options=2, datatype=26, mode=1, id=0, length=24, fraction=0),) :param result_metadata_part: can be None """ self._connection = connection self.statement_id = statement_id self._params_metadata = params_metadata self.result_metadata_part = result_metadata_part self._multi_row_parameters = None self._num_rows = None self._iter_row_count = None def prepare_parameters(self, multi_row_parameters): """ Attribute sql parameters with meta data for a prepared statement. Make some basic checks that at least the number of parameters is correct. :param multi_row_parameters: A list/tuple containing list/tuples of parameters (for multiple rows) :returns: A generator producing parameters attributed with meta data for one sql statement (a row) at a time """ self._multi_row_parameters = multi_row_parameters self._num_rows = len(multi_row_parameters) self._iter_row_count = 0 return self def __repr__(self): return '<PreparedStatement id=%r>' % self.statement_id def __iter__(self): return self def __bool__(self): return self._iter_row_count < self._num_rows # Python 2.7 compat __nonzero__ = __bool__ def __next__(self): if self._iter_row_count == self._num_rows: raise StopIteration() parameters = self._multi_row_parameters[self._iter_row_count] if not isinstance(parameters, (list, tuple, dict)): raise ProgrammingError("Prepared statement parameters supplied as %s, shall be list, tuple or dict." % type(parameters).__name__) if len(parameters) != len(self._params_metadata): raise ProgrammingError("Prepared statement parameters expected %d supplied %d." 
                             % (len(self._params_metadata), len(parameters)))
        # NOTE(review): continuation of PreparedStatement.__next__ — the start of
        # this method (and the raise whose message ends above) lies before this chunk.
        # Build one row of ParamTuples by pairing each parameter's metadata with the
        # caller-supplied value at the same positional id.
        row_params = [self.ParamTuple(p.id, p.datatype, p.length, parameters[p.id])
                      for p in self._params_metadata]
        self._iter_row_count += 1
        return row_params

    # Python 2.7 compat
    next = __next__

    def back(self):
        # Rewind the row iterator by one (e.g. so a row can be re-sent after a
        # partially processed EXECUTE round trip).
        assert self._iter_row_count > 0, 'already stepped back to beginning of iterator data'
        self._iter_row_count -= 1


class Cursor(object):
    """Database cursor class"""

    def __init__(self, connection):
        self.connection = connection
        self._buffer = iter([])  # buffered result rows not yet consumed by fetch*()
        self._received_last_resultset_part = False
        self._executed = None
        # DBAPI (PEP 249) attributes:
        self.rowcount = -1
        self._column_types = None
        self.description = None
        self.rownumber = None
        self.arraysize = 1
        # statement_id -> PreparedStatement cache, filled by prepare():
        self._prepared_statements = {}

    @property
    def prepared_statement_ids(self):
        # Ids of all statements prepared (and cached) via prepare().
        return self._prepared_statements.keys()

    def get_prepared_statement(self, statement_id):
        # Look up a cached PreparedStatement; raises KeyError for unknown ids.
        return self._prepared_statements[statement_id]

    def prepare(self, statement):
        """Prepare SQL statement in HANA and cache it

        :param statement: a valid SQL statement
        :returns: statement_id (of prepared and cached statement)
        """
        self._check_closed()
        self._column_types = None
        statement_id = params_metadata = result_metadata_part = None

        # Send a PREPARE request for the raw statement text:
        request = RequestMessage.new(
            self.connection,
            RequestSegment(
                message_types.PREPARE,
                Command(statement)
            )
        )
        response = self.connection.send_request(request)

        # Pick the parts we need out of the reply segment; RESULTSETMETADATA is
        # only present for statements that produce a result set.
        for part in response.segments[0].parts:
            if part.kind == part_kinds.STATEMENTID:
                statement_id = part.statement_id
            elif part.kind == part_kinds.PARAMETERMETADATA:
                params_metadata = part.values
            elif part.kind == part_kinds.RESULTSETMETADATA:
                result_metadata_part = part

        # Check that both variables have been set in previous loop, we need them:
        assert statement_id is not None
        assert params_metadata is not None

        # cache statement:
        self._prepared_statements[statement_id] = PreparedStatement(
            self.connection, statement_id, params_metadata, result_metadata_part)
        return statement_id

    def execute_prepared(self, prepared_statement, multi_row_parameters):
        """Execute a previously prepared statement, possibly for multiple rows.

        :param prepared_statement: A PreparedStatement instance
        :param multi_row_parameters: A list/tuple containing list/tuples of
            parameters (for multiple rows)
        """
        self._check_closed()

        # Convert parameters into a generator producing lists with parameters
        # as named tuples (incl. some meta data):
        parameters = prepared_statement.prepare_parameters(multi_row_parameters)

        # One EXECUTE round trip per batch until the parameter source is exhausted.
        while parameters:
            request = RequestMessage.new(
                self.connection,
                RequestSegment(
                    message_types.EXECUTE,
                    (StatementId(prepared_statement.statement_id),
                     Parameters(parameters))
                )
            )
            reply = self.connection.send_request(request)

            parts = reply.segments[0].parts
            function_code = reply.segments[0].function_code
            if function_code == function_codes.SELECT:
                self._handle_select(parts, prepared_statement.result_metadata_part)
            elif function_code in function_codes.DML:
                # LOB values that did not fit into the request are delivered
                # afterwards; hand their buffers over for WRITELOB follow-ups.
                self._handle_upsert(parts, request.segments[0].parts[1].unwritten_lobs)
            elif function_code == function_codes.DDL:
                # No additional handling is required
                pass
            elif function_code in (function_codes.DBPROCEDURECALL,
                                   function_codes.DBPROCEDURECALLWITHRESULT):
                self._handle_dbproc_call(parts, prepared_statement._params_metadata)  # resultset metadata set in prepare
            else:
                raise InterfaceError("Invalid or unsupported function code received: %d" % function_code)

    def _execute_direct(self, operation):
        """Execute statements which are not going through 'prepare_statement'
        (aka 'direct execution').

        Either they have no parameters, or Python's string expansion has been
        applied to the SQL statement.

        :param operation: the complete SQL statement text to execute
        """
        request = RequestMessage.new(
            self.connection,
            RequestSegment(
                message_types.EXECUTEDIRECT,
                Command(operation)
            )
        )
        reply = self.connection.send_request(request)

        parts = reply.segments[0].parts
        function_code = reply.segments[0].function_code
        if function_code == function_codes.SELECT:
            self._handle_select(parts)
        elif function_code in function_codes.DML:
            self._handle_upsert(parts)
        elif function_code == function_codes.DDL:
            # No additional handling is required
            pass
        elif function_code in (function_codes.DBPROCEDURECALL,
                               function_codes.DBPROCEDURECALLWITHRESULT):
            # No result-set metadata available for direct execution:
            self._handle_dbproc_call(parts, None)
        else:
            raise InterfaceError("Invalid or unsupported function code received: %d" % function_code)

    def execute(self, statement, parameters=None):
        """Execute statement on database

        :param statement: a valid SQL statement
        :param parameters: a list/tuple of parameters
        :returns: this cursor

        In order to be compatible with Python's DBAPI five parameter styles
        must be supported.

        paramstyle     Meaning
        ---------------------------------------------------------
        1) qmark       Question mark style, e.g. ...WHERE name=?
        2) numeric     Numeric, positional style, e.g. ...WHERE name=:1
        3) named       Named style, e.g. ...WHERE name=:name
        4) format      ANSI C printf format codes, e.g. ...WHERE name=%s
        5) pyformat    Python extended format codes, e.g. ...WHERE name=%(name)s

        Hana's 'prepare statement' feature supports 1) and 2), while 4 and 5
        are handled by Python's own string expansion mechanism.
        Note that case 3 is not yet supported by this method!
        """
        self._check_closed()

        if not parameters:
            # Directly execute the statement, nothing else to prepare:
            self._execute_direct(statement)
        else:
            # Single row -> delegate to executemany with a one-element batch:
            self.executemany(statement, parameters=[parameters])
        return self

    def executemany(self, statement, parameters):
        """Execute statement on database with multiple rows to be inserted/updated

        :param statement: a valid SQL statement
        :param parameters: a nested list/tuple of parameters for multiple rows
        :returns: this cursor
        """
        # First try safer hana-style parameter expansion:
        try:
            statement_id = self.prepare(statement)
        except DatabaseError as msg:
            # Hana expansion failed, check message to be sure of reason:
            if 'incorrect syntax near "%"' not in str(msg):
                # Probably some other error than related to string expansion -> raise an error
                raise
            # Statement contained percentage char, so perform Python style
            # parameter expansion:
            for row_params in parameters:
                operation = format_operation(statement, row_params)
                self._execute_direct(operation)
        else:
            # Continue with Hana style statement execution:
            prepared_statement = self.get_prepared_statement(statement_id)
            self.execute_prepared(prepared_statement, parameters)

        # Return cursor object:
        return self

    def _handle_upsert(self, parts, unwritten_lobs=()):
        """Handle reply messages from INSERT or UPDATE statements"""
        self.description = None
        # set to 'True' so that cursor.fetch*() returns just empty list
        self._received_last_resultset_part = True

        for part in parts:
            if part.kind == part_kinds.ROWSAFFECTED:
                self.rowcount = part.values[0]
            elif part.kind in (part_kinds.TRANSACTIONFLAGS, part_kinds.STATEMENTCONTEXT):
                pass
            elif part.kind == part_kinds.WRITELOBREPLY:
                # This part occurrs after lobs have been submitted not at all or
                # only partially during an insert.
                # In this case the parameter part of the Request message contains
                # a list called 'unwritten_lobs' with LobBuffer instances.
                # Those instances are in the same order as 'locator_ids' received
                # in the reply message. These IDs are then used to deliver the
                # missing LOB data to the server via WRITE_LOB_REQUESTs.
                for lob_buffer, lob_locator_id in izip(unwritten_lobs, part.locator_ids):
                    # store locator_id in every lob buffer instance for later reference:
                    lob_buffer.locator_id = lob_locator_id
                self._perform_lob_write_requests(unwritten_lobs)
            else:
                raise InterfaceError("Prepared insert statement response, unexpected part kind %d." % part.kind)
        self._executed = True

    def _perform_lob_write_requests(self, unwritten_lobs):
        """After sending incomplete LOB data during an INSERT or UPDATE this
        method will be called.
        It sends missing LOB data possibly in multiple LOBWRITE requests for all LOBs.

        :param unwritten_lobs: A deque list of LobBuffer instances containing
            LOB data. Those buffers have been assembled in the
            parts.Parameter.pack_lob_data() method.
        """
        # WriteLobRequest consumes the deque; loop until every buffer is drained.
        while unwritten_lobs:
            request = RequestMessage.new(
                self.connection,
                RequestSegment(
                    message_types.WRITELOB,
                    WriteLobRequest(unwritten_lobs)
                )
            )
            self.connection.send_request(request)

    def _handle_select(self, parts, result_metadata=None):
        """Handle reply messages from SELECT statements"""
adapter_seq_read2=adapter_seq_read2, max_adapter_removal=max_adapter_removal, overlap_length=overlap_length, zero_cap=zero_cap, quality_base=quality_base, error_rate=error_rate, min_qual_score=min_qual_score, min_read_len=min_read_len, keep_temp_files=keep_temp_files, sort_mem=sort_mem) total_input += lib_input total_unique += lib_unique ## Remove clonal reads if remove_clonal == True: lib_clonal = remove_clonal_bam(input_bam = path_to_output+sample+"_"+str(current_library) +"_processed_reads.bam", output_bam = path_to_output+sample+"_"+str(current_library) +"_processed_reads_no_clonal.bam", metric = path_to_output+sample+"_"+ str(current_library)+".metric", is_pe = True, path_to_picard=path_to_picard, java_options=java_options) subprocess.check_call(shlex.split("rm "+path_to_output+sample+"_"+ str(current_library)+"_processed_reads.bam")) if not keep_clonal_stats: subprocess.check_call(shlex.split("rm "+" "+path_to_output+sample+"_"+ str(current_library)+".metric")) total_clonal += lib_clonal print_checkpoint("There are " + str(total_input) + " total input read pairs") print_checkpoint("There are " + str(total_unique) + " uniquely mapping read pairs, " + str(float(total_unique) / total_input*100) + " percent remaining") if remove_clonal == True: total_non_clonal = total_unique - total_clonal print_checkpoint("There are "+str(total_non_clonal)+" non-clonal read pairs, "+ str(float(total_non_clonal) / total_input*100)+" percent remaining") ## Merge bam files to get final bam file library_files = [path_to_output+sample+"_"+str(library)+"_processed_reads_no_clonal.bam" for library in set(libraries)] if len(library_files) > 1: merge_bam_files(library_files, path_to_output+sample+ "_processed_reads_no_clonal.bam", path_to_samtools) subprocess.check_call(shlex.split("rm "+" ".join(library_files))) else: subprocess.check_call( shlex.split("mv "+library_files[0]+" "+ path_to_output+sample+"_processed_reads_no_clonal.bam") ) ## If not removing clonal reads else: 
library_files = [path_to_output+sample+"_"+str(library)+"_processed_reads.bam" for library in set(libraries)] if len(library_files) > 1: merge_bam_files(library_files,path_to_output+sample+"_processed_reads.bam",path_to_samtools) subprocess.check_call(shlex.split("rm "+" ".join(library_files))) else: subprocess.check_call(shlex.split("mv "+library_files[0]+" "+path_to_output+sample+"_processed_reads.bam")) #Calling methylated sites if generate_allc_file: print_checkpoint("Begin calling mCs") if remove_clonal == True: output_bam_file = path_to_output+sample+"_processed_reads_no_clonal.bam" else: output_bam_file = path_to_output+sample+"_processed_reads.bam" call_methylated_sites_pe(output_bam_file, sample, reference_fasta, unmethylated_control, sig_cutoff=sig_cutoff, num_procs=num_procs, num_upstr_bases=num_upstr_bases, num_downstr_bases=num_downstr_bases, generate_mpileup_file=generate_mpileup_file, compress_output=compress_output, bgzip=bgzip, path_to_bgzip=path_to_bgzip, path_to_tabix=path_to_tabix, min_cov=min_cov, binom_test=binom_test, remove_chr_prefix=remove_chr_prefix, sort_mem=sort_mem, path_to_files=path_to_output, path_to_samtools=path_to_samtools, min_base_quality=min_base_quality, keep_temp_files=keep_temp_files) print_checkpoint("Done") def run_mapping_pe(current_library, library_read1_files, library_read2_files, sample, forward_reference, reverse_reference, reference_fasta, path_to_output="", path_to_samtools="", path_to_aligner="", aligner="bowtie2", aligner_options=None, merge_by_max_mapq=False, min_mapq=30, num_procs=1, trim_reads=True, path_to_cutadapt="", adapter_seq_read1="AGATCGGAAGAGCACACGTCTGAAC", adapter_seq_read2="AGATCGGAAGAGCGTCGTGTAGGGA", max_adapter_removal=None, overlap_length=None, zero_cap=None, quality_base=None, error_rate=None, min_qual_score=10, min_read_len=30, keep_temp_files=False, sort_mem="500M"): """ This function runs the mapping portion of the methylation calling pipeline. For Paired-end data processing. 
current_library is the ID that you'd like to run mapping on. library_read1_files is a list of library IDs (in the same order as the files list) indiciating which libraries each set of fastq files belong to. If you use a glob, you only need to indicate the library ID for those fastqs once (i.e., the length of files and libraries should be the same) library_read2_files is a list of library IDs (in the same order as the files list) indiciating which libraries each set of fastq files belong to. If you use a glob, you only need to indicate the library ID for those fastqs once (i.e., the length of files and libraries should be the same) sample is a string indicating the name of the sample you're processing. It will be included in the output files. forward_reference is a string indicating the path to the forward strand reference created by build_ref reverse_reference is a string indicating the path to the reverse strand reference created by build_ref reference_fasta is a string indicating the path to a fasta file containing the sequences you used for mapping path_to_samtools is a string indicating the path to the directory containing your installation of samtools. Samtools is assumed to be in your path if this is not provided. path_to_aligner is a string indicating the path to the folder in which bowtie resides. Bowtie is assumed to be in your path if this option isn't used aligner_options is a list of strings indicating options you'd like passed to bowtie2 (or bowtie) num_procs is an integer indicating how many num_procs you'd like to run this function over trim_reads is a boolean indicating that you want to have reads trimmed by cutadapt path_to_cutadapt is the path to the cutadapt execuatable. Otherwise this is assumed to be in your path. adapter_seq_read1: Sequence of an adapter that was ligated to the 3' end of read 1. The adapter itself and anything that follows is trimmed. adapter_seq_read2: Sequence of an adapter that was ligated to the 3' end of read 2. 
The adapter itself and anything that follows is trimmed. max_adapter_removal indicates the maximum number of times to try to remove adapters. Useful when an adapter gets appended multiple times. overlap_length is the minimum overlap length. If the overlap between the read and the adapter is shorter than LENGTH, the read is not modified. This reduces the no. of bases trimmed purely due to short random adapter matches. zero_cap causes negative quality values to be set to zero (workaround to avoid segmentation faults in BWA). quality_base is the offset for quality scores. In other words, assume that quality values are encoded as ascii(quality + QUALITY_BASE). The default (33) is usually correct, except for reads produced by some versions of the Illumina pipeline, where this should be set to 64. error_rate is the maximum allowed error rate (no. of errors divided by the length of the matching region) (default: 0.1) min_qual_score allows you to trim low-quality ends from reads before adapter removal. The algorithm is the same as the one used by BWA (Subtract CUTOFF from all qualities; compute partial sums from all indices to the end of the sequence; cut sequence at the index at which the sum is minimal). min_read_len indicates the minimum length a read must be to be kept. Reads that are too short even before adapter removal are also discarded. In colorspace, an initial primer is not counted. keep_temp_files is a boolean indicating that you'd like to keep the intermediate files generated by this function. This can be useful for debugging, but in general should be left False. 
sort_mem is the parameter to pass to unix sort with -S/--buffer-size command """ #Default option if aligner_options is None: if aligner.lower() == "minimap2": aligner_options = ["-ax","sr","--secondary=no"] elif aligner.lower() == "bowtie": aligner_options = ["-X 1000", "-S", "-k 1", "-m 1", "--best", "--strata", "--chunkmbs 3072", "-n 1", "-e 100"] aligner_options.append("--phred33-quals") else: # bowtie2 aligner_options = [] aligner_options = ["-X 1000", "--no-discordant", "--no-mixed"] aligner_options.append("--phred33-quals") # CASAVA >= 1.8 quality_base = 33 if len(path_to_output) !=0: path_to_output+="/" total_input = 0 total_unique = 0 prefix = path_to_output+sample+"_"+str(current_library) #Split files print_checkpoint("Begin splitting reads for "+str(current_library)) total_input_read1 = split_fastq_file(num_procs,library_read1_files,prefix+"_read1_split_") total_input_read2 = split_fastq_file(num_procs,library_read2_files,prefix+"_read2_split_") #Check if there are same number of reads in read 1 and read 2 if total_input_read1 != total_input_read2: print_error("There are different numbers of read 1 and read 2 "+ "for library \"" + str(current_library) + "\" !") else: total_input = total_input_read1 if trim_reads: #Trimming print_checkpoint("Begin trimming reads for "+str(current_library)) quality_trim_pe( inputf_read1=[prefix+"_read1_split_"+str(i) for i in range(0,num_procs)], outputf_read1=[prefix+"_read1_split_trimmed_"+str(i) for i in range(0,num_procs)], inputf_read2=[prefix+"_read2_split_"+str(i) for i in range(0,num_procs)], outputf_read2=[prefix+"_read2_split_trimmed_"+str(i) for i in range(0,num_procs)], adapter_seq_read1=adapter_seq_read1, adapter_seq_read2=adapter_seq_read2, error_rate=error_rate, quality_base = quality_base, min_qual_score=min_qual_score, min_read_len=min_read_len, input_format="fastq", num_procs=num_procs, max_adapter_removal=max_adapter_removal, overlap_length=overlap_length, zero_cap=zero_cap, 
path_to_cutadapt=path_to_cutadapt) subprocess.check_call(shlex.split("rm "+" ".join([prefix+"_read1_split_"+str(i) for i in range(0,num_procs)]))) subprocess.check_call(shlex.split("rm "+" ".join([prefix+"_read2_split_"+str(i) for i in range(0,num_procs)]))) #Conversion print_checkpoint("Begin converting reads for "+str(current_library)) if num_procs > 1: pool = multiprocessing.Pool(num_procs)#read1 for inputf,output in zip([prefix+"_read1_split_trimmed_"+str(i) for i in range(0,num_procs)], [prefix+"_read1_split_trimmed_converted_"+str(i) for i in range(0,num_procs)]): pool.apply_async(convert_reads_pe,(inputf,output)) for inputf,output in zip([prefix+"_read2_split_trimmed_"+str(i) for i in range(0,num_procs)], [prefix+"_read2_split_trimmed_converted_"+str(i) for i in range(0,num_procs)]): pool.apply_async(convert_reads_pe,(inputf,output,True)) pool.close() pool.join() else: for inputf,output in zip([prefix+"_read1_split_trimmed_"+str(i) for i in range(0,num_procs)], [prefix+"_read1_split_trimmed_converted_"+str(i) for i in range(0,num_procs)]): convert_reads_pe(inputf,output) for inputf,output in zip([prefix+"_read2_split_trimmed_"+str(i) for i in range(0,num_procs)], [prefix+"_read2_split_trimmed_converted_"+str(i) for i in range(0,num_procs)]): convert_reads_pe(inputf,output,True) subprocess.check_call( shlex.split("rm "+" ".join([prefix+"_read1_split_trimmed_"+str(i) for i in range(0,num_procs)])) ) subprocess.check_call( shlex.split("rm "+" ".join([prefix+"_read2_split_trimmed_"+str(i) for i in range(0,num_procs)])) ) #Run bowtie input_fastq_read1 = [prefix+"_read1_split_trimmed_converted_"+str(i) for i in range(0,num_procs)] input_fastq_read2 = [prefix+"_read2_split_trimmed_converted_"+str(i) for i in range(0,num_procs)] else: print_checkpoint("No trimming applied on reads") #Conversion print_checkpoint("Begin converting reads for "+str(current_library)) if num_procs > 1: pool = multiprocessing.Pool(num_procs)#read1 for inputf,output in 
zip([prefix+"_read1_split_"+str(i) for i in range(0,num_procs)], [prefix+"_read1_split_converted_"+str(i) for i in range(0,num_procs)]): pool.apply_async(convert_reads_pe,(inputf,output)) for inputf,output in zip([prefix+"_read2_split_"+str(i) for i in range(0,num_procs)], [prefix+"_read2_split_converted_"+str(i) for i in range(0,num_procs)]): pool.apply_async(convert_reads_pe,(inputf,output,True)) pool.close() pool.join() else: for inputf,output in zip([prefix+"_read1_split_"+str(i) for i in range(0,num_procs)], [prefix+"_read1_split_converted_"+str(i) for i in range(0,num_procs)]): convert_reads_pe(inputf,output) for inputf,output in zip([prefix+"_read2_split_"+str(i) for i in range(0,num_procs)], [prefix+"_read2_split_converted_"+str(i) for i in range(0,num_procs)]): convert_reads_pe(inputf,output,True) subprocess.check_call(shlex.split("rm "+" ".join([prefix+"_read1_split_"+str(i) for i in range(0,num_procs)]))) subprocess.check_call(shlex.split("rm "+" ".join([prefix+"_read2_split_"+str(i) for i in range(0,num_procs)]))) input_fastq_read1 = [prefix+"_read1_split_converted_"+str(i)
<filename>API/AdminPanel/admin_panel_filters/request_list/url_filter_assessments_journal.py from API.setting_tests import TokenSave # Dev01 Staging staging_dev01 = 'https://api-test-ege.interneturok.ru/api/v1/journal/admin/school_users?' put_mark_in_user = 'https://api-test-ege.interneturok.ru/api/v2/results/homeworks/27060198' # FilterYears year_2020 = '&year_id=2020' year_2017_2018 = '&year_id=2019' # FilterTrainingFormat format_all = '' format_basic_education = '&school_type=1' format_additional_education = '&school_type=2' # FilterSchool school_all = '&school_ids=' school_not = '&school_ids=5' school_stolichniy_kit = '&school_ids=15' # FilterGrades klass_all = '&subject_ids=52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,150,151,154,155,156,157,158,159,160,161,162,163,164,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,225,226,227,228,229,230,232,233,234,235' klass_one = '&subject_ids=127,131,135,139,154,170,171,172,173' klass_two = '&subject_ids=128,132,136,140,143,155,174,175,176,177' klass_three = '&subject_ids=129,133,137,141,144,156,178,179,180,181' klass_four = '&subject_ids=130,134,138,142,145,157,182,183,184,185,186,216' klass_five = '&subject_ids=52,53,54,55,56,57,146,147,148,158,187,188,189,190' klass_six = '&subject_ids=58,59,60,61,62,63,64,65,159,191,192,193,194' klass_seven = '&subject_ids=66,68,69,70,71,72,73,74,75,76,160,167,168,169,195,196,204,211' klass_eight = '&subject_ids=77,78,79,80,81,82,83,84,85,86,87,88,161,197,205,206,207,212' klass_nine = 
'&subject_ids=89,90,91,92,93,94,95,96,97,98,99,100,150,151,162,198,208,209,210,213' klass_ten = '&subject_ids=101,102,103,104,105,106,107,108,109,110,111,112,163,199,200,214' klass_eleven = '&subject_ids=113,114,115,116,117,118,119,120,121,122,123,164,201,202,203,215' # FilterSubjects subject_all = '&subject_ids=52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,150,151,154,155,156,157,158,159,160,161,162,163,164,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,225,226,227,228,229,230,232,233,234,235' subject_russian_language = '&subject_ids=52,58,66,77,89,101,113,127,128,129,130' subject_literatura = '&subject_ids=53,59,68,78,90,102,114' subject_english_language = '&subject_ids=54,60,69,79,91,103,115,143,144,145' subject_matematika = '&subject_ids=55,61,135,136,137,138' subject_history = '&subject_ids=56,62,72,83,95,107,119' subject_prirodavedenie = '&subject_ids=57' subject_objestvoznanie = '&subject_ids=63,73,84,96,108,120,148' subject_geografia = '&subject_ids=64,74,85,97,109,147' subject_biologia = '&subject_ids=65,76,88,100,112,123,146' subject_algebra_standart = '&subject_ids=70,80,92' subject_geometria_standart = '&subject_ids=71,81,93' subject_fizika_standart = '&subject_ids=75,86,98' subject_informatika = '&subject_ids=82,94,106,118,167' subject_himiya = '&subject_ids=87,99,111,122' subject_algebra = '&subject_ids=104,116' subject_geometria = '&subject_ids=105,117' subject_fizika = '&subject_ids=46,110,121' subject_literaturnoe_chtenie = '&subject_ids=131,132,133,134' subject_okruzauchi_mir = 
'&subject_ids=139,140,141,142' subject_matematika_profi = '&subject_ids=28' subject_matematika_bazovyai = '&subject_ids=153' subject_vvodni_yrok = '&subject_ids=158,159,160,161,162,163,164,154,155,156,157' subject_podgotovka_k_OGE_matematika = '&subject_ids=150' subject_podgotovka_k_OGE_russian_language = '&subject_ids=151' subject_algebra_effective = '&subject_ids=168' subject_fizika_effective = '&subject_ids=169' subject_geometria_effective = '&subject_ids=204,206,209' subject_fizra = '&subject_ids=170,174,178,187,182,191,198,195,199,201,197' subject_tehnologiya = '&subject_ids=173,177,181,185,190,194,196' subject_izo = '&subject_ids=176,172,180,184,189,193' subject_music = '&subject_ids=179,171,192,183,188,175' subject_osnovi_sovetskoi_itiki = '&subject_ids=186' subject_obz = '&subject_ids=200,202' subject_astronomia = '&subject_ids=203' subject_superJob = '&subject_ids=211,212,213,214,215' subject_german_language = '&subject_ids=221,222,225,226,227' subject_osnovy_sovet_itiki = '&subject_ids=186' subject_tendo_studio = '&subject_ids=216,217,218,219,220' # FilterUser search_all_users = '&student_id=' search_user_hexcal = '&student_id=<EMAIL>' # FilterQuarter quarter_two = '&quarter=2' quarter_three = '&quarter=3' quarter_four = '&quarter=4' year = '&quarter=5' # Other token_admin = '&token=' token_admin_put_mark = '?token=' page = '&page=1' # The token received after login admin received_token = TokenSave.get_token_user_admin() class FilterYears: year_2020 = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + subject_all + token_admin + received_token + year_2020 year_2017_2018 = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + subject_all + token_admin + received_token + year_2017_2018 class FilterTrainingFormat: format_all = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + subject_all + token_admin + received_token + year_2020 format_basic_education = staging_dev01 + page + quarter_three + 
school_all + format_basic_education + search_user_hexcal + subject_all + token_admin + received_token + year_2020 format_additional_education = staging_dev01 + page + quarter_three + school_all + format_additional_education + search_user_hexcal + subject_all + token_admin + received_token + year_2020 class FilterSchool: school_all = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + subject_all + token_admin + received_token + year_2020 school_no_listed = staging_dev01 + page + quarter_three + school_not + search_user_hexcal + subject_all + token_admin + received_token + year_2020 school_stolishnya_kit = staging_dev01 + page + quarter_three + school_stolichniy_kit + search_user_hexcal + subject_all + token_admin + received_token + year_2020 class FilterGrades: klass_all = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + token_admin + received_token + year_2020 klass_one = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_one + token_admin + received_token + year_2020 klass_two = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_two + token_admin + received_token + year_2020 klass_three = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_three + token_admin + received_token + year_2020 klass_four = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_four + token_admin + received_token + year_2020 klass_five = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_five + token_admin + received_token + year_2020 klass_six = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_six + token_admin + received_token + year_2020 klass_seven = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_seven + token_admin + received_token + year_2020 klass_eight = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + 
klass_eight + token_admin + received_token + year_2020 klass_nine = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_nine + token_admin + received_token + year_2020 klass_ten = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_ten + token_admin + received_token + year_2020 klass_eleven = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_eleven + token_admin + received_token + year_2020 class FilterSubjects: subject_all = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + token_admin + received_token + year_2020 subject_russian_language = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_russian_language + token_admin + received_token + year_2020 subject_literatura = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_literatura + token_admin + received_token + year_2020 subject_english_language = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_english_language + token_admin + received_token + year_2020 subject_matematika = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_matematika + token_admin + received_token + year_2020 subject_history = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_history + token_admin + received_token + year_2020 subject_prirodavedenie = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_prirodavedenie + token_admin + received_token + year_2020 subject_objestvoznanie = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_objestvoznanie + token_admin + received_token + year_2020 subject_geografia = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_geografia + token_admin + 
received_token + year_2020 subject_biologia = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_biologia + token_admin + received_token + year_2020 subject_algebra_standart = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_algebra_standart + token_admin + received_token + year_2020 subject_geometria_standart = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_geometria_standart + token_admin + received_token + year_2020 subject_fizika_standart = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_fizika_standart + token_admin + received_token + year_2020 subject_informatika = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_informatika + token_admin + received_token + year_2020 subject_himiya = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_himiya + token_admin + received_token + year_2020 subject_algebra = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_algebra + token_admin + received_token + year_2020 subject_geometria = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_geometria + token_admin + received_token + year_2020 subject_fizika = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_fizika + token_admin + received_token + year_2020 subject_literaturnoe_chtenie = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_literaturnoe_chtenie + token_admin + received_token + year_2020 subject_okruzauchi_mir = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_okruzauchi_mir + token_admin + received_token + year_2020 subject_vvodni_yrok = staging_dev01 + page + quarter_three + school_all + 
search_user_hexcal + klass_all + subject_vvodni_yrok + token_admin + received_token + year_2020 subject_podgotovka_k_OGE_matematika = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_podgotovka_k_OGE_matematika + token_admin + received_token + year_2020 subject_podgotovka_k_OGE_russian_language = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_podgotovka_k_OGE_russian_language + token_admin + received_token + year_2020 subject_algebra_effective = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_algebra_effective + token_admin + received_token + year_2020 subject_fizika_effective = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_fizika_effective + token_admin + received_token + year_2020 subject_geometria_effective = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_geometria_effective + token_admin + received_token + year_2020 subject_fizra = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_fizra + token_admin + received_token + year_2020 subject_tehnologiya = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_tehnologiya + token_admin + received_token + year_2020 subject_izo = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_izo + token_admin + received_token + year_2020 subject_music = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_music + token_admin + received_token + year_2020 subject_osnovi_sovetskoi_itiki = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_osnovi_sovetskoi_itiki + token_admin + received_token + year_2020 subject_obz = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + 
subject_obz + token_admin + received_token + year_2020 subject_astronomia = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_astronomia + token_admin + received_token + year_2020 subject_superJob = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_superJob + token_admin + received_token + year_2020 subject_german_language = staging_dev01 + page + quarter_three + school_all + search_user_hexcal + klass_all + subject_german_language + token_admin
# helper functions for testing properties of candidate schedules
#
# NOTE: this module is Python 2 code (print statement, dict.iteritems,
# integer '/', and the long-removed stdlib `sets` module).
#
# Time model used throughout: a fortnight is indexed as 240 discrete hour
# slots -- 0-119 for the odd week, 120-239 for the even week; 24 slots per
# day, Monday = day 0.
import requests
from sets import Set
import json
import calendar
import itertools
import os
from collections import OrderedDict
import random
from definitions import ROOT_DIR, lessonTypeCodes, LOCAL_API_DIR
from z3 import *

LUNCH_HOURS = [11, 12, 13]


def lessonTypeToCode(lessonType):
    # synthetic "freeday-N" lesson types encode the day number in their
    # last character; real lesson types map through the shared code table
    if "freeday" in lessonType:
        return int(lessonType[-1])
    return lessonTypeCodes[lessonType]


def freeDay(x):
    # all 24 hour-slots of weekday x in the odd week plus the same day
    # in the even week (offset 120)
    day = range(x*24,(x+1)*24)
    return day + [i+120 for i in day]


def hoursBefore(x):
    # every slot strictly before hour x on each weekday, in both weeks
    hours = [range(i * 24, i * 24 + x) + range(120 + i * 24, 120 + i * 24 + x) for i in range(0,5)]
    hours = [i for sublist in hours for i in sublist]
    return hours


def hoursAfter(x):
    # every slot at or after hour x on each weekday, in both weeks
    hours = [range(i * 24 + x, (i + 1) * 24) + range(120 + i * 24 + x, 120 + (i + 1) * 24) for i in range(0,5)]
    hours = [i for sublist in hours for i in sublist]
    return hours


def lunchHoursInWeek():
    # lunch slots (11:00-13:59) for all 10 days of the fortnight
    LUNCH_HOURS = [11, 12, 13]
    return [24 * day + h for day in range(10) for h in LUNCH_HOURS]


def transformMod(modtuple):
    # (code, raw lesson list) -> (code, {lessonType: {classNo: hours}})
    return (modtuple[0], splitIntoLessonTypes(modtuple[1]))


def outputFormatter(model, numToTake, modlst):
    # prints the chosen slot for every lesson type of every selected module,
    # reading the z3 model's integer assignments back out
    for i in range(numToTake):
        modIndex = model[Int("x_%s" % i)].as_long()
        mod = modlst[modIndex]
        moduleCode = mod[0]
        for lessonType, slots in mod[1].iteritems():
            chosenSlot = model[Int('%s_%s' % (moduleCode, lessonType[:3]))].as_long()
            slotName = slots[chosenSlot][0]
            print "%s_%s_%s" % (moduleCode, lessonType[:3], slotName)


def modsListToLessonMapping(transformedModsLst):
    # prepare list of mod -> lessons -> slots (drop the hour lists,
    # keeping only the slot names per lesson type)
    val = {i[0]: {k:v.keys() for k, v in i[1].items()} for i in transformedModsLst}
    return val


def timeList(weektext, daytext, starttime, endtime):
    """Returns list of discrete timeslots based on hour-based indexing in a fortnight
    used for z3's distinct query. 0-119 first week, 120-239 second week.
    24 hours in a day
    If it is a weekend, return empty list

    :param weektext: Odd/Even Week
    :param daytext: day of the week
    :param starttime: 24h format
    :param endtime: 24h format
    :returns: list of hour slots
    :rtype: list
    """
    if daytext == 'Saturday' or daytext == 'Sunday':
        return []
    weekdays = {"Monday": 0, "Tuesday": 1, "Wednesday": 2, "Thursday": 3, "Friday": 4}
    ofst = weekdays[daytext]*24
    # Python 2 integer division: 1430/100 -> 14, i.e. hour buckets
    lst = [i+ofst for i in range(int(starttime)/100, int(endtime)/100)]
    if (weektext == "Odd Week"):
        return lst
    elif (weektext == "Even Week"):
        return [i+120 for i in lst]
    # default every week
    else:
        return [i+120 for i in lst]+lst


def splitIntoLessonTypes(mod, option = ""):
    # groups a module's raw lesson list into {lessonType: {classNo: [hours]}}
    def shuffle_dict(d):
        # randomise key order (relies on CPython dict preserving OrderedDict
        # insertion order at construction -- TODO confirm intent)
        keys = d.keys()
        random.shuffle(keys)
        return dict(OrderedDict([(k, d[k]) for k in keys]))
    if option == "":
        lessonTypes = Set([i['LessonType'] for i in mod])
        mydict = {}
        for i in lessonTypes:
            mydict[i] = {}
        for lst in mod:
            tList = timeList(lst["WeekText"], lst["DayText"], lst["StartTime"], lst["EndTime"])
            classId = lst['ClassNo']
            lType = lst['LessonType']
            # a ClassNo may appear in several rows (different days/weeks);
            # accumulate its hour slots
            if classId in mydict[lType].keys():
                mydict[lType][classId] = mydict[lType][classId] + tList
            else:
                mydict[lType][classId] = tList
        # EXPERIMENT: shuffle to attempt to get a new schedule
        mydict = {k:shuffle_dict(v) for k,v in mydict.iteritems()}
        return mydict
    # (a removed "includevenues" variant used to return [hours, venue] per
    # ClassNo, assuming each ClassNo only has one venue)
    else:
        return "unknown option"


def ModWithFullDays(possibleFreedays = ['Monday', 'Tuesday',
                                        'Wednesday', 'Thursday', 'Friday']):
    '''
    returns a mod tuple in the same internal representation used to solve timetable query
    this mock module has a lesson type (freeday-x) corresponding to a full weekdays
    '''
    weekdays = {"Monday": 0, "Tuesday": 1, "Wednesday": 2, "Thursday": 3, "Friday": 4}
    freedayNumbers = [weekdays[day] for day in possibleFreedays]
    lessonSlots = {"freeday-1" : [(str(d), freeDay(d)) for d in freedayNumbers]}
    return ['FREEDAY', lessonSlots]


# WARNING DEPRECATED
def freedayMod(numFreedays, freedays = []):
    '''
    returns a mod tuple in the same internal representation used to solve timetable query
    freedays is an array of weekdays to keep free
    '''
    weekdays = {"Monday": 0, "Tuesday": 1, "Wednesday": 2, "Thursday": 3, "Friday": 4}
    freedayNumbers = [weekdays[day] for day in freedays]
    # add the specified weekdays first
    lessonSlots1 = {"freeday-%s" % i : [(str(d), freeDay(d)) for d in freedayNumbers] for i in range(0, len(freedays))}
    # remaining freedays are symbolic
    lessonSlots2 = {"freeday-%s" % i : [(str(d), freeDay(d)) for d in range(5)] for i in range(len(freedays), numFreedays)}
    lessonSlots1.update(lessonSlots2)
    return ['FREEDAY', lessonSlots1]


class CalendarUtils:
    '''
    This class should only contain functions that require the timetable data
    '''
    def __init__(self, semester = 'AY1718S1', local = False):
        # NOTE(review): pathToData is computed but never stored -- confirm
        # whether local JSON loading was meant to use it
        pathToData = os.path.join(ROOT_DIR, '../data/' + semester + '_timetable.json')
        self.BASE_URL = 'http://api.nusmods.com/20' + semester[2:4] + '-20' + semester[4:6] + '/' + \
            semester[-1] + '/modules/'
        self.LOCAL_BASE_URL = LOCAL_API_DIR + '/20' + semester[2:4] + '-20' + semester[4:6] + '/' + \
            semester[-1] + '/modules/'
        self.local = local

    # Sample API call:
    # http://api.nusmods.com/2016-2017/1/modules/ST2131/timetable.json
    # returns tuple of (ModuleCode, [{Lessons for each type}])
    def query(self, code):
        code = code.upper() # codes are in upper case
        # If in LOCAL mode then pull everything from local sources
        try:
            if self.local:
                url = self.LOCAL_BASE_URL + code.upper() + '.json'
                mod = json.load(open(url))
                return (code, mod['Timetable'])
        except IOError:
            print "url: " + url + " not found"
        # if all fails default to API
        url = self.BASE_URL + code.upper() + '/timetable.json'
        r = requests.get(self.BASE_URL + code.upper() + '/timetable.json')
        r = r.json()
        return (code, r)

    def queryAndTransform(self, moduleCode, option = ""):
        # fetch + regroup in one step; see splitIntoLessonTypes for the shape
        modtuple = self.query(moduleCode)
        return (modtuple[0], splitIntoLessonTypes(modtuple[1], option))

    # takes in a list of slots and returns lists of free days
    def gotFreeDay(self, schedule):
        schedule = [s for s in schedule if 'FREEDAY' not in s]
        modCodes = Set([s.split('_')[0] for s in schedule])
        mods = [self.queryAndTransform(m) for m in modCodes]
        mods = dict((m[0], m[1]) for m in mods)
        # get list of hours
        hours = []
        for slot in schedule:
            mod, lessonType, slotName = slot.split('_')
            if mod == 'FREEDAY':
                continue
            hours += mods[mod][lessonType][slotName]
        hours.sort()
        # bucket the fortnight's hours into 10 per-day lists
        hoursTwoWeeks = [[] for i in range(2 * 5)]
        for h in hours:
            hoursTwoWeeks[h / 24].append(h % 24)
        freeDays = []
        for i, day in enumerate(hoursTwoWeeks):
            if len(day) == 0:
                # NOTE(review): labels look swapped vs. the 0-119 = odd-week
                # convention documented in timeList -- confirm
                if i < 5:
                    freeDays.append('Even ' + calendar.day_name[i % 5])
                else:
                    freeDays.append('Odd ' + calendar.day_name[i % 5])
        return freeDays

    def getHours(self, lesson):
        """Returns list of hours from lesson slot, e.g. 'ST2131_Lecture_SL1'

        :param lesson: (str) lesson slot of format [module code]_[lesson type]_[lesson code]
        :returns: list of hours (from 240 hours based indexing)
        :rtype: list
        """
        mod, lessonType, slot = lesson.split('_')
        modJSON = self.queryAndTransform(mod)[1]
        return modJSON[lessonType][slot]

    def getHoursFromSchedule(self, schedule):
        # flattened list of every occupied hour slot in the schedule
        hours = [self.getHours(s) for s in schedule]
        combinedHours = list(itertools.chain.from_iterable(hours))
        return combinedHours

    def scheduleValid(self, schedule):
        """Returns true if schedule is valid, one of each lesson type and no clash

        :param schedule: list of lesson slots taken
        :returns: true if valid, false otherwise
        :rtype: Boolean
        """
        if len(schedule) == 0:
            return False
        # check if lesson types of each covered
        schedule = [s for s in schedule if "FREEDAY" not in s]
        mods = Set([s.split('_')[0] for s in schedule])
        # get jsons of each mods
        modsJSON = [self.query(m)[1] for m in mods]
        allLessonTypes = Set()
        for mod in mods:
            modJSON = self.query(mod)[1]
            for lesson in modJSON:
                allLessonTypes.add(mod + '_' + str(lesson['LessonType']))
        # get set of all lesson types in schedule
        scheduleLessonType = Set(["_".join(l.split('_')[:2]) for l in schedule])
        if len(allLessonTypes.symmetric_difference(scheduleLessonType)) != 0:
            return False
        # no duplicate hour slot means no clash
        combinedHours = self.getHoursFromSchedule(schedule)
        return len(combinedHours) == len(Set(combinedHours))

    def checkNoLessonsBefore(self, schedule, hour):
        """ Returns true if schedule does not have any lessons before input hour """
        hours = hoursBefore(hour)
        # check that there not clashed between timetable and "illegal hours"
        timetableHours = [self.getHours(s) for s in schedule]
        combinedHours = list(itertools.chain.from_iterable(timetableHours))
        illegalHours = Set(hours)
        timetable = Set(combinedHours)
        if(len(illegalHours.intersection(timetable)) > 0):
            return False
        else:
            return True

    def checkNoLessonsAfter(self, schedule, hour):
        """ Returns true if schedule does not have any lessons before input hour """
        hours = hoursAfter(hour)
        # check that there not clashed
# -*- coding: utf-8 -*-
# OpenERP/Odoo 8 module (Python 2, old `@api.one` API) implementing plant
# ("uzem") internal warehouse documents: pickings, moves, a stock SQL view,
# material requests and scrap tracking. Field labels are Hungarian and are
# part of the UI contract -- do not translate them.
from openerp import tools, models, fields, api, exceptions


############################################################################ Uzem picking (plant picking header) ###
class RaktarUzemPicking(models.Model):
    # default helper: the current user's own warehouse
    def sajat_raktar(self):
        return self.env.user.sajat_raktar_id.id

    _name = 'raktar.uzem_picking'
    _order = 'id desc'

    # workflow: 'terv' (draft) -> 'kesz' (confirmed); most fields lock once confirmed
    state = fields.Selection([('terv',u'Tervezet'),('kesz',u'Rögzítve')], 'Állapot', default='terv', readonly=True )
    uzem_id = fields.Many2one('raktar.mozgasnem', u'Üzem', default=sajat_raktar, required=True )
    forrashely_id = fields.Many2one('raktar.mozgasnem', u'Forráshely', readonly=True, states={'terv': [('readonly', False)]}, required=True, domain="[('uzem_raktar_valaszt','=',True)]" )
    celallomas_id = fields.Many2one('raktar.mozgasnem', u'Célállomás helye', readonly=True, states={'terv': [('readonly', False)]}, required=True, domain="[('uzem_raktar_valaszt','=',True)]" )
    forrasdokumentum = fields.Char(u'Forrásdokumentum')
    megjegyzes = fields.Char(u'Megjegyzés')
    # virtual fields
    name = fields.Integer(u'ID', compute='_compute_name')
    uzem_move_ids = fields.One2many('raktar.uzem_move', 'uzem_picking_id', u'Tételek', readonly=True, states={'terv': [('readonly', False)]} )

    @api.one
    @api.depends()
    def _compute_name(self):
        # display name is simply the database id
        # self.name = str(self.id)
        self.name = self.id

    @api.one
    def import_impex(self):
        # pull every staged raktar.impex row into move lines of this picking
        for impex in self.env['raktar.impex'].search([]):
            move_row = {
                'uzem_picking_id'  : self.id,
                # 'gyartasi_lap_id' : impex.gyartasi_lap_id.id,
                'gyartasi_lap_sorsz': impex.sorszam,
                'product_id'       : impex.product_id.id,
                'mennyiseg'        : impex.mennyiseg,
            }
            self.env['raktar.uzem_move'].create(move_row)
        return True

    @api.one
    def export_impex(self):
        # replace the whole impex staging table with this picking's moves
        self.env['raktar.impex'].search([]).unlink()
        for move in self.uzem_move_ids:
            impex_row = {
                'sorszam'        : move.gyartasi_lap_id.id,
                'rendelesszam'   : move.gyartasi_lap_id.rendelesszam,
                'termekkod'      : move.product_id.cikkszam,
                'homogen'        : '',
                # 'db'           : move.mennyiseg,
                'mennyiseg'      : move.mennyiseg,
                'ertek'          : 0.0,
                'gyartasi_lap_id': move.gyartasi_lap_id.id,
                'product_id'     : move.product_id.id,
                'homogen_id'     : False,
            }
            self.env['raktar.impex'].create(impex_row)
        return True

    @api.one
    def state2kesz(self):
        # confirm the picking (no validation performed here)
        self.state = 'kesz'
        return True


############################################################################ Uzem move (plant stock move line) ###
class RaktarUzemMove(models.Model):
    def sajat_raktar(self):
        return self.env.user.sajat_raktar_id.id

    _name = 'raktar.uzem_move'
    _order = 'id desc'
    _rec_name = 'product_id'

    uzem_picking_id = fields.Many2one('raktar.uzem_picking', u'Fej ID')
    # state/source/destination mirror the parent picking (stored for the SQL view below)
    state = fields.Selection([('terv',u'Tervezet'),('kesz',u'Rögzítve')], 'Állapot', related='uzem_picking_id.state', store=True, readonly=True )
    uzem_id = fields.Many2one('raktar.mozgasnem', u'Üzem', default=sajat_raktar, required=True )
    forrashely_id = fields.Many2one('raktar.mozgasnem', u'Forráshely', compute='_compute_forrashely_id', store=True, readonly=True )
    celallomas_id = fields.Many2one('raktar.mozgasnem', u'Célállomás helye', compute='_compute_celallomas_id', store=True, readonly=True )
    gyartasi_lap_sorsz = fields.Integer(u'Sorszám')
    gyartasi_lap_id = fields.Many2one('raktar.gyartasi_lap', u'Gyártási lap', compute='_compute_gyartasi_lap_id', store=True, readonly=True )
    product_id = fields.Many2one('product.product', u'Termék', required=True )
    valtozat = fields.Char(u'Változat', default='')
    mennyiseg = fields.Float(u'Mennyiség', digits=(16, 2), required=True )

    @api.one
    @api.depends('uzem_picking_id')
    def _compute_forrashely_id(self):
        self.forrashely_id = self.uzem_picking_id.forrashely_id.id

    @api.one
    @api.depends('uzem_picking_id')
    def _compute_celallomas_id(self):
        self.celallomas_id = self.uzem_picking_id.celallomas_id.id

    @api.one
    @api.depends('gyartasi_lap_sorsz')
    def _compute_gyartasi_lap_id(self):
        # the entered serial number doubles as the production-sheet id
        self.gyartasi_lap_id = self.gyartasi_lap_sorsz

    @api.onchange('gyartasi_lap_sorsz')
    def onchange_gyartasi_lap_sorsz(self):
        # restrict selectable products to the chosen production sheet's parts list
        self.product_id = False
        ids = self.gyartasi_lap_id.muveletterv_id.muvelet_cikk_ids.mapped('product_id.id')
        domain = [('id','in',ids)] if self.gyartasi_lap_sorsz else []
        return {'domain': {'product_id': domain}}


############################################################################ Uzem keszlet (plant stock, SQL view) ###
class RaktarUzemKeszlet(models.Model):
    # read-only aggregate over raktar_uzem_move: incoming (+) at the
    # destination, outgoing (-) at the source; 'raktaron' counts only
    # confirmed moves, 'tervezett' counts all moves
    _name = 'raktar.uzem_keszlet'
    _auto = False
    _rec_name = 'product_id'
    _order = 'uzem_id, product_id, valtozat, hely_id'

    uzem_id = fields.Many2one('raktar.mozgasnem', u'Üzem', readonly=True)
    hely_id = fields.Many2one('raktar.mozgasnem', u'Raktárhely', readonly=True)
    product_id = fields.Many2one('product.product', string=u'Termék', readonly=True)
    valtozat = fields.Char(u'Változat', readonly=True)
    raktaron = fields.Float(string=u'Raktáron', readonly=True)
    tervezett = fields.Float(string=u'Várható', readonly=True)
    false = fields.Boolean(u'Hamis', readonly=True)

    def init(self, cr):
        tools.drop_view_if_exists(cr, self._table)
        # SQL kept verbatim (it is a runtime string)
        cr.execute(
            """CREATE or REPLACE VIEW %s as ( SELECT row_number() over() AS id, uzem_id, hely_id, product_id, valtozat, sum(raktaron) AS raktaron, sum(tervezett) AS tervezett, FALSE AS false FROM ( SELECT uzem_id,celallomas_id AS hely_id, product_id, valtozat, sum(CASE WHEN state='kesz' THEN mennyiseg ELSE 0 END) AS raktaron, sum(mennyiseg) AS tervezett FROM raktar_uzem_move GROUP BY uzem_id,product_id,valtozat,celallomas_id UNION ALL SELECT uzem_id,forrashely_id hely_id, product_id, valtozat, sum(CASE WHEN state='kesz' THEN -mennyiseg ELSE 0 END) AS raktaron, sum(-mennyiseg) AS tervezett FROM raktar_uzem_move GROUP BY uzem_id,product_id,valtozat,forrashely_id ) AS move GROUP BY uzem_id,product_id,valtozat,hely_id )""" % (self._table)
        )


############################################################################ Anyagigenyles (material request) ###
class RaktarIgenyTemplate(models.AbstractModel):
    # shared schema for raktar.igeny (live requests) and raktar.igeny_log (history)
    _name = 'raktar.igeny_template'
    _order = 'id desc'

    tipus = fields.Selection([('igeny',u'Igénylés'), ('selejt',u'Selejtküldés')], u'Típus', default=lambda self: self.env.context.get('tipus', ''), required=True )
    # workflow: terv -> uj -> nyugta -> szallit -> kesz
    state = fields.Selection([('terv',u'Tervezet'),('uj',u'Új igény'),('nyugta',u'Nyugtázva'),('szallit',u'Szállítás'),('kesz',u'Lezárt')], u'Állapot', default='terv', readonly=True )
    sorszam = fields.Integer(u'Sorszám', readonly=True, states={'terv': [('readonly', False)]} )
    gyartasi_lap_id = fields.Many2one('raktar.gyartasi_lap', u'Gyártási lap', compute='_compute_gyartasi_lap_id', store=True, readonly=True )
    product_id = fields.Many2one('product.product', u'Termék', readonly=True, states={'terv': [('readonly', False)]}, required=True )
    darab = fields.Integer(u'Darab', readonly=True, states={'terv': [('readonly', False)]}, required=True )
    # per-transaction sent/received amounts; the *_ossz_db fields accumulate totals
    kuldott_db = fields.Integer(u'Küldött', readonly=True, states={'szallit': [('readonly', False)], 'nyugta': [('readonly', False)]} )
    kapott_db = fields.Integer(u'Kapott', readonly=True, states={'szallit': [('readonly', False)]} )
    kuldott_ossz_db = fields.Integer(u'Küldött összes', readonly=True )
    kapott_ossz_db = fields.Integer(u'Kapott összes', readonly=True )
    hely_id = fields.Many2one('raktar.mozgasnem', u'Üzem', domain=[('belso_szallitas', '=', True)], readonly=True, states={'terv': [('readonly', False)]}, required=True )
    igeny_ok = fields.Selection([('hiany',u'hiánypótlás'),('selejt',u'selejtpótlás')], 'Kérés oka', default='hiany', readonly=True, states={'terv': [('readonly', False)]}, required=True )
    selejt_ok_id = fields.Many2one('raktar.hibakod', u'Hibakód', states={'kesz': [('readonly', True)]} )
    megjegyzes = fields.Char(u'Megjegyzés', states={'kesz': [('readonly', True)]} )
    # virtual fields
    rendelesszam = fields.Char(u'Rendelésszám', related='gyartasi_lap_id.rendelesszam', readonly=True, required=True )
    termekkod = fields.Char(u'Termékkód', related='gyartasi_lap_id.termekkod', readonly=True, required=True )

    @api.one
    @api.depends('sorszam')
    def _compute_gyartasi_lap_id(self):
        # serial number doubles as the production-sheet id (same convention as moves)
        self.gyartasi_lap_id = self.sorszam

    @api.onchange('sorszam')
    def onchange_sorszam(self):
        # restrict product choices to the production sheet's parts list
        self.product_id = False
        ids = self.gyartasi_lap_id.muveletterv_id.muvelet_cikk_ids.mapped('product_id.id')
        # ids = self.gyartasi_lap_id.gyartas_cikk_ids.filtered(lambda r: r.beepules > 0.0).mapped('product_id.id')
        domain = [('id','in',ids)] if self.sorszam else []
        return {'domain': {'product_id': domain}}

    @api.onchange('kuldott_db')
    def onchange_kuldott_db(self):
        # sending and receiving are mutually exclusive in one edit
        if self.kuldott_db > 0:
            self.kapott_db = 0

    @api.onchange('kapott_db')
    def onchange_kapott_db(self):
        if self.kapott_db > 0:
            self.kuldott_db = 0

    @api.one
    def rogzit(self):
        # submit: a request becomes 'uj', a scrap shipment starts acknowledged
        if self.tipus == 'igeny':
            # if self.igeny_ok == 'selejt':
            #     copy = self.copy({'tipus': 'selejt', 'state': 'terv', 'megjegyzes': False})
            self.state = 'uj'
        elif self.tipus == 'selejt':
            self.state = 'nyugta'
        return True

    @api.one
    def state2nyugta(self):
        self.state = 'nyugta'
        return True

    @api.one
    def state2kesz(self):
        # closing requires the sent and received totals to match
        if self.kuldott_ossz_db != self.kapott_ossz_db:
            raise exceptions.Warning(u'A küldött és kapott darabszámoknak egyeznie kell!')
        self.state = 'kesz'
        return True


class RaktarIgeny(models.Model):
    _name = 'raktar.igeny'
    _inherit = 'raktar.igeny_template'

    igeny_log_ids = fields.One2many('raktar.igeny_log', 'igeny_id', u'Anyagigénylés történet', readonly=True)

    @api.multi
    def write(self, vals):
        # accumulates sent/received totals, advances the workflow state and
        # snapshots the pre-write record into raktar.igeny_log
        # if 'kuldott_db' in vals and 'kapott_db' in vals:
        #     raise exceptions.Warning(u'Küldés és fogadás egyszerre nem lehetséges!')
        old = self.read()[0]
        del old['id']
        old['igeny_id'] = self.id
        old['gyartasi_lap_id'] = self.gyartasi_lap_id.id
        old['product_id'] = self.product_id.id
        old['hely_id'] = self.hely_id.id
        old['selejt_ok_id'] = self.selejt_ok_id.id
        old['rogzitesi_ido'] = self.write_date
        old['rogzito_uid'] = self.write_uid.id
        kuld_ossz, kap_ossz = self.kuldott_ossz_db, self.kapott_ossz_db
        if 'kuldott_db' in vals and vals['kuldott_db'] != 0:
            vals['kuldott_ossz_db'] = kuld_ossz = self.kuldott_ossz_db + vals['kuldott_db']
            if self.darab < vals['kuldott_ossz_db']:
                raise exceptions.Warning(u'Az összes küldött darab meghaladja az eredeti mennyiséget!')
            vals['kapott_db'] = 0
            vals['state'] = 'szallit'
        elif 'kapott_db' in vals and vals['kapott_db'] != 0:
            vals['kapott_ossz_db'] = kap_ossz = self.kapott_ossz_db + vals['kapott_db']
            if self.darab < vals['kapott_ossz_db']:
                raise exceptions.Warning(u'Az összes kapott darab meghaladja az eredeti mennyiséget!')
            vals['kuldott_db'] = 0
            vals['state'] = 'szallit'
        # everything sent has arrived -> acknowledged; fully delivered -> closed
        # NOTE(review): nesting of the 'kesz' check reconstructed from a
        # collapsed source -- confirm against the original file
        if kuld_ossz > 0 and kuld_ossz == kap_ossz:
            vals['state'] = 'nyugta'
            if self.darab == kuld_ossz:
                vals['state'] = 'kesz'
        if self.state != 'terv':
            self.env['raktar.igeny_log'].create(old)
        super(RaktarIgeny, self).write(vals)
        return True


class RaktarIgenyLog(models.Model):
    # immutable history rows for raktar.igeny (one per write after draft)
    _name = 'raktar.igeny_log'
    _inherit = 'raktar.igeny_template'

    igeny_id = fields.Many2one('raktar.igeny', u'Anyagigénylés', required=True)
    rogzitesi_ido = fields.Datetime(u'Rögzítés ideje')
    rogzito_uid = fields.Many2one('res.users', u'Rögzítette')


############################################################################ Selejtkovet fej (scrap-tracking header) ###
class RaktarSelejtkovet(models.Model):
    _name = 'raktar.selejtkovet'
    _order = 'id desc'

    # workflow: terv -> folytat -> zarhat -> kesz
    state = fields.Selection([('terv',u'Tervezet'),('folytat',u'Folyamatban'),('zarhat',u'Lezárható'),('kesz',u'Lezárt')], 'Állapot', default='terv', readonly=True )
    sorszam = fields.Integer(u'Sorszám', readonly=True, states={'terv': [('readonly', False)]}, required=True )
    gyartasi_lap_id = fields.Many2one('raktar.gyartasi_lap', u'Gyártási lap', compute='_compute_gyartasi_lap_id', store=True, readonly=True )
    gyartas_id = fields.Many2one('raktar.gyartas', u'Gyártási művelet', domain="[('gyartasi_lap_id','=',sorszam)]", readonly=True, states={'terv': [('readonly', False)]}, required=True, )
    product_id = fields.Many2one('product.product', u'Szétszerelt alkatrész', related='gyartas_id.product_id', store=True, readonly=True )
    gyartasi_hely_id = fields.Many2one('raktar.mozgasnem', u'Gyártási hely', related='gyartas_id.gyartasi_hely_id', store=True, readonly=True )
    mennyiseg = fields.Float(u'Mennyiség', digits=(16, 2), readonly=True, states={'terv': [('readonly', False)]}, required=True )
    szamolt = fields.Float(u'Számolt', digits=(16, 2), readonly=True )
    hely_id = fields.Many2one('raktar.mozgasnem', u'Üzem', readonly=True )
    bontasi_hely_id = fields.Many2one('raktar.mozgasnem', u'Szétszerelési hely', domain=[('belso_szallitas', '=', True)], readonly=True, states={'terv': [('readonly', False)]}, required=True )
    munka = fields.Selection([('szet',u'Szétszerelés'), ('ujra','Újragyártás')], u'Munka', readonly=True, states={'terv': [('readonly', False)]} )
    megjegyzes = fields.Char(u'Megjegyzés', states={'kesz': [('readonly', True)]} )
    # links to the generated stock documents
    picking_id = fields.Many2one('stock.picking', u'Kiszedés', readonly=True)
    picking2_id = fields.Many2one('stock.picking', u'Kiszedés 2', readonly=True)
    production_id = fields.Many2one('mrp.production', u'Újragyártás', readonly=True)
    # virtual fields
    rendelesszam = fields.Char(u'Rendelésszám', related='gyartasi_lap_id.rendelesszam', readonly=True, required=True )
    termekkod = fields.Char(u'Termékkód', related='gyartasi_lap_id.termekkod', readonly=True, required=True )
    selejtkovet_tet_ids = fields.One2many('raktar.selejtkovet_tet', 'selejtkovet_id', u'Tételek', readonly=True, states={'folytat': [('readonly', False)]} )
    picking_state = fields.Selection([('draft', 'Draft'), ('cancel', 'Cancelled'), ('waiting', 'Waiting Another Operation'), ('confirmed', 'Waiting Availability'), ('partially_available', 'Partially Available'), ('assigned', 'Ready to Transfer'), ('done', 'Transferred')], u'Kiszedés állapot', related='picking_id.state', readonly=True)
    picking2_state = fields.Selection([('draft', 'Draft'), ('cancel', 'Cancelled'), ('waiting', 'Waiting Another Operation'), ('confirmed', 'Waiting Availability'), ('partially_available', 'Partially Available'), ('assigned', 'Ready to Transfer'), ('done', 'Transferred')], u'Kiszedés 2 állapot', related='picking2_id.state', readonly=True)
    production_state = fields.Selection([('draft', 'New'), ('cancel', 'Cancelled'), ('confirmed', 'Awaiting Raw Materials'), ('ready', 'Ready to Produce'), ('in_production', 'Production Started'), ('done', 'Done')], 'Újragyártás állapot', related='production_id.state')

    @api.one
    @api.depends('sorszam')
    def _compute_gyartasi_lap_id(self):
        # changing the sheet number clears the chosen production operation
        self.gyartasi_lap_id, self.gyartas_id = self.sorszam, False

    @api.one
    def rogzit(self):
        # explode the BOM into detail lines (one per component with qty >= 1)
        # and move the document into the in-progress state; idempotent
        if self.state == 'folytat':
            return True
        for bom_line in self.gyartas_id.bom_id.bom_line_ids:
            qty = self.mennyiseg*bom_line.product_qty/self.gyartas_id.bom_id.product_qty
            tetel_row = {
                'selejtkovet_id' : self.id,
                'product_id'     : bom_line.product_id.id,
                'max_mennyiseg'  : round(qty, 0),
                'mennyiseg'      : round(qty, 0),
            }
            if qty >= 1.0:
                self.env['raktar.selejtkovet_tet'].create(tetel_row)
        self.state = 'folytat'
        return True

    @api.one
    def veglegesites(self):
        # finalize the disassembly: log it, then create the stock picking that
        # moves the scrapped assembly from the disassembly site to production
        if self.state == 'zarhat':
            return True
        Log = self.env['szefo.log']
        Log.create({'loglevel': 'info', 'name': u'Szétszerelés végrehajtása', 'module': 'raktar', 'table': 'selejtkovet', 'rowid': self.id})
        selejt = self.env['raktar.mozgasnem'].search([('azon', '=', 'selejt')])
        termeles = self.env['raktar.mozgasnem'].search([('azon', '=', 'termeles')])
        picking_row = {
            'picking_type_id' : selejt.picking_type_id.id,
            'origin'          : u'Szétszerelés ID: '+str(self.id),
            'move_type'       : 'direct',
        }
        picking = self.env['stock.picking'].create(picking_row)
        move_row = {
            'picking_id'      : picking.id,
            'product_id'      : self.gyartas_id.product_id.id,
            'name'            : self.gyartas_id.product_id.name,
            'product_uom'     : self.gyartas_id.product_id.uom_id.id,
            'product_uom_qty' : self.mennyiseg,
            'location_id'     : self.bontasi_hely_id.location_id.id,
            'location_dest_id': termeles.location_id.id
        }
        self.env['stock.move'].create(move_row)
        picking.action_confirm()
        picking.force_assign()
        self.picking_id = picking.id
        picking_row = {
            'picking_type_id' : selejt.picking_type_id.id,
            'origin'          : u'Szétszerelés ID: '+str(self.id),
            'move_type'       : 'direct',
        }
        picking2 =
        # --- tail of ConfigEnv (method header is above this chunk) ---
        self.TestEnv.Sequential = True
        self.TestEnv.Validate()

        # note: to create a train / test split of pats, do this:
        # all = etable.NewIdxView(self.Pats)
        # splits = split.Permuted(all, []float64{.8, .2}, []string{"Train", "Test"})
        # self.TrainEnv.Table = splits.Splits[0]
        # self.TestEnv.Table = splits.Splits[1]

        self.TrainEnv.Init(0)
        self.TestEnv.Init(0)

    def ConfigNet(self, net):
        # builds the 5x5 -> 7x7 -> 7x7 -> 5x5 network with bidirectional
        # hidden/output connectivity
        net.InitName(net, "RA25")
        inLay = net.AddLayer2D("Input", 5, 5, emer.Input)
        hid1Lay = net.AddLayer2D("Hidden1", 7, 7, emer.Hidden)
        hid2Lay = net.AddLayer2D("Hidden2", 7, 7, emer.Hidden)
        outLay = net.AddLayer2D("Output", 5, 5, emer.Target)

        # use this to position layers relative to each other
        # default is Above, YAlign = Front, XAlign = Center
        hid2Lay.SetRelPos(relpos.Rel(Rel=relpos.RightOf, Other="Hidden1", YAlign=relpos.Front, Space=2))

        # note: see emergent/prjn module for all the options on how to connect
        # NewFull returns a new prjn.Full connectivity pattern
        net.ConnectLayers(inLay, hid1Lay, prjn.NewFull(), emer.Forward)
        net.ConnectLayers(hid1Lay, hid2Lay, prjn.NewFull(), emer.Forward)
        net.ConnectLayers(hid2Lay, outLay, prjn.NewFull(), emer.Forward)
        # back-projections carry error-driven signals
        net.ConnectLayers(outLay, hid2Lay, prjn.NewFull(), emer.Back)
        net.ConnectLayers(hid2Lay, hid1Lay, prjn.NewFull(), emer.Back)

        # note: SetThread can distribute layers across cpus (not worth it at
        # this model size); changing outLay to emer.Compare would disable
        # error-driven learning while keeping stats.
        net.Defaults()
        self.SetParams("Network", self.LogSetParams) # only set Network params
        net.Build()
        net.InitWts()

    ######################################
    #     Init, utils

    def Init(self):
        """Init restarts the run, and initializes everything, including network weights and resets the epoch log table"""
        rand.Seed(self.RndSeed)
        self.ConfigEnv() # just in case another set of pats was selected..
        self.StopNow = False
        self.SetParams("", self.LogSetParams) # all sheets
        self.NewRun()
        self.UpdateView(True)

    def NewRndSeed(self):
        """NewRndSeed gets a new random seed based on current time -- otherwise uses the same random seed for every run"""
        # self.RndSeed = time.Now().UnixNano()

    def Counters(self, train):
        """
        Counters returns a string of the current counter state
        use tabs to achieve a reasonable formatting overall
        and add a few tabs at the end to allow for expansion..
        """
        if train:
            return "Run:\t%d\tEpoch:\t%d\tTrial:\t%d\tCycle:\t%d\tName:\t%s\t\t\t" % (self.TrainEnv.Run.Cur, self.TrainEnv.Epoch.Cur, self.TrainEnv.Trial.Cur, self.Time.Cycle, self.TrainEnv.TrialName.Cur)
        else:
            # NOTE(review): format below lacks a tab between %d and "Name:" --
            # looks like a typo; confirm before fixing
            return "Run:\t%d\tEpoch:\t%d\tTrial:\t%d\t\tCycle:\t%dName:\t%s\t\t\t" % (self.TrainEnv.Run.Cur, self.TrainEnv.Epoch.Cur, self.TestEnv.Trial.Cur, self.Time.Cycle, self.TestEnv.TrialName.Cur)

    def UpdateView(self, train):
        if self.NetView != 0 and self.NetView.IsVisible():
            self.NetView.Record(self.Counters(train))
            # note: essential to use Go version of update when called from another goroutine
            self.NetView.GoUpdate() # note: using counters is significantly slower..

    ######################################
    #     Running the network

    def AlphaCyc(self, train):
        """
        AlphaCyc runs one alpha-cycle (100 msec, 4 quarters) of processing.
        External inputs must have already been applied prior to calling,
        using ApplyExt method on relevant layers (see TrainTrial, TestTrial).
        If train is true, then learning DWt or WtFmDWt calls are made.
        Handles netview updating within scope of AlphaCycle
        """
        if self.Win != 0:
            self.Win.PollEvents() # this is essential for GUI responsiveness while running
        viewUpdt = self.TrainUpdt
        if not train:
            viewUpdt = self.TestUpdt

        # update prior weight changes at start, so any DWt values remain visible at end
        # you might want to do this less frequently to achieve a mini-batch update
        # in which case, move it out to the TrainTrial method where the relevant
        # counters are being dealt with.
        if train:
            self.Net.WtFmDWt()

        self.Net.AlphaCycInit()
        self.Time.AlphaCycStart()
        for qtr in range(4):
            for cyc in range(self.Time.CycPerQtr):
                self.Net.Cycle(self.Time)
                if not train:
                    self.LogTstCyc(self.TstCycLog, self.Time.Cycle)
                self.Time.CycleInc()
                if self.ViewOn:
                    if viewUpdt == leabra.Cycle:
                        self.UpdateView(train)
                    if viewUpdt == leabra.FastSpike:
                        if (cyc+1)%10 == 0:
                            self.UpdateView(train)
            self.Net.QuarterFinal(self.Time)
            self.Time.QuarterInc()
            if self.ViewOn:
                if viewUpdt == leabra.Quarter:
                    self.UpdateView(train)
                if viewUpdt == leabra.Phase:
                    if qtr >= 2:
                        self.UpdateView(train)

        if train:
            self.Net.DWt()
        if self.ViewOn and viewUpdt == leabra.AlphaCycle:
            self.UpdateView(train)
        if self.TstCycPlot != 0 and not train:
            self.TstCycPlot.GoUpdate()

    def ApplyInputs(self, en):
        """
        ApplyInputs applies input patterns from given environment.
        It is good practice to have this be a separate method with appropriate
        args so that it can be used for various different contexts
        (training, testing, etc).
        """
        self.Net.InitExt() # clear any existing inputs -- not strictly necessary if always
        # going to the same layers, but good practice and cheap anyway

        inLay = leabra.Layer(self.Net.LayerByName("Input"))
        outLay = leabra.Layer(self.Net.LayerByName("Output"))
        inPats = en.State(inLay.Nm)
        if inPats != go.nil:
            inLay.ApplyExt(inPats)
        outPats = en.State(outLay.Nm)
        # NOTE(review): second guard re-tests inPats rather than outPats --
        # looks like a copy/paste slip; confirm before changing
        if inPats != go.nil:
            outLay.ApplyExt(outPats)

        # NOTE: a pandas.DataFrame could be used instead of etable.Table to
        # supply patterns (flatten row -> go.Slice_float32 -> ApplyExt1D);
        # etable is used because it integrates with the env interface and GUI.

    def TrainTrial(self):
        """
        TrainTrial runs one trial of training using TrainEnv"""
        self.TrainEnv.Step() # the Env encapsulates and manages all counter state

        # Key to query counters FIRST because current state is in NEXT epoch
        # if epoch counter has changed
        epc = env.CounterCur(self.TrainEnv, env.Epoch)
        chg = env.CounterChg(self.TrainEnv, env.Epoch)
        if chg:
            self.LogTrnEpc(self.TrnEpcLog)
            if self.ViewOn and self.TrainUpdt > leabra.AlphaCycle:
                self.UpdateView(True)
            if epc % self.TestInterval == 0: # note: epc is *next* so won't trigger first time
                self.TestAll()
            if epc >= self.MaxEpcs: # done with training..
                self.RunEnd()
                if self.TrainEnv.Run.Incr(): # we are done!
                    self.StopNow = True
                    return
                else:
                    self.NewRun()
                    return

        self.ApplyInputs(self.TrainEnv)
        self.AlphaCyc(True)   # train
        self.TrialStats(True) # accumulate

    def RunEnd(self):
        """
        RunEnd is called at the end of a run -- save weights, record final log, etc here
        """
        self.LogRun(self.RunLog)
        if self.SaveWts:
            fnm = self.WeightsFileName()
            fmt.Printf("Saving Weights to: %v", fnm)
            self.Net.SaveWtsJSON(gi.FileName(fnm))

    def NewRun(self):
        """
        NewRun intializes a new run of the model, using the TrainEnv.Run counter
        for the new run value
        """
        run = self.TrainEnv.Run.Cur
        self.TrainEnv.Init(run)
        self.TestEnv.Init(run)
        self.Time.Reset()
        self.Net.InitWts()
        self.InitStats()
        self.TrnEpcLog.SetNumRows(0)
        self.TstEpcLog.SetNumRows(0)

    def InitStats(self):
        """
        InitStats initializes all the statistics, especially important for the
        cumulative epoch stats -- called at start of new run
        """
        # accumulators
        self.SumSSE = 0.0
        self.SumAvgSSE = 0.0
        self.SumCosDiff = 0.0
        self.CntErr = 0.0
        self.FirstZero = -1
        # clear rest just to make Sim look initialized
        self.TrlSSE = 0.0
        self.TrlAvgSSE = 0.0
        self.EpcSSE = 0.0
        self.EpcAvgSSE = 0.0
        self.EpcPctErr = 0.0
        self.EpcCosDiff = 0.0

    def TrialStats(self, accum):
        """
        TrialStats computes the trial-level statistics and adds them to the
        epoch accumulators if accum is true.  Note that we're accumulating
        stats here on the Sim side so the core algorithm side remains as
        simple as possible, and doesn't need to worry about different
        time-scales over which stats could be accumulated etc.
        You can also aggregate directly from log data, as is done for testing stats
        """
        outLay = leabra.Layer(self.Net.LayerByName("Output"))
        self.TrlCosDiff = outLay.CosDiff.Cos
        self.TrlSSE = outLay.SSE(0.5) # 0.5 = per-unit tolerance -- right side of .5
        self.TrlAvgSSE = self.TrlSSE / len(outLay.Neurons)
        if accum:
            self.SumSSE += self.TrlSSE
            self.SumAvgSSE += self.TrlAvgSSE
            self.SumCosDiff += self.TrlCosDiff
            if self.TrlSSE != 0:
                self.CntErr += 1.0

    def TrainEpoch(self):
        """
        TrainEpoch runs training trials for remainder of this epoch
        """
        self.StopNow = False
        curEpc = self.TrainEnv.Epoch.Cur
        while True:
            self.TrainTrial()
            if self.StopNow or self.TrainEnv.Epoch.Cur != curEpc:
                break
        self.Stopped()

    def TrainRun(self):
        """
        TrainRun runs training trials for remainder of run
        """
        self.StopNow = False
        curRun = self.TrainEnv.Run.Cur
        while True:
            self.TrainTrial()
            if self.StopNow or self.TrainEnv.Run.Cur != curRun:
                break
        self.Stopped()

    def Train(self):
        """
        Train runs the full training from this point onward
        """
        self.StopNow = False
        while True:
            self.TrainTrial()
            if self.StopNow:
                break
        self.Stopped()

    def Stop(self):
        """
        Stop tells the sim to stop running
        """
        self.StopNow = True

    def Stopped(self):
        """
        Stopped is called when a run method stops running -- updates the IsRunning flag and toolbar
        """
        self.IsRunning = False
        if self.Win != 0:
            self.vp.BlockUpdates()
            if self.ToolBar != go.nil:
                self.ToolBar.UpdateActions()
            self.vp.UnblockUpdates()
            self.ClassView.Update()
            self.vp.SetNeedsFullRender()

    ######################################
    #     Testing

    def TestTrial(self):
        """
        TestTrial runs one
<filename>async_strava/strava.py """ Ignoring non run activities None in results of club_activities represents an error in activity. For example - ActivityNotExist 429 - StravaTooManyRequests is called: - When the user goes to the activity page. """ import logging import re import json import asyncio from typing import NoReturn, List, Optional from contextlib import asynccontextmanager from sys import stdout from datetime import datetime, timedelta import aiohttp from bs4 import BeautifulSoup as Bs from lxml import html from aiohttp.client_exceptions import InvalidURL from async_class import AsyncClass from .exceptions import StravaSessionFailed, ServerError, StravaTooManyRequests, ActivityNotExist, ParserError from .attributes import ActivityInfo, Activity # Configure logging LOGGER = logging.getLogger('strava_crawler') LOGGER.setLevel(logging.DEBUG) handler = logging.StreamHandler(stdout) handler.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(filename)s.%(funcName)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S') handler.setFormatter(formatter) LOGGER.addHandler(handler) class Strava(AsyncClass): """Main class for interacting with www.strava.com website""" def __init__(self, *args, **kwargs): super().__init__(args, kwargs) self.connection_established = False async def __ainit__(self, login: str, password: str, filters: dict) -> NoReturn: self._session = aiohttp.ClientSession() self._login: str = login self._password: str = password self.filters: dict = filters connection = await self._session_reconnecting() if connection == 0: self.connection_established = True # Session connection failure during initialization would be proceed in a context manager async def _strava_authorization(self): """ Makes authorization for current strava session. :return: aiohttp auth request information """ def _csrf_token(text: str) -> str: """ Extracts the csrf token from the passed html text. 
:param text: html page code :return: csrf token from page code """ tree = html.fromstring(text) tokens: list = tree.xpath('//*[@name="csrf-token"]/@content') return tokens[0] response = await self._session.get('https://www.strava.com/login') csrf_token: str = _csrf_token(await response.text()) data = {'authenticity_token': csrf_token, 'email': self._login, 'password': self._password } return await self._session.post('https://www.strava.com/session', data=data) async def connection_check(self, request_response) -> bool: """ Checks the strava page connection by parsing the html code. :returns: - True - the connection is establish; - False - the connection isn't established. """ html_text = await request_response.text() if html_text[:500].find('logged-out') == -1: # We've logged-in return True # Strava logged us out, maybe there is an alert message soup = await self._get_soup(html_text) alert_message = soup.select_one('div.alert-message') if alert_message is not None: LOGGER.error('alert message in a page: %s', alert_message.text) return False async def _session_reconnecting(self) -> int: """ Updates or reconnects strava session. 
This function will be removed in next releases, if it would be unnecessary during tests :return: 0 - session established; -1 - can't reconnect """ allowed_attempts: int = 2 for check_counter in range(allowed_attempts): # This one will try to reconnect the session, # if connection wasn't established in the first attempt session_response = await self._strava_authorization() connection = await self.connection_check(session_response) if not connection: LOGGER.error('%i of %i attempt to connect has failed', check_counter + 1, allowed_attempts) await asyncio.sleep(15) else: LOGGER.info('Session established') return 0 # Can't reconnect return -1 @staticmethod async def _get_soup(html_text: str): """Executes blocking task(dom tree parser) in an executor - another thread""" def _bs_object(text): return Bs(text, 'html.parser') soup_loop = asyncio.get_running_loop() return await soup_loop.run_in_executor(None, _bs_object, html_text) async def _get_response(self, uri): """ In my mind - this function has to proceed and return "get" request response. It has to proceed such errors, as 429, ServerDisconnectedError, .. :param uri: requested page :raise StravaTooManyRequests: too many requests per time unit - strava won't let us in for 10 minutes at least :raise ServerError: strava server doesn't answer, or invalid uri :return: request result obj """ response = await self._session.get(uri) status_code = response.status if status_code != 200: # Redirecting would be processed in page handlers if status_code == 429: # This error will cancel connection. 
# Therefore, within the framework of this class, it is not processed raise StravaTooManyRequests if 0 <= status_code - 400 < 100: # Client error raise ServerError(status_code) if status_code - 500 >= 0: # try to reconnect LOGGER.info('try ro reconnect, status code: %i', status_code) await asyncio.sleep(7) response = await self._session.get(uri) if response.status != 200: raise ServerError(status_code) return response async def get_strava_nickname_from_uri(self, profile_uri: str) -> Optional[str]: """ Gets nickname from strava user profile page. If page not found - def will return '' - an empty str. If incorrect link - None. :NOTE: ServerError processed here :param profile_uri: strava user profile uri :raise StravaTooManyRequests: too many requests per time unit - strava won't let us in for 10 minutes at least :return: user nickname from transmitted uri """ try: response = await self._get_response(profile_uri) except (ServerError, InvalidURL) as exc: LOGGER.error('ServerError in %s - %s', profile_uri, repr(exc)) return '' soup = await self._get_soup(await response.text()) raw_title = soup.select_one('h1.athlete-name') if raw_title is None: LOGGER.info('Incorrect link - there are no strava title at %s', profile_uri) return None return raw_title.text @staticmethod def _process_inline_section(stat_section, activity_href: str) -> dict: """ Processes activity page inline-stats section. :param activity_href: activity uri :param stat_section: inline-stats section html cluster :raise ParserError: website inline section front has changed :return {distance:, moving_time:, pace:} """ def str_time_to_sec(_time: list) -> int: """ Converts time in str view to seconds :param _time: list of separated str values: 14:59->[14,59] :return: number of elapsed seconds :example: pace 14:59 comes to the function like [14, 59]. 
Function returns 14*60+59=899 seconds """ _seconds: int = 0 _n = len(_time) - 1 for time_el in _time: _seconds += int(time_el) * pow(60, _n) _n -= 1 return _seconds def validate_str_value(el) -> int: """ Values from website often contains letters, like '2s' or '15km/sec'. This function retrieves numbers from such el. If el doesn't contain numbers - returns 0. """ tmp_val = re.search(r'\d+', el) if tmp_val is not None: return int(tmp_val.group(0)) return 0 distance: float = 0.0 moving_time: int = 0 pace: int = 0 try: activity_details = stat_section.select('li') for item in activity_details: tmp = item.select_one('div.label').text cluster_type = tmp.strip() cluster = item.select_one('strong').text if cluster_type == 'Distance': divided_distance = re.findall(r'[\d.]', cluster) if len(divided_distance) != 0: distance = float(''.join(divided_distance)) # else it would be a default value if cluster_type in ('Moving Time', 'Elapsed Time', 'Duration'): divided_moving_time: List[str] = cluster.split(':') # ['2s'] [1, 18, 53] moving_time: int = str_time_to_sec(list(map(validate_str_value, divided_moving_time))) if cluster_type == 'Pace': divided_pace: List[str] = cluster.split(':') # ['7', '18/km'] ['7s/km'] pace: int = str_time_to_sec(list(map(validate_str_value, divided_pace))) return {'distance': distance, 'moving_time': moving_time, 'pace': pace} except Exception as exc: raise ParserError(activity_href, repr(exc)) @staticmethod def _process_more_stats(more_stats_section, activity_href) -> dict: """ Processes activity page more-stats section. 
:param more_stats_section: more-stats section html cluster :raise ParserError: website more stats section front has changed :return: {elevation_gain:, calories:} """ elevation_gain: int = 0 calories: int = 0 if more_stats_section is not None: # Such block exists, but frontend may have changed try: rows = more_stats_section.select('div.row') for row in rows: values = row.select('div.spans3') descriptions = row.select('div.spans5') for index, desc in enumerate(descriptions): if desc.text.strip() == 'Elevation': # We get value in format '129m\n' or '\n1,345m\n' elevation_gain = int(re.sub(r'[,m\n]', r'', values[index].text)) if desc.text.strip() == 'Calories': calories_value: str = values[index].text.strip() # We can get calories in such views: '-' <=> 0, '684', '1,099' <=> 1099 if calories_value != '—': calories: int = int(re.sub(r',', r'', calories_value)) except Exception as exc: raise ParserError(activity_href, repr(exc)) return {'elevation_gain': elevation_gain, 'calories': calories} @staticmethod def _process_device_section(device_cluster, activity_href) -> dict: """ !!!Temporarily unavailable!!! Processes activity page device section. 
:param device_cluster: device section html cluster :return: {device:, gear:} """ gear = '-' device = '-' try: if device_cluster: device_section = device_cluster.select_one('div.device') gear_section = device_cluster.select_one('span.gear-name') if gear_section is not None: raw_gear: str = gear_section.text.strip() # adidas Pulseboost HD\n(2,441.7 km) gear = raw_gear.split('\n') if len(gear) == 2 and len(gear[1]) > 2: # remove brackets from gear mileage gear[1] = gear[1][1:len(gear[1]) - 1] if device_section is not None: device: str = device_section.text.strip() except Exception as exc: LOGGER.error(repr(exc)) raise ParserError(activity_href, repr(exc)) return {'device': device, 'gear': tuple(gear)} def _form_activity_info(self, activity_href: str, header, activity_summary) -> Optional[ActivityInfo]: """ Forms ActivityInfo from the data received from the site page. :raise ParserError: website more stats section front has changed :return: None - Activity not corresponding filters/Parser error, ActivityInfo - ok """ comparsion_date: Optional[datetime] = self.filters.get('date') try: activity_details = activity_summary.select_one('div.details') # date looks like '11:40 AM on Sunday, August 22, 2021' raw_date: str = activity_details.select_one('time').text split_date: list = raw_date.split(',') activity_date: datetime = datetime.strptime(split_date[-2].strip() + ' ' + split_date[-1].strip(), '%B %d %Y') if comparsion_date is not None: # There is a date filter if ((comparsion_date.day,
<gh_stars>1-10 # coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs __all__ = [ 'SessionKeyDetails', 'SessionTargetResourceDetails', 'GetBastionsBastionResult', 'GetBastionsFilterResult', 'GetSessionKeyDetailsResult', 'GetSessionTargetResourceDetailsResult', 'GetSessionsFilterResult', 'GetSessionsSessionResult', 'GetSessionsSessionKeyDetailsResult', 'GetSessionsSessionTargetResourceDetailsResult', ] @pulumi.output_type class SessionKeyDetails(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "publicKeyContent": suggest = "public_key_content" if suggest: pulumi.log.warn(f"Key '{key}' not found in SessionKeyDetails. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: SessionKeyDetails.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: SessionKeyDetails.__key_warning(key) return super().get(key, default) def __init__(__self__, *, public_key_content: str): """ :param str public_key_content: The public key in OpenSSH format of the SSH key pair for the session. When you connect to the session, you must provide the private key of the same SSH key pair. """ pulumi.set(__self__, "public_key_content", public_key_content) @property @pulumi.getter(name="publicKeyContent") def public_key_content(self) -> str: """ The public key in OpenSSH format of the SSH key pair for the session. When you connect to the session, you must provide the private key of the same SSH key pair. 
""" return pulumi.get(self, "public_key_content") @pulumi.output_type class SessionTargetResourceDetails(dict): @staticmethod def __key_warning(key: str): suggest = None if key == "sessionType": suggest = "session_type" elif key == "targetResourceId": suggest = "target_resource_id" elif key == "targetResourceDisplayName": suggest = "target_resource_display_name" elif key == "targetResourceOperatingSystemUserName": suggest = "target_resource_operating_system_user_name" elif key == "targetResourcePort": suggest = "target_resource_port" elif key == "targetResourcePrivateIpAddress": suggest = "target_resource_private_ip_address" if suggest: pulumi.log.warn(f"Key '{key}' not found in SessionTargetResourceDetails. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: SessionTargetResourceDetails.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: SessionTargetResourceDetails.__key_warning(key) return super().get(key, default) def __init__(__self__, *, session_type: str, target_resource_id: str, target_resource_display_name: Optional[str] = None, target_resource_operating_system_user_name: Optional[str] = None, target_resource_port: Optional[int] = None, target_resource_private_ip_address: Optional[str] = None): """ :param str session_type: The session type. :param str target_resource_id: The unique identifier (OCID) of the target resource (a Compute instance, for example) that the session connects to. :param str target_resource_display_name: The display name of the target Compute instance that the session connects to. :param str target_resource_operating_system_user_name: The name of the user on the target resource operating system that the session uses for the connection. :param int target_resource_port: The port number to connect to on the target resource. 
:param str target_resource_private_ip_address: The private IP address of the target resource that the session connects to. """ pulumi.set(__self__, "session_type", session_type) pulumi.set(__self__, "target_resource_id", target_resource_id) if target_resource_display_name is not None: pulumi.set(__self__, "target_resource_display_name", target_resource_display_name) if target_resource_operating_system_user_name is not None: pulumi.set(__self__, "target_resource_operating_system_user_name", target_resource_operating_system_user_name) if target_resource_port is not None: pulumi.set(__self__, "target_resource_port", target_resource_port) if target_resource_private_ip_address is not None: pulumi.set(__self__, "target_resource_private_ip_address", target_resource_private_ip_address) @property @pulumi.getter(name="sessionType") def session_type(self) -> str: """ The session type. """ return pulumi.get(self, "session_type") @property @pulumi.getter(name="targetResourceId") def target_resource_id(self) -> str: """ The unique identifier (OCID) of the target resource (a Compute instance, for example) that the session connects to. """ return pulumi.get(self, "target_resource_id") @property @pulumi.getter(name="targetResourceDisplayName") def target_resource_display_name(self) -> Optional[str]: """ The display name of the target Compute instance that the session connects to. """ return pulumi.get(self, "target_resource_display_name") @property @pulumi.getter(name="targetResourceOperatingSystemUserName") def target_resource_operating_system_user_name(self) -> Optional[str]: """ The name of the user on the target resource operating system that the session uses for the connection. """ return pulumi.get(self, "target_resource_operating_system_user_name") @property @pulumi.getter(name="targetResourcePort") def target_resource_port(self) -> Optional[int]: """ The port number to connect to on the target resource. 
""" return pulumi.get(self, "target_resource_port") @property @pulumi.getter(name="targetResourcePrivateIpAddress") def target_resource_private_ip_address(self) -> Optional[str]: """ The private IP address of the target resource that the session connects to. """ return pulumi.get(self, "target_resource_private_ip_address") @pulumi.output_type class GetBastionsBastionResult(dict): def __init__(__self__, *, bastion_type: str, client_cidr_block_allow_lists: Sequence[str], compartment_id: str, defined_tags: Mapping[str, Any], freeform_tags: Mapping[str, Any], id: str, lifecycle_details: str, max_session_ttl_in_seconds: int, max_sessions_allowed: int, name: str, phone_book_entry: str, private_endpoint_ip_address: str, state: str, static_jump_host_ip_addresses: Sequence[str], system_tags: Mapping[str, Any], target_subnet_id: str, target_vcn_id: str, time_created: str, time_updated: str): """ :param str bastion_type: The type of bastion. :param Sequence[str] client_cidr_block_allow_lists: A list of address ranges in CIDR notation that you want to allow to connect to sessions hosted by this bastion. :param str compartment_id: The unique identifier (OCID) of the compartment in which to list resources. :param Mapping[str, Any] defined_tags: Defined tags for this resource. Each key is predefined and scoped to a namespace. Example: `{"foo-namespace.bar-key": "value"}` :param Mapping[str, Any] freeform_tags: Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}` :param str id: The unique identifier (OCID) of the bastion, which can't be changed after creation. :param str lifecycle_details: A message describing the current state in more detail. :param int max_session_ttl_in_seconds: The maximum amount of time that any session on the bastion can remain active. :param int max_sessions_allowed: The maximum number of active sessions allowed on the bastion. 
:param str name: A filter to return only resources that match the entire name given. :param str phone_book_entry: The phonebook entry of the customer's team, which can't be changed after creation. Not applicable to `standard` bastions. :param str private_endpoint_ip_address: The private IP address of the created private endpoint. :param str state: The current state of the bastion. :param Sequence[str] static_jump_host_ip_addresses: A list of IP addresses of the hosts that the bastion has access to. Not applicable to `standard` bastions. :param Mapping[str, Any] system_tags: Usage of system tag keys. These predefined keys are scoped to namespaces. Example: `{"orcl-cloud.free-tier-retained": "true"}` :param str target_subnet_id: The unique identifier (OCID) of the subnet that the bastion connects to. :param str target_vcn_id: The unique identifier (OCID) of the virtual cloud network (VCN) that the bastion connects to. :param str time_created: The time the bastion was created. Format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2020-01-25T21:10:29.600Z` :param str time_updated: The time the bastion was updated. Format is defined by [RFC3339](https://tools.ietf.org/html/rfc3339). 
Example: `2020-01-25T21:10:29.600Z` """ pulumi.set(__self__, "bastion_type", bastion_type) pulumi.set(__self__, "client_cidr_block_allow_lists", client_cidr_block_allow_lists) pulumi.set(__self__, "compartment_id", compartment_id) pulumi.set(__self__, "defined_tags", defined_tags) pulumi.set(__self__, "freeform_tags", freeform_tags) pulumi.set(__self__, "id", id) pulumi.set(__self__, "lifecycle_details", lifecycle_details) pulumi.set(__self__, "max_session_ttl_in_seconds", max_session_ttl_in_seconds) pulumi.set(__self__, "max_sessions_allowed", max_sessions_allowed) pulumi.set(__self__, "name", name) pulumi.set(__self__, "phone_book_entry", phone_book_entry) pulumi.set(__self__, "private_endpoint_ip_address", private_endpoint_ip_address) pulumi.set(__self__, "state", state) pulumi.set(__self__, "static_jump_host_ip_addresses", static_jump_host_ip_addresses) pulumi.set(__self__, "system_tags", system_tags) pulumi.set(__self__, "target_subnet_id", target_subnet_id) pulumi.set(__self__, "target_vcn_id", target_vcn_id) pulumi.set(__self__, "time_created", time_created) pulumi.set(__self__, "time_updated", time_updated) @property @pulumi.getter(name="bastionType") def bastion_type(self) -> str: """ The type of bastion. """ return pulumi.get(self, "bastion_type") @property @pulumi.getter(name="clientCidrBlockAllowLists") def client_cidr_block_allow_lists(self) -> Sequence[str]: """ A list of address ranges in CIDR notation that you want to allow to connect to sessions hosted by this bastion. """ return pulumi.get(self, "client_cidr_block_allow_lists") @property @pulumi.getter(name="compartmentId") def compartment_id(self) -> str: """ The unique identifier (OCID) of the compartment in which to list resources. """ return pulumi.get(self, "compartment_id") @property @pulumi.getter(name="definedTags") def defined_tags(self) -> Mapping[str, Any]: """ Defined tags for this resource. Each key is predefined and scoped to a namespace. 
Example: `{"foo-namespace.bar-key": "value"}` """ return pulumi.get(self, "defined_tags") @property @pulumi.getter(name="freeformTags") def freeform_tags(self) -> Mapping[str, Any]: """ Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}` """ return pulumi.get(self, "freeform_tags") @property @pulumi.getter def id(self) -> str: """ The unique identifier (OCID) of the bastion, which can't be changed after creation. """ return pulumi.get(self, "id") @property @pulumi.getter(name="lifecycleDetails") def lifecycle_details(self) -> str: """ A message describing the current state in more detail. """ return pulumi.get(self, "lifecycle_details") @property @pulumi.getter(name="maxSessionTtlInSeconds") def max_session_ttl_in_seconds(self) -> int: """ The maximum amount of time that any session on the bastion can remain active. """ return pulumi.get(self, "max_session_ttl_in_seconds") @property @pulumi.getter(name="maxSessionsAllowed") def max_sessions_allowed(self) -> int: """ The maximum number of active sessions allowed on the bastion. """ return pulumi.get(self, "max_sessions_allowed") @property @pulumi.getter def name(self) -> str: """ A filter to return only resources that match the entire name given. """ return pulumi.get(self, "name") @property @pulumi.getter(name="phoneBookEntry") def phone_book_entry(self) -> str: """ The phonebook entry of the customer's team, which can't be changed after creation. Not applicable to `standard` bastions. """ return pulumi.get(self, "phone_book_entry") @property @pulumi.getter(name="privateEndpointIpAddress")
#!/usr/bin/env python2.7 # Copyright 2017 The Fuchsia Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from contextlib import contextmanager from collections import namedtuple import argparse import mmap import os import struct import sys import uuid # Standard ELF constants. ELFMAG = '\x7fELF' EI_CLASS = 4 ELFCLASS32 = 1 ELFCLASS64 = 2 EI_DATA = 5 ELFDATA2LSB = 1 ELFDATA2MSB = 2 EM_386 = 3 EM_ARM = 40 EM_X86_64 = 62 EM_AARCH64 = 183 PT_LOAD = 1 PT_DYNAMIC = 2 PT_INTERP = 3 PT_NOTE = 4 DT_NEEDED = 1 DT_STRTAB = 5 DT_SONAME = 14 NT_GNU_BUILD_ID = 3 SHT_SYMTAB = 2 SHF_ALLOC = 2 class elf_note(namedtuple('elf_note', [ 'name', 'type', 'desc', ])): # An ELF note is identified by (name_string, type_integer). def ident(self): return (self.name, self.type) def is_build_id(self): return self.ident() == ('GNU\0', NT_GNU_BUILD_ID) def build_id_hex(self): if self.is_build_id(): return ''.join(('%02x' % ord(byte)) for byte in self.desc) return None def __repr__(self): return ( 'elf_note(%r, %#x, <%d bytes>)' % (self.name, self.type, len(self.desc))) def gen_elf(): # { 'Struct1': (ELFCLASS32 fields, ELFCLASS64 fields), # 'Struct2': fields_same_for_both, ... 
} elf_types = { 'Ehdr': ( [ ('e_ident', '16s'), ('e_type', 'H'), ('e_machine', 'H'), ('e_version', 'I'), ('e_entry', 'I'), ('e_phoff', 'I'), ('e_shoff', 'I'), ('e_flags', 'I'), ('e_ehsize', 'H'), ('e_phentsize', 'H'), ('e_phnum', 'H'), ('e_shentsize', 'H'), ('e_shnum', 'H'), ('e_shstrndx', 'H'), ], [ ('e_ident', '16s'), ('e_type', 'H'), ('e_machine', 'H'), ('e_version', 'I'), ('e_entry', 'Q'), ('e_phoff', 'Q'), ('e_shoff', 'Q'), ('e_flags', 'I'), ('e_ehsize', 'H'), ('e_phentsize', 'H'), ('e_phnum', 'H'), ('e_shentsize', 'H'), ('e_shnum', 'H'), ('e_shstrndx', 'H'), ]), 'Phdr': ( [ ('p_type', 'I'), ('p_offset', 'I'), ('p_vaddr', 'I'), ('p_paddr', 'I'), ('p_filesz', 'I'), ('p_memsz', 'I'), ('p_flags', 'I'), ('p_align', 'I'), ], [ ('p_type', 'I'), ('p_flags', 'I'), ('p_offset', 'Q'), ('p_vaddr', 'Q'), ('p_paddr', 'Q'), ('p_filesz', 'Q'), ('p_memsz', 'Q'), ('p_align', 'Q'), ]), 'Shdr': ( [ ('sh_name', 'L'), ('sh_type', 'L'), ('sh_flags', 'L'), ('sh_addr', 'L'), ('sh_offset', 'L'), ('sh_size', 'L'), ('sh_link', 'L'), ('sh_info', 'L'), ('sh_addralign', 'L'), ('sh_entsize', 'L'), ], [ ('sh_name', 'L'), ('sh_type', 'L'), ('sh_flags', 'Q'), ('sh_addr', 'Q'), ('sh_offset', 'Q'), ('sh_size', 'Q'), ('sh_link', 'L'), ('sh_info', 'L'), ('sh_addralign', 'Q'), ('sh_entsize', 'Q'), ]), 'Dyn': ( [ ('d_tag', 'i'), ('d_val', 'I'), ], [ ('d_tag', 'q'), ('d_val', 'Q'), ]), 'Nhdr': [ ('n_namesz', 'I'), ('n_descsz', 'I'), ('n_type', 'I'), ], 'dwarf2_line_header': [ ('unit_length', 'L'), ('version', 'H'), ('header_length', 'L'), ('minimum_instruction_length', 'B'), ('default_is_stmt', 'B'), ('line_base', 'b'), ('line_range', 'B'), ('opcode_base', 'B'), ], 'dwarf4_line_header': [ ('unit_length', 'L'), ('version', 'H'), ('header_length', 'L'), ('minimum_instruction_length', 'B'), ('maximum_operations_per_instruction', 'B'), ('default_is_stmt', 'B'), ('line_base', 'b'), ('line_range', 'b'), ('opcode_base', 'B'), ], } # There is an accessor for each struct, e.g. Ehdr. 
# Ehdr.read is a function like Struct.unpack_from. # Ehdr.size is the size of the struct. elf_accessor = namedtuple('elf_accessor', ['size', 'read', 'write', 'pack']) # All the accessors for a format (class, byte-order) form one elf, # e.g. use elf.Ehdr and elf.Phdr. elf = namedtuple('elf', elf_types.keys()) def gen_accessors(is64, struct_byte_order): def make_accessor(type, decoder): return elf_accessor( size=decoder.size, read=lambda buffer, offset=0: type._make( decoder.unpack_from(buffer, offset)), write=lambda buffer, offset, x: decoder.pack_into( buffer, offset, *x), pack=lambda x: decoder.pack(*x)) for name, fields in elf_types.iteritems(): if isinstance(fields, tuple): fields = fields[1 if is64 else 0] type = namedtuple(name, [field_name for field_name, fmt in fields]) decoder = struct.Struct( struct_byte_order + ''.join(fmt for field_name, fmt in fields)) yield make_accessor(type, decoder) for elfclass, is64 in [(ELFCLASS32, False), (ELFCLASS64, True)]: for elf_bo, struct_bo in [(ELFDATA2LSB, '<'), (ELFDATA2MSB, '>')]: yield ( (chr(elfclass), chr(elf_bo)), elf(*gen_accessors(is64, struct_bo))) # e.g. ELF[file[EI_CLASS], file[EI_DATA]].Ehdr.read(file).e_phnum ELF = dict(gen_elf()) def get_elf_accessor(file): # If it looks like an ELF file, whip out the decoder ring. 
# NOTE(review): fragment — tail of a function whose `def` line lies above this
# chunk.  Checks the ELF magic bytes and selects the layout-accessor tuple
# keyed by the file's EI_CLASS/EI_DATA ident bytes; None means "not an ELF".
if file[:len(ELFMAG)] == ELFMAG:
    return ELF[file[EI_CLASS], file[EI_DATA]]
return None


def gen_phdrs(file, elf, ehdr):
    # Yield each program header (Phdr) of `file`, decoded via the `elf`
    # layout accessor, walking e_phnum entries starting at e_phoff.
    for pos in xrange(0, ehdr.e_phnum * elf.Phdr.size, elf.Phdr.size):
        yield elf.Phdr.read(file, ehdr.e_phoff + pos)


def gen_shdrs(file, elf, ehdr):
    # Yield each section header (Shdr) of `file`, walking e_shnum entries
    # starting at e_shoff.
    for pos in xrange(0, ehdr.e_shnum * elf.Shdr.size, elf.Shdr.size):
        yield elf.Shdr.read(file, ehdr.e_shoff + pos)


# Maps one ELF machine value to its LLVM and GN spellings.
cpu = namedtuple(
    'cpu',
    [
        'e_machine',  # ELF e_machine int
        'llvm',  # LLVM triple CPU component
        'gn',  # GN target_cpu
    ])

ELF_MACHINE_TO_CPU = {
    elf: cpu(elf, llvm, gn)
    for elf, llvm, gn in [
        (EM_386, 'i386', 'x86'),
        (EM_ARM, 'arm', 'arm'),
        (EM_X86_64, 'x86_64', 'x64'),
        (EM_AARCH64, 'aarch64', 'arm64'),
    ]
}


@contextmanager
def mmapper(filename):
    """A context manager that yields (fd, file_contents) given a file name.

    This ensures that the mmap and file objects are closed at the end of the
    'with' statement.  For an empty file the contents are the empty string,
    since mmap cannot map zero bytes.
    """
    fileobj = open(filename, 'rb')
    fd = fileobj.fileno()
    if os.fstat(fd).st_size == 0:
        # mmap can't handle empty files.
        try:
            yield fd, ''
        finally:
            fileobj.close()
    else:
        mmapobj = mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
        try:
            yield fd, mmapobj
        finally:
            mmapobj.close()
            fileobj.close()


def makedirs(dirs):
    # Like os.makedirs, but tolerates the directory already existing.
    # NOTE(review): `os.errno` is a Python 2-era alias; on modern Python
    # use the `errno` module directly.
    try:
        os.makedirs(dirs)
    except OSError as e:
        if e.errno != os.errno.EEXIST:
            raise e


# elf_info objects are only created by `get_elf_info` or the `copy` or
# `rename` methods.
class elf_info(namedtuple(
        'elf_info',
        [
            'filename',
            'cpu',  # cpu tuple
            'notes',  # list of (ident, desc): selected notes
            'build_id',  # string: lowercase hex
            'stripped',  # bool: Has no symbols or .debug_* sections
            'interp',  # string or None: PT_INTERP (without \0)
            'soname',  # string or None: DT_SONAME
            'needed',  # list of strings: DT_NEEDED
        ])):

    def rename(self, filename):
        """Return a clone of this record pointing at `filename`.

        `filename` must refer to the same inode as the current filename;
        the lazily-attached `elf` accessor and `get_sources` closure are
        carried over to the clone.
        """
        assert os.path.samefile(self.filename, filename)
        # Copy the tuple.
        clone = self.__class__(filename, *self[1:])
        # Copy the lazy state.
        clone.elf = self.elf
        if self.get_sources == clone.get_sources:
            raise Exception("uninitialized elf_info object!")
        clone.get_sources = self.get_sources
        return clone

    def copy(self):
        # A rename to the same name copies both tuple and lazy state.
        return self.rename(self.filename)

    # This is replaced with a closure by the creator in get_elf_info.
    def get_sources(self):
        raise Exception("uninitialized elf_info object!")

    def strip(self, stripped_filename):
        """Write stripped output to the given file unless it already exists
        with identical contents.  Returns True iff the file was changed."""
        with mmapper(self.filename) as mapped:
            fd, file = mapped
            ehdr = self.elf.Ehdr.read(file)
            # Zero out the section-header fields: the stripped image keeps
            # only the loadable segments, no section table.
            stripped_ehdr = ehdr._replace(e_shoff=0, e_shnum=0, e_shstrndx=0)
            # The stripped image ends at the last byte covered by a PT_LOAD
            # segment's file extent.
            stripped_size = max(
                phdr.p_offset + phdr.p_filesz
                for phdr in gen_phdrs(file, self.elf, ehdr)
                if phdr.p_type == PT_LOAD)
            # The program-header table itself must fit inside that span.
            assert ehdr.e_phoff + (
                ehdr.e_phnum * ehdr.e_phentsize) <= stripped_size

            def gen_stripped_contents():
                # Patched Ehdr followed by the original bytes up to the end
                # of the last PT_LOAD extent.
                yield self.elf.Ehdr.pack(stripped_ehdr)
                yield file[self.elf.Ehdr.size:stripped_size]

            def old_file_matches():
                # Cheap size check first, then chunk-by-chunk comparison
                # against what we would write.
                old_size = os.path.getsize(stripped_filename)
                new_size = sum(len(x) for x in gen_stripped_contents())
                if old_size != new_size:
                    return False
                with open(stripped_filename, 'rb') as f:
                    for chunk in gen_stripped_contents():
                        if f.read(len(chunk)) != chunk:
                            return False
                return True

            if os.path.exists(stripped_filename):
                if old_file_matches():
                    return False
                else:
                    os.remove(stripped_filename)
            # Create the new file with the same mode as the original.
            # NOTE(review): 0777 is a Python 2 octal literal; this module is
            # Python 2 throughout (xrange above).
            with os.fdopen(os.open(stripped_filename,
                                   os.O_WRONLY | os.O_CREAT | os.O_EXCL,
                                   os.fstat(fd).st_mode & 0777),
                           'wb') as stripped_file:
                stripped_file.write(self.elf.Ehdr.pack(stripped_ehdr))
                stripped_file.write(file[self.elf.Ehdr.size:stripped_size])
            return True


def get_elf_info(filename, match_notes=False):
    # NOTE(review): this function is truncated at the end of this chunk;
    # the nested helpers below close over file/elf/ehdr/phdrs, which are
    # presumably assigned later in the (unseen) remainder.
    file = None
    elf = None
    ehdr = None
    phdrs = None

    # Yields an elf_note for each note in any PT_NOTE segment.
    def gen_notes():
        def round_up_to(size):
            # Note fields are padded to 4-byte alignment (Python 2 integer
            # division).
            return ((size + 3) / 4) * 4
        for phdr in phdrs:
            if phdr.p_type == PT_NOTE:
                pos = phdr.p_offset
                while pos < phdr.p_offset + phdr.p_filesz:
                    nhdr = elf.Nhdr.read(file, pos)
                    pos += elf.Nhdr.size
                    name = file[pos:pos + nhdr.n_namesz]
                    pos += round_up_to(nhdr.n_namesz)
                    desc = file[pos:pos + nhdr.n_descsz]
                    pos += round_up_to(nhdr.n_descsz)
                    yield elf_note(name, nhdr.n_type, desc)

    def gen_sections():
        # Yields (shdr, section_name) pairs, skipping the null section 0.
        shdrs = list(gen_shdrs(file, elf, ehdr))
        if not shdrs:
            return
        strtab_shdr = shdrs[ehdr.e_shstrndx]
        for shdr, i in zip(shdrs, xrange(len(shdrs))):
            if i == 0:
                continue
            assert shdr.sh_name < strtab_shdr.sh_size, (
                "%s: invalid sh_name" % filename)
            yield (shdr,
                   extract_C_string(strtab_shdr.sh_offset + shdr.sh_name))

    # Generates '\0'-terminated strings starting at the given offset,
    # until an empty string.
    def gen_strings(start):
        while True:
            end = file.find('\0', start)
            assert end >= start, (
                "%s: Unterminated string at %#x" % (filename, start))
            if start == end:
                break
            yield file[start:end]
            start = end + 1

    def extract_C_string(start):
        # First '\0'-terminated string at `start`, or '' if it is empty.
        for string in gen_strings(start):
            return string
        return ''

    # Returns a string of hex digits (or None).
    def get_build_id():
        build_id = None
        for note in gen_notes():
            # Note that the last build_id note needs to be used due to TO-442.
            possible_build_id = note.build_id_hex()
            if possible_build_id:
                build_id = possible_build_id
        return build_id

    # Returns a list of elf_note objects.
    def get_matching_notes():
        if isinstance(match_notes, bool):
            if match_notes:
                return list(gen_notes())
            else:
                return []
        # If not a bool, it's an iterable of ident pairs.
        return [note for note in gen_notes() if note.ident() in match_notes]

    # Returns a string (without trailing '\0'), or None.
    def get_interp():
        # PT_INTERP points directly to a string in the file.
        for interp in (phdr for phdr in phdrs if phdr.p_type == PT_INTERP):
            interp = file[interp.p_offset:interp.p_offset + interp.p_filesz]
            if interp[-1:] == '\0':
                interp = interp[:-1]
            return interp
        return None

    # Returns a set of strings.
    def get_soname_and_needed():
        # Each DT_NEEDED or DT_SONAME points to a string in the .dynstr table.
        def GenDTStrings(tag):
<gh_stars>1-10
# Copyright 2017 <NAME>
#
# Licensed under the MIT License (the License); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at https://opensource.org/licenses/MIT#

# HTTP server fingerprinting driver (Python 2: has_key/iteritems/print
# statements/cmp/basestring/sys.maxint are used throughout).  Builds a
# per-host fingerprint of lexical, syntactic, and semantic response
# characteristics, then scores it against known fingerprints.
import glob
import re
import sys
import time

from src.io.storage import get_request_items, store_fingerprint, get_number_of_malformed_requests
from static import variables
from static.arguments import parse_arguments
from static.blacklist import Blacklist
from static.logger import setup_logger, LOGNAME_START
from src.exchange.http import Request, UrlInfo, submit_string
from src.static.constants import NO_RESPONSE_CODE, DATA_NONE, LEXICAL, SEMANTIC, SYNTACTIC, DATA_LIST, KNOWN, \
    SERVER_NAMES

logger = setup_logger()
# NOTE(review): a module-level bare `global` statement has no effect;
# presumably intended to declare variables.host_total usage.
global host_total


def add_characteristic(category, name, value, fingerprint, data_type=DATA_NONE):
    # Record `value` under fingerprint[category][name] on first sight;
    # an already-recorded identical value is a no-op.
    # NOTE(review): a *different* value for an existing name falls through
    # and is silently dropped — confirm this is intentional.
    if not fingerprint[category].has_key(name):
        # TODO maybe remove data type
        # NOTE(review): compares against the literal 'list' although callers
        # pass the DATA_LIST constant — verify they are the same value.
        if data_type == 'list':
            value = [value]
        fingerprint[category][name] = value
        return
    if fingerprint[category][name] == value:
        return


def get_characteristics(test_name, response, fingerprint, host, host_index, NO_RESPONSE=None):
    # Extract all fingerprint characteristics from one HTTP response and
    # store them via add_characteristic.
    # logger.debug("applying %s", test_name, extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
    response_code, response_text = response.return_code()
    server_name_claimed = response.server_name()
    if response_code not in [NO_RESPONSE, NO_RESPONSE_CODE]:
        # Lexical traits: reason phrase per status code, claimed banner.
        add_characteristic(LEXICAL, response_code, response_text, fingerprint)
        add_characteristic(LEXICAL, 'SERVER_NAME_CLAIMED', server_name_claimed, fingerprint)
    # nginx 404 test
    # if response_code == '404':
    #     server_name_404 = get_server_name_404(response)
    #     if len(server_name_404) > 0:
    #         add_characteristic(LEXICAL, 'SERVER_NAME_404', server_name_404, fingerprint)
    if test_name.startswith('malformed_'):
        # Semantic trait: how the server reacts to a malformed request.
        add_characteristic(SEMANTIC, test_name, response_code, fingerprint)
    # Syntactic traits: ordering of tokens inside specific headers.
    if response.has_header('Allow'):
        data = response.header_data('Allow')
        add_characteristic(SYNTACTIC, 'ALLOW_ORDER', data, fingerprint)
    if response.has_header('Public'):
        data = response.header_data('Public')
        add_characteristic(SYNTACTIC, 'PUBLIC_ORDER', data, fingerprint)
    if response.has_header('Vary'):
        data = response.header_data('Vary')
        add_characteristic(SYNTACTIC, 'VARY_ORDER', data, fingerprint)
    if response_code not in [NO_RESPONSE_CODE, NO_RESPONSE]:
        header_names = response.header_names()
        add_characteristic(SYNTACTIC, 'HEADER_ORDER', header_names, fingerprint, data_type=DATA_LIST)
    # Both capitalizations are folded into the single 'ETag' key.
    if response.has_header('ETag'):
        data = response.header_data('ETag')
        add_characteristic(SYNTACTIC, 'ETag', data, fingerprint)
    elif response.has_header('Etag'):
        data = response.header_data('Etag')
        add_characteristic(SYNTACTIC, 'ETag', data, fingerprint)


def default_get(host, host_index, fingerprint):
    # Plain GET probe; raising ValueError signals the host is unreachable.
    # NOTE(review): `request.submit` is accessed without parentheses here and
    # below — presumably a property on Request; verify against its class.
    request = Request(host, host_index, logger)
    response = request.submit
    if response.response_code == NO_RESPONSE_CODE:
        raise ValueError('default_get failed')
    else:
        get_characteristics('default_get', response, fingerprint, host, host_index)


def default_options(host, host_index, fingerprint):
    # OPTIONS probe.
    request = Request(host, host_index, logger, method='OPTIONS')
    response = request.submit
    get_characteristics('default_options', response, fingerprint, host, host_index)


def unknown_method(host, host_index, fingerprint):
    # Probe with a nonexistent HTTP method.
    request = Request(host, host_index, logger, method='ABCDEFG')
    response = request.submit
    get_characteristics('unknown_method', response, fingerprint, host, host_index)


def unauthorized_activity(host, host_index, fingerprint):
    # Probe every common (incl. WebDAV) method and fingerprint each reaction.
    activities = ('OPTIONS', 'TRACE', 'GET', 'HEAD', 'DELETE', 'PUT', 'POST', 'COPY', 'MOVE',
                  'MKCOL', 'PROPFIND', 'PROPPATCH', 'LOCK', 'UNLOCK', 'SEARCH')
    for activity in activities:
        request = Request(host, host_index, logger, method=activity)
        response = request.submit
        get_characteristics('unauthorized_activity_' + activity, response, fingerprint, host, host_index)


def empty_uri(host, host_index, fingerprint):
    # Probe a URI that should not exist.
    request = Request(host, host_index, logger, local_uri='/ABCDEFG')
    response = request.submit
    get_characteristics('empty_uri', response, fingerprint, host, host_index)


def malformed_method(host, host_index, fingerprint):
    # Send each malformed request line; results are keyed MALFORMED_000,
    # MALFORMED_001, ... (zero-padded to three digits).
    malformed_methods = get_malformed_methods()
    for index, method in enumerate(malformed_methods):
        request = Request(host, host_index, logger)
        request.method_line = method
        response = request.submit
        get_characteristics('MALFORMED_' + ('000' + str(index))[-3:], response, fingerprint, host, host_index)


def get_malformed_methods():
    # Build the list of deliberately broken request lines: per-activity
    # variants for GET/HEAD/POST/PUT plus a few activity-independent ones.
    activities = 'GET', 'HEAD', 'POST', 'PUT'
    malformed_methods_list = []
    for activity in activities:
        malformed_methods = (
            activity,
            activity + '/',
            activity + '/1.0',
            activity + ' / HTTP/123.45',
            activity + ' / HTTP/999.99',
            activity + ' / HTP/1.0',
            activity + ' / HTT/1.0',
            activity + ' / HTTP/7.Q',
            activity + ' / HTTP/1.0X',
            activity + ' /abcdefghijklmnopqrstuvwxyz/.. HTTP/1.0',
            activity + ' /./././././././././././././././ HTTP/1.0',
            activity + ' /.. HTTP/1.0',
            activity + '\t/\tHTTP/1.0',
            # NOTE(review): duplicate of the previous entry — confirm whether
            # intentional (it shifts the MALFORMED_nnn numbering).
            activity + '\t/\tHTTP/1.0',
            activity + ' / H',
            activity + ' / ' + 'HTTP/' + '1' * 1000 + '.0',
            activity + ' FTP://abcdefghi HTTP/1.0',
            activity + ' C:\ HTTP/1.0',
            ' ' * 1000 + activity + ' / HTTP/1.0',
            '\n' + activity + ' / HTTP/1.0',
        )
        malformed_methods_list += malformed_methods
    malformed_activity_independent = (
        'GET GET GET',
        'HELLO',
        '%47%45%54 / HTTP/1.0',
        'GEX\bT / HTTP/1.0'
    )
    malformed_methods_list += malformed_activity_independent
    return malformed_methods_list


def unavailable_accept(host, host_index, fingerprint):
    # Probe with an Accept media type no server should offer.
    request = Request(host, host_index, logger)
    request.add_header('Accept', 'abcd/efgh')
    response = request.submit
    get_characteristics('unavailable_accept', response, fingerprint, host, host_index)


def long_content_length(host, host_index, fingerprint):
    # Announce a huge Content-Length but send only a tiny body.
    request = Request(host, host_index, logger)
    request.add_header('Content-Length', str(sys.maxint))
    request.body = 'abcdefgh'
    response = request.submit
    get_characteristics('long_content_length', response, fingerprint, host, host_index)


def get_fingerprint(host, host_index, blacklist):
    """Collect the full characteristic fingerprint for one host.

    Fingerprints are dicts of three category dicts (LEXICAL, SYNTACTIC,
    SEMANTIC).  Requests come from get_request_items(); per-request
    ValueErrors are logged and skipped.
    """
    fingerprint = {
        LEXICAL: {},
        SYNTACTIC: {},
        SEMANTIC: {}
    }
    url_info = UrlInfo(host)
    request_items = get_request_items()
    for name, request_string in request_items.iteritems():
        try:
            response = submit_string(request_string, name, url_info, host_index, logger)
            get_characteristics(name, response, fingerprint, host, host_index)
        except ValueError as e:
            logger.warning("%s", e, extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
    return fingerprint
    # TODO deprecate
    # NOTE(review): everything below is unreachable (dead code kept behind the
    # return above) — the old probe-function driver.
    fingerprint_methods = [default_get, default_options, unknown_method, unauthorized_activity,
                           empty_uri, malformed_method, unavailable_accept, long_content_length]
    for method in fingerprint_methods:
        # logger.debug("processing %s", method.__name__, extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
        try:
            logger.debug('applying method %s', method.__name__,
                         extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
            method(host, host_index, fingerprint)
        except ValueError as e:
            logger.warning("%s", e, extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
            if method == default_get:
                # An unreachable host is blacklisted after the first probe.
                blacklist.insert(host)
                logger.info('host added to blacklist',
                            extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
                break
    return fingerprint


def get_known_fingerprints(args):
    # Load previously stored fingerprints (only when matching, i.e. not in
    # gather mode).  Returns None implicitly when args.gather is truthy.
    # NOTE(review): `eval` on file contents executes arbitrary code — the
    # fingerprint files must be trusted; consider ast.literal_eval.
    if args.gather is False:
        fingerprints = []
        directories = [args.known, 'data/output']
        for directory in directories:
            for filepath in glob.glob(directory + '/*'):
                logger.debug("loading fingerprint %s", filepath, extra=LOGNAME_START)
                with open(filepath, 'r') as file_handler:
                    f_fingerprint = eval(file_handler.read())
                    fingerprints.append(f_fingerprint)
        return fingerprints


def get_fingerprint_scores(args, subject, known_fingerprints):
    """Score `subject` against every known fingerprint.

    Returns a list of [known, similarity_dict, certainty] triples, where
    certainty is matches / (matches + mismatches).  With --lazy, an exact
    claimed-banner match short-circuits to certainty 1.
    """
    scores = []
    for known in known_fingerprints:
        similarity = {
            'matches': 0,
            'mismatches': 0,
            'unknowns': 0
        }
        header_match = subject[LEXICAL].has_key('SERVER_NAME_CLAIMED') \
            and known[LEXICAL].has_key('SERVER_NAME_CLAIMED') \
            and subject[LEXICAL]['SERVER_NAME_CLAIMED'] == known[LEXICAL]['SERVER_NAME_CLAIMED']
        if header_match and args.lazy:
            certainty = 1
        else:
            similarity = find_similar_lexical(known, similarity, subject)
            similarity = find_similar_syntactic(known, similarity, subject)
            similarity = find_similar_semantic(known, similarity, subject)
            matches = similarity['matches']
            total = float(similarity['matches'] + similarity['mismatches'])
            certainty = matches / total if total > 0 else 0
        scores.append([known, similarity, certainty])
    return scores


def find_similar_lexical(known, similarity, subject):
    # Compare per-status-code reason phrases for codes both sides observed.
    # TODO select appropriate response codes, the more the better
    response_codes = range(200, 220) + \
        range(300, 320) + \
        range(400, 420) + \
        range(500, 520)
    for code in response_codes:
        if known[LEXICAL].has_key(code) and subject[LEXICAL].has_key(code):
            known_text = known[LEXICAL][code]
            subject_text = subject[LEXICAL][code]
            if known_text == '' or subject_text == '':
                similarity['unknowns'] += 1
            elif known_text == subject_text:
                similarity['matches'] += 1
            else:
                similarity['mismatches'] += 1
    return similarity


def find_similar_syntactic(known, similarity, subject):
    # Syntactic comparison currently covers Allow ordering only.
    similarity = find_similar_allow_order(known, similarity, subject)
    # similarity = find_similar_etag(known, similarity, subject)
    return similarity


def find_similar_allow_order(known, similarity, subject):
    # Compare the Allow header token order; missing data counts as unknown.
    known_allows = subject_allows = ''
    if known[SYNTACTIC].has_key('ALLOW_ORDER'):
        known_allows = known[SYNTACTIC]['ALLOW_ORDER']
    else:
        return similarity
    if subject[SYNTACTIC].has_key('ALLOW_ORDER'):
        subject_allows = subject[SYNTACTIC]['ALLOW_ORDER']
    if known_allows and subject_allows:
        if known_allows == subject_allows:
            similarity['matches'] += 1
        else:
            similarity['mismatches'] += 1
    else:
        similarity['unknowns'] += 1
    return similarity


def find_similar_etag(known, similarity, subject):
    # Compare ETag values; either side missing counts as unknown.
    # (Currently disabled by the caller above.)
    known_etag = subject_etag = ''
    if known[SYNTACTIC].has_key('ETag'):
        known_etag = known[SYNTACTIC]['ETag']
    if subject[SYNTACTIC].has_key('ETag'):
        subject_etag = subject[SYNTACTIC]['ETag']
    if known_etag == '' or subject_etag == '':
        similarity['unknowns'] += 1
    elif known_etag == subject_etag:
        similarity['matches'] += 1
    else:
        similarity['mismatches'] += 1
    return similarity


def find_similar_semantic(known, similarity, subject):
    # Compare reactions to each malformed request (malformed_0..n-1).
    # NOTE(review): subject[SEMANTIC][malformed] is read without a has_key
    # guard — raises KeyError if the subject is missing an entry; confirm
    # subjects always carry all malformed_* keys.
    for i in range(get_number_of_malformed_requests()):
        malformed = 'malformed_' + str(i)
        if known[SEMANTIC].has_key(malformed):
            known_malformed = known[SEMANTIC][malformed]
            subject_malformed = subject[SEMANTIC][malformed]
            if known_malformed == subject_malformed:
                similarity['matches'] += 1
            else:
                similarity['mismatches'] += 1
        else:
            similarity['unknowns'] += 1
    return similarity


def score_compare(score_a, score_b):
    # Python 2 cmp-style comparator: certainty descending, then the known
    # fingerprint itself descending as a tie-breaker.
    server_a = score_a[0]
    # matches_a = score_a[1]['matches']
    matches_a = score_a[2]
    server_b = score_b[0]
    # matches_b = score_b[1]['matches']
    matches_b = score_b[2]
    compared = -cmp(matches_a, matches_b)
    if compared != 0:
        return compared
    else:
        return -cmp(server_a, server_b)


def sort_scores(scores):
    # In-place sort by score_compare; a single element needs no sorting.
    # NOTE(review): `len(scores) is 1` relies on CPython small-int identity —
    # should be `== 1`.
    if len(scores) is 1:
        return scores
    scores.sort(score_compare)
    return scores


def print_scores(hostname, scores):
    # Pretty-print the ranked score table for one host (Python 2 print).
    lint = "-" * 80
    print '\n%s\n%-50s\n%-50s %4s (%4s : %3s : %3s)' % (
        lint, hostname[:50], 'name', 'certainty', 'matches', 'mismatches', 'unknowns')
    for score in scores:
        # NOTE(review): `a` and `b` are unused leftovers.
        a = score[0]
        b = score[0][LEXICAL]
        # c = score[0][LEXICAL]['SERVER_NAME_CLAIMED']
        name = score[0][LEXICAL]['SERVER_NAME_CLAIMED'] if score[0][LEXICAL].has_key('SERVER_NAME_CLAIMED') else 'NO_BANNER'
        matches = score[1]['matches']
        mismatches = score[1]['mismatches']
        unknowns = score[1]['unknowns']
        certainty = score[2]
        print '%-50s %.3f (%2d : %2d : %2d)' % (name, certainty, matches, mismatches, unknowns)
    print lint


def get_hosts(args):
    # Single host from --input, otherwise one host per line of --file.
    hosts = []
    if args.input is not None:
        hosts.append(args.input)
    else:
        hosts += [host.strip() for host in args.file.readlines()]
    return hosts


def process_host(args, host, host_index, known_fingerprints, blacklist):
    # Fingerprint one host, store the result, and (unless gathering)
    # score and print it against the known fingerprints.
    f = get_fingerprint(host, host_index, blacklist)
    url_info = UrlInfo(host)
    if SERVER_NAMES is True:
        # Store under the claimed banner name (first token, '/' sanitized).
        banner = f[LEXICAL]['SERVER_NAME_CLAIMED']
        if isinstance(banner, basestring):
            filename = banner.split()[0]
        else:
            filename = banner[0].split()[0]
        filename = filename.replace('/', '_')
        directory = KNOWN
    else:
        # Store under host[:port].
        if url_info.port != 80:
            filename = url_info.host + ':' + str(url_info.port)
        else:
            filename = url_info.host
        directory = args.output
    store_fingerprint(directory, f, filename)
    if args.gather is False:
        scores = get_fingerprint_scores(args, f, known_fingerprints)
        scores = sort_scores(scores)
        print_scores(host, scores)


def process_hosts(args, hosts, known_fingerprints, blacklist):
    # Drive processing over all hosts, skipping blacklisted ones; host_index
    # is 1-based for logging.
    blacklist_hosts = blacklist.get_hosts()
    for host_index, host in enumerate(hosts):
        try:
            host_index += 1
            logger.info("processing host (%s/%s)", host_index, len(hosts),
                        extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
            if host not in blacklist_hosts:
                process_host(args, host, host_index, known_fingerprints, blacklist)
            else:
                logger.warning('host is blacklisted',
                               extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})
        except ValueError as e:
            logger.error(e, extra={'logname': host, 'host_index': host_index, 'host_total': variables.host_total})


# NOTE(review): the entry point below is truncated in this chunk — it ends
# mid-statement at `hosts`.
if __name__ == '__main__':
    try:
        args = parse_arguments()
        logger = setup_logger(args)
        hosts
if iii==6: wue_d_wmclimate_gsl+=[wue] nue_d_wmclimate_gsl+=[nue] A_d_wmclimate_gsl+=[A] if iiii==11: if iii==4: wue_d_dmclimate_temps+=[wue] nue_d_dmclimate_temps+=[nue] A_d_dmclimate_temps+=[A] if iiii==12: if iii==5: wue_d_wmclimate_temps+=[wue] nue_d_wmclimate_temps+=[nue] A_d_wmclimate_temps+=[A] if iiii==1: if iii==2: wue_d_dmclimate_dy+=[wue] nue_d_dmclimate_dy+=[nue] A_d_dmclimate_dy+=[A] if iiii==9: if iii==6: wue_d_wmclimate_dy+=[wue] nue_d_wmclimate_dy+=[nue] A_d_wmclimate_dy+=[A] if iiii==2: if iii==1: wue_d_inter_vartemp+=[wue] nue_d_inter_vartemp+=[nue] A_d_inter_vartemp+=[A] if iiii==3: if iii==1: wue_d_inter_varvwc+=[wue] nue_d_inter_varvwc+=[nue] A_d_inter_varvwc+=[A] if iiii==7: if iii==3: wue_d_inter_varna+=[wue] nue_d_inter_varna+=[nue] A_d_inter_varna+=[A] na_lst0+=[na] if iiii==4: if iii==3: wue_d_inter_mmclimate+=[wue] nue_d_inter_mmclimate+=[nue] A_d_inter_mmclimate+=[A] tempss_dm+=[tl] if iiii==5: if iii==4: wue_d_inter_dmclimate+=[wue] nue_d_inter_dmclimate+=[nue] A_d_inter_dmclimate+=[A] if iiii==6: if iii==5: wue_d_inter_wmclimate+=[wue] nue_d_inter_wmclimate+=[nue] A_d_inter_wmclimate+=[A] if iiii==13: if iii==7: wue_d_dy+=[wue] nue_d_dy+=[nue] A_d_dy+=[A] elif xx==1: #------calculate vapor pressure-----# pa_v=611*np.exp((17.27*temp_wm[iiii][time])/(temp_wm[iiii][time]+237.3)) #saturation vapor pressure of air (Pa) ea_str=pa_con_atmfrac(pa_v,3528) #saturation vapor pressure of air (Pa-->umol h20/mol air) ea=rh*ea_str #vapor pressure (umol h2O/mol air) #correct for leaf temperatures using leaf height t_diff=18-0.4*ht if t_diff<0.0: t_diff=0.0 tl=temp_wm[iiii][time]+t_diff #soil depth z=0.4 #---------------Photosynthesis Function---------------# #alter this line of code for when implementing different photosynthesis functions wue, nue, A, E, cs, ci, gsw, gs, gbw, gb, gm, cc,dd =photo(tk_25,ekc,eko,etau,ev,ej,toptv,toptj,na, qeff, PAR,tl,ea,chl,ij,kc25,ko25,o,ca,rh,m,a,frnr,flnr,ra,jm,g0,b,dia,u,q,vwc_min,vwc_max,sm_wm[iiii][time],z) if 
isinstance(wue, np.ndarray): wue=wue[0] if isinstance(nue, np.ndarray): nue=nue[0] if isinstance(A, np.ndarray): A=A[0] if isinstance(wue, list): wue=wue[0] if isinstance(nue, list): nue=nue[0] if isinstance(A, list): A=A[0] if iiii==0: if iii==0: wue_w_inter+=[wue] nue_w_inter+=[nue] gsw_w_inter+=[gsw] A_w_inter+=[A] E_w_inter+=[E] vpd_w_inter+=[dd] tempss_wm+=[tl] if iiii==8: if iii==2: wue_w_dmclimate_gsl+=[wue] nue_w_dmclimate_gsl+=[nue] A_w_dmclimate_gsl+=[A] if iiii==10: if iii==6: wue_w_wmclimate_gsl+=[wue] nue_w_wmclimate_gsl+=[nue] A_w_wmclimate_gsl+=[A] if iiii==11: if iii==4: wue_w_dmclimate_temps+=[wue] nue_w_dmclimate_temps+=[nue] A_w_dmclimate_temps+=[A] if iiii==12: if iii==5: wue_w_wmclimate_temps+=[wue] nue_w_wmclimate_temps+=[nue] A_w_wmclimate_temps+=[A] if iiii==1: if iii==2: wue_w_dmclimate_dy+=[wue] nue_w_dmclimate_dy+=[nue] A_w_dmclimate_dy+=[A] if iiii==9: if iii==6: wue_w_wmclimate_dy+=[wue] nue_w_wmclimate_dy+=[nue] A_w_wmclimate_dy+=[A] if iiii==2: if iii==1: wue_w_inter_vartemp+=[wue] nue_w_inter_vartemp+=[nue] A_w_inter_vartemp+=[A] if iiii==3: if iii==1: wue_w_inter_varvwc+=[wue] nue_w_inter_varvwc+=[nue] A_w_inter_varvwc+=[A] if iiii==7: if iii==3: wue_w_inter_varna+=[wue] nue_w_inter_varna+=[nue] A_w_inter_varna+=[A] if iiii==4: if iii==3: wue_w_inter_mmclimate+=[wue] nue_w_inter_mmclimate+=[nue] A_w_inter_mmclimate+=[A] tempss_wm+=[tl] if iiii==5: if iii==4: wue_w_inter_dmclimate+=[wue] nue_w_inter_dmclimate+=[nue] A_w_inter_dmclimate+=[A] if iiii==6: if iii==5: wue_w_inter_wmclimate+=[wue] nue_w_inter_wmclimate+=[nue] A_w_inter_wmclimate+=[A] if iiii==13: if iii==7: wue_w_dy+=[wue] nue_w_dy+=[nue] A_w_dy+=[A] na_lst+=[np.mean(na_lst0)] # if time>140 and time<gsf+1: # print wue_d_inter for iii in range(3): if iii==0: nue_d_inter_stats[iii]+=[np.mean(nue_d_inter)] wue_d_inter_stats[iii]+=[np.mean(wue_d_inter)] A_d_inter_stats[iii]+=[np.mean(A_d_inter)] nue_w_inter_stats[iii]+=[np.mean(nue_w_inter)] 
wue_w_inter_stats[iii]+=[np.mean(wue_w_inter)] A_w_inter_stats[iii]+=[np.mean(A_w_inter)] wue_d_dmclimate_gsl_stats[iii]+=[np.mean(wue_d_dmclimate_gsl)] nue_d_dmclimate_gsl_stats[iii]+=[np.mean(nue_d_dmclimate_gsl)] A_d_dmclimate_gsl_stats[iii]+=[np.mean(A_d_dmclimate_gsl)] wue_d_wmclimate_gsl_stats[iii]+=[np.mean(wue_d_wmclimate_gsl)] nue_d_wmclimate_gsl_stats[iii]+=[np.mean(nue_d_wmclimate_gsl)] A_d_wmclimate_gsl_stats[iii]+=[np.mean(A_d_wmclimate_gsl)] wue_d_dmclimate_dy_stats[iii]+=[np.mean(wue_d_dmclimate_dy)] nue_d_dmclimate_dy_stats[iii]+=[np.mean(nue_d_dmclimate_dy)] A_d_dmclimate_dy_stats[iii]+=[np.mean(A_d_dmclimate_dy)] wue_d_wmclimate_dy_stats[iii]+=[np.mean(wue_d_wmclimate_dy)] nue_d_wmclimate_dy_stats[iii]+=[np.mean(nue_d_wmclimate_dy)] A_d_wmclimate_dy_stats[iii]+=[np.mean(A_d_wmclimate_dy)] wue_d_dmclimate_temps_stats[iii]+=[np.mean(wue_d_dmclimate_temps)] nue_d_dmclimate_temps_stats[iii]+=[np.mean(nue_d_dmclimate_temps)] A_d_dmclimate_temps_stats[iii]+=[np.mean(A_d_dmclimate_temps)] wue_d_wmclimate_temps_stats[iii]+=[np.mean(wue_d_wmclimate_temps)] nue_d_wmclimate_temps_stats[iii]+=[np.mean(nue_d_wmclimate_temps)] A_d_wmclimate_temps_stats[iii]+=[np.mean(A_d_wmclimate_temps)] wue_w_dmclimate_gsl_stats[iii]+=[np.mean(wue_w_dmclimate_gsl)] nue_w_dmclimate_gsl_stats[iii]+=[np.mean(nue_w_dmclimate_gsl)] A_w_dmclimate_gsl_stats[iii]+=[np.mean(A_w_dmclimate_gsl)] wue_w_wmclimate_gsl_stats[iii]+=[np.mean(wue_w_wmclimate_gsl)] nue_w_wmclimate_gsl_stats[iii]+=[np.mean(nue_w_wmclimate_gsl)] A_w_wmclimate_gsl_stats[iii]+=[np.mean(A_w_wmclimate_gsl)] wue_w_dmclimate_dy_stats[iii]+=[np.mean(wue_w_dmclimate_dy)] nue_w_dmclimate_dy_stats[iii]+=[np.mean(nue_w_dmclimate_dy)] A_w_dmclimate_dy_stats[iii]+=[np.mean(A_w_dmclimate_dy)] wue_w_wmclimate_dy_stats[iii]+=[np.mean(wue_w_wmclimate_dy)] nue_w_wmclimate_dy_stats[iii]+=[np.mean(nue_w_wmclimate_dy)] A_w_wmclimate_dy_stats[iii]+=[np.mean(A_w_wmclimate_dy)] 
wue_w_dmclimate_temps_stats[iii]+=[np.mean(wue_w_dmclimate_temps)] nue_w_dmclimate_temps_stats[iii]+=[np.mean(nue_w_dmclimate_temps)] A_w_dmclimate_temps_stats[iii]+=[np.mean(A_w_dmclimate_temps)] wue_w_wmclimate_temps_stats[iii]+=[np.mean(wue_w_wmclimate_temps)] nue_w_wmclimate_temps_stats[iii]+=[np.mean(nue_w_wmclimate_temps)] A_w_wmclimate_temps_stats[iii]+=[np.mean(A_w_wmclimate_temps)] nue_d_inter_vartemp_stats[iii]+=[np.mean(nue_d_inter_vartemp)] wue_d_inter_vartemp_stats[iii]+=[np.mean(wue_d_inter_vartemp)] A_d_inter_vartemp_stats[iii]+=[np.mean(A_d_inter_vartemp)] nue_w_inter_vartemp_stats[iii]+=[np.mean(nue_w_inter_vartemp)] wue_w_inter_vartemp_stats[iii]+=[np.mean(wue_w_inter_vartemp)] A_w_inter_vartemp_stats[iii]+=[np.mean(A_w_inter_vartemp)] nue_d_inter_varvwc_stats[iii]+=[np.mean(nue_d_inter_varvwc)] wue_d_inter_varvwc_stats[iii]+=[np.mean(wue_d_inter_varvwc)] A_d_inter_varvwc_stats[iii]+=[np.mean(A_d_inter_varvwc)] nue_w_inter_varvwc_stats[iii]+=[np.mean(nue_w_inter_varvwc)] wue_w_inter_varvwc_stats[iii]+=[np.mean(wue_w_inter_varvwc)] A_w_inter_varvwc_stats[iii]+=[np.mean(A_w_inter_varvwc)] nue_d_inter_varna_stats[iii]+=[np.mean(nue_d_inter_varna)] wue_d_inter_varna_stats[iii]+=[np.mean(wue_d_inter_varna)] A_d_inter_varna_stats[iii]+=[np.mean(A_d_inter_varna)] nue_w_inter_varna_stats[iii]+=[np.mean(nue_w_inter_varna)] wue_w_inter_varna_stats[iii]+=[np.mean(wue_w_inter_varna)] A_w_inter_varna_stats[iii]+=[np.mean(A_w_inter_varna)] wue_w_inter_mmclimate_stats[iii]+=[np.mean(wue_w_inter_mmclimate)] nue_w_inter_mmclimate_stats[iii]+=[np.mean(nue_w_inter_mmclimate)] A_w_inter_mmclimate_stats[iii]+=[np.mean(A_w_inter_mmclimate)] wue_d_inter_mmclimate_stats[iii]+=[np.mean(wue_d_inter_mmclimate)] nue_d_inter_mmclimate_stats[iii]+=[np.mean(nue_d_inter_mmclimate)] A_d_inter_mmclimate_stats[iii]+=[np.mean(A_d_inter_mmclimate)] wue_w_inter_dmclimate_stats[iii]+=[np.mean(wue_w_inter_dmclimate)] nue_w_inter_dmclimate_stats[iii]+=[np.mean(nue_w_inter_dmclimate)] 
A_w_inter_dmclimate_stats[iii]+=[np.mean(A_w_inter_dmclimate)] wue_d_inter_dmclimate_stats[iii]+=[np.mean(wue_d_inter_dmclimate)] nue_d_inter_dmclimate_stats[iii]+=[np.mean(nue_d_inter_dmclimate)] A_d_inter_dmclimate_stats[iii]+=[np.mean(A_d_inter_dmclimate)] wue_w_inter_wmclimate_stats[iii]+=[np.mean(wue_w_inter_wmclimate)] nue_w_inter_wmclimate_stats[iii]+=[np.mean(nue_w_inter_wmclimate)] A_w_inter_wmclimate_stats[iii]+=[np.mean(A_w_inter_wmclimate)] wue_d_inter_wmclimate_stats[iii]+=[np.mean(wue_d_inter_wmclimate)] nue_d_inter_wmclimate_stats[iii]+=[np.mean(nue_d_inter_wmclimate)] A_d_inter_wmclimate_stats[iii]+=[np.mean(A_d_inter_wmclimate)] wue_d_dy_stats[iii]+=[np.mean(wue_d_dy)] nue_d_dy_stats[iii]+=[np.mean(nue_d_dy)] A_d_dy_stats[iii]+=[np.mean(A_d_dy)] wue_w_dy_stats[iii]+=[np.mean(wue_w_dy)] nue_w_dy_stats[iii]+=[np.mean(nue_w_dy)] A_w_dy_stats[iii]+=[np.mean(A_w_dy)] if iii==1: nue_d_inter_stats[iii]+=[np.min(nue_d_inter)] wue_d_inter_stats[iii]+=[np.min(wue_d_inter)] A_d_inter_stats[iii]+=[np.min(A_d_inter)] nue_w_inter_stats[iii]+=[np.min(nue_w_inter)] wue_w_inter_stats[iii]+=[np.min(wue_w_inter)] A_w_inter_stats[iii]+=[np.min(A_w_inter)] wue_d_dmclimate_gsl_stats[iii]+=[np.min(wue_d_dmclimate_gsl)] nue_d_dmclimate_gsl_stats[iii]+=[np.min(nue_d_dmclimate_gsl)] A_d_dmclimate_gsl_stats[iii]+=[np.min(A_d_dmclimate_gsl)] wue_d_wmclimate_gsl_stats[iii]+=[np.min(wue_d_wmclimate_gsl)] nue_d_wmclimate_gsl_stats[iii]+=[np.min(nue_d_wmclimate_gsl)] A_d_wmclimate_gsl_stats[iii]+=[np.min(A_d_wmclimate_gsl)] wue_d_dmclimate_dy_stats[iii]+=[np.min(wue_d_dmclimate_dy)] nue_d_dmclimate_dy_stats[iii]+=[np.min(nue_d_dmclimate_dy)] A_d_dmclimate_dy_stats[iii]+=[np.min(A_d_dmclimate_dy)] wue_d_wmclimate_dy_stats[iii]+=[np.min(wue_d_wmclimate_dy)] nue_d_wmclimate_dy_stats[iii]+=[np.min(nue_d_wmclimate_dy)] A_d_wmclimate_dy_stats[iii]+=[np.min(A_d_wmclimate_dy)] wue_d_dmclimate_temps_stats[iii]+=[np.min(wue_d_dmclimate_temps)] 
nue_d_dmclimate_temps_stats[iii]+=[np.min(nue_d_dmclimate_temps)] A_d_dmclimate_temps_stats[iii]+=[np.min(A_d_dmclimate_temps)] wue_d_wmclimate_temps_stats[iii]+=[np.min(wue_d_wmclimate_temps)] nue_d_wmclimate_temps_stats[iii]+=[np.min(nue_d_wmclimate_temps)] A_d_wmclimate_temps_stats[iii]+=[np.min(A_d_wmclimate_temps)] wue_w_dmclimate_gsl_stats[iii]+=[np.min(wue_w_dmclimate_gsl)] nue_w_dmclimate_gsl_stats[iii]+=[np.min(nue_w_dmclimate_gsl)] A_w_dmclimate_gsl_stats[iii]+=[np.min(A_w_dmclimate_gsl)] wue_w_wmclimate_gsl_stats[iii]+=[np.min(wue_w_wmclimate_gsl)] nue_w_wmclimate_gsl_stats[iii]+=[np.min(nue_w_wmclimate_gsl)] A_w_wmclimate_gsl_stats[iii]+=[np.min(A_w_wmclimate_gsl)] wue_w_dmclimate_dy_stats[iii]+=[np.min(wue_w_dmclimate_dy)] nue_w_dmclimate_dy_stats[iii]+=[np.min(nue_w_dmclimate_dy)] A_w_dmclimate_dy_stats[iii]+=[np.min(A_w_dmclimate_dy)] wue_w_wmclimate_dy_stats[iii]+=[np.min(wue_w_wmclimate_dy)] nue_w_wmclimate_dy_stats[iii]+=[np.min(nue_w_wmclimate_dy)] A_w_wmclimate_dy_stats[iii]+=[np.min(A_w_wmclimate_dy)] wue_w_dmclimate_temps_stats[iii]+=[np.min(wue_w_dmclimate_temps)] nue_w_dmclimate_temps_stats[iii]+=[np.min(nue_w_dmclimate_temps)] A_w_dmclimate_temps_stats[iii]+=[np.min(A_w_dmclimate_temps)] wue_w_wmclimate_temps_stats[iii]+=[np.min(wue_w_wmclimate_temps)] nue_w_wmclimate_temps_stats[iii]+=[np.min(nue_w_wmclimate_temps)] A_w_wmclimate_temps_stats[iii]+=[np.min(A_w_wmclimate_temps)] nue_d_inter_vartemp_stats[iii]+=[np.min(nue_d_inter_vartemp)] wue_d_inter_vartemp_stats[iii]+=[np.min(wue_d_inter_vartemp)] A_d_inter_vartemp_stats[iii]+=[np.min(A_d_inter_vartemp)] nue_w_inter_vartemp_stats[iii]+=[np.min(nue_w_inter_vartemp)] wue_w_inter_vartemp_stats[iii]+=[np.min(wue_w_inter_vartemp)] A_w_inter_vartemp_stats[iii]+=[np.min(A_w_inter_vartemp)] nue_d_inter_varvwc_stats[iii]+=[np.min(nue_d_inter_varvwc)] wue_d_inter_varvwc_stats[iii]+=[np.min(wue_d_inter_varvwc)] A_d_inter_varvwc_stats[iii]+=[np.min(A_d_inter_varvwc)] 
nue_w_inter_varvwc_stats[iii]+=[np.min(nue_w_inter_varvwc)] wue_w_inter_varvwc_stats[iii]+=[np.min(wue_w_inter_varvwc)] A_w_inter_varvwc_stats[iii]+=[np.min(A_w_inter_varvwc)] nue_d_inter_varna_stats[iii]+=[np.min(nue_d_inter_varna)] wue_d_inter_varna_stats[iii]+=[np.min(wue_d_inter_varna)] A_d_inter_varna_stats[iii]+=[np.min(A_d_inter_varna)] nue_w_inter_varna_stats[iii]+=[np.min(nue_w_inter_varna)] wue_w_inter_varna_stats[iii]+=[np.min(wue_w_inter_varna)] A_w_inter_varna_stats[iii]+=[np.min(A_w_inter_varna)] wue_w_inter_mmclimate_stats[iii]+=[np.min(wue_w_inter_mmclimate)] nue_w_inter_mmclimate_stats[iii]+=[np.min(nue_w_inter_mmclimate)] A_w_inter_mmclimate_stats[iii]+=[np.min(A_w_inter_mmclimate)] wue_d_inter_mmclimate_stats[iii]+=[np.min(wue_d_inter_mmclimate)] nue_d_inter_mmclimate_stats[iii]+=[np.min(nue_d_inter_mmclimate)] A_d_inter_mmclimate_stats[iii]+=[np.min(A_d_inter_mmclimate)] wue_w_inter_dmclimate_stats[iii]+=[np.min(wue_w_inter_dmclimate)] nue_w_inter_dmclimate_stats[iii]+=[np.min(nue_w_inter_dmclimate)] A_w_inter_dmclimate_stats[iii]+=[np.min(A_w_inter_dmclimate)] wue_d_inter_dmclimate_stats[iii]+=[np.min(wue_d_inter_dmclimate)] nue_d_inter_dmclimate_stats[iii]+=[np.min(nue_d_inter_dmclimate)] A_d_inter_dmclimate_stats[iii]+=[np.min(A_d_inter_dmclimate)] wue_w_inter_wmclimate_stats[iii]+=[np.min(wue_w_inter_wmclimate)] nue_w_inter_wmclimate_stats[iii]+=[np.min(nue_w_inter_wmclimate)] A_w_inter_wmclimate_stats[iii]+=[np.min(A_w_inter_wmclimate)] wue_d_inter_wmclimate_stats[iii]+=[np.min(wue_d_inter_wmclimate)] nue_d_inter_wmclimate_stats[iii]+=[np.min(nue_d_inter_wmclimate)] A_d_inter_wmclimate_stats[iii]+=[np.min(A_d_inter_wmclimate)] wue_d_dy_stats[iii]+=[np.min(wue_d_dy)] nue_d_dy_stats[iii]+=[np.min(nue_d_dy)] A_d_dy_stats[iii]+=[np.min(A_d_dy)] wue_w_dy_stats[iii]+=[np.min(wue_w_dy)] nue_w_dy_stats[iii]+=[np.min(nue_w_dy)] A_w_dy_stats[iii]+=[np.min(A_w_dy)] if iii==2: nue_d_inter_stats[iii]+=[np.max(nue_d_inter)] 
wue_d_inter_stats[iii]+=[np.max(wue_d_inter)] A_d_inter_stats[iii]+=[np.max(A_d_inter)] nue_w_inter_stats[iii]+=[np.max(nue_w_inter)] wue_w_inter_stats[iii]+=[np.max(wue_w_inter)] A_w_inter_stats[iii]+=[np.max(A_w_inter)] wue_d_dmclimate_gsl_stats[iii]+=[np.max(wue_d_dmclimate_gsl)] nue_d_dmclimate_gsl_stats[iii]+=[np.max(nue_d_dmclimate_gsl)] A_d_dmclimate_gsl_stats[iii]+=[np.max(A_d_dmclimate_gsl)] wue_d_wmclimate_gsl_stats[iii]+=[np.max(wue_d_wmclimate_gsl)] nue_d_wmclimate_gsl_stats[iii]+=[np.max(nue_d_wmclimate_gsl)] A_d_wmclimate_gsl_stats[iii]+=[np.max(A_d_wmclimate_gsl)] wue_d_dmclimate_dy_stats[iii]+=[np.max(wue_d_dmclimate_dy)] nue_d_dmclimate_dy_stats[iii]+=[np.max(nue_d_dmclimate_dy)] A_d_dmclimate_dy_stats[iii]+=[np.max(A_d_dmclimate_dy)] wue_d_wmclimate_dy_stats[iii]+=[np.max(wue_d_wmclimate_dy)] nue_d_wmclimate_dy_stats[iii]+=[np.max(nue_d_wmclimate_dy)] A_d_wmclimate_dy_stats[iii]+=[np.max(A_d_wmclimate_dy)] wue_d_dmclimate_temps_stats[iii]+=[np.max(wue_d_dmclimate_temps)] nue_d_dmclimate_temps_stats[iii]+=[np.max(nue_d_dmclimate_temps)] A_d_dmclimate_temps_stats[iii]+=[np.max(A_d_dmclimate_temps)] wue_d_wmclimate_temps_stats[iii]+=[np.max(wue_d_wmclimate_temps)] nue_d_wmclimate_temps_stats[iii]+=[np.max(nue_d_wmclimate_temps)] A_d_wmclimate_temps_stats[iii]+=[np.max(A_d_wmclimate_temps)] wue_w_dmclimate_gsl_stats[iii]+=[np.max(wue_w_dmclimate_gsl)] nue_w_dmclimate_gsl_stats[iii]+=[np.max(nue_w_dmclimate_gsl)] A_w_dmclimate_gsl_stats[iii]+=[np.max(A_w_dmclimate_gsl)] wue_w_wmclimate_gsl_stats[iii]+=[np.max(wue_w_wmclimate_gsl)] nue_w_wmclimate_gsl_stats[iii]+=[np.max(nue_w_wmclimate_gsl)] A_w_wmclimate_gsl_stats[iii]+=[np.max(A_w_wmclimate_gsl)] wue_w_dmclimate_dy_stats[iii]+=[np.max(wue_w_dmclimate_dy)] nue_w_dmclimate_dy_stats[iii]+=[np.max(nue_w_dmclimate_dy)] A_w_dmclimate_dy_stats[iii]+=[np.max(A_w_dmclimate_dy)] wue_w_wmclimate_dy_stats[iii]+=[np.max(wue_w_wmclimate_dy)] nue_w_wmclimate_dy_stats[iii]+=[np.max(nue_w_wmclimate_dy)] 
A_w_wmclimate_dy_stats[iii]+=[np.max(A_w_wmclimate_dy)] wue_w_dmclimate_temps_stats[iii]+=[np.max(wue_w_dmclimate_temps)] nue_w_dmclimate_temps_stats[iii]+=[np.max(nue_w_dmclimate_temps)] A_w_dmclimate_temps_stats[iii]+=[np.max(A_w_dmclimate_temps)] wue_w_wmclimate_temps_stats[iii]+=[np.max(wue_w_wmclimate_temps)] nue_w_wmclimate_temps_stats[iii]+=[np.max(nue_w_wmclimate_temps)] A_w_wmclimate_temps_stats[iii]+=[np.max(A_w_wmclimate_temps)] nue_d_inter_vartemp_stats[iii]+=[np.max(nue_d_inter_vartemp)] wue_d_inter_vartemp_stats[iii]+=[np.max(wue_d_inter_vartemp)] A_d_inter_vartemp_stats[iii]+=[np.max(A_d_inter_vartemp)] nue_w_inter_vartemp_stats[iii]+=[np.max(nue_w_inter_vartemp)] wue_w_inter_vartemp_stats[iii]+=[np.max(wue_w_inter_vartemp)] A_w_inter_vartemp_stats[iii]+=[np.max(A_w_inter_vartemp)] nue_d_inter_varvwc_stats[iii]+=[np.max(nue_d_inter_varvwc)] wue_d_inter_varvwc_stats[iii]+=[np.max(wue_d_inter_varvwc)] A_d_inter_varvwc_stats[iii]+=[np.max(A_d_inter_varvwc)] nue_w_inter_varvwc_stats[iii]+=[np.max(nue_w_inter_varvwc)] wue_w_inter_varvwc_stats[iii]+=[np.max(wue_w_inter_varvwc)] A_w_inter_varvwc_stats[iii]+=[np.max(A_w_inter_varvwc)] nue_d_inter_varna_stats[iii]+=[np.max(nue_d_inter_varna)] wue_d_inter_varna_stats[iii]+=[np.max(wue_d_inter_varna)] A_d_inter_varna_stats[iii]+=[np.max(A_d_inter_varna)] nue_w_inter_varna_stats[iii]+=[np.max(nue_w_inter_varna)] wue_w_inter_varna_stats[iii]+=[np.max(wue_w_inter_varna)] A_w_inter_varna_stats[iii]+=[np.max(A_w_inter_varna)] wue_w_inter_mmclimate_stats[iii]+=[np.max(wue_w_inter_mmclimate)] nue_w_inter_mmclimate_stats[iii]+=[np.max(nue_w_inter_mmclimate)] A_w_inter_mmclimate_stats[iii]+=[np.max(A_w_inter_mmclimate)] wue_d_inter_mmclimate_stats[iii]+=[np.max(wue_d_inter_mmclimate)] nue_d_inter_mmclimate_stats[iii]+=[np.max(nue_d_inter_mmclimate)] A_d_inter_mmclimate_stats[iii]+=[np.max(A_d_inter_mmclimate)] wue_w_inter_dmclimate_stats[iii]+=[np.max(wue_w_inter_dmclimate)] 
nue_w_inter_dmclimate_stats[iii]+=[np.max(nue_w_inter_dmclimate)] A_w_inter_dmclimate_stats[iii]+=[np.max(A_w_inter_dmclimate)] wue_d_inter_dmclimate_stats[iii]+=[np.max(wue_d_inter_dmclimate)] nue_d_inter_dmclimate_stats[iii]+=[np.max(nue_d_inter_dmclimate)] A_d_inter_dmclimate_stats[iii]+=[np.max(A_d_inter_dmclimate)] wue_w_inter_wmclimate_stats[iii]+=[np.max(wue_w_inter_wmclimate)] nue_w_inter_wmclimate_stats[iii]+=[np.max(nue_w_inter_wmclimate)] A_w_inter_wmclimate_stats[iii]+=[np.max(A_w_inter_wmclimate)] wue_d_inter_wmclimate_stats[iii]+=[np.max(wue_d_inter_wmclimate)] nue_d_inter_wmclimate_stats[iii]+=[np.max(nue_d_inter_wmclimate)] A_d_inter_wmclimate_stats[iii]+=[np.max(A_d_inter_wmclimate)] wue_d_dy_stats[iii]+=[np.max(wue_d_dy)] nue_d_dy_stats[iii]+=[np.max(nue_d_dy)] A_d_dy_stats[iii]+=[np.max(A_d_dy)] wue_w_dy_stats[iii]+=[np.max(wue_w_dy)] nue_w_dy_stats[iii]+=[np.max(nue_w_dy)] A_w_dy_stats[iii]+=[np.max(A_w_dy)] tot_temps_dm+=[np.mean(tempss_dm)] tot_temps_wm+=[np.mean(tempss_wm)] #---------------Smooth Model Output---------------# for iii in range(3): nue_d_inter_stats_smooth+=[gs_smooth(nue_d_inter_stats[iii],gs0_d,gsf)] wue_d_inter_stats_smooth+=[gs_smooth(wue_d_inter_stats[iii],gs0_d,gsf)] A_d_inter_stats_smooth+=[gs_smooth(A_d_inter_stats[iii],gs0_d,gsf)] nue_w_inter_stats_smooth+=[gs_smooth(nue_w_inter_stats[iii],gs0_w,gsf)] wue_w_inter_stats_smooth+=[gs_smooth(wue_w_inter_stats[iii],gs0_w,gsf)] A_w_inter_stats_smooth+=[gs_smooth(A_w_inter_stats[iii],gs0_w,gsf)] wue_d_dmclimate_gsl_stats_smooth+=[gs_smooth(wue_d_dmclimate_gsl_stats[iii],gs0_ddy,gsf)] nue_d_dmclimate_gsl_stats_smooth+=[gs_smooth(nue_d_dmclimate_gsl_stats[iii],gs0_ddy,gsf)] A_d_dmclimate_gsl_stats_smooth+=[gs_smooth( A_d_dmclimate_gsl_stats[iii],gs0_ddy,gsf)] wue_d_wmclimate_gsl_stats_smooth+=[gs_smooth(wue_d_wmclimate_gsl_stats[iii],gs0_wdy,gsf)] nue_d_wmclimate_gsl_stats_smooth+=[gs_smooth(nue_d_wmclimate_gsl_stats[iii],gs0_wdy,gsf)] 
A_d_wmclimate_gsl_stats_smooth+=[gs_smooth(A_d_wmclimate_gsl_stats[iii],gs0_wdy,gsf)] wue_d_dmclimate_dy_stats_smooth+=[gs_smooth(wue_d_dmclimate_dy_stats[iii],gs0_ddy,gsf)] nue_d_dmclimate_dy_stats_smooth+=[gs_smooth(nue_d_dmclimate_dy_stats[iii],gs0_ddy,gsf)] A_d_dmclimate_dy_stats_smooth+=[gs_smooth(A_d_dmclimate_dy_stats[iii],gs0_ddy,gsf)] if iii==0: for i in range(len(wue_d_dmclimate_dy_stats_smooth[0])): if wue_d_dmclimate_dy_stats_smooth[0][i]<0.0: wue_d_dmclimate_dy_stats_smooth[0][i]=0.0 for i in range(len(nue_d_dmclimate_dy_stats_smooth[0])): if nue_d_dmclimate_dy_stats_smooth[0][i]<0.0: nue_d_dmclimate_dy_stats_smooth[0][i]=0.0 for i in range(len(A_d_dmclimate_dy_stats_smooth[0])): if A_d_dmclimate_dy_stats_smooth[0][i]<0.0: A_d_dmclimate_dy_stats_smooth[0][i]=0.0 wue_d_dy_stats_smooth+=[gs_smooth(wue_d_dy_stats[iii],gs0_ddy,gsf)] nue_d_dy_stats_smooth+=[gs_smooth(nue_d_dy_stats[iii],gs0_ddy,gsf)] A_d_dy_stats_smooth+=[gs_smooth(A_d_dy_stats[iii],gs0_ddy,gsf)] if iii==0: for i in range(len(wue_d_dy_stats_smooth[0])): if wue_d_dy_stats_smooth[0][i]<0.0: wue_d_dy_stats_smooth[0][i]=0.0 for i in range(len(nue_d_dy_stats_smooth[0])): if nue_d_dy_stats_smooth[0][i]<0.0: nue_d_dy_stats_smooth[0][i]=0.0 for i in range(len(A_d_dy_stats_smooth[0])): if A_d_dy_stats_smooth[0][i]<0.0: A_d_dy_stats_smooth[0][i]=0.0 wue_d_wmclimate_dy_stats_smooth+=[gs_smooth(wue_d_wmclimate_dy_stats[iii],gs0_wdy,gsf)] nue_d_wmclimate_dy_stats_smooth+=[gs_smooth(nue_d_wmclimate_dy_stats[iii],gs0_wdy,gsf)] A_d_wmclimate_dy_stats_smooth+=[gs_smooth(A_d_wmclimate_dy_stats[iii],gs0_wdy,gsf)] wue_d_dmclimate_temps_stats_smooth+=[gs_smooth(wue_d_dmclimate_temps_stats[iii],gs0_d,gsf)] nue_d_dmclimate_temps_stats_smooth+=[gs_smooth(nue_d_dmclimate_temps_stats[iii],gs0_d,gsf)] A_d_dmclimate_temps_stats_smooth+=[gs_smooth(A_d_dmclimate_temps_stats[iii],gs0_d,gsf)] wue_d_wmclimate_temps_stats_smooth+=[gs_smooth(wue_d_wmclimate_temps_stats[iii],gs0_w,gsf)] 
nue_d_wmclimate_temps_stats_smooth+=[gs_smooth(nue_d_wmclimate_temps_stats[iii],gs0_w,gsf)] A_d_wmclimate_temps_stats_smooth+=[gs_smooth(A_d_wmclimate_temps_stats[iii],gs0_w,gsf)] wue_w_dmclimate_gsl_stats_smooth+=[gs_smooth(wue_w_dmclimate_gsl_stats[iii],gs0_ddy,gsf)] nue_w_dmclimate_gsl_stats_smooth+=[gs_smooth(nue_w_dmclimate_gsl_stats[iii],gs0_ddy,gsf)] A_w_dmclimate_gsl_stats_smooth+=[gs_smooth(A_w_dmclimate_gsl_stats[iii],gs0_ddy,gsf)] wue_w_wmclimate_gsl_stats_smooth+=[gs_smooth(wue_w_wmclimate_gsl_stats[iii],gs0_wdy,gsf)] nue_w_wmclimate_gsl_stats_smooth+=[gs_smooth(nue_w_wmclimate_gsl_stats[iii],gs0_wdy,gsf)] A_w_wmclimate_gsl_stats_smooth+=[gs_smooth(A_w_wmclimate_gsl_stats[iii],gs0_wdy,gsf)] wue_w_dmclimate_dy_stats_smooth+=[gs_smooth(wue_w_dmclimate_dy_stats[iii],gs0_ddy,gsf)] nue_w_dmclimate_dy_stats_smooth+=[gs_smooth(nue_w_dmclimate_dy_stats[iii],gs0_ddy,gsf)] A_w_dmclimate_dy_stats_smooth+=[gs_smooth(A_w_dmclimate_dy_stats[iii],gs0_ddy,gsf)] if iii==0: for i in range(len(wue_w_dmclimate_dy_stats_smooth[0])): if wue_w_dmclimate_dy_stats_smooth[0][i]<0.0: wue_w_dmclimate_dy_stats_smooth[0][i]=0.0 for i in range(len(nue_w_dmclimate_dy_stats_smooth[0])): if nue_w_dmclimate_dy_stats_smooth[0][i]<0.0: nue_w_dmclimate_dy_stats_smooth[0][i]=0.0 for i in range(len(A_w_dmclimate_dy_stats_smooth[0])): if A_w_dmclimate_dy_stats_smooth[0][i]<0.0: A_w_dmclimate_dy_stats_smooth[0][i]=0.0 wue_w_dy_stats_smooth+=[gs_smooth(wue_w_dy_stats[iii],gs0_wdy,gsf)] nue_w_dy_stats_smooth+=[gs_smooth(nue_w_dy_stats[iii],gs0_wdy,gsf)] A_w_dy_stats_smooth+=[gs_smooth(A_w_dy_stats[iii],gs0_wdy,gsf)] if iii==0: for i in range(len(wue_w_dy_stats_smooth[0])): if wue_w_dy_stats_smooth[0][i]<0.0: wue_w_dy_stats_smooth[0][i]=0.0 for i in range(len(nue_w_dy_stats_smooth[0])): if nue_w_dy_stats_smooth[0][i]<0.0: nue_w_dy_stats_smooth[0][i]=0.0 for i in range(len(A_w_dy_stats_smooth[0])): if A_w_dy_stats_smooth[0][i]<0.0: A_w_dy_stats_smooth[0][i]=0.0 
wue_w_wmclimate_dy_stats_smooth+=[gs_smooth(wue_w_wmclimate_dy_stats[iii],gs0_wdy,gsf)] nue_w_wmclimate_dy_stats_smooth+=[gs_smooth(nue_w_wmclimate_dy_stats[iii],gs0_wdy,gsf)] A_w_wmclimate_dy_stats_smooth+=[gs_smooth(A_w_wmclimate_dy_stats[iii],gs0_wdy,gsf)] wue_w_dmclimate_temps_stats_smooth+=[gs_smooth(wue_w_dmclimate_temps_stats[iii],gs0_d,gsf)] nue_w_dmclimate_temps_stats_smooth+=[gs_smooth(nue_w_dmclimate_temps_stats[iii],gs0_d,gsf)] A_w_dmclimate_temps_stats_smooth+=[gs_smooth(A_w_dmclimate_temps_stats[iii],gs0_d,gsf)] wue_w_wmclimate_temps_stats_smooth+=[gs_smooth(wue_w_wmclimate_temps_stats[iii],gs0_w,gsf)] nue_w_wmclimate_temps_stats_smooth+=[gs_smooth(nue_w_wmclimate_temps_stats[iii],gs0_w,gsf)] A_w_wmclimate_temps_stats_smooth+=[gs_smooth(A_w_wmclimate_temps_stats[iii],gs0_w,gsf)] nue_d_inter_vartemp_stats_smooth+=[gs_smooth(nue_d_inter_vartemp_stats[iii],0,364)] wue_d_inter_vartemp_stats_smooth+=[gs_smooth(wue_d_inter_vartemp_stats[iii],0,364)] A_d_inter_vartemp_stats_smooth+=[gs_smooth(A_d_inter_vartemp_stats[iii],0,364)] nue_w_inter_vartemp_stats_smooth+=[gs_smooth(nue_w_inter_vartemp_stats[iii],0,364)] wue_w_inter_vartemp_stats_smooth+=[gs_smooth(wue_w_inter_vartemp_stats[iii],0,364)] A_w_inter_vartemp_stats_smooth+=[gs_smooth(A_w_inter_vartemp_stats[iii],0,364)] nue_d_inter_varvwc_stats_smooth+=[gs_smooth(nue_d_inter_varvwc_stats[iii],0,364)] wue_d_inter_varvwc_stats_smooth+=[gs_smooth(wue_d_inter_varvwc_stats[iii],0,364)] A_d_inter_varvwc_stats_smooth+=[gs_smooth(A_d_inter_varvwc_stats[iii],0,364)] nue_w_inter_varvwc_stats_smooth+=[gs_smooth(nue_w_inter_varvwc_stats[iii],0,364)] wue_w_inter_varvwc_stats_smooth+=[gs_smooth(wue_w_inter_varvwc_stats[iii],0,364)] A_w_inter_varvwc_stats_smooth+=[gs_smooth(A_w_inter_varvwc_stats[iii],0,364)] nue_d_inter_varna_stats_smooth+=[gs_smooth(nue_d_inter_varna_stats[iii],gs0_m,gsf)] wue_d_inter_varna_stats_smooth+=[gs_smooth(wue_d_inter_varna_stats[iii],gs0_m,gsf)] 
A_d_inter_varna_stats_smooth+=[gs_smooth(A_d_inter_varna_stats[iii],gs0_m,gsf)] nue_w_inter_varna_stats_smooth+=[gs_smooth(nue_w_inter_varna_stats[iii],gs0_m,gsf)] wue_w_inter_varna_stats_smooth+=[gs_smooth(wue_w_inter_varna_stats[iii],gs0_m,gsf)] A_w_inter_varna_stats_smooth+=[gs_smooth(A_w_inter_varna_stats[iii],gs0_m,gsf)] wue_w_inter_mmclimate_stats_smooth+=[gs_smooth(wue_w_inter_mmclimate_stats[iii],gs0_m,gsf)] nue_w_inter_mmclimate_stats_smooth+=[gs_smooth(nue_w_inter_mmclimate_stats[iii],gs0_m,gsf)] A_w_inter_mmclimate_stats_smooth+=[gs_smooth(A_w_inter_mmclimate_stats[iii],gs0_m,gsf)] wue_d_inter_mmclimate_stats_smooth+=[gs_smooth(wue_d_inter_mmclimate_stats[iii],gs0_m,gsf)] nue_d_inter_mmclimate_stats_smooth+=[gs_smooth(nue_d_inter_mmclimate_stats[iii],gs0_m,gsf)] A_d_inter_mmclimate_stats_smooth+=[gs_smooth(A_d_inter_mmclimate_stats[iii],gs0_m,gsf)] wue_w_inter_dmclimate_stats_smooth+=[gs_smooth(wue_w_inter_dmclimate_stats[iii],gs0_d,gsf)] nue_w_inter_dmclimate_stats_smooth+=[gs_smooth(nue_w_inter_dmclimate_stats[iii],gs0_d,gsf)] A_w_inter_dmclimate_stats_smooth+=[gs_smooth(A_w_inter_dmclimate_stats[iii],gs0_d,gsf)] wue_d_inter_dmclimate_stats_smooth+=[gs_smooth(wue_d_inter_dmclimate_stats[iii],gs0_d,gsf)] nue_d_inter_dmclimate_stats_smooth+=[gs_smooth(nue_d_inter_dmclimate_stats[iii],gs0_d,gsf)] A_d_inter_dmclimate_stats_smooth+=[gs_smooth(A_d_inter_dmclimate_stats[iii],gs0_d,gsf)] wue_w_inter_wmclimate_stats_smooth+=[gs_smooth(wue_w_inter_wmclimate_stats[iii],gs0_w,gsf)] nue_w_inter_wmclimate_stats_smooth+=[gs_smooth(nue_w_inter_wmclimate_stats[iii],gs0_w,gsf)] A_w_inter_wmclimate_stats_smooth+=[gs_smooth(A_w_inter_wmclimate_stats[iii],gs0_w,gsf)] wue_d_inter_wmclimate_stats_smooth+=[gs_smooth(wue_d_inter_wmclimate_stats[iii],gs0_w,gsf)] nue_d_inter_wmclimate_stats_smooth+=[gs_smooth(nue_d_inter_wmclimate_stats[iii],gs0_w,gsf)] A_d_inter_wmclimate_stats_smooth+=[gs_smooth(A_d_inter_wmclimate_stats[iii],gs0_w,gsf)] 
###------------------------------------------------------###### #dry and wet meadow nue (high temps) #extended growing season scenario # # #dry meadow climate # ax1.plot(days, nue_d_dmclimate_dy_stats_smooth[0], 'r-',linewidth=3) ## ax3.plot(days, A_d_inter_dy_stats_smooth[2], 'r-',linewidth=3) ## ax3.fill_between(days, A_d_inter_stats_smooth[1], A_d_inter_stats_smooth[2],alpha=0.0,color='red') ## ax3.fill_between(days, A_d_inter_dy_stats_smooth[1], A_d_inter_dy_stats_smooth[2],alpha=0.3,color='red') # ax1.plot(days, nue_w_dmclimate_dy_stats_smooth[0], 'b-',linewidth=3) ## ax3.plot(days, A_w_inter_dy_stats_smooth[2], 'b-',linewidth=3) # ax1.plot(days, nue_d_inter_dmclimate_stats_smooth[0], 'r--',linewidth=3) ## ax3.plot(days, A_d_inter_stats_smooth[2], 'r--',linewidth=3) # ax1.plot(days, nue_w_inter_dmclimate_stats_smooth[0], 'b--',linewidth=3) ## ax3.plot(days, A_w_inter_stats_smooth[2], 'b--',linewidth=3) ## ax3.fill_between(days, A_w_inter_stats_smooth[1], A_w_inter_stats_smooth[2],alpha=0.0,color='blue') ## ax3.fill_between(days, A_w_inter_dy_stats_smooth[1], A_w_inter_dy_stats_smooth[2],alpha=0.3,color='blue') ## ax1.legend(loc="upper left",fontsize=20) # ax1.annotate('B', xy=get_axis_limits(ax1),fontsize=40) # ##Wet meadow climate # ax31.plot(days, nue_d_wmclimate_dy_stats_smooth[0], 'r-',linewidth=3) ## ax3.plot(days, A_d_inter_dy_stats_smooth[2], 'r-',linewidth=3) ## ax3.fill_between(days, A_d_inter_stats_smooth[1], A_d_inter_stats_smooth[2],alpha=0.0,color='red') ## ax3.fill_between(days, A_d_inter_dy_stats_smooth[1], A_d_inter_dy_stats_smooth[2],alpha=0.3,color='red') # ax31.plot(days, nue_w_wmclimate_dy_stats_smooth[0], 'b-',linewidth=3) ## ax3.plot(days, A_w_inter_dy_stats_smooth[2], 'b-',linewidth=3) # ax31.plot(days, nue_d_inter_wmclimate_stats_smooth[0], 'r--',linewidth=3) ## ax3.plot(days, A_d_inter_stats_smooth[2], 'r--',linewidth=3) # ax31.plot(days, nue_w_inter_wmclimate_stats_smooth[0], 'b--',linewidth=3) ## ax3.plot(days, 
A_w_inter_stats_smooth[2], 'b--',linewidth=3) ## ax3.fill_between(days, A_w_inter_stats_smooth[1], A_w_inter_stats_smooth[2],alpha=0.0,color='blue') ## ax3.fill_between(days, A_w_inter_dy_stats_smooth[1], A_w_inter_dy_stats_smooth[2],alpha=0.3,color='blue') ## ax31.legend(loc="upper left",fontsize=20) # ax31.annotate('B', xy=get_axis_limits(ax31),fontsize=40) # # # #longer growing season # #dry meadow climate # ax22.plot(days, nue_d_dmclimate_gsl_stats_smooth[0], 'r-',linewidth=3) ## ax24.plot(days, A_d_inter_dy_gsl_stats_smooth[2], 'r-',linewidth=3) ## ax24.fill_between(days, A_d_inter_stats_smooth[1], A_d_inter_stats_smooth[2],alpha=0.0,color='red') ## ax24.fill_between(days, A_d_inter_dy_gsl_stats_smooth[1], A_d_inter_dy_gsl_stats_smooth[2],alpha=0.3,color='red') # ax22.plot(days, nue_w_dmclimate_gsl_stats_smooth[0], 'b-',linewidth=3) ## ax24.plot(days, A_w_inter_dy_gsl_stats_smooth[2], 'b-',linewidth=3) # ax22.plot(days, nue_d_inter_dmclimate_stats_smooth[0], 'r--',linewidth=3) ## ax24.plot(days, A_d_inter_stats_smooth[2], 'r--',linewidth=3) # ax22.plot(days, nue_w_inter_dmclimate_stats_smooth[0], 'b--',linewidth=3) ## ax24.plot(days, A_w_inter_stats_smooth[2], 'b--',linewidth=3) ## ax24.fill_between(days, A_w_inter_stats_smooth[1], A_w_inter_stats_smooth[2],alpha=0.0,color='blue') ## ax24.fill_between(days, A_w_inter_dy_gsl_stats_smooth[1], A_w_inter_dy_gsl_stats_smooth[2],alpha=0.3,color='blue') ## ax22.legend(loc="upper left",fontsize=20) # ax22.annotate('B', xy=get_axis_limits(ax22),fontsize=40) # ##wet meadow climate # ax30.plot(days, nue_d_wmclimate_gsl_stats_smooth[0], 'r-',linewidth=3) ## ax24.plot(days, A_d_inter_dy_gsl_stats_smooth[2], 'r-',linewidth=3) ## ax24.fill_between(days, A_d_inter_stats_smooth[1], A_d_inter_stats_smooth[2],alpha=0.0,color='red') ## ax24.fill_between(days, A_d_inter_dy_gsl_stats_smooth[1], A_d_inter_dy_gsl_stats_smooth[2],alpha=0.3,color='red') # ax30.plot(days, nue_w_wmclimate_gsl_stats_smooth[0], 'b-',linewidth=3) ## 
ax24.plot(days, A_w_inter_dy_gsl_stats_smooth[2], 'b-',linewidth=3) # ax30.plot(days, nue_d_inter_wmclimate_stats_smooth[0], 'r--',linewidth=3) ## ax24.plot(days, A_d_inter_stats_smooth[2], 'r--',linewidth=3) # ax30.plot(days, nue_w_inter_wmclimate_stats_smooth[0], 'b--',linewidth=3) ## ax24.plot(days, A_w_inter_stats_smooth[2], 'b--',linewidth=3) ## ax24.fill_between(days, A_w_inter_stats_smooth[1], A_w_inter_stats_smooth[2],alpha=0.0,color='blue') ## ax24.fill_between(days, A_w_inter_dy_gsl_stats_smooth[1], A_w_inter_dy_gsl_stats_smooth[2],alpha=0.3,color='blue') ## ax30.legend(loc="upper left",fontsize=20) # ax30.annotate('B', xy=get_axis_limits(ax30),fontsize=40) # # #hotter temperatures # #dry meadow climate # ax2.plot(days, nue_d_dmclimate_temps_stats_smooth[0], 'r-',linewidth=3) ## ax24.plot(days, A_d_inter_dy_gsl_stats_smooth[2], 'r-',linewidth=3) ## ax24.fill_between(days, A_d_inter_stats_smooth[1],
<gh_stars>0 from math import pi, sin import typing as t from pyglet import gl from pyglet.graphics.shader import ShaderProgram from pyglet.image import AbstractImage, TextureArrayRegion from pyglet.math import Vec2 import pyday_night_funkin.constants as CNST from pyday_night_funkin.core.context import Context from pyday_night_funkin.core.graphics import PNFGroup import pyday_night_funkin.core.graphics.states as s from pyday_night_funkin.core.pnf_animation import AnimationController, PNFAnimation from pyday_night_funkin.core.scene_object import SceneObject from pyday_night_funkin.core.shaders import ShaderContainer from pyday_night_funkin.core.tweens import TWEEN_ATTR from pyday_night_funkin.utils import clamp if t.TYPE_CHECKING: from pyglet.graphics.shader import UniformBufferObject from pyday_night_funkin.types import Numeric EffectBound = t.TypeVar("EffectBound", bound="Effect") _PNF_SPRITE_VERTEX_SHADER_SOURCE = """ #version 450 in vec2 anim_offset; in vec2 frame_offset; in vec2 translate; in vec4 colors; in vec3 tex_coords; in vec2 scale; in vec2 position; in vec2 scroll_factor; in float rotation; out vec4 vertex_colors; out vec3 texture_coords; uniform WindowBlock {{ mat4 projection; mat4 view; }} window; layout (std140) uniform CameraAttrs {{ float zoom; vec2 position; vec2 GAME_DIMENSIONS; }} camera; mat4 m_trans_scale = mat4(1.0); mat4 m_rotation = mat4(1.0); mat4 m_camera_trans_scale = mat4(1.0); mat4 m_camera_pre_trans = mat4(1.0); void main() {{ m_trans_scale[3][0] = translate.x + anim_offset.x + frame_offset.x * scale.x; m_trans_scale[3][1] = translate.y + anim_offset.y + frame_offset.y * scale.y; m_trans_scale[0][0] = scale.x; m_trans_scale[1][1] = scale.y; m_rotation[0][0] = cos(-radians(rotation)); m_rotation[0][1] = sin(-radians(rotation)); m_rotation[1][0] = -sin(-radians(rotation)); m_rotation[1][1] = cos(-radians(rotation)); // Camera transform and zoom scale m_camera_trans_scale[3][0] = (camera.zoom * scroll_factor.x * -camera.position.x) + \\ 
(camera.GAME_DIMENSIONS.x / 2); m_camera_trans_scale[3][1] = (camera.zoom * scroll_factor.y * -camera.position.y) + \\ (camera.GAME_DIMENSIONS.y / 2); m_camera_trans_scale[0][0] = camera.zoom; m_camera_trans_scale[1][1] = camera.zoom; // Camera pre-scale-transform m_camera_pre_trans[3][0] = -camera.GAME_DIMENSIONS.x / 2; m_camera_pre_trans[3][1] = -camera.GAME_DIMENSIONS.y / 2; gl_Position = window.projection * window.view * m_camera_trans_scale * m_camera_pre_trans * m_trans_scale * m_rotation * vec4(position, 0, 1) ; vertex_colors = colors; texture_coords = tex_coords; }} """ _PNF_SPRITE_FRAGMENT_SHADER_SOURCE = """ #version 450 in vec4 vertex_colors; in vec3 texture_coords; out vec4 final_colors; uniform sampler2D sprite_texture; void main() {{ if (vertex_colors.a < {alpha_limit}) {{ discard; }} final_colors = {color_behavior}; }} """ class PNFSpriteVertexShader(): src = _PNF_SPRITE_VERTEX_SHADER_SOURCE @classmethod def generate(cls) -> str: return cls.src.format() class PNFSpriteFragmentShader(): src = _PNF_SPRITE_FRAGMENT_SHADER_SOURCE class COLOR: BLEND = "texture(sprite_texture, texture_coords.xy) * vertex_colors" SET = "vec4(vertex_colors.rgb, texture(sprite_texture, texture_coords.xy).a)" @classmethod def generate( cls, alpha_limit: float = 0.01, color_behavior: str = COLOR.BLEND, ) -> str: return cls.src.format( alpha_limit=alpha_limit, color_behavior=color_behavior, ) class Movement(): __slots__ = ("velocity", "acceleration") def __init__(self, velocity: Vec2, acceleration: Vec2) -> None: self.velocity = velocity self.acceleration = acceleration # Dumbed down case of code shamelessly stolen from https://github.com/HaxeFlixel/ # flixel/blob/e3c3b30f2f4dfb0486c4b8308d13f5a816d6e5ec/flixel/FlxObject.hx#L738 def update(self, dt: float) -> Vec2: acc_x, acc_y = self.acceleration vel_x, vel_y = self.velocity vel_delta = 0.5 * acc_x * dt vel_x += vel_delta posx_delta = vel_x * dt vel_x += vel_delta vel_delta = 0.5 * acc_y * dt vel_y += vel_delta posy_delta = 
vel_y * dt vel_y += vel_delta self.velocity = Vec2(vel_x, vel_y) return Vec2(posx_delta, posy_delta) class Effect(): """ "Abstract" effect class intertwined with the PNFSprite. """ def __init__( self, duration: float, on_complete: t.Optional[t.Callable[[], t.Any]] = None, ) -> None: if duration <= 0.0: raise ValueError("Duration may not be negative or zero!") self.on_complete = on_complete self.duration = duration self.cur_time = 0.0 def update(self, dt: float, sprite: "PNFSprite") -> None: raise NotImplementedError("Subclass this") def is_finished(self) -> bool: return self.cur_time >= self.duration class _Tween(Effect): def __init__( self, tween_func: t.Callable, attr_map: t.Dict[str, t.Tuple[t.Any, t.Any]], duration: float, on_complete: t.Optional[t.Callable[[], t.Any]] = None, ) -> None: super().__init__(duration, on_complete) self.tween_func = tween_func self.attr_map = attr_map def update(self, dt: float, sprite: "PNFSprite") -> None: self.cur_time += dt progress = self.tween_func(clamp(self.cur_time, 0, self.duration) / self.duration) for attr_name, (v_ini, v_diff) in self.attr_map.items(): setattr(sprite, attr_name, v_ini + v_diff*progress) # NOTE: Left here since i would need to replace call sites with some # ugly lambda s: setattr(s, "visibility", True) stuff; not really # worth it, see into it if you have time. class Flicker(Effect): """ Effect rapidly turning a sprite's visibility off and on. This is a special case of the more generic `Toggle` effect affecting only a sprite's visibility. 
""" def __init__( self, interval: float, start_visibility: bool, end_visibility: bool, duration: float, on_complete: t.Optional[t.Callable[[], t.Any]] = None, ) -> None: super().__init__(duration, on_complete) if interval <= 0.0: raise ValueError("Interval may not be negative or zero!") self.interval = interval self.end_visibility = end_visibility self._next_toggle = interval self._visible = start_visibility def update(self, dt: float, sprite: "PNFSprite") -> None: self.cur_time += dt if self.is_finished(): sprite.visible = self.end_visibility return if self.cur_time >= self._next_toggle: while self.cur_time >= self._next_toggle: self._next_toggle += self.interval self._visible = not self._visible sprite.visible = self._visible class Toggle(Effect): """ Periodically calls on/off callbacks on a sprite for a given duration. """ def __init__( self, interval: float, start_active: bool, end_active: bool, duration: float, on_toggle_on: t.Optional[t.Callable[["PNFSprite"], t.Any]] = None, on_toggle_off: t.Optional[t.Callable[["PNFSprite"], t.Any]] = None, on_complete: t.Optional[t.Callable[[], t.Any]] = None, ) -> None: super().__init__(duration, on_complete) if interval <= 0.0: raise ValueError("Interval may not be negative or zero!") self._cur_state = start_active self._invert = -1 if not start_active else 1 self.interval = pi/interval self.end_active = end_active self.on_toggle_on = on_toggle_on self.on_toggle_off = on_toggle_off def update(self, dt: float, sprite: "PNFSprite") -> None: self.cur_time += dt new_state = (sin(self.cur_time * self.interval) * self._invert) > 0 if self._cur_state == new_state: return self._cur_state = new_state if new_state: if self.on_toggle_on is not None: self.on_toggle_on(sprite) else: if self.on_toggle_off is not None: self.on_toggle_off(sprite) class PNFSprite(SceneObject): """ Pretty much *the* core scene object, the sprite! 
It can show images or animations, do all sort of transforms, have a shader as well as a camera on it and comes with effect support. """ _TWEEN_ATTR_NAME_MAP = { TWEEN_ATTR.X: "x", TWEEN_ATTR.Y: "y", TWEEN_ATTR.ROTATION: "rotation", TWEEN_ATTR.OPACITY: "opacity", TWEEN_ATTR.SCALE: "scale", TWEEN_ATTR.SCALE_X: "scale_x", TWEEN_ATTR.SCALE_Y: "scale_y", } shader_container = ShaderContainer( PNFSpriteVertexShader.generate(), PNFSpriteFragmentShader.generate(), ) def __init__( self, image: t.Optional[AbstractImage] = None, x: "Numeric" = 0, y: "Numeric" = 0, blend_src = gl.GL_SRC_ALPHA, blend_dest = gl.GL_ONE_MINUS_SRC_ALPHA, context: Context = None, usage: t.Literal["dynamic", "stream", "static"] = "dynamic", subpixel: bool = False, program: "ShaderProgram" = None, ) -> None: image = CNST.ERROR_TEXTURE if image is None else image self.animation = AnimationController() # NOTE: Copypaste of this exists at PNFSpriteContainer.__init__, # modify it when modifying this! self.movement: t.Optional[Movement] = None self.effects: t.List["EffectBound"] = [] self._x = x self._y = y self._batch = None self._vertex_list = None self._rotation = 0 self._opacity = 255 self._rgb = (255, 255, 255) self._scale = 1.0 self._scale_x = 1.0 self._scale_y = 1.0 self._scroll_factor = (1.0, 1.0) self._visible = True self._texture = eval("image.get_texture()") # stfu pylance if isinstance(image, TextureArrayRegion): raise NotImplementedError("Hey VSauce, Michael here. 
What is a TextureArrayRegion?") # program = sprite.get_default_array_shader() else: program = self.shader_container.get_program() self._usage = usage self._subpixel = subpixel self._blend_src = blend_src self._blend_dest = blend_dest self._context = Context( None if context is None else context.batch, None, None if context is None else context.camera, ) self._context.group = PNFGroup( parent = None if context is None else context.group, states = self._build_mutators(program, self._context.camera.ubo) ) # NOTE: Ugly, maybe come up with something better. # Group needs to be set afterwards as dummy cam ubo is needed in build_mutators. self._create_vertex_list() self.image = image def _build_mutators( self, program: "ShaderProgram", cam_ubo: "UniformBufferObject", ): return ( s.ProgramStateMutator(program), s.UBOBindingStateMutator(cam_ubo), s.TextureUnitStateMutator(gl.GL_TEXTURE0), s.TextureStateMutator(self._texture), s.EnableStateMutator(gl.GL_BLEND), s.BlendFuncStateMutator(self._blend_src, self._blend_dest), ) def _create_vertex_list(self): # 0- - - - -3 # |\D> ^ # A \ E # v <C\| # 1----B>---2 usage = self._usage self._vertex_list = self._context.batch.add_indexed( 4, gl.GL_TRIANGLES, self._context.group, [0, 1, 2, 0, 2, 3], "position2f/" + usage, ("anim_offset2f/" + usage, (0, 0) * 4), ("frame_offset2f/" + usage, (0, 0) * 4), ("colors4Bn/" + usage, (*self._rgb, int(self._opacity)) * 4), ("translate2f/" + usage, (self._x, self._y) * 4), ("scale2f/" + usage, (self._scale * self._scale_x, self._scale * self._scale_y) * 4), ("rotation1f/" + usage, (self._rotation,) * 4), ("scroll_factor2f/" + usage, self._scroll_factor * 4), ("tex_coords3f/" + usage, self._texture.tex_coords), ) self._update_position() def set_context(self, parent_context: "Context") -> None: """ This function actually doesn't set a context, it just modifies the existing one and takes all necessary steps for the sprite to be displayed in the new context. 
""" new_batch = parent_context.batch new_group = parent_context.group new_cam = parent_context.camera old_batch = self._context.batch old_group = self._context.group old_cam = self._context.camera change_batch = new_batch != old_batch rebuild_group = new_cam != old_cam or new_group != old_group.parent if change_batch: self._context.batch = new_batch # if new_batch is not None and old_batch is not None: # self._context.batch = new_batch # else: # # TBH I forgot what these None checks were about. # # If anything is None in here, it will just crash horribly, # # but that doesn't happen when running soooo good enough! # self._vertex_list.delete() # self._context.batch = new_batch # self._create_vertex_list() if rebuild_group: self._context.camera = new_cam self._context.group = PNFGroup( parent = new_group, states = self._build_mutators(old_group.program, new_cam.ubo), ) if change_batch or rebuild_group: old_batch.migrate(self._vertex_list, self._context.group, self._context.batch) def screen_center(self, screen_dims: Vec2, x: bool = True, y: bool = True) -> None: """ Sets the sprite's world position so that it is centered on screen. (Ignoring camera and scroll factors) `x` and `y` can be set to false to only center the sprite along one of the axes. """ if x: self.x = (screen_dims[0] - self.width) // 2 if y: self.y = (screen_dims[1] - self.height) // 2 def get_midpoint(self) -> Vec2: """ Returns the middle point of this sprite, based on its current texture and world position. """ return Vec2( self._x + self.signed_width * 0.5, self._y + self.signed_height * 0.5, ) def get_screen_position(self) -> Vec2: """ Returns the screen position the sprite's origin is displayed at. Note that this may still be inaccurate for shaders and rotation. 
""" cam = self._context.camera r = Vec2(self._x, self._y) return r - Vec2(cam.x * cam.zoom, cam.y * cam.zoom) def start_tween( self, tween_func: t.Callable[[float], float], attributes: t.Dict[TWEEN_ATTR, t.Any], duration: float, on_complete: t.Callable[[], t.Any] = None, ) -> _Tween: """ # TODO write some very cool doc """ # 0: initial value; 1: difference attr_map = {} for attribute, target_value in attributes.items(): attribute_name = self._TWEEN_ATTR_NAME_MAP[attribute] initial_value = getattr(self, attribute_name) attr_map[attribute_name] = (initial_value, target_value - initial_value) t = _Tween( tween_func, duration = duration, attr_map = attr_map, on_complete = on_complete, ) self.effects.append(t) return t def start_flicker( self, duration: float, interval: float, end_visibility: bool = True, on_complete: t.Callable[[], t.Any] = None, ) -> Flicker: f = Flicker( interval = interval, start_visibility = self.visible, end_visibility = end_visibility, duration = duration, on_complete = on_complete, ) self.effects.append(f) return f def start_toggle( self, duration: float, interval: float, start_status: bool = True, end_status: bool = True, on_toggle_on: t.Optional[t.Callable[["PNFSprite"], t.Any]] = None, on_toggle_off: t.Optional[t.Callable[["PNFSprite"], t.Any]] = None, on_complete: t.Optional[t.Callable[[], t.Any]] = None, ) -> Toggle: t = Toggle( interval, start_status, end_status, duration, on_toggle_on, on_toggle_off, on_complete ) self.effects.append(t) return t def remove_effect(self, *effects: Effect, fail_loudly: bool = False) -> None: """ Removes effects from the sprite. Supply nothing to clear all effects. This will abruptly stop all effects without calling their on_complete callbacks. Supply any amount of effects to have only these removed. If `fail_loudly` is not set to `True`, any errors on removing will be suppressed, otherwise `ValueError` is raised. """ if not effects: self.effects.clear() return try: for
# <gh_stars>10-100  (dataset scrape artifact; preserved as a comment)
# -*- coding: utf-8 -*-
"""
The module contains functions to evaluate the optical depth,
to convert this to observed transmission and to convolve
the observed spectrum with the instrumental profile.
"""
__author__ = '<NAME>'

import re

import numpy as np
from scipy.signal import fftconvolve

# `gaussian` lives in `scipy.signal.windows` and was removed from the
# `scipy.signal` namespace in SciPy 1.13; support both locations.
try:
    from scipy.signal.windows import gaussian
except ImportError:  # very old SciPy
    from scipy.signal import gaussian

# numba is a pure speed-up; fall back to a no-op decorator so the module
# stays importable without it (behavior is identical, just slower).
try:
    from numba import jit
except ImportError:
    def jit(*args, **kwargs):
        def _wrap(func):
            return func
        return _wrap


# Regular Expression to match redshift parameter names:
# ex: z0_FeII, z0_H2J0, z3_HI, z15_TiII
z_matcher = re.compile('z[0-9]+_[A-Z][A-Z]?[0-9]?[a-z]?[I-Z]+[0-9]?[a-z]?')


# ==== VOIGT PROFILE ===============
def H(a, x):
    """Voigt Profile Approximation from <NAME> (2006, 2007)."""
    P = x**2
    H0 = np.exp(-x**2)
    Q = 1.5/x**2
    return H0 - a/np.sqrt(np.pi)/P * (H0*H0*(4.*P*P + 7.*P + 4. + Q) - Q - 1)


def Voigt(wl, l0, f, N, b, gam, z=0):
    """
    Calculate the optical depth Voigt profile.

    Parameters
    ----------
    wl : array_like, shape (N)
        Wavelength grid in Angstroms at which to evaluate the optical depth.

    l0 : float
        Rest frame transition wavelength in Angstroms.

    f : float
        Oscillator strength.

    N : float
        Column density in units of cm^-2.

    b : float
        Velocity width of the Voigt profile in cm/s.

    gam : float
        Radiation damping constant, or Einstein constant (A_ul)

    z : float
        The redshift of the observed wavelength grid `l`.

    Returns
    -------
    tau : array_like, shape (N)
        Optical depth array evaluated at the input grid wavelengths `l`.
    """
    # ==== PARAMETERS ==================
    c = 2.99792e10        # cm/s
    m_e = 9.1094e-28       # g
    e = 4.8032e-10        # cgs units
    # ==================================

    # Calculate Profile
    C_a = np.sqrt(np.pi)*e**2*f*l0*1.e-8/m_e/c/b
    a = l0*1.e-8*gam/(4.*np.pi*b)

    # dl_D = b/c*l0
    wl = wl/(z+1.)
    # x = (wl - l0)/dl_D + 0.000001
    x = (c / b) * (1. - l0/wl)

    tau = np.float64(C_a) * N * H(a, x)
    return tau


@jit(nopython=True)
def convolve_numba(P, kernel):
    """
    Define convolution function for a wavelength dependent kernel.

    Parameters
    ----------
    P : array_like, shape (N)
        Intrinsic line profile.

    kernel : np.array, shape (N, M)
        Each row of the `kernel` corresponds to the wavelength dependent
        line-spread function (LSF) evaluated at each pixel of the input
        profile `P`. Each LSF must be normalized!

    Returns
    -------
    P_con : np.array, shape (N)
        Resulting profile after performing convolution with `kernel`.

    Notes
    -----
    This function is decorated by the `jit` decorator from `numba`_
    in order to speed up the calculation.

    .. _numba: http://numba.pydata.org/
    """
    N = kernel.shape[1]//2
    # Pad the profile with ones so edge pixels see a flat continuum.
    pad = np.ones(N)
    P_pad = np.concatenate((pad, P, pad))
    P_con = np.zeros_like(P)
    for i, lsf_i in enumerate(kernel):
        P_con[i] = np.sum(P_pad[i:i+2*N+1] * lsf_i)
    return P_con


def evaluate_continuum(x, pars, reg_num):
    """
    Evaluate the continuum model using Chebyshev polynomials.
    All regions are fitted with the same order of polynomials.

    Parameters
    ----------
    x : array_like, shape (N)
        Input wavelength grid in Ångstrøm.

    pars : dict(lmfit.Parameters_)
        An instance of lmfit.Parameters_ containing the Chebyshev
        coefficients for each region.

    reg_num : int
        The region number, i.e., the index of the region in the list
        :attr:`VoigtFit.DataSet.regions`.

    Returns
    -------
    cont_model : array_like, shape (N)
        The continuum Chebyshev polynomial evaluated at the input
        wavelengths `x`.
    """
    cheb_parnames = list()
    p_cont = list()
    # Find Chebyshev parameters for this region:
    # They are named like 'R0_cheb_p0, R0_cheb_p1, R1_cheb_p0, etc...'
for parname in pars.keys(): if 'R%i_cheb' % reg_num in parname: cheb_parnames.append(parname) # This should be calculated at the point of generating # the parameters, since this is a fixed data structure # Sort the names, to arange the coefficients right: cheb_parnames = sorted(cheb_parnames, key=lambda x: int(x.split('_')[-1].replace('p', ''))) for parname in cheb_parnames: p_cont.append(pars[parname].value) # Calculate Chebyshev polynomium in x-range: cont_model = np.polynomial.Chebyshev(p_cont, domain=(x.min(), x.max())) return cont_model(x) def evaluate_profile(x, pars, lines, kernel, z_sys=None, sampling=3, kernel_nsub=1): """ Evaluate the observed Voigt profile. The calculated optical depth, `tau`, is converted to observed transmission, `f`: .. math:: f = e^{-\\tau} The observed transmission is subsequently convolved with the instrumental broadening profile assumed to be Gaussian with a full-width at half maximum of res. The resolving power is assumed to be constant in velocity space. Parameters ---------- x : array_like, shape (N) Wavelength array in Ångstrøm on which to evaluate the profile. pars : dict(lmfit.Parameters_) An instance of lmfit.Parameters_ containing the line parameters. lines : list(:class:`Line <dataset.Line>`) List of lines to evaluate. Should be a list of :class:`Line <dataset.Line>` objects. kernel : np.array, shape (N, M) or float The convolution kernel for each wavelength pixel. If an array is given, each row of the array must specify the line-spread function (LSF) at the given wavelength pixel. The LSF must be normalized! If a float is given, the resolution FWHM is given in km/s (c/R). In this case the spectral resolution is assumed to be constant in velocity space. z_sys : float or None The systemic redshift, used to determine an effective wavelength range within which to evaluate the profile. This is handy when fitting very large regions, such as HI and metal lines together. 
sampling : integer [default = 3] The subsampling factor used for defining the input logarithmically space wavelength grid. The number of pixels in the evaluation will be sampling * N, where N is the number of input pixels. The final profile will be interpolated back onto the original wavelength grid defined by `x`. kernel_nsub : integer Kernel subsampling factor relative to the data. This is only used if the resolution is given as a LSF file. Returns ------- profile_obs : array_like, shape (N) Observed line profile convolved with the instrument profile. """ if isinstance(kernel, float): # Create logarithmically binned grid: dx = np.mean(np.diff(x)) xmin = np.log10(x.min() - 50*dx) xmax = np.log10(x.max() + 50*dx) N = int(sampling * len(x)) profile_wl = np.logspace(xmin, xmax, N) # Calculate actual pixel size in km/s: pxs = np.diff(profile_wl)[0] / profile_wl[0] * 299792.458 # Set Gaussian kernel width: kernel = kernel / pxs / 2.35482 elif isinstance(kernel, np.ndarray): N = int(kernel_nsub * len(x)) assert kernel.shape[0] == N # evaluate on the input grid subsampled by `nsub`: if kernel_nsub > 1: profile_wl = np.linspace(x.min(), x.max(), N) else: profile_wl = x.copy() else: err_msg = "Invalid type of `kernel`: %r" % type(kernel) raise TypeError(err_msg) tau = evaluate_optical_depth(profile_wl, pars, lines, z_sys=z_sys) profile = np.exp(-tau) if isinstance(kernel, float): LSF = gaussian(10*int(kernel) + 1, kernel) LSF = LSF/LSF.sum() profile_broad = fftconvolve(profile, LSF, 'same') # Interpolate onto the data grid: profile_obs = np.interp(x, profile_wl, profile_broad) else: profile_broad = convolve_numba(profile, kernel) if kernel_nsub > 1: # Interpolate onto the data grid: profile_obs = np.interp(x, profile_wl, profile_broad) else: profile_obs = profile_broad return profile_obs def evaluate_optical_depth(profile_wl, pars, lines, z_sys=None): """ Evaluate optical depth based on the component parameters in `pars`. 
Parameters ---------- profile_wl : array_like, shape (N) Wavelength array in Ångstrøm on which to evaluate the profile. pars : dict(lmfit.Parameters_) An instance of lmfit.Parameters_ containing the line parameters. lines : list(:class:`Line <dataset.Line>`) List of lines to evaluate. Should be a list of :class:`Line <dataset.Line>` objects. z_sys : float or None The systemic redshift, used to determine an effective wavelength range within which to evaluate the profile. This is handy when fitting very large regions, such as HI and metal lines together. Returns ------- tau : array_like, shape (N) The resulting optical depth of all `lines` in the wavelength region. """ tau = np.zeros_like(profile_wl) if z_sys is not None: # Determine range in which to evaluate the profile: max_logN = max([val.value for key, val in pars.items() if 'logN' in key]) if max_logN > 19.0: velspan = 20000.*(1. + 1.0*(max_logN-19.)) else: velspan = 20000. # Determine number of components for each ion: components_per_ion = {} for line in lines: if line.active: l0, f, gam = line.get_properties() ion = line.ion z_pars = [] for parname in pars.keys(): parts = parname.split('_') if len(parts) == 2: pid, p_ion = parts if 'z' in pid and p_ion == ion: z_pars.append(parname) components_per_ion[ion] = len(z_pars) for line in lines: if line.active: l0, f, gam = line.get_properties() ion = line.ion n_comp = components_per_ion[ion] if z_sys is not None: l_center = l0*(z_sys + 1.) vel = (profile_wl - l_center)/l_center*299792.458 span = (vel >= -velspan)*(vel <= velspan) else: span = np.ones_like(profile_wl, dtype=bool) ion = ion.replace('*', 'x') for n in
<gh_stars>0 import copy import random import sys from operator import sub, add import gym import numpy as np import math import warnings from od_mstar3.col_set_addition import OutOfTimeError, NoSolutionError from od_mstar3 import od_mstar # from GroupLock import Lock from matplotlib.colors import * from gym.envs.classic_control import rendering import imageio from gym import spaces from parameters import * def make_gif(images, fname): gif = imageio.mimwrite(fname, images, subrectangles=True) print("(make_gif)wrote gif") return gif def opposite_actions(action, isDiagonal=False): if isDiagonal: checking_table = {0: -1, 1: 3, 2: 4, 3: 1, 4: 2} raise NotImplemented else: checking_table = {0: -1, 1: 3, 2: 4, 3: 1, 4: 2} return checking_table[action] def action2dir(action): checking_table = {0: (0, 0), 1: (0, 1), 2: (1, 0), 3: (0, -1), 4: (-1, 0)} return checking_table[action] def dir2action(direction): checking_table = {(0, 0): 0, (0, 1): 1, (1, 0): 2, (0, -1): 3, (-1, 0): 4} return checking_table[direction] def tuple_plus(a, b): """ a + b """ return tuple(map(add, a, b)) def tuple_minus(a, b): """ a - b """ return tuple(map(sub, a, b)) def _heap(ls, max_length): while True: if len(ls) > max_length: ls.pop(0) else: return ls def get_key(dict, value): return [k for k, v in dict.items() if v == value] def getAstarDistanceMap(map: np.array, start: tuple, goal: tuple, isDiagonal: bool = False): """ returns a numpy array of same dims as map with the distance to the goal from each coord :param map: a n by m np array, where -1 denotes obstacle :param start: start_position :param goal: goal_position :return: optimal distance map """ def lowestF(fScore, openSet): # find entry in openSet with lowest fScore assert (len(openSet) > 0) minF = 2 ** 31 - 1 minNode = None for (i, j) in openSet: if (i, j) not in fScore: continue if fScore[(i, j)] < minF: minF = fScore[(i, j)] minNode = (i, j) return minNode def getNeighbors(node): # return set of neighbors to the given node n_moves = 9 
if isDiagonal else 5 neighbors = set() for move in range(1, n_moves): # we dont want to include 0 or it will include itself direction = action2dir(move) dx = direction[0] dy = direction[1] ax = node[0] ay = node[1] if (ax + dx >= map.shape[0] or ax + dx < 0 or ay + dy >= map.shape[1] or ay + dy < 0): # out of bounds continue if map[ax + dx, ay + dy] == -1: # collide with static obstacle continue neighbors.add((ax + dx, ay + dy)) return neighbors # NOTE THAT WE REVERSE THE DIRECTION OF SEARCH SO THAT THE GSCORE WILL BE DISTANCE TO GOAL start, goal = goal, start start, goal = tuple(start), tuple(goal) # The set of nodes already evaluated closedSet = set() # The set of currently discovered nodes that are not evaluated yet. # Initially, only the start node is known. openSet = set() openSet.add(start) # For each node, which node it can most efficiently be reached from. # If a node can be reached from many nodes, cameFrom will eventually contain the # most efficient previous step. cameFrom = dict() # For each node, the cost of getting from the start node to that node. gScore = dict() # default value infinity # The cost of going from start to start is zero. gScore[start] = 0 # For each node, the total cost of getting from the start node to the goal # by passing by that node. That value is partly known, partly heuristic. fScore = dict() # default infinity # our heuristic is euclidean distance to goal heuristic_cost_estimate = lambda x, y: math.hypot(x[0] - y[0], x[1] - y[1]) # For the first node, that value is completely heuristic. fScore[start] = heuristic_cost_estimate(start, goal) while len(openSet) != 0: # current = the node in openSet having the lowest fScore value current = lowestF(fScore, openSet) openSet.remove(current) closedSet.add(current) for neighbor in getNeighbors(current): if neighbor in closedSet: continue # Ignore the neighbor which is already evaluated. 
if neighbor not in openSet: # Discover a new node openSet.add(neighbor) # The distance from start to a neighbor # in our case the distance between is always 1 tentative_gScore = gScore[current] + 1 if tentative_gScore >= gScore.get(neighbor, 2 ** 31 - 1): continue # This is not a better path. # This path is the best until now. Record it! cameFrom[neighbor] = current gScore[neighbor] = tentative_gScore fScore[neighbor] = gScore[neighbor] + heuristic_cost_estimate(neighbor, goal) # parse through the gScores Astar_map = map.copy() for (i, j) in gScore: Astar_map[i, j] = gScore[i, j] return Astar_map class Agent: """ The agent object that contains agent's position, direction dict and position dict, currently only supporting 4-connected region. self.distance_map is None here. Assign values in upper class. ########### WARNING: direction_history[i] means the action taking from i-1 step, resulting in the state of step i, such that len(direction_history) == len(position_history) ########### """ def __init__(self, isDiagonal=False): self._path_count = -1 self.IsDiagonal = isDiagonal self.freeze = 0 self.position, self.position_history, self.ID, self.direction, self.direction_history, \ self.action_history, self.goal_pos, self.distanceMap, self.dones, self.status, self.next_goal, self.next_distanceMap \ = None, [], None, None, [(None, None)], [(None, None)], None, None, 0, None, None, None def reset(self): self._path_count = -1 self.freeze = 0 self.position, self.position_history, self.ID, self.direction, self.direction_history, \ self.action_history, self.goal_pos, self.distanceMap, self.dones, self.status, self.next_goal, self.next_distanceMap \ = None, [], None, None, [(None, None)], [(None, None)], None, None, 0, None, None, None def move(self, pos, status=None): if pos is None: pos = self.position if self.position is not None: assert pos in [self.position, tuple_plus(self.position, (0, 1)), tuple_plus(self.position, (0, -1)), tuple_plus(self.position, (1, 0)), 
tuple_plus(self.position, (-1, 0)), ], \ "only 1 step 1 cell allowed. Previous pos:" + str(self.position) self.add_history(pos, status) def add_history(self, position, status): assert len(position) == 2 self.status = status self._path_count += 1 self.position = tuple(position) if self._path_count != 0: direction = tuple_minus(position, self.position_history[-1]) action = dir2action(direction) assert action in list(range(4 + 1)), \ "direction not in actionDir, something going wrong" self.direction_history.append(direction) self.action_history.append(action) self.position_history.append(tuple(position)) self.position_history = _heap(self.position_history, 30) self.direction_history = _heap(self.direction_history, 30) self.action_history = _heap(self.action_history, 30) class World: """ Include: basic world generation rules, blank map generation and collision checking. reset_world: Do not add action pruning, reward structure or any other routine for training in this class. Pls add in upper class MAPFEnv """ def __init__(self, map_generator, num_agents, isDiagonal=False): self.num_agents = num_agents self.manual_world = False self.manual_goal = False self.goal_generate_distance = 2 self.map_generator = map_generator self.isDiagonal = isDiagonal self.agents_init_pos, self.goals_init_pos = None, None self.reset_world() self.init_agents_and_goals() def reset_world(self): """ generate/re-generate a world map, and compute its corridor map """ # pick up agents from manual map def scan_for_agents(state_map): agents = {} for i in range(state_map.shape[0]): for j in range(state_map.shape[1]): if state_map[i, j] > 0: agentID = state_map[i, j] agents.update({agentID: (i, j)}) return agents self.state, self.goals_map = self.map_generator() # detect manual agents if (self.state > 0).any(): self.manual_world = True self.agents_init_pos = scan_for_agents(self.state) if self.num_agents is not None and self.num_agents != len(self.agents_init_pos.keys()): warnings.warn("num_agent does 
not match the actual agent number in manual map! " "num_agent has been set to be consistent with manual map.") self.num_agents = len(self.agents_init_pos.keys()) self.agents = {i: copy.deepcopy(Agent()) for i in range(1, self.num_agents + 1)} # if not manual agents else: assert self.num_agents is not None self.agents = {i: copy.deepcopy(Agent()) for i in range(1, self.num_agents + 1)} # detect manual goals if self.goals_map is not None: self.manual_goal = True self.goals_init_pos = scan_for_agents(self.goals_map) if self.manual_goal else None # if not manual goals else: self.goals_map = np.zeros([self.state.shape[0], self.state.shape[1]]) # self.corridor_map = {} # self.restrict_init_corridor = True # self.visited = [] # self.corridors = {} # self.get_corridors() # Communicaiton channel self.state_comms = np.zeros([self.state.shape[0], self.state.shape[1]], dtype=float) def reset_comms(self): # clear the message in the channel self.state_comms = np.zeros([self.state.shape[0], self.state.shape[1]], dtype=float) def reset_agent(self): """ remove all the agents (with their travel history) and goals in the env, rebase the env into a blank one """ self.agents = {i: copy.deepcopy(Agent()) for i in range(1, self.num_agents + 1)} self.state[self.state > 0] = 0 # remove agents in the map # def get_corridors(self): # """ # in corridor_map , output = list: # list[0] : if In corridor, corridor id ,
<filename>runtime/tests/test_webservice.py import warnings from copy import deepcopy # the following line in hetdesrun.service.webservice causes a DeprecationWarning concerning # imp module usage: # from fastapi import FastAPI # Therefore we ignore such warnings here warnings.filterwarnings("ignore", message="the imp module is deprecated") from starlette.testclient import TestClient from hetdesrun.service.webservice import app from hetdesrun.models.run import ConfigurationInput, ExecutionEngine from hetdesrun.runtime.context import execution_context from hetdesrun.utils import get_uuid_from_seed client = TestClient(app) def test_swagger_ui_available(): response = client.get("/docs") assert response.status_code == 200 assert "swagger-ui" in response.text.lower() def test_access_api_endpoint(): response = client.post( "/codegen", json={ "inputs": [], "outputs": [], "code": "", "function_name": "main", "name": "Testname", "description": "Test Descr.", "category": "Test category", }, ) assert response.status_code == 200 assert "code" in response.json().keys() base_workflow_json = { "code_modules": [ { # ordinary function entry point "uuid": str(get_uuid_from_seed("my_code_module")), "code": 'from hetdesrun.component.registration import register\nfrom hetdesrun.datatypes import DataType\nimport logging\ntest_logger = logging.getLogger(__name__)\n# add your own imports here\n\n\n# ***** DO NOT EDIT LINES BELOW *****\n# These lines may be overwritten if input/output changes.\n@register(\n inputs={"x": DataType.Float, "y": DataType.Float}, outputs={"z": DataType.Float}\n)\ndef main(*, x, y):\n """entrypoint function for this component"""\n test_logger.info("TEST in component function " + __name__)\n # print(1 / 0)\n # ***** DO NOT EDIT LINES ABOVE *****\n # write your function code here.\n pass\n return {"z": x+y}', }, { # async def entrypoint "uuid": str(get_uuid_from_seed("const_giver_module")), "code": 'from hetdesrun.component.registration import register\nfrom 
hetdesrun.datatypes import DataType\nfrom hetdesrun import logger\n# add your own imports here\n\n\n# ***** DO NOT EDIT LINES BELOW *****\n# These lines may be overwritten if input/output changes.\n@register(\n inputs={}, outputs={"c": DataType.Float}\n)\nasync def main():\n """entrypoint function for this component"""\n logger.info("TEST")\n # ***** DO NOT EDIT LINES ABOVE *****\n # write your function code here.\n pass\n return {"c": 2.0}', }, ], "components": [ { "uuid": str(get_uuid_from_seed("my_component")), "inputs": [ { "name": "x", "type": "FLOAT", "id": str(get_uuid_from_seed("x_in_my_component")), }, { "name": "y", "type": "FLOAT", "id": str(get_uuid_from_seed("y_in_my_component")), }, ], "outputs": [ { "name": "z", "type": "FLOAT", "id": str(get_uuid_from_seed("z_in_my_component")), } ], "code_module_uuid": str(get_uuid_from_seed("my_code_module")), "function_name": "main", }, { "uuid": str(get_uuid_from_seed("my_const_giver")), "inputs": [], "outputs": [ { "name": "c", "type": "FLOAT", "id": str(get_uuid_from_seed("c_in_my_const_giver")), } ], "code_module_uuid": str(get_uuid_from_seed("const_giver_module")), "function_name": "main", }, ], "workflow": { "id": str(get_uuid_from_seed("my_workflow")), "connections": [ { "input_in_workflow_id": str(get_uuid_from_seed("1001")), "input_name": "c", "input_id": str(get_uuid_from_seed("input_c_in_1001")), "output_in_workflow_id": str(get_uuid_from_seed("1000")), "output_name": "x", "output_id": str(get_uuid_from_seed("output_c_in_1000")), }, { "input_in_workflow_id": str(get_uuid_from_seed("1001")), "input_name": "c", "input_id": str(get_uuid_from_seed("input_c_in_1001")), "output_in_workflow_id": str(get_uuid_from_seed("1000")), "output_name": "y", "output_id": str(get_uuid_from_seed("output_y_in_1000")), }, ], "inputs": [], "outputs": [ { "name": "z", "id": str(get_uuid_from_seed("output_z_in_1000")), "type": "FLOAT", "name_in_subnode": "z", "id_of_sub_node": str(get_uuid_from_seed("1000")), } ], "sub_nodes": 
[ { "id": str(get_uuid_from_seed("1000")), "component_uuid": str(get_uuid_from_seed("my_component")), }, { "id": str(get_uuid_from_seed("1001")), "component_uuid": str(get_uuid_from_seed("my_const_giver")), }, ], }, "configuration": { "name": "string", "engine": "plain", "return_individual_node_results": True, }, "workflow_wiring": { "input_wirings": [], "output_wirings": [ { "workflow_output_name": "z", "adapter_id": 1, "ref_id": "TEST-ID", } ], }, } def run_workflow_with_client(workflow_json): response = client.post("/runtime", json=workflow_json) return response.status_code, response.json() def test_running_workflow(): status_code, output = run_workflow_with_client(base_workflow_json.copy()) assert status_code == 200 assert output["result"] == "ok" node_results = output["node_results"] assert "2.0" in node_results assert "4.0" in node_results series_input_workflow_json = { "code_modules": [ { "uuid": str(get_uuid_from_seed("my_code_module_series")), "code": 'from hetdesrun.component.registration import register\nfrom hetdesrun.datatypes import DataType\nimport logging\ntest_logger = logging.getLogger(__name__)\n# add your own imports here\n\n\n# ***** DO NOT EDIT LINES BELOW *****\n# These lines may be overwritten if input/output changes.\n@register(\n inputs={"x": DataType.Series, "y": DataType.Float}, outputs={"z": DataType.DataFrame}\n)\ndef main(*, x, y):\n """entrypoint function for this component"""\n test_logger.info("TEST in component function " + __name__)\n # print(1 / 0)\n # ***** DO NOT EDIT LINES ABOVE *****\n # write your function code here.\n pass\n return {"z": x.to_frame() + y}', }, { # async def entrypoint "uuid": str(get_uuid_from_seed("const_giver_module")), "code": 'from hetdesrun.component.registration import register\nfrom hetdesrun.datatypes import DataType\nfrom hetdesrun import logger\n# add your own imports here\n\n\n# ***** DO NOT EDIT LINES BELOW *****\n# These lines may be overwritten if input/output changes.\n@register(\n 
inputs={}, outputs={"c": DataType.Float}\n)\nasync def main():\n """entrypoint function for this component"""\n logger.info("TEST")\n # ***** DO NOT EDIT LINES ABOVE *****\n # write your function code here.\n pass\n return {"c": 2.0}', }, ], "components": [ { "uuid": str(get_uuid_from_seed("my_component")), "inputs": [ { "name": "x", "type": "SERIES", "id": str(get_uuid_from_seed("x_in_my_component")), }, { "name": "y", "type": "FLOAT", "id": str(get_uuid_from_seed("y_in_my_component")), }, ], "outputs": [ { "name": "z", "type": "DATAFRAME", "id": str(get_uuid_from_seed("z_in_my_component")), } ], "code_module_uuid": str(get_uuid_from_seed("my_code_module_series")), "function_name": "main", }, { "uuid": str(get_uuid_from_seed("my_const_giver")), "inputs": [], "outputs": [ { "name": "c", "type": "FLOAT", "id": str(get_uuid_from_seed("c_in_my_const_giver")), } ], "code_module_uuid": str(get_uuid_from_seed("const_giver_module")), "function_name": "main", }, ], "workflow": { "id": str(get_uuid_from_seed("my_workflow")), "connections": [ { "input_in_workflow_id": str(get_uuid_from_seed("1001")), "input_name": "c", "input_id": str(get_uuid_from_seed("input_c_in_1001")), "output_in_workflow_id": str(get_uuid_from_seed("1000")), "output_name": "y", "output_id": str(get_uuid_from_seed("output_y_in_1000")), } ], "inputs": [ { "name": "x", "id": str(get_uuid_from_seed("workflow_input_to_x_in_my_component")), "type": "SERIES", "name_in_subnode": "x", "id_of_sub_node": str(get_uuid_from_seed("1000")), } ], "outputs": [ { "name": "z", "id": str(get_uuid_from_seed("output_z_in_1000")), "type": "DATAFRAME", "name_in_subnode": "z", "id_of_sub_node": str(get_uuid_from_seed("1000")), } ], "sub_nodes": [ { "id": str(get_uuid_from_seed("1000")), "component_uuid": str(get_uuid_from_seed("my_component")), }, { "id": str(get_uuid_from_seed("1001")), "component_uuid": str(get_uuid_from_seed("my_const_giver")), }, ], }, "configuration": {"name": "string", "engine": "plain"}, 
"workflow_wiring": { "input_wirings": [ { "workflow_input_name": "x", "adapter_id": 1, "ref_id": "TEST-ID", "filters": {"value": [1.0, 2.0, 3.5]}, } ], "output_wirings": [ { "workflow_output_name": "z", "adapter_id": 1, "ref_id": "TEST-ID", } ], }, } def test_workflow_with_series_input_and_dataframe_output(): status_code, output = run_workflow_with_client(series_input_workflow_json.copy()) assert status_code == 200 assert output["result"] == "ok" assert output["output_results_by_output_name"]["z"]["0"] == { "0": 3.0, "1": 4.0, "2": 5.5, } single_node_input_workflow_json = { "code_modules": [ { "uuid": str(get_uuid_from_seed("single_node_code")), "code": ( '''\ from hetdesrun.component.registration import register from hetdesrun.datatypes import DataType import logging test_logger = logging.getLogger(__name__) # add your own imports here # ***** DO NOT EDIT LINES BELOW ***** # These lines may be overwritten if input/output changes. @register( inputs={"x": DataType.DataFrame, "y": DataType.Float}, outputs={"z": DataType.Series} ) def main(*, x, y): """entrypoint function for this component""" test_logger.info("TEST in component funct " + __name__) # print(1 / # ***** NOT EDIT LINES ABOVE ***** # wriyour function code here. 
pass return {"z": x.squeeze() + y} ''' ), } ], "components": [ { "uuid": str(get_uuid_from_seed("my_component")), "inputs": [ { "name": "x", "type": "DATAFRAME", "id": str(get_uuid_from_seed("x_in_my_component")), }, { "name": "y", "type": "FLOAT", "id": str(get_uuid_from_seed("y_in_my_component")), }, ], "outputs": [ { "name": "z", "type": "SERIES", "id": str(get_uuid_from_seed("z_in_my_component")), } ], "code_module_uuid": str(get_uuid_from_seed("single_node_code")), "function_name": "main", } ], "workflow": { "id": str(get_uuid_from_seed("my_workflow")), "connections": [], "inputs": [ { "name": "x", "id": str(get_uuid_from_seed("workflow_input_to_x_in_my_component")), "type": "DATAFRAME", "name_in_subnode": "x", "id_of_sub_node": str(get_uuid_from_seed("1000")), }, { "name": "y", "id": str(get_uuid_from_seed("workflow_input_to_y_in_my_component")), "type": "FLOAT", "name_in_subnode": "y", "id_of_sub_node": str(get_uuid_from_seed("1000")), }, ], "outputs": [ { "name": "z", "id": str(get_uuid_from_seed("output_z_in_1000")), "type": "SERIES", "name_in_subnode": "z", "id_of_sub_node": str(get_uuid_from_seed("1000")), } ], "sub_nodes": [ { "id": str(get_uuid_from_seed("1000")), "component_uuid": str(get_uuid_from_seed("my_component")), } ], }, "configuration": {"name": "string", "engine": "plain"}, "workflow_wiring": { "input_wirings": [ { "workflow_input_name": "x", "adapter_id": 1, "ref_id": "TEST-ID", "filters": {"value": {"a": [1.0, 2.0, 3.5]}}, }, { "workflow_input_name": "y", "adapter_id": 1, "ref_id": "TEST-ID", "filters": {"value": 2.0}, }, ], "output_wirings": [ { "workflow_output_name": "z", "adapter_id": 1, "ref_id": "TEST-ID", } ], }, } def test_single_node_workflow_with_dataframe_input_and_series_output(): status_code, output = run_workflow_with_client( single_node_input_workflow_json.copy() ) assert status_code == 200 assert output["result"] == "ok" assert output["output_results_by_output_name"]["z"] == { "0": 3.0, "1": 4.0, "2": 5.5, } 
plot_workflow_json = { "code_modules": [ { # ordinary function entry point "uuid": str(get_uuid_from_seed("my_code_module")), "code": 'from hetdesrun.component.registration import register\nfrom hetdesrun.datatypes import DataType\nimport logging\ntest_logger = logging.getLogger(__name__)\n# add your own imports here\n\n\n# ***** DO NOT EDIT LINES BELOW *****\n# These lines may be overwritten if input/output changes.\n@register(\n inputs={"x": DataType.Float, "y": DataType.Float}, outputs={"z": DataType.PlotlyJson}, is_pure_plot_component=True\n)\ndef main(*, x, y):\n """entrypoint function for this component"""\n test_logger.info("TEST in component function " + __name__)\n # print(1 / 0)\n # ***** DO NOT EDIT LINES ABOVE *****\n # write your function code here.\n pass\n return {"z": {"a": 1.0}}', }, { # async def entrypoint "uuid": str(get_uuid_from_seed("const_giver_module")), "code": 'from hetdesrun.component.registration import register\nfrom hetdesrun.datatypes import DataType\nfrom hetdesrun import logger\n# add your own imports here\n\n\n# ***** DO NOT EDIT LINES BELOW *****\n# These lines may be overwritten if input/output changes.\n@register(\n inputs={}, outputs={"c": DataType.Float}\n)\nasync def main():\n """entrypoint function for this component"""\n logger.info("TEST")\n # ***** DO NOT EDIT LINES ABOVE *****\n # write your function code here.\n pass\n return {"c": 2.0}', }, ], "components": [ { "uuid": str(get_uuid_from_seed("my_component")), "inputs": [ { "name": "x", "type": "FLOAT", "id": str(get_uuid_from_seed("x_in_my_component")), }, { "name": "y", "type": "FLOAT", "id": str(get_uuid_from_seed("y_in_my_component")), }, ], "outputs": [ { "name": "z", "type": "PLOTLYJSON", "id": str(get_uuid_from_seed("z_in_my_component")), } ], "code_module_uuid": str(get_uuid_from_seed("my_code_module")), "function_name": "main", }, { "uuid": str(get_uuid_from_seed("my_const_giver")), "inputs": [], "outputs": [ { "name": "c", "type": "FLOAT", "id": 
str(get_uuid_from_seed("c_in_my_const_giver")), } ], "code_module_uuid": str(get_uuid_from_seed("const_giver_module")), "function_name": "main", }, ], "workflow": { "id": str(get_uuid_from_seed("my_workflow")), "connections": [ { "input_in_workflow_id": str(get_uuid_from_seed("1001")), "input_name": "c", "input_id": str(get_uuid_from_seed("input_c_in_1001")), "output_in_workflow_id": str(get_uuid_from_seed("1000")), "output_name": "x", "output_id": str(get_uuid_from_seed("output_c_in_1000")), }, { "input_in_workflow_id": str(get_uuid_from_seed("1001")), "input_name": "c", "input_id":
<reponame>TimBeishuizen/Meta-modelling
from unittest import TestCase
from MetaModels import MetaModel as MM
import numpy as np
import warnings


class TestAbstractModel(TestCase):
    """ A class to test the AbstractModel meta-model structure """

    # Construct an example Abstract meta-model.
    # These are name-mangled class attributes shared by all test methods.
    # NOTE(review): np.mat is deprecated in modern NumPy (use np.asmatrix /
    # plain arrays) -- confirm the pinned NumPy version before upgrading.
    __in_par_intervals = np.mat([[0.5, 1.5], [0.2, 0.4]])
    __in_par_means = np.mat([1.2, 0.3])
    __in_par_variances = np.mat([0.1, 0.001])
    __out_par_intervals = np.mat([[1.5, 2.5], [1.2, 1.4]])
    __out_par_means = np.mat([2.2, 1.3])
    __out_par_variances = np.mat([0.1, 0.001])

    def construct_model(self):
        """ Constructs a meta-model for testing

        :return: A meta-model
        """
        return MM.AbstractModel(self.__in_par_intervals, self.__in_par_means,
                                self.__in_par_variances, self.__out_par_intervals,
                                self.__out_par_means, self.__out_par_variances)

    def test_initialization(self):
        """ Tests if the initialization is done correctly. It tests all input
        for the initialization with slight changes to check if they work

        :return: A confirmation if the test work or not.
        """

        def construct_abstract_model(changed_value, changed_value_place, type_error):
            """ A frame to test the different kinds of errors that can occur
            for the initialization of a meta-model

            :param changed_value: The value for the meta-model to be changed
            :param changed_value_place: The location for the meta-model to be changed -
                1 = in_par_intervals, 2 = in_par_means, 3 = in_par_variances,
                4 = out_par_intervals, 5 = out_par_means, 6 = out_par_variances
            :param type_error: The type of error that should have occurred.
            """
            # Substitute `changed_value` into exactly one argument slot and
            # assert the constructor rejects it with `type_error`.
            if changed_value_place == 1:
                self.assertRaises(type_error, MM.AbstractModel, changed_value,
                                  self.__in_par_means, self.__in_par_variances,
                                  self.__out_par_intervals, self.__out_par_means,
                                  self.__out_par_variances)
            elif changed_value_place == 2:
                self.assertRaises(type_error, MM.AbstractModel, self.__in_par_intervals,
                                  changed_value, self.__in_par_variances,
                                  self.__out_par_intervals, self.__out_par_means,
                                  self.__out_par_variances)
            elif changed_value_place == 3:
                self.assertRaises(type_error, MM.AbstractModel, self.__in_par_intervals,
                                  self.__in_par_means, changed_value,
                                  self.__out_par_intervals, self.__out_par_means,
                                  self.__out_par_variances)
            elif changed_value_place == 4:
                self.assertRaises(type_error, MM.AbstractModel, self.__in_par_intervals,
                                  self.__in_par_means, self.__in_par_variances,
                                  changed_value, self.__out_par_means,
                                  self.__out_par_variances)
            elif changed_value_place == 5:
                self.assertRaises(type_error, MM.AbstractModel, self.__in_par_intervals,
                                  self.__in_par_means, self.__in_par_variances,
                                  self.__out_par_intervals, changed_value,
                                  self.__out_par_variances)
            elif changed_value_place == 6:
                self.assertRaises(type_error, MM.AbstractModel, self.__in_par_intervals,
                                  self.__in_par_means, self.__in_par_variances,
                                  self.__out_par_intervals, self.__out_par_means,
                                  changed_value)

        # Control test, to check if a normal case would work
        try:
            MM.AbstractModel(self.__in_par_intervals, self.__in_par_means,
                             self.__in_par_variances, self.__out_par_intervals,
                             self.__out_par_means, self.__out_par_variances)
        except ValueError:
            self.fail('Initialization threw an input value error')

        # Test the input parameter intervals construction
        new_in_par_intervals_1 = 'not a list'
        new_in_par_intervals_2 = np.mat([[0.5, 1.5], 'not a nested list'])
        new_in_par_intervals_3a = np.mat([[0.5, 1.5], [0.2, 0.4, 0.5]])  # Too many floats for input parameters
        new_in_par_intervals_3b = np.mat([[0.5, 1.5], [0.5]])  # Too few floats for input parameters
        new_in_par_intervals_4 = np.mat([[0.5, 1.5], ['no', 'floats']])

        construct_abstract_model(new_in_par_intervals_1, 1, TypeError)
        construct_abstract_model(new_in_par_intervals_2, 1, TypeError)
        construct_abstract_model(new_in_par_intervals_3a, 1, TypeError)
        construct_abstract_model(new_in_par_intervals_3b, 1, TypeError)
        construct_abstract_model(new_in_par_intervals_4, 1, TypeError)

        # Test the input parameter means construction
        new_in_par_means_1 = 'not a list'
        new_in_par_means_2 = np.mat([1.2, 'no float'])

        construct_abstract_model(new_in_par_means_1, 2, TypeError)
        construct_abstract_model(new_in_par_means_2, 2, TypeError)

        # Test the input parameter variances construction
        new_in_par_variances_1 = 'not a list'
        new_in_par_variances_2 = np.mat([0.1, 'no float'])

        construct_abstract_model(new_in_par_variances_1, 3, TypeError)
        construct_abstract_model(new_in_par_variances_2, 3, TypeError)

        # Test the output parameter intervals construction
        new_out_par_intervals_1 = 'not a list'
        new_out_par_intervals_2 = np.mat([[1.5, 2.5], 'not a nested list'])
        new_out_par_intervals_3a = np.mat([[1.5, 2.5], [1.2, 1.3, 1.4]])  # Too many floats for output parameters
        new_out_par_intervals_3b = np.mat([[1.5, 2.5], [1.2]])  # Too few floats for output parameters
        new_out_par_intervals_4 = np.mat([[1.5, 2.5], ['no', 'floats']])

        construct_abstract_model(new_out_par_intervals_1, 4, TypeError)
        construct_abstract_model(new_out_par_intervals_2, 4, TypeError)
        construct_abstract_model(new_out_par_intervals_3a, 4, TypeError)
        construct_abstract_model(new_out_par_intervals_3b, 4, TypeError)
        construct_abstract_model(new_out_par_intervals_4, 4, TypeError)

        # Test the output parameter means construction
        new_out_par_means_1 = 'not a list'
        new_out_par_means_2 = np.mat([1.2, 'no float'])

        construct_abstract_model(new_out_par_means_1, 5, TypeError)
        construct_abstract_model(new_out_par_means_2, 5, TypeError)

        # Test the output parameter variances construction
        new_out_par_variances_1 = 'not a list'
        new_out_par_variances_2 = np.mat([0.1, 'no float'])

        construct_abstract_model(new_out_par_variances_1, 6, TypeError)
        construct_abstract_model(new_out_par_variances_2, 6, TypeError)

        # Test for different number of input parameters
        new_in_par_intervals_5a = np.mat([[0.5, 1.5], [0.2, 0.4], [1, 2]])
        new_in_par_intervals_5b = np.mat([[0.5, 1.5]])
        new_in_par_means_3a = np.mat([1.2, 0.3, 1.4])
        new_in_par_means_3b = np.mat([1.2])
        new_in_par_variances_3a = np.mat([0.1, 0.001, 0.1])
        new_in_par_variances_3b = np.mat([0.1])

        construct_abstract_model(new_in_par_intervals_5a, 1, TypeError)
        construct_abstract_model(new_in_par_intervals_5b, 1, TypeError)
        construct_abstract_model(new_in_par_means_3a, 2, TypeError)
        construct_abstract_model(new_in_par_means_3b, 2, TypeError)
        construct_abstract_model(new_in_par_variances_3a, 3, TypeError)
        construct_abstract_model(new_in_par_variances_3b, 3, TypeError)

        # Test for different number of output parameters
        new_out_par_intervals_5a = np.mat([[1.5, 2.5], [1.2, 1.4], [2, 3]])
        new_out_par_intervals_5b = np.mat([[1.5, 2.5]])
        new_out_par_means_3a = np.mat([2.2, 1.3, 2.4])
        new_out_par_means_3b = np.mat([2.2])
        new_out_par_variances_3a = np.mat([0.1, 0.001, 0.1])
        new_out_par_variances_3b = np.mat([0.1])

        construct_abstract_model(new_out_par_intervals_5a, 4, TypeError)
        construct_abstract_model(new_out_par_intervals_5b, 4, TypeError)
        construct_abstract_model(new_out_par_means_3a, 5, TypeError)
        construct_abstract_model(new_out_par_means_3b, 5, TypeError)
        construct_abstract_model(new_out_par_variances_3a, 6, TypeError)
        construct_abstract_model(new_out_par_variances_3b, 6, TypeError)

        # Test if the intervals are formulated correctly, i.e.
lower boundary first, upper boundary second new_in_par_intervals_6 = np.mat([[1.5, 0.5], [0.2, 0.4]]) new_out_par_intervals_6 = np.mat([[1.5, 2.5], [1.4, 1.2]]) construct_abstract_model(new_in_par_intervals_6, 1, ValueError) construct_abstract_model(new_out_par_intervals_6, 4, ValueError) # Test if the input mean and variance values are between the intervals new_in_par_means_4a = np.mat([1.6, 0.3]) new_in_par_means_4b = np.mat([1.2, 0.1]) new_in_par_variances_4 = np.mat([10, 1.0]) construct_abstract_model(new_in_par_means_4a, 2, ValueError) construct_abstract_model(new_in_par_means_4b, 2, ValueError) construct_abstract_model(new_in_par_variances_4, 3, ValueError) # Test if the output mean and variance values are between the intervals new_out_par_means_4a = np.mat([2.6, 1.3]) new_out_par_means_4b = np.mat([2.2, 1.1]) new_out_par_variances_4 = np.mat([10, 0.0001]) construct_abstract_model(new_out_par_means_4a, 5, ValueError) construct_abstract_model(new_out_par_means_4b, 5, ValueError) construct_abstract_model(new_out_par_variances_4, 6, ValueError) def test_get_type(self): """ Tests whether the function AbstractModel.get_type returns the right type :return: The meta-model type tested """ test_model = self.construct_model() self.failUnlessEqual(test_model.get_type(), 'Abstract') def test_get_in_par_intervals(self): """ Tests whether the function AbstractModel.get_in_par_intervals returns the right values :return: The meta-model input parameter intervals tested """ test_model = self.construct_model() np.testing.assert_array_equal(test_model.get_in_par_intervals(), self.__in_par_intervals) def test_get_in_par_means(self): """ Tests whether the function AbstractModel.get_in_par_means returns the right values :return: The meta-model input parameter means tested """ test_model = self.construct_model() np.testing.assert_array_equal(test_model.get_in_par_means(), self.__in_par_means) def test_get_in_par_variances(self): """ Tests whether the function 
AbstractModel.get_in_par_variances returns the right values :return: The meta-model input parameter variances tested """ test_model = self.construct_model() np.testing.assert_array_equal(test_model.get_in_par_variances(), self.__in_par_variances) def test_get_out_par_intervals(self): """ Tests whether the function AbstractModel.get_out_par_intervals returns the right values :return: The meta-model output parameter intervals tested """ test_model = self.construct_model() np.testing.assert_array_equal(test_model.get_out_par_intervals(), self.__out_par_intervals) def test_get_out_par_means(self): """ Tests whether the function AbstractModel.get_out_par_means returns the right values :return: The meta-model output parameter means tested """ test_model = self.construct_model() np.testing.assert_array_equal(test_model.get_out_par_means(), self.__out_par_means) def test_get_out_par_variances(self): """ Tests whether the function AbstractModel.get_out_par_variances returns the right values :return: The meta-model output parameter variances tested """ test_model = self.construct_model() np.testing.assert_array_equal(test_model.get_out_par_variances(), self.__out_par_variances) def test_simulate(self): """ Tests whether input of AbstractMode.simulate has the correct input. 
:return: The simulated parameter output tested """ test_model = self.construct_model() # Test if the number of input parameters is the same as for the meta-model raw_input_par_1a = np.mat([1.4, 0.25, 0.1]) raw_input_par_1b = np.mat([1.4]) self.assertRaises(TypeError, test_model.simulate, raw_input_par_1a) self.assertRaises(TypeError, test_model.simulate, raw_input_par_1b) # Test if the input parameters are defined correctly raw_input_par_2a = 'Not a list' raw_input_par_2b = np.mat([1.4, 'Not a float']) self.assertRaises(TypeError, test_model.simulate, raw_input_par_2a) self.assertRaises(TypeError, test_model.simulate, raw_input_par_2b) # Test if the input parameters are in between the intervals and gives a warning if not raw_input_par_3a = np.mat([0.4, 0.25]) raw_input_par_3b = np.mat([1.4, 0.45]) warnings.simplefilter("error") self.assertRaises(UserWarning, test_model.simulate, raw_input_par_3a) self.assertRaises(UserWarning, test_model.simulate, raw_input_par_3b) warnings.simplefilter("ignore") class TestPLSRMetaModel(TestAbstractModel): """ A class to test the PLSR meta-model structure """ # Construct an example PLSR meta-model __sol_mat = np.mat([[1, 1], [0.1, 0.01], [0.1, 0.001]]) __in_par_intervals = np.mat([[0.5, 1.5], [0.2, 0.4]]) __in_par_means = np.mat([1.2, 0.3]) __in_par_variances = np.mat([0.1, 0.001]) __out_par_intervals = np.mat([[1.5, 2.5], [1.2, 1.4]]) __out_par_means = np.mat([2.2, 1.3]) __out_par_variances = np.mat([0.1, 0.001]) def construct_model(self): """ Constructs a meta-model for testing :return: A meta-model """ return MM.PLSRMetaModel(self.__sol_mat, self.__in_par_intervals, self.__in_par_means, self.__in_par_variances, self.__out_par_intervals, self.__out_par_means, self.__out_par_variances) def test_initialization(self): """ A test for the initialization of the PLSRMetaModel :return: A PLSR meta-model initialization tested """ def test_sol_mat(sol_mat, error): """ Tests with a different solution matrix if an error would be raised 
:param sol_mat: The solution matrix for the error testing :param error: The type of error :return: A result for the test """ self.assertRaises(error, MM.PLSRMetaModel, sol_mat, self.__in_par_intervals, self.__in_par_means, self.__in_par_variances, self.__out_par_intervals, self.__out_par_means, self.__out_par_variances) # Check if not the proper input for the sol mat is used sol_mat_1 = 'not a list' sol_mat_2 = np.mat([[0.5, 1.5], 'not a nested list']) sol_mat_3 = np.mat([[1, 1], [0.1, 0.01], [0.1, 'no float']]) test_sol_mat(sol_mat_1, TypeError) test_sol_mat(sol_mat_2, TypeError) test_sol_mat(sol_mat_3, TypeError) # Check if different sol matrix sizes makes a problem sol_mat_4a = np.mat([[1, 1], [0.1, 0.01], [1, 1], [0.1, 0.01]]) # Wrong column size sol_mat_4b = np.mat([[1, 1], [0.1, 0.01]]) # Wrong column size sol_mat_5a = np.mat([[1, 1], [0.1, 0.01], [0.1, 0.001, 0.002]]) # Wrong row size sol_mat_5b = np.mat([[1, 1], [0.1, 0.01], [0.1]]) # Wrong row size warnings.simplefilter("error") test_sol_mat(sol_mat_4a, UserWarning) test_sol_mat(sol_mat_4b, TypeError) test_sol_mat(sol_mat_5a, TypeError) test_sol_mat(sol_mat_5b, TypeError) warnings.simplefilter("ignore") def test_get_type(self): """ Tests whether the function AbstractModel.get_type returns the right type :return: The meta-model type tested """ test_model = self.construct_model() self.assertEqual(test_model.get_type(), 'PLSR') def test_get_regress_coeff(self): """ Tests whether the function PLSRMetaModel.get_regress_coeff returns
# -*- coding: utf-8 -*-
"""Tests for the Wikidata parts of the page module."""
#
# (C) Pywikibot team, 2008-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals

import copy
import json

from decimal import Decimal

try:
    from unittest import mock
except ImportError:
    import mock

import pywikibot
from pywikibot import pagegenerators
from pywikibot.page import WikibasePage, ItemPage, PropertyPage, Page
from pywikibot.site import Namespace, NamespacesDict
from pywikibot.tools import MediaWikiVersion

from tests import join_pages_path
from tests.aspects import (
    unittest, TestCase,
    WikidataTestCase,
    DeprecationTestCase,
    DefaultWikibaseClientTestCase,
)
from tests.basepage_tests import (
    BasePageMethodsTestBase,
    BasePageLoadRevisionsCachingTestBase,
)


# fetch a page which is very likely to be unconnected, which doesn't have
# a generator, and unit tests may be used to test old versions of pywikibot
def _get_test_unconnected_page(site):
    """Get unconnected page from site for tests."""
    gen = pagegenerators.NewpagesPageGenerator(site=site, total=10,
                                               namespaces=[1, ])
    for page in gen:
        if not page.properties().get('wikibase_item'):
            return page


class WbRepresentationTestCase(WikidataTestCase):

    """Test methods inherited or extended from _WbRepresentation."""

    def setUp(self):
        """Setup tests."""
        super(WbRepresentationTestCase, self).setUp()

    def _test_hashable(self, representation):
        """Test that the representation is hashable."""
        # Two references to the same object must collapse to one set entry.
        list_of_dupes = [representation, representation]
        self.assertEqual(len(set(list_of_dupes)), 1)


class TestLoadRevisionsCaching(BasePageLoadRevisionsCachingTestBase,
                               WikidataTestCase):

    """Test site.loadrevisions() caching."""

    def setUp(self):
        """Setup test."""
        self._page = ItemPage(self.get_repo(), 'Q60')
        super(TestLoadRevisionsCaching, self).setUp()

    def test_page_text(self):
        """Test site.loadrevisions() with Page.text."""
        self._test_page_text()


class TestDeprecatedAttributes(WikidataTestCase, DeprecationTestCase):

    """Test deprecated lastrevid."""

    def test_lastrevid(self):
        """Test deprecated lastrevid."""
        item = ItemPage(self.get_repo(), 'Q60')
        self.assertFalse(hasattr(item, 'lastrevid'))
        item.get()
        self.assertTrue(hasattr(item, 'lastrevid'))
        self.assertIsInstance(item.lastrevid, int)
        self.assertDeprecation()
        self._reset_messages()
        # Setting the deprecated attribute must mirror into _revid.
        item.lastrevid = 1
        self.assertTrue(hasattr(item, 'lastrevid'))
        self.assertTrue(hasattr(item, '_revid'))
        self.assertEqual(item.lastrevid, 1)
        self.assertEqual(item._revid, 1)
        self.assertDeprecation()

    def test_lastrevid_del(self):
        """Test del with deprecated lastrevid."""
        item = ItemPage(self.get_repo(), 'Q60')
        item.get()
        self.assertTrue(hasattr(item, 'lastrevid'))
        self.assertTrue(hasattr(item, '_revid'))
        # Deleting the alias must also remove the backing attribute.
        del item.lastrevid
        self.assertFalse(hasattr(item, 'lastrevid'))
        self.assertFalse(hasattr(item, '_revid'))
        self.assertDeprecation()


class TestGeneral(WikidataTestCase):

    """General Wikibase tests."""

    @classmethod
    def setUpClass(cls):
        """Setup test class."""
        super(TestGeneral, cls).setUpClass()
        enwiki = pywikibot.Site('en', 'wikipedia')
        cls.mainpage = pywikibot.Page(pywikibot.page.Link("Main Page", enwiki))

    def testWikibase(self):
        """Wikibase tests."""
        repo = self.get_repo()
        item_namespace = repo.namespaces[0]
        self.assertEqual(item_namespace.defaultcontentmodel, 'wikibase-item')
        item = ItemPage.fromPage(self.mainpage)
        self.assertIsInstance(item, ItemPage)
        self.assertEqual(item.getID(), 'Q5296')
        self.assertEqual(item.title(), 'Q5296')
        self.assertIn('en', item.labels)
        self.assertTrue(item.labels['en'].lower().endswith('main page'))
        self.assertIn('en', item.aliases)
        self.assertIn('home page', item.aliases['en'])
        self.assertEqual(item.namespace(), 0)
        item2 = ItemPage(repo, 'q5296')
        self.assertEqual(item2.getID(), 'Q5296')
        item2.get()
        self.assertTrue(item2.labels['en'].lower().endswith('main page'))
        prop = PropertyPage(repo, 'Property:P21')
        self.assertEqual(prop.type, 'wikibase-item')
        self.assertEqual(prop.namespace(), 120)
        claim = pywikibot.Claim(repo, 'p21')
        self.assertRaises(ValueError, claim.setTarget, value="test")
        claim.setTarget(ItemPage(repo, 'q1'))
        self.assertEqual(claim._formatValue(),
                         {'entity-type': 'item', 'numeric-id': 1})

    def test_cmp(self):
        """Test WikibasePage.__cmp__."""
        self.assertEqual(ItemPage.fromPage(self.mainpage),
                         ItemPage(self.get_repo(), 'q5296'))


class TestWikibaseCoordinate(WbRepresentationTestCase):

    """Test Wikibase Coordinate data type."""

    dry = True

    def test_Coordinate_WbRepresentation_methods(self):
        """Test inherited or extended methods from _WbRepresentation."""
        repo = self.get_repo()
        coord = pywikibot.Coordinate(
            site=repo, lat=12.0, lon=13.0, precision=0, globe='moon')
        self._test_hashable(coord)

    def test_Coordinate_dim(self):
        """Test Coordinate dimension."""
        repo = self.get_repo()
        x = pywikibot.Coordinate(site=repo, lat=12.0, lon=13.0, precision=5.0)
        self.assertEqual(x.precisionToDim(), 544434)
        self.assertIsInstance(x.precisionToDim(), int)
        y = pywikibot.Coordinate(site=repo, lat=12.0, lon=13.0, dim=54444)
        self.assertEqual(y.precision, 0.500005084017101)
        self.assertIsInstance(y.precision, float)
        # Neither precision nor dim supplied: conversion must fail.
        z = pywikibot.Coordinate(site=repo, lat=12.0, lon=13.0)
        with self.assertRaises(ValueError):
            z.precisionToDim()

    def test_Coordinate_plain_globe(self):
        """Test setting Coordinate globe from a plain-text value."""
        repo = self.get_repo()
        coord = pywikibot.Coordinate(
            site=repo, lat=12.0, lon=13.0, precision=0, globe='moon')
        self.assertEqual(coord.toWikibase(),
                         {'latitude': 12.0, 'longitude': 13.0,
                          'altitude': None, 'precision': 0,
                          'globe': 'https://www.wikidata.org/entity/Q405'})

    def test_Coordinate_entity_uri_globe(self):
        """Test setting Coordinate globe from an entity uri."""
        repo = self.get_repo()
        coord = pywikibot.Coordinate(
            site=repo, lat=12.0, lon=13.0, precision=0,
            globe_item='https://www.wikidata.org/entity/Q123')
        self.assertEqual(coord.toWikibase(),
                         {'latitude': 12.0, 'longitude': 13.0,
                          'altitude': None, 'precision': 0,
                          'globe': 'https://www.wikidata.org/entity/Q123'})


class TestWikibaseCoordinateNonDry(WbRepresentationTestCase):

    """
    Test Wikibase Coordinate data type (non-dry).

    These can be moved to TestWikibaseCoordinate once DrySite has been
    bumped to the appropriate version.
    """

    def test_Coordinate_item_globe(self):
        """Test setting Coordinate globe from an ItemPage."""
        repo = self.get_repo()
        coord = pywikibot.Coordinate(
            site=repo, lat=12.0, lon=13.0, precision=0,
            globe_item=ItemPage(repo, 'Q123'))
        self.assertEqual(coord.toWikibase(),
                         {'latitude': 12.0, 'longitude': 13.0,
                          'altitude': None, 'precision': 0,
                          'globe': 'https://www.wikidata.org/entity/Q123'})

    def test_Coordinate_get_globe_item_from_uri(self):
        """Test getting globe item from Coordinate with entity uri globe."""
        repo = self.get_repo()
        q = pywikibot.Coordinate(
            site=repo, lat=12.0, lon=13.0, precision=0,
            globe_item='https://www.wikidata.org/entity/Q123')
        self.assertEqual(q.get_globe_item(), ItemPage(repo, 'Q123'))

    def test_Coordinate_get_globe_item_from_itempage(self):
        """Test getting globe item from Coordinate with ItemPage globe."""
        repo = self.get_repo()
        globe = ItemPage(repo, 'Q123')
        q = pywikibot.Coordinate(
            site=repo, lat=12.0, lon=13.0, precision=0, globe_item=globe)
        self.assertEqual(q.get_globe_item(), ItemPage(repo, 'Q123'))

    def test_Coordinate_get_globe_item_from_plain_globe(self):
        """Test getting globe item from Coordinate with plain text globe."""
        repo = self.get_repo()
        q = pywikibot.Coordinate(
            site=repo, lat=12.0, lon=13.0, precision=0, globe='moon')
        self.assertEqual(q.get_globe_item(), ItemPage(repo, 'Q405'))

    def test_Coordinate_get_globe_item_provide_repo(self):
        """Test getting globe item from Coordinate, providing repo."""
        repo = self.get_repo()
        q = pywikibot.Coordinate(
            site=repo, lat=12.0, lon=13.0, precision=0,
            globe_item='https://www.wikidata.org/entity/Q123')
        self.assertEqual(q.get_globe_item(repo), ItemPage(repo, 'Q123'))

    def test_Coordinate_get_globe_item_different_repo(self):
        """Test getting globe item in different repo from Coordinate."""
        repo = self.get_repo()
        test_repo = pywikibot.Site('test', 'wikidata')
        q = pywikibot.Coordinate(
            site=repo, lat=12.0, lon=13.0, precision=0,
            globe_item='https://test.wikidata.org/entity/Q123')
        self.assertEqual(q.get_globe_item(test_repo),
                         ItemPage(test_repo, 'Q123'))

    def test_Coordinate_equality(self):
        """Test Coordinate equality with different globe representations."""
        repo = self.get_repo()
        a = pywikibot.Coordinate(
            site=repo, lat=12.0, lon=13.0, precision=0.1, globe='moon')
        b = pywikibot.Coordinate(
            site=repo, lat=12.0, lon=13.0, precision=0.1,
            globe_item='https://www.wikidata.org/entity/Q405')
        c = pywikibot.Coordinate(
            site=repo, lat=12.0, lon=13.0, precision=0.1,
            globe_item=ItemPage(repo, 'Q405'))
        d = pywikibot.Coordinate(
            site=repo, lat=12.0, lon=13.0, precision=0.1,
            globe_item='https://test.wikidata.org/entity/Q405')
        # a, b, c all resolve to the same globe entity; d is a different repo.
        self.assertEqual(a, b)
        self.assertEqual(b, c)
        self.assertEqual(c, a)
        self.assertNotEqual(a, d)
        self.assertNotEqual(b, d)
        self.assertNotEqual(c, d)


class TestWbTime(WbRepresentationTestCase):

    """Test Wikibase WbTime data type."""

    dry = True

    def test_WbTime_WbRepresentation_methods(self):
        """Test inherited or extended methods from _WbRepresentation."""
        repo = self.get_repo()
        t = pywikibot.WbTime(site=repo, year=2010, month=0, day=0,
                             hour=12, minute=43)
        self._test_hashable(t)

    def test_WbTime_timestr(self):
        """Test timestr functions of WbTime."""
        repo = self.get_repo()
        t = pywikibot.WbTime(site=repo, year=2010, month=0, day=0,
                             hour=12, minute=43)
        self.assertEqual(t.toTimestr(), '+00000002010-00-00T12:43:00Z')
        self.assertEqual(t.toTimestr(force_iso=True), '+2010-01-01T12:43:00Z')
        t = pywikibot.WbTime(site=repo, year=2010, hour=12, minute=43)
        self.assertEqual(t.toTimestr(), '+00000002010-01-01T12:43:00Z')
        self.assertEqual(t.toTimestr(force_iso=True), '+2010-01-01T12:43:00Z')
        t = pywikibot.WbTime(site=repo, year=-2010, hour=12, minute=43)
        self.assertEqual(t.toTimestr(), '-00000002010-01-01T12:43:00Z')
        self.assertEqual(t.toTimestr(force_iso=True), '-2010-01-01T12:43:00Z')

    def test_WbTime_fromTimestr(self):
        """Test WbTime creation from UTC date/time string."""
        repo = self.get_repo()
        t = pywikibot.WbTime.fromTimestr('+00000002010-01-01T12:43:00Z',
                                         site=repo)
        self.assertEqual(t, pywikibot.WbTime(site=repo, year=2010, hour=12,
                                             minute=43, precision=14))

    def test_WbTime_zero_month(self):
        """Test WbTime creation from date/time string with zero month."""
        # ensures we support formats in T123888 / T107870
        repo = self.get_repo()
        t = pywikibot.WbTime.fromTimestr('+00000002010-00-00T12:43:00Z',
                                         site=repo)
        self.assertEqual(t, pywikibot.WbTime(site=repo, year=2010, month=0,
                                             day=0, hour=12, minute=43,
                                             precision=14))

    def test_WbTime_timestamp(self):
        """Test timestamp functions of WbTime."""
        repo = self.get_repo()
        timestamp = pywikibot.Timestamp.fromISOformat('2010-01-01T12:43:00Z')
        t = pywikibot.WbTime(site=repo, year=2010, month=0, day=0,
                             hour=12, minute=43)
        self.assertEqual(t.toTimestamp(), timestamp)
        # Roundtrip fails as Timestamp and WbTime interpret month=0 differently
        self.assertNotEqual(
            t, pywikibot.WbTime.fromTimestamp(timestamp, site=repo))
        t = pywikibot.WbTime(site=repo, year=2010, hour=12, minute=43)
        self.assertEqual(t.toTimestamp(), timestamp)
        t = pywikibot.WbTime(site=repo, year=-2010, hour=12, minute=43)
        self.assertRaises(ValueError, t.toTimestamp)
        t = pywikibot.WbTime(site=repo, year=2010, month=1, day=1, hour=12,
                             minute=43, second=0)
        self.assertEqual(t.toTimestamp(), timestamp)
        self.assertEqual(
            t, pywikibot.WbTime.fromTimestamp(timestamp, site=repo))

    def test_WbTime_errors(self):
        """Test WbTime precision errors."""
        repo = self.get_repo()
        self.assertRaises(ValueError, pywikibot.WbTime, site=repo,
                          precision=15)
        self.assertRaises(ValueError, pywikibot.WbTime, site=repo,
                          precision='invalid_precision')


class TestWbQuantity(WbRepresentationTestCase):

    """Test Wikibase WbQuantity data type."""

    dry = True

    def test_WbQuantity_WbRepresentation_methods(self):
        """Test inherited or extended methods from _WbRepresentation."""
        repo = self.get_repo()
        q = pywikibot.WbQuantity(amount=1234, error=1, site=repo)
        self._test_hashable(q)

    def test_WbQuantity_integer(self):
        """Test WbQuantity for integer value."""
        repo = self.get_repo()
        q = pywikibot.WbQuantity(amount=1234, error=1, site=repo)
        self.assertEqual(q.toWikibase(),
                         {'amount': '+1234', 'lowerBound': '+1233',
                          'upperBound': '+1235', 'unit': '1', })
        q = pywikibot.WbQuantity(amount=5, error=(2, 3), site=repo)
        self.assertEqual(q.toWikibase(),
                         {'amount': '+5', 'lowerBound': '+2',
                          'upperBound': '+7', 'unit': '1', })
        q = pywikibot.WbQuantity(amount=0, error=(0, 0), site=repo)
        self.assertEqual(q.toWikibase(),
                         {'amount': '+0', 'lowerBound': '+0',
                          'upperBound': '+0', 'unit': '1', })
        q = pywikibot.WbQuantity(amount=-5, error=(2, 3), site=repo)
        self.assertEqual(q.toWikibase(),
                         {'amount': '-5', 'lowerBound': '-8',
                          'upperBound': '-3', 'unit': '1', })

    def test_WbQuantity_float_27(self):
        """Test WbQuantity for float value."""
        repo = self.get_repo()
        q = pywikibot.WbQuantity(amount=0.044405586, error=0.0, site=repo)
        q_dict = {'amount': '+0.044405586', 'lowerBound': '+0.044405586',
                  'upperBound': '+0.044405586', 'unit': '1', }
        self.assertEqual(q.toWikibase(), q_dict)

    def test_WbQuantity_scientific(self):
        """Test WbQuantity for scientific notation."""
        repo = self.get_repo()
        q = pywikibot.WbQuantity(amount='1.3e-13', error='1e-14', site=repo)
        q_dict = {'amount': '+1.3e-13', 'lowerBound': '+1.2e-13',
                  'upperBound': '+1.4e-13', 'unit': '1', }
        self.assertEqual(q.toWikibase(), q_dict)

    def test_WbQuantity_decimal(self):
        """Test WbQuantity for decimal value."""
        repo = self.get_repo()
        q = pywikibot.WbQuantity(amount=Decimal('0.044405586'),
                                 error=Decimal('0.0'), site=repo)
        q_dict = {'amount': '+0.044405586', 'lowerBound': '+0.044405586',
                  'upperBound': '+0.044405586', 'unit': '1', }
        self.assertEqual(q.toWikibase(), q_dict)

    def test_WbQuantity_string(self):
        """Test WbQuantity for decimal notation."""
        repo = self.get_repo()
        q = pywikibot.WbQuantity(amount='0.044405586', error='0', site=repo)
        q_dict = {'amount': '+0.044405586', 'lowerBound': '+0.044405586',
                  'upperBound': '+0.044405586', 'unit': '1', }
        self.assertEqual(q.toWikibase(), q_dict)

    def test_WbQuantity_formatting_bound(self):
        """Test WbQuantity formatting with bounds."""
        repo = self.get_repo()
        q = pywikibot.WbQuantity(amount='0.044405586', error='0', site=repo)
        self.assertEqual("%s" % q,
                         '{\n'
                         ' "amount": "+%(val)s",\n'
                         ' "lowerBound": "+%(val)s",\n'
                         ' "unit": "1",\n'
                         ' "upperBound": "+%(val)s"\n'
                         '}' % {'val': '0.044405586'})
        self.assertEqual("%r" % q,
                         "WbQuantity(amount=%(val)s, "
                         "upperBound=%(val)s, lowerBound=%(val)s, "
                         "unit=1)" % {'val': '0.044405586'})

    def test_WbQuantity_self_equality(self):
        """Test WbQuantity equality."""
        repo = self.get_repo()
        q = pywikibot.WbQuantity(amount='0.044405586', error='0', site=repo)
        self.assertEqual(q, q)

    def test_WbQuantity_fromWikibase(self):
        """Test WbQuantity.fromWikibase() instantiating."""
        repo = self.get_repo()
        q = pywikibot.WbQuantity.fromWikibase({u'amount': u'+0.0229',
                                               u'lowerBound': u'0',
                                               u'upperBound': u'1',
                                               u'unit': u'1'},
                                              site=repo)
        # note that the bounds are inputted as INT but are returned as FLOAT
        self.assertEqual(q.toWikibase(),
                         {'amount': '+0.0229', 'lowerBound': '+0.0000',
                          'upperBound': '+1.0000', 'unit': '1', })

    def test_WbQuantity_errors(self):
        """Test WbQuantity error handling."""
        self.assertRaises(ValueError, pywikibot.WbQuantity, amount=None,
                          error=1)

    def test_WbQuantity_entity_unit(self):
        """Test WbQuantity with entity uri unit."""
        repo = self.get_repo()
        q = pywikibot.WbQuantity(amount=1234, error=1, site=repo,
                                 unit='https://www.wikidata.org/entity/Q712226')
        self.assertEqual(q.toWikibase(),
                         {'amount': '+1234', 'lowerBound': '+1233',
                          'upperBound': '+1235',
                          'unit': 'https://www.wikidata.org/entity/Q712226', })

    def test_WbQuantity_unit_fromWikibase(self):
        """Test WbQuantity recognising unit from Wikibase output."""
        # NOTE(review): source chunk is truncated mid-statement below; the
        # rest of this method is not visible in the reviewed chunk.
        repo =
# Evaluation script for XSleepNet (TensorFlow 1.x).
# Loads EEG (mandatory) and optionally EOG/EMG test data, z-normalizes the
# time-frequency inputs with statistics computed from the TRAINING split,
# stacks active modalities into a channel dimension, then evaluates a trained
# XSleepNet model over the test set.
# NOTE(review): uses tf.app.flags / tf.Session -- requires TensorFlow 1.x.
import os
import numpy as np
import tensorflow as tf
import shutil, sys
from datetime import datetime
import h5py
from xsleepnet import XSleepNet
from xsleepnet_config import Config
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import cohen_kappa_score
from datagenerator_from_list_v2 import DataGenerator
from scipy.io import loadmat, savemat

# Parameters
# ==================================================

# Misc Parameters
tf.app.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.app.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")

# My Parameters
tf.app.flags.DEFINE_string("eeg_train_data", "../train_data.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("eeg_test_data", "../test_data.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("eog_train_data", "../train_data.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("eog_test_data", "../test_data.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("emg_train_data", "../train_data.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("emg_test_data", "../test_data.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("out_dir", "./output/", "Point to output directory")
tf.app.flags.DEFINE_string("checkpoint_dir", "./checkpoint/", "Point to checkpoint directory")

# seqsleepnet settings
# NOTE(review): several help strings below ("Sequence length") do not match
# the flag they describe -- copy/paste residue, harmless at runtime.
tf.app.flags.DEFINE_float("dropout_rnn", 0.75, "Dropout keep probability (default: 0.75)")
tf.app.flags.DEFINE_integer("seq_nfilter", 32, "Sequence length (default: 20)")
tf.app.flags.DEFINE_integer("seq_nhidden1", 64, "Sequence length (default: 20)")
tf.app.flags.DEFINE_integer("seq_attention_size1", 32, "Sequence length (default: 20)")
tf.app.flags.DEFINE_integer("seq_nhidden2", 64, "Sequence length (default: 20)")

# deepsleepnet settings
tf.app.flags.DEFINE_float("dropout_cnn", 0.5, "Dropout keep probability (default: 0.75)")
tf.app.flags.DEFINE_integer("deep_nhidden", 512, "Sequence length (default: 20)")

# common settings
tf.app.flags.DEFINE_integer("seq_len", 20, "Sequence length (default: 32)")

FLAGS = tf.app.flags.FLAGS
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()): # python3
    print("{}={}".format(attr.upper(), value))
print("")

# Data Preparatopn
# ==================================================

# path where some output are stored
out_path = os.path.abspath(os.path.join(os.path.curdir,FLAGS.out_dir))
# path where checkpoint models are stored
checkpoint_path = os.path.abspath(os.path.join(out_path,FLAGS.checkpoint_dir))
if not os.path.isdir(os.path.abspath(out_path)): os.makedirs(os.path.abspath(out_path))
if not os.path.isdir(os.path.abspath(checkpoint_path)): os.makedirs(os.path.abspath(checkpoint_path))

# Mirror the command-line flags into the model configuration object.
config = Config()
config.dropout_rnn = FLAGS.dropout_rnn
config.epoch_seq_len = FLAGS.seq_len
config.seq_nfilter = FLAGS.seq_nfilter
config.seq_nhidden1 = FLAGS.seq_nhidden1
config.seq_nhidden2 = FLAGS.seq_nhidden2
config.seq_attention_size1 = FLAGS.seq_attention_size1
config.dropout_cnn = FLAGS.dropout_cnn
config.deep_nhidden = FLAGS.deep_nhidden

# A modality is "active" when both its train and test paths were supplied.
eeg_active = ((FLAGS.eeg_train_data != "") and (FLAGS.eeg_test_data != ""))
eog_active = ((FLAGS.eog_train_data != "") and (FLAGS.eog_test_data != ""))
emg_active = ((FLAGS.emg_train_data != "") and (FLAGS.emg_test_data != ""))

if (eeg_active):
    print("eeg active")
    # Initalize the data generator seperately for the training, validation, and test sets
    eeg_train_gen = DataGenerator(os.path.abspath(FLAGS.eeg_train_data), data_shape_1=[config.deep_ntime], data_shape_2=[config.seq_frame_seq_len, config.seq_ndim], seq_len=config.epoch_seq_len, shuffle = False)
    eeg_test_gen = DataGenerator(os.path.abspath(FLAGS.eeg_test_data), data_shape_1=[config.deep_ntime], data_shape_2=[config.seq_frame_seq_len, config.seq_ndim], seq_len=config.epoch_seq_len, shuffle = False)
    # data normalization for time-frequency here
    # mean/std are computed from the TRAIN split and reused on the TEST split.
    X2 = eeg_train_gen.X2
    X2 = np.reshape(X2,(eeg_train_gen.data_size*eeg_train_gen.data_shape_2[0], eeg_train_gen.data_shape_2[1]))
    meanX = X2.mean(axis=0)
    stdX = X2.std(axis=0)
    X2 = (X2 - meanX) / stdX
    eeg_train_gen.X2 = np.reshape(X2, (eeg_train_gen.data_size, eeg_train_gen.data_shape_2[0], eeg_train_gen.data_shape_2[1]))
    X2 = eeg_test_gen.X2
    X2 = np.reshape(X2,(eeg_test_gen.data_size*eeg_test_gen.data_shape_2[0], eeg_test_gen.data_shape_2[1]))
    X2 = (X2 - meanX) / stdX
    eeg_test_gen.X2 = np.reshape(X2, (eeg_test_gen.data_size, eeg_test_gen.data_shape_2[0], eeg_test_gen.data_shape_2[1]))

if (eog_active):
    print("eog active")
    # Initalize the data generator seperately for the training, validation, and test sets
    eog_train_gen = DataGenerator(os.path.abspath(FLAGS.eog_train_data), data_shape_1=[config.deep_ntime], data_shape_2=[config.seq_frame_seq_len, config.seq_ndim], seq_len=config.epoch_seq_len, shuffle = False)
    eog_test_gen = DataGenerator(os.path.abspath(FLAGS.eog_test_data), data_shape_1=[config.deep_ntime], data_shape_2=[config.seq_frame_seq_len, config.seq_ndim], seq_len=config.epoch_seq_len, shuffle = False)
    # data normalization for time-frequency here
    X2 = eog_train_gen.X2
    X2 = np.reshape(X2,(eog_train_gen.data_size*eog_train_gen.data_shape_2[0], eog_train_gen.data_shape_2[1]))
    meanX = X2.mean(axis=0)
    stdX = X2.std(axis=0)
    X2 = (X2 - meanX) / stdX
    eog_train_gen.X2 = np.reshape(X2, (eog_train_gen.data_size, eog_train_gen.data_shape_2[0], eog_train_gen.data_shape_2[1]))
    X2 = eog_test_gen.X2
    X2 = np.reshape(X2,(eog_test_gen.data_size*eog_test_gen.data_shape_2[0], eog_test_gen.data_shape_2[1]))
    X2 = (X2 - meanX) / stdX
    eog_test_gen.X2 = np.reshape(X2, (eog_test_gen.data_size, eog_test_gen.data_shape_2[0], eog_test_gen.data_shape_2[1]))

if (emg_active):
    print("emg active")
    # Initalize the data generator seperately for the training, validation, and test sets
    emg_train_gen = DataGenerator(os.path.abspath(FLAGS.emg_train_data), data_shape_1=[config.deep_ntime], data_shape_2=[config.seq_frame_seq_len, config.seq_ndim], seq_len=config.epoch_seq_len, shuffle = False)
    emg_test_gen = DataGenerator(os.path.abspath(FLAGS.emg_test_data), data_shape_1=[config.deep_ntime], data_shape_2=[config.seq_frame_seq_len, config.seq_ndim], seq_len=config.epoch_seq_len, shuffle = False)
    # data normalization here
    X2 = emg_train_gen.X2
    X2 = np.reshape(X2,(emg_train_gen.data_size*emg_train_gen.data_shape_2[0], emg_train_gen.data_shape_2[1]))
    meanX = X2.mean(axis=0)
    stdX = X2.std(axis=0)
    X2 = (X2 - meanX) / stdX
    emg_train_gen.X2 = np.reshape(X2, (emg_train_gen.data_size, emg_train_gen.data_shape_2[0], emg_train_gen.data_shape_2[1]))
    X2 = emg_test_gen.X2
    X2 = np.reshape(X2,(emg_test_gen.data_size*emg_test_gen.data_shape_2[0], emg_test_gen.data_shape_2[1]))
    X2 = (X2 - meanX) / stdX
    emg_test_gen.X2 = np.reshape(X2, (emg_test_gen.data_size, emg_test_gen.data_shape_2[0], emg_test_gen.data_shape_2[1]))

# eeg always active
# NOTE(review): train_generator/test_generator ALIAS the eeg generators; the
# in-place mutations below therefore also modify eeg_train_gen/eeg_test_gen.
train_generator = eeg_train_gen
test_generator = eeg_test_gen

# Build the channel dimension: 1 channel (EEG only), 2 (EEG+EOG) or
# 3 (EEG+EOG+EMG). NOTE(review): the EEG+EMG-without-EOG combination is not
# handled by any branch -- presumably unsupported upstream; confirm.
if (not(eog_active) and not(emg_active)):
    train_generator.X1 = np.expand_dims(train_generator.X1, axis=-1) # expand channel dimension
    train_generator.data_shape_1 = train_generator.X1.shape[1:]
    test_generator.X1 = np.expand_dims(test_generator.X1, axis=-1) # expand channel dimension
    test_generator.data_shape_1 = test_generator.X1.shape[1:]
    print(train_generator.X1.shape)
    train_generator.X2 = np.expand_dims(train_generator.X2, axis=-1) # expand channel dimension
    train_generator.data_shape_2 = train_generator.X2.shape[1:]
    test_generator.X2 = np.expand_dims(test_generator.X2, axis=-1) # expand channel dimension
    test_generator.data_shape_2 = test_generator.X2.shape[1:]
    print(train_generator.X2.shape)
    nchannel = 1

if (eog_active and not(emg_active)):
    print(train_generator.X1.shape)
    print(eog_train_gen.X1.shape)
    train_generator.X1 = np.stack((train_generator.X1, eog_train_gen.X1), axis=-1) # merge and make new dimension
    train_generator.data_shape_1 = train_generator.X1.shape[1:]
    test_generator.X1 = np.stack((test_generator.X1, eog_test_gen.X1), axis=-1) # merge and make new dimension
    test_generator.data_shape_1 = test_generator.X1.shape[1:]
    print(train_generator.X1.shape)
    print(train_generator.X2.shape)
    print(eog_train_gen.X2.shape)
    train_generator.X2 = np.stack((train_generator.X2, eog_train_gen.X2), axis=-1) # merge and make new dimension
    train_generator.data_shape_2 = train_generator.X2.shape[1:]
    test_generator.X2 = np.stack((test_generator.X2, eog_test_gen.X2), axis=-1) # merge and make new dimension
    test_generator.data_shape_2 = test_generator.X2.shape[1:]
    print(train_generator.X2.shape)
    nchannel = 2

if (eog_active and emg_active):
    print(train_generator.X1.shape)
    print(eog_train_gen.X1.shape)
    print(emg_train_gen.X1.shape)
    train_generator.X1 = np.stack((train_generator.X1, eog_train_gen.X1, emg_train_gen.X1), axis=-1) # merge and make new dimension
    train_generator.data_shape_1 = train_generator.X1.shape[1:]
    test_generator.X1 = np.stack((test_generator.X1, eog_test_gen.X1, emg_test_gen.X1), axis=-1) # merge and make new dimension
    test_generator.data_shape_1 = test_generator.X1.shape[1:]
    print(train_generator.X1.shape)
    print(train_generator.X2.shape)
    print(eog_train_gen.X2.shape)
    print(emg_train_gen.X2.shape)
    train_generator.X2 = np.stack((train_generator.X2, eog_train_gen.X2, emg_train_gen.X2), axis=-1) # merge and make new dimension
    train_generator.data_shape_2 = train_generator.X2.shape[1:]
    test_generator.X2 = np.stack((test_generator.X2, eog_test_gen.X2, emg_test_gen.X2), axis=-1) # merge and make new dimension
    test_generator.data_shape_2 = test_generator.X2.shape[1:]
    print(train_generator.X2.shape)
    nchannel = 3

config.nchannel = nchannel

# Free the per-modality generators; only the merged test_generator is needed.
del eeg_train_gen
del eeg_test_gen
if (eog_active):
    del eog_train_gen
    del eog_test_gen
if (emg_active):
    del emg_train_gen
    del emg_test_gen
# shuffle training data here
del train_generator

test_batches_per_epoch = np.floor(len(test_generator.data_index) / config.batch_size).astype(np.uint32)

print("Test set: {:d}".format(test_generator.data_size))
print("/Test batches per epoch: {:d}".format(test_batches_per_epoch))

with tf.Graph().as_default():
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0, allow_growth=False)
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement,
        gpu_options=gpu_options)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        net = XSleepNet(config=config)

        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            # Define Training procedure
            global_step = tf.Variable(0, name="global_step", trainable=False)
            optimizer = tf.train.AdamOptimizer(config.learning_rate)
            grads_and_vars = optimizer.compute_gradients(net.loss)
            train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

        out_dir = os.path.abspath(os.path.join(os.path.curdir,FLAGS.out_dir))
        print("Writing to {}\n".format(out_dir))

        def dev_step(x1_batch, x2_batch, y_batch):
            # Run one inference step (dropout disabled, equal stream weights,
            # istraining=0) and return per-stream losses/predictions/scores.
            seq_frame_seq_len = np.ones(len(x1_batch)*config.epoch_seq_len,dtype=int) * config.seq_frame_seq_len
            epoch_seq_len = np.ones(len(x1_batch),dtype=int) * config.epoch_seq_len
            feed_dict = {
                net.input_x1: x1_batch,
                net.input_x2: x2_batch,
                net.input_y: y_batch,
                net.dropout_rnn: 1.0,
                net.epoch_seq_len: epoch_seq_len,
                net.seq_frame_seq_len: seq_frame_seq_len,
                net.dropout_cnn: 1.0,
                net.w1 : 1./3,
                net.w2 : 1./3,
                net.w3 : 1./3,
                net.istraining: 0
            }
            output_loss1, output_loss2, output_loss3, output_loss, total_loss, \
            deep_yhat, seq_yhat, joint_yhat, yhat, deep_score, seq_score, joint_score, score = sess.run(
                [net.deep_loss, net.seq_loss, net.joint_loss, net.output_loss, net.loss,
                 net.deep_predictions, net.seq_predictions, net.joint_predictions, net.predictions,
                 net.deep_scores, net.seq_scores, net.joint_scores, net.score], feed_dict)
            return output_loss1, output_loss2, output_loss3, output_loss, total_loss, \
                deep_yhat, seq_yhat, joint_yhat, yhat, \
                deep_score, seq_score, joint_score, score

        def evaluate(gen):
            # Validate the model on the entire evaluation test set after each epoch
            output_loss1 =0
            output_loss2 =0
            output_loss3 =0
            output_loss =0
            total_loss = 0
            deep_yhat = np.zeros([config.epoch_seq_len, len(gen.data_index)])
            seq_yhat = np.zeros([config.epoch_seq_len, len(gen.data_index)])
            joint_yhat = np.zeros([config.epoch_seq_len, len(gen.data_index)])
            yhat = np.zeros([config.epoch_seq_len, len(gen.data_index)])
            # NOTE(review): the score buffers below are sized from the global
            # test_generator, not from the gen parameter -- a latent bug if
            # evaluate() is ever called with a different generator. Confirm.
            deep_score = np.zeros([config.epoch_seq_len, len(test_generator.data_index), config.nclass])
            seq_score = np.zeros([config.epoch_seq_len, len(test_generator.data_index), config.nclass])
            joint_score = np.zeros([config.epoch_seq_len, len(test_generator.data_index), config.nclass])
            score = np.zeros([config.epoch_seq_len, len(test_generator.data_index), config.nclass])
            factor = 10
            num_batch_per_epoch = np.floor(len(gen.data_index) / (factor*config.batch_size)).astype(np.uint32)
            test_step = 1
            # NOTE(review): the strict '<' skips the last full batch of the
            # loop; the rest_batch branch below then has to cover more than
            # one batch worth of data. Looks like an off-by-one -- confirm
            # against DataGenerator.rest_batch semantics.
            while test_step < num_batch_per_epoch:
                x1_batch, x2_batch, y_batch, label_batch_ = gen.next_batch(factor*config.batch_size)
                output_loss1_, output_loss2_, output_loss3_, output_loss_, total_loss_, \
                deep_yhat_, seq_yhat_, joint_yhat_, yhat_, \
                deep_score_, seq_score_, joint_score_, score_ = dev_step(x1_batch, x2_batch, y_batch)
                output_loss1 += output_loss1_
                output_loss2 += output_loss2_
                output_loss3 += output_loss3_
                output_loss += output_loss_
                total_loss += total_loss_
                for n in range(config.epoch_seq_len):
                    deep_yhat[n, (test_step-1)*factor*config.batch_size : test_step*factor*config.batch_size] = deep_yhat_[n]
                    seq_yhat[n, (test_step-1)*factor*config.batch_size : test_step*factor*config.batch_size] = seq_yhat_[n]
                    joint_yhat[n, (test_step-1)*factor*config.batch_size : test_step*factor*config.batch_size] = joint_yhat_[n]
                    yhat[n, (test_step-1)*factor*config.batch_size : test_step*factor*config.batch_size] = yhat_[n]
                    deep_score[n, (test_step-1)*factor*config.batch_size : test_step*factor*config.batch_size,:] = deep_score_[n]
                    seq_score[n, (test_step-1)*factor*config.batch_size : test_step*factor*config.batch_size,:] = seq_score_[n]
                    joint_score[n, (test_step-1)*factor*config.batch_size : test_step*factor*config.batch_size,:] = joint_score_[n]
                    score[n, (test_step-1)*factor*config.batch_size : test_step*factor*config.batch_size,:] = score_[n]
                test_step += 1
            if(gen.pointer < len(gen.data_index)):
                # Handle the remaining (partial) batch.
                actual_len, x1_batch, x2_batch, y_batch, label_batch_ = gen.rest_batch(config.batch_size)
                output_loss1_, output_loss2_, output_loss3_, output_loss_, total_loss_, \
                deep_yhat_, seq_yhat_, joint_yhat_, yhat_, \
                deep_score_, seq_score_, joint_score_, score_ = dev_step(x1_batch, x2_batch, y_batch)
                output_loss1 += output_loss1_
                output_loss2 += output_loss2_
                output_loss3 += output_loss3_
                output_loss += output_loss_
                total_loss += total_loss_
                for n in range(config.epoch_seq_len):
                    deep_yhat[n, (test_step-1)*factor*config.batch_size : len(gen.data_index)] = deep_yhat_[n]
                    seq_yhat[n, (test_step-1)*factor*config.batch_size : len(gen.data_index)] = seq_yhat_[n]
                    joint_yhat[n, (test_step-1)*factor*config.batch_size : len(gen.data_index)] = joint_yhat_[n]
                    yhat[n, (test_step-1)*factor*config.batch_size : len(gen.data_index)] = yhat_[n]
                    deep_score[n, (test_step-1)*factor*config.batch_size : len(gen.data_index),:] = deep_score_[n]
                    seq_score[n, (test_step-1)*factor*config.batch_size : len(gen.data_index),:] = seq_score_[n]
                    joint_score[n, (test_step-1)*factor*config.batch_size : len(gen.data_index),:] = joint_score_[n]
                    score[n, (test_step-1)*factor*config.batch_size : len(gen.data_index),:] = score_[n]
            # Shift 0-based class indices to the 1-based label convention.
            deep_yhat = deep_yhat + 1
            seq_yhat = seq_yhat + 1
            joint_yhat = joint_yhat + 1
            yhat = yhat + 1
            acc1 = 0
            acc2 = 0
            acc3 = 0
            acc = 0
            # Average accuracy over every position of the prediction sequence.
            for n in range(config.epoch_seq_len):
                acc_n = accuracy_score(deep_yhat[n,:], gen.label[gen.data_index - (config.epoch_seq_len - 1) + n]) # due to zero-indexing
                acc1 += acc_n
            for n in range(config.epoch_seq_len):
                acc_n = accuracy_score(seq_yhat[n,:], gen.label[gen.data_index - (config.epoch_seq_len - 1) + n]) # due to zero-indexing
                acc2 += acc_n
            for n in range(config.epoch_seq_len):
                acc_n = accuracy_score(joint_yhat[n,:], gen.label[gen.data_index - (config.epoch_seq_len - 1) + n]) # due to zero-indexing
                acc3 += acc_n
            for n in range(config.epoch_seq_len):
                acc_n = accuracy_score(yhat[n,:], gen.label[gen.data_index - (config.epoch_seq_len - 1) + n]) # due to zero-indexing
                acc += acc_n
            acc1 /= config.epoch_seq_len
            acc2 /= config.epoch_seq_len
            acc3 /= config.epoch_seq_len
            acc /= config.epoch_seq_len
            return acc1, acc2, acc3, acc, \
                deep_yhat, seq_yhat, joint_yhat, yhat, \
                deep_score, seq_score, joint_score, score, \
                output_loss1, output_loss2, output_loss3, output_loss,
#!/usr/bin/python3
# -*- coding:utf-8 -*-
# Project: http://plankton-toolbox.org
# Copyright (c) 2010-2018 SMHI, Swedish Meteorological and Hydrological Institute
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).

import sys
import pathlib
import os.path
import glob
import locale
from PyQt5 import QtGui
from PyQt5 import QtWidgets
from PyQt5 import QtCore

import toolbox_utils
import plankton_core
import app_framework
import app_tools

class LoadDatasetsActivity(app_framework.ActivityBase):
    """Activity for importing datasets from files and plankton counter samples."""

    def __init__(self, name, parentwidget):
        """Set up state, load the parser list and wire counter-list updates."""
        self._last_used_textfile_name = ''
        self._last_used_excelfile_name = ''
        # Load available dataset parsers.
        self._parser_list = []
        self._load_available_parsers()
        #
        self._lastusedsharkwebfilename = ''
        self._lastusedphytowinfilename = ''
        self._lastusedplanktoncounterfilename = ''
        # Initialize parent (self._create_content will be called).
        super(LoadDatasetsActivity, self).__init__(name, parentwidget)
        # Log available parsers when GUI setup has finished.
        QtCore.QTimer.singleShot(10, self._log_available_parsers)
        # Update plankton counter datasets.
        self._counter_update_dataset_list()
        # Update plankton counter datasets when changes occured.
        plankton_core.PlanktonCounterManager().planktonCounterListChanged.connect(self._counter_update_dataset_list)

    def _load_available_parsers(self):
        """Scan plankton_toolbox_data/parsers for *.xlsx parser files."""
        try:
            plankton_toolbox_data_path = app_framework.ToolboxUserSettings().get_path_to_plankton_toolbox_data()
            self._parser_path = str(pathlib.Path(plankton_toolbox_data_path, 'parsers'))
            self._parser_list = []
            for parserpath in glob.glob(self._parser_path + '/*.xlsx'):
                self._parser_list.append(os.path.basename(parserpath))
        #
        except Exception as e:
            debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
            toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))

    def _log_available_parsers(self):
        """Write the detected parser list to the toolbox log."""
        try:
            if len(self._parser_list) > 0:
                toolbox_utils.Logging().log('') # Empty line.
                toolbox_utils.Logging().log('Available dataset parsers (located in "plankton_toolbox_data/parsers"):')
                for parserpath in self._parser_list:
                    toolbox_utils.Logging().log('- ' + os.path.basename(parserpath))
            else:
                toolbox_utils.Logging().log('No dataset parsers are found in "/plankton_toolbox_data/parsers". ')
        #
        except Exception as e:
            debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
            toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))

    def _create_content(self):
        """Build the activity layout: header, import box, loaded-datasets box."""
        content = self._create_scrollable_content()
        contentLayout = QtWidgets.QVBoxLayout()
        content.setLayout(contentLayout)
        # Add activity name at top.
        self._activityheader = app_framework.HeaderQLabel()
        self._activityheader.setText('<h2>' + self.objectName() + '</h2>')
        self._activityheader.setTextFormat(QtCore.Qt.RichText)
        self._activityheader.setAlignment(QtCore.Qt.AlignHCenter)
        contentLayout.addWidget(self._activityheader)
        # Add content to the activity.
        contentLayout.addWidget(self._content_load_dataset())
        contentLayout.addWidget(self._content_loaded_datasets(), 10)
        # contentLayout.addStretch(5)

    def _content_load_dataset(self):
        """Create the tabbed 'Import datasets/datafiles' group box."""
        try:
            # Active widgets and connections.
            selectdatabox = QtWidgets.QGroupBox('Import datasets/datafiles', self)
            tabWidget = QtWidgets.QTabWidget()
            tabWidget.addTab(self._content_predefined_formats(), 'Predefined formats')
            tabWidget.addTab(self._content_plankton_counter(), 'Plankton counter samples')
            tabWidget.addTab(self._content_textfile(), 'Parsers - Text file (*.txt)')
            tabWidget.addTab(self._content_xlsx(), 'Parsers - Excel files (*.xlsx)')
            # Layout widgets.
            layout = QtWidgets.QVBoxLayout()
            layout.addWidget(tabWidget)
            selectdatabox.setLayout(layout)
            #
            return selectdatabox
        #
        except Exception as e:
            debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
            toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))

    # ===== PLANKTON COUNTER DATASETS ======
    def _content_plankton_counter(self):
        """Create the tab listing plankton counter samples with import buttons."""
        widget = QtWidgets.QWidget()
        #
        counter_datasets_listview = QtWidgets.QListView()
        self._counter_datasets_model = QtGui.QStandardItemModel()
        counter_datasets_listview.setModel(self._counter_datasets_model)
        #
        self._cleara_metadata_button = app_framework.ClickableQLabel('Clear all')
        self._cleara_metadata_button.label_clicked.connect(self._counter_uncheck_all_datasets)
        self._markall_button = app_framework.ClickableQLabel('Mark all')
        self._markall_button.label_clicked.connect(self._counter_check_all_datasets)
        self._importcounterdataset_button = QtWidgets.QPushButton('Import marked dataset(s)')
        self._importcounterdataset_button.clicked.connect(self._counter_import_counter_datasets)
        self._importcounter_trophic_list_checkbox = QtWidgets.QCheckBox('Update trophic types')
        self._importcounter_trophic_list_checkbox.setChecked(True)
        # Layout widgets.
        hbox1 = QtWidgets.QHBoxLayout()
        hbox1.addWidget(self._cleara_metadata_button)
        hbox1.addWidget(self._markall_button)
        # hbox1.addStretch(10)
        hbox1.addWidget(self._importcounterdataset_button)
        hbox1.addWidget(self._importcounter_trophic_list_checkbox)
        hbox1.addStretch(10)
        #
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(counter_datasets_listview, 10)
        layout.addLayout(hbox1)
        widget.setLayout(layout)
        #
        return widget

    def _counter_update_dataset_list(self):
        """Refresh the checkable 'dataset: sample' list from the counter manager."""
        try:
            self._counter_datasets_model.clear()
            for datasetname in sorted(plankton_core.PlanktonCounterManager().get_dataset_names()):
                for samplename in sorted(plankton_core.PlanktonCounterManager().get_sample_names(datasetname)):
                    item = QtGui.QStandardItem(datasetname + ': ' + samplename)
                    item.setCheckState(QtCore.Qt.Unchecked)
                    item.setCheckable(True)
                    self._counter_datasets_model.appendRow(item)
        #
        except Exception as e:
            debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
            toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))

    def _counter_check_all_datasets(self):
        """Mark every row in the counter dataset list."""
        try:
            for rowindex in range(self._counter_datasets_model.rowCount()):
                item = self._counter_datasets_model.item(rowindex, 0)
                item.setCheckState(QtCore.Qt.Checked)
        #
        except Exception as e:
            debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
            toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))

    def _counter_uncheck_all_datasets(self):
        """Unmark every row in the counter dataset list."""
        try:
            for rowindex in range(self._counter_datasets_model.rowCount()):
                item = self._counter_datasets_model.item(rowindex, 0)
                item.setCheckState(QtCore.Qt.Unchecked)
        #
        except Exception as e:
            debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
            toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))

    def _counter_import_counter_datasets(self):
        """Import all checked 'dataset: sample' entries via DataImportManager."""
        try:
            # Create a list with selected datasets.
            selectedsamples = []
            for rowindex in range(self._counter_datasets_model.rowCount()):
                item = self._counter_datasets_model.item(rowindex, 0)
                if item.checkState() == QtCore.Qt.Checked:
                    selectedsamples.append(str(item.text()))
            #
            if len(selectedsamples) == 0:
                return
            #
            try:
                toolbox_utils.Logging().log('') # Empty line.
                toolbox_utils.Logging().log('Importing datasets...')
                toolbox_utils.Logging().start_accumulated_logging()
                self._write_to_status_bar('Importing datasets...')
                for datasetandsample in selectedsamples:
                    # Items are formatted as '<dataset>: <sample>'.
                    datasetandsamplepair = datasetandsample.split(':')
                    dataset_name = datasetandsamplepair[0].strip()
                    sample_name = datasetandsamplepair[1].strip()
                    # print('DEBUG: dataset_name: ' + dataset_name)
                    # print('DEBUG: sample_name: ' + sample_name)
                    update_trophic_type = self._importcounter_trophic_list_checkbox.isChecked()
                    datasetnode = plankton_core.DataImportManager().import_dataset_file(dataset_name = dataset_name,
                                                                                       sample_name = sample_name,
                                                                                       import_format = 'PlanktonCounter',
                                                                                       update_trophic_type=update_trophic_type)
                    # Use datasets-wrapper to emit change notification when dataset list is updated.
                    app_framework.ToolboxDatasets().emit_change_notification()
                    # Add metadata related to imported file.
                    datasetnode.add_metadata('parser', '-')
                    datasetnode.add_metadata('file_name', datasetandsample)
                    datasetnode.add_metadata('file_path', '-')
                    datasetnode.add_metadata('import_column', '-')
                    datasetnode.add_metadata('export_column', '-')
            #
            except Exception as e:
                toolbox_utils.Logging().error('Plankton conter file import failed on exception: ' + str(e))
                QtWidgets.QMessageBox.warning(self, 'Plankton conter file loading.\n',
                                              'Plankton conter file import failed on exception.\n' + str(e))
                raise
            finally:
                datasetcount = len(plankton_core.Datasets().get_datasets())
                self._write_to_status_bar('Imported datasets: ' + str(datasetcount))
                toolbox_utils.Logging().log_all_accumulated_rows()
                toolbox_utils.Logging().log('Importing datasets done. Number of imported datasets: ' + str(datasetcount))
        #
        except Exception as e:
            debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
            toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))

    # ===== PREDEFINED FORMATS ======
    def _content_predefined_formats(self):
        """Create the 'Predefined formats' tab with a format combo and import button."""
        widget = QtWidgets.QWidget()
        # - Select dataset parsers:
        self._predefined_format_combo = QtWidgets.QComboBox()
        self._predefined_format_combo.setSizeAdjustPolicy(QtWidgets.QComboBox.AdjustToContents)
        # self._textfile_parser_list.currentIndexChanged(int)'), self._textfile_parser_selected)
        # - Add available dataset parsers.
        # self._predefinedformat_list = ['PTBX Archive Format (*.zip) (Not implemented)',
        #                               'PTBX Archive Format (http://sharkdata.se) (Not implemented)',
        #                               'PTBX Archive Format (http://test.sharkdata.se) (Not implemented)',
        #                               'Darwin Core Archive (Not implemented)',
        #                               'Darwin Core Archive - EurOBIS (Not implemented)',
        #                               'PhytoWin (*.csv)']
        self._predefinedformat_list = ['Plankton counter sample(s) (*.xlsx)',
                                       'SHARKweb download(s) (*.txt)',
                                       # 'Phytoplankton-archive (*.csv)'
                                       ]
        self._predefined_format_combo.addItems(self._predefinedformat_list)
        self._predefined_format_combo.setCurrentIndex(0) # 0 = 'Plankton counter sample(s) (*.xlsx)'
        # Load dataset.
        self._predefined_getdataset_button = QtWidgets.QPushButton('Import datasets/datafiles...')
        self._predefined_getdataset_button.clicked.connect(self._import_predefined_datasets)
        self._predefined_trophic_list_checkbox = QtWidgets.QCheckBox('Update trophic types')
        self._predefined_trophic_list_checkbox.setChecked(True)
        # Layout widgets.
        form1 = QtWidgets.QGridLayout()
        gridrow = 0
        label1 = QtWidgets.QLabel('Format:')
        stretchlabel = QtWidgets.QLabel('')
        form1.addWidget(label1, gridrow, 0, 1, 1)
        form1.addWidget(self._predefined_format_combo, gridrow, 1, 1, 1)
        form1.addWidget(stretchlabel, gridrow,2, 1, 9)
        #
        hbox1 = QtWidgets.QHBoxLayout()
        # hbox1.addStretch(10)
        hbox1.addWidget(self._predefined_getdataset_button)
        hbox1.addWidget(self._predefined_trophic_list_checkbox)
        hbox1.addStretch(10)
        #
        layout = QtWidgets.QVBoxLayout()
        # layout.addWidget(introlabel)
        layout.addLayout(form1)
        layout.addStretch(1)
        layout.addLayout(hbox1)
        widget.setLayout(layout)
        #
        return widget

    def _import_predefined_datasets(self):
        """Dispatch the import to the handler matching the selected format."""
        try:
            selectedformat = self._predefined_format_combo.currentText()
            if selectedformat == 'Plankton counter sample(s) (*.xlsx)':
                self._load_plankton_counter_excel()
            elif selectedformat == 'SHARKweb download(s) (*.txt)':
                self._load_sharkweb_datasets()
            # elif selectedformat == 'Phytoplankton-archive (*.csv)':
            #     self._load_phytowin_datasets()
            else:
                QtWidgets.QMessageBox.information(self, "Information", 'Not implemented yet.')
        #
        except Exception as e:
            debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
            toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))

    def _load_plankton_counter_excel(self):
        """Let the user pick *.xlsx counter sample files and import them."""
        try:
            try:
                toolbox_utils.Logging().log('') # Empty line.
                toolbox_utils.Logging().log('Importing datasets...')
                toolbox_utils.Logging().start_accumulated_logging()
                self._write_to_status_bar('Importing datasets...')
                # Show select file dialog box. Multiple files can be selected.
                namefilter = 'Plankton counter samples (*.xlsx);;All files (*.*)'
                filenames, _filters = QtWidgets.QFileDialog.getOpenFileNames(
                    self,
                    'Load plankton counter sample file(s). ',
                    self._lastusedplanktoncounterfilename,
                    namefilter)
                # Check if user pressed ok or cancel.
                if filenames:
                    for filename in filenames:
                        self._lastusedplanktoncounterfilename = filename
                        update_trophic_type = self._predefined_trophic_list_checkbox.isChecked()
                        datasetnode = plankton_core.DataImportManager().import_dataset_file(filename,
                                                                                           import_format = 'PlanktonCounterExcel',
                                                                                           update_trophic_type=update_trophic_type)
                        # Use datasets-wrapper to emit change notification when dataset list is updated.
                        app_framework.ToolboxDatasets().emit_change_notification()
                        # Add metadata related to imported file.
                        datasetnode.add_metadata('parser', '-')
                        datasetnode.add_metadata('file_name', os.path.basename(filename))
                        datasetnode.add_metadata('file_path', filename)
                        datasetnode.add_metadata('import_column', '-')
                        datasetnode.add_metadata('export_column', '-')
            #
            except Exception as e:
                toolbox_utils.Logging().error('Plankton counter sample import failed on exception: ' + str(e))
                QtWidgets.QMessageBox.warning(self, 'Text file loading.\n',
                                              'Plankton counter sample import failed on exception.\n' + str(e))
                raise
            finally:
                datasetcount = len(plankton_core.Datasets().get_datasets())
                self._write_to_status_bar('Imported datasets: ' + str(datasetcount))
                toolbox_utils.Logging().log_all_accumulated_rows()
                toolbox_utils.Logging().log('Importing datasets done. Number of imported datasets: ' + str(datasetcount))
        #
        except Exception as e:
            debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
            toolbox_utils.Logging().error('Exception: (' + debug_info + '): ' + str(e))

    def _load_sharkweb_datasets(self):
        """Let the user pick SHARKweb *.txt downloads and import them."""
        try:
            try:
                toolbox_utils.Logging().log('') # Empty line.
                toolbox_utils.Logging().log('Importing datasets...')
                toolbox_utils.Logging().start_accumulated_logging()
                self._write_to_status_bar('Importing datasets...')
                # Show select file dialog box. Multiple files can be selected.
                namefilter = 'SHARKweb files (*.txt);;All files (*.*)'
                filenames, _filters = QtWidgets.QFileDialog.getOpenFileNames(
                    self,
                    'Load SHARKweb file(s). ',
                    self._lastusedsharkwebfilename,
                    namefilter)
                # Check if user pressed ok or cancel.
                if filenames:
                    for filename in filenames:
                        self._lastusedsharkwebfilename = filename
                        update_trophic_type = self._predefined_trophic_list_checkbox.isChecked()
                        datasetnode = plankton_core.DataImportManager().import_dataset_file(filename,
                                                                                           import_format = 'SHARKweb',
                                                                                           update_trophic_type=update_trophic_type)
                        # Use datasets-wrapper to emit change notification when dataset list is updated.
                        app_framework.ToolboxDatasets().emit_change_notification()
                        # Add metadata related to imported file.
                        datasetnode.add_metadata('parser', '-')
                        datasetnode.add_metadata('file_name', os.path.basename(filename))
                        datasetnode.add_metadata('file_path', filename)
                        datasetnode.add_metadata('import_column', '-')
                        datasetnode.add_metadata('export_column', '-')
            #
            except Exception as e:
                toolbox_utils.Logging().error('SHARKweb file import failed on exception: ' + str(e))
                QtWidgets.QMessageBox.warning(self, 'Text file loading.\n',
                                              'SHARKweb file import failed on exception.\n' + str(e))
                raise
            finally:
                datasetcount = len(plankton_core.Datasets().get_datasets())
                self._write_to_status_bar('Imported datasets: ' + str(datasetcount))
                toolbox_utils.Logging().log_all_accumulated_rows()
                toolbox_utils.Logging().log('Importing datasets done. Number of imported datasets: ' + str(datasetcount))
        #
        except Exception as e:
            debug_info = self.__class__.__name__ + ', row ' + str(sys._getframe().f_lineno)
            toolbox_utils.Logging().error('Exception:
cs.add(input()) print(len(cs)) # es30: Set .discard(), .remove() & .pop() def es30(): n = int(input()) s = set(map(int, input().split())) n_op = int(input()) for _ in range(n_op): line = input() if ' ' in line: op, val = line.split() if op == 'remove': s.remove(int(val)) else: s.discard(int(val)) else: s.pop() print(sum(s)) # es31: Set .union() Operation def es31(): # Enter your code here. Read input from STDIN. Print output to STDOUT input() e = set(input().split()) input() f = set(input().split()) print(len(e|f)) # es32: The Captain's Room def es32(): # Enter your code here. Read input from STDIN. Print output to STDOUT k = input() rooms = list(map(int, input().split())) captain = set(rooms) found = set() for r in list(rooms): if r in found: captain.discard(r) else: found.add(r) print(captain.pop()) # es 33: Set Mutations def es33(): # Enter your code here. Read input from STDIN. Print output to STDOUT input() a = set(map(int,input().split())) n_op = int(input()) for _ in range(n_op): op, _ = input().split() b = set(map(int,input().split())) if op == 'intersection_update': a.intersection_update(b) elif op == 'update': a.update(b) elif op == 'symmetric_difference_update': a.symmetric_difference_update(b) else: a.difference_update(b) print(sum(a)) # es34: Set .symmetric_difference() Operation def es34(): # Enter your code here. Read input from STDIN. Print output to STDOUT input() e = set(input().split()) input() f = set(input().split()) print(len(e^f)) # es35: Set .difference() Operation def es35(): # Enter your code here. Read input from STDIN. Print output to STDOUT input() e = set(input().split()) input() f = set(input().split()) print(len(e-f)) # es36: Set .intersection() Operation def es36(): # Enter your code here. Read input from STDIN. Print output to STDOUT input() e = set(input().split()) input() f = set(input().split()) print(len(e&f)) # es37: Collections.namedtuple() def es37(): # Enter your code here. Read input from STDIN. 
Print output to STDOUT from collections import namedtuple n = int(input()) Studente = namedtuple('Studente', input()) print(sum([float(Studente(*input().split()).MARKS) for _ in range(n)])/n) # es38: DefaultDict Tutorial def es38(): # Enter your code here. Read input from STDIN. Print output to STDOUT from collections import defaultdict n, m = map(int, input().split()) a = defaultdict(set) for i in range(n): w = input() a[w].add(i+1) for _ in range(m): w = input() print(f"{' '.join(map(str, sorted(a[w])))}" if len(a[w])>0 else "-1") # es389: collections.Counter() def es39(): # Enter your code here. Read input from STDIN. Print output to STDOUT from collections import Counter input() warehouse = Counter(map(int, input().split())) c = int(input()) tot = 0 for _ in range(c): size, val = map(int, input().split()) if warehouse[size] > 0: tot += val warehouse[size] -= 1 print(tot) # es40: Check Strict Superset def es40(): # Enter your code here. Read input from STDIN. Print output to STDOUT a = set(map(int, input().split())) n = int(input()) ret = True for _ in range(n): b = set(map(int, input().split())) if len(a)>len(b) and b.intersection(a)==b: continue ret = False break print(ret) # es41: Check Subset def es41(): # Enter your code here. Read input from STDIN. Print output to STDOUT tc = int(input()) for _ in range(tc): input() a = set(map(int, input().split())) input() b = set(map(int, input().split())) print(a.intersection(b)==a) # es42: Company Logo def es42(): import math import os import random import re import sys from collections import Counter if __name__ == '__main__': c = Counter(input()) max_three = [(None, 0), (None, 0), (None, 0)] count = 0 for k,v in sorted(c.items(), key=lambda t: (-1*t[1], ord(t[0]))): print(f"{k} {v}") count += 1 if count >= 3: break # es43: Piling Up! def es43(): # Enter your code here. Read input from STDIN. 
Print output to STDOUT t = int(input()) for _ in range(t): n = int(input()) blocks = list(map(int, input().split())) m = max(blocks[0], blocks[-1]) out = "Yes" for i in range(1, n-1): if blocks[i] > m: out = "No" break print(out) # es44: Collections.Deque() def es44(): # Enter your code here. Read input from STDIN. Print output to STDOUT from collections import deque n = int(input()) d = deque() for _ in range(n): line = input() if ' ' in line: word, val = line.split() if word=='append': d.append(val) else: d.appendleft(val) else: if line=='pop': d.pop() else: d.popleft() print(' '.join(d)) # es45: Word Order def es45(): # Enter your code here. Read input from STDIN. Print output to STDOUT from collections import OrderedDict words = OrderedDict() n = int(input()) for _ in range(n): w = input() words[w] = words.get(w, 0) + 1 print(f"{len(words)}\n{' '.join(map(str, words.values()))}") # es46: Collections.OrderedDict() def es46(): # Enter your code here. Read input from STDIN. Print output to STDOUT from collections import OrderedDict summary = OrderedDict() for _ in range(int(input())): line = input().split() prod = ' '.join(line[:-1]) price = int(line[-1]) summary[prod] = summary.get(prod, 0) + price for k in summary: print(f"{k} {summary[k]}") # es47: Calendar Module def es47(): # Enter your code here. Read input from STDIN. Print output to STDOUT import calendar mm, dd, yyyy = map(int,input().split()) week = ['MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', 'FRIDAY', 'SATURDAY', 'SUNDAY'] print(week[calendar.weekday(yyyy, mm, dd)]) # es48: Time Delta def es48(): import math import os import random import re import sys import datetime # Complete the time_delta function below. 
#Day dd Mon yyyy hh:mm:ss +xxxx def time_delta(t1, t2): dt1 = datetime_object = datetime.datetime.strptime(t1, "%a %d %b %Y %H:%M:%S %z") dt2 = datetime_object = datetime.datetime.strptime(t2, "%a %d %b %Y %H:%M:%S %z").astimezone(dt1.tzinfo) return str(abs(int((dt1-dt2).total_seconds()))) if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') t = int(input()) for t_itr in range(t): t1 = input() t2 = input() delta = time_delta(t1, t2) fptr.write(delta + '\n') fptr.close() # es49: Exceptions def es49(): # Enter your code here. Read input from STDIN. Print output to STDOUT t = int(input()) for _ in range(t): try: a, b = map(int, input().split()) print(a//b) except (ZeroDivisionError, ValueError) as e: print(f"Error Code: {e}") # es50: Zipped! def es50(): # Enter your code here. Read input from STDIN. Print output to STDOUT n, x = map(int, input().split()) marks = [list(map(float,input().split())) for _ in range(x)] zipped = zip(*marks) #print(list(zipped)) avgs = [sum(z)/x for z in zipped] for avg in avgs: print("{:.1f}".format(avg)) # es51: Athlete Sort def es51(): import math import os import random import re import sys if __name__ == '__main__': nm = input().split() n = int(nm[0]) m = int(nm[1]) arr = [] for _ in range(n): arr.append(list(map(int, input().rstrip().split()))) k = int(input()) for l in sorted(arr, key=lambda line: line[k]): print(' '.join(map(str, l))) # es52: ginortS def es52(): # Enter your code here. Read input from STDIN. 
Print output to STDOUT print(''.join(sorted(sorted(input()), key=lambda c: (c.islower(), c.isalpha(), c.isdigit() and int(c)%2==1), reverse=True))) # es53: Map and Lambda functions def es53(): cube = lambda x: x**3# complete the lambda function fib_base = [0, 1] def fibonacci(n): # return a list of fibonacci numbers if n == 0: return [] if n==1: return [fib_base[0]] if n == 2: return fib_base ret = fib_base[:] for i in range(1, n-1): ret.append(ret[i-1]+ret[i]) return ret if __name__ == '__main__': n = int(input()) print(list(map(cube, fibonacci(n)))) # es54: Birthday Candles def es54(): import math import os import random import re import sys # # Complete the 'birthdayCakeCandles' function below. # # The function is expected to return an INTEGER. # The function accepts INTEGER_ARRAY candles as parameter. # from collections import Counter def birthdayCakeCandles(candles): # Write your code here return Counter(candles)[max(candles)] if __name__ == '__main__': fptr = open(os.environ['OUTPUT_PATH'], 'w') candles_count = int(input().strip()) candles = list(map(int, input().rstrip().split())) result = birthdayCakeCandles(candles) fptr.write(str(result) + '\n') fptr.close() # es55: Detecting floating point numbers def es55(): # Enter your code here. Read input from STDIN. Print output to STDOUT import re for _ in range(int(input())): line = input() rm = re.match(r'(\+|-)?\d*\.\d+$', line) print(bool(rm)) # es56: re.Split() def es56(): regex_pattern = r"\.|," # Do not delete 'r'. import re print("\n".join(re.split(regex_pattern, input()))) # es 57: Group(), Groups() & Groupdict() def es57(): # Enter your code here. Read input from STDIN. Print output to STDOUT import re pattern = r".*?(?P<ch>[A-Za-z0-9])(?P=ch)+" match = re.match(pattern, input()) print(match.groupdict().get('ch', ['-1'])[0] if match is not None else '-1') # es58: Re.findall() & Re.finditer() def es58(): # Enter your code here. Read input from STDIN. 
Print output to STDOUT import re reg = r"(?<=[^aeiouAEIOU])([aeiouAEIOU][aeiouAEIOU]+)(?=[^aeiouAEIOU])" mathces = re.findall(reg,input()) if len(mathces) == 0: print(-1) else: for m in mathces: print(m) # es59: Re.start() & Re.end() def es59(): # Enter your code here. Read input from STDIN. Print output to STDOUT import
<filename>projects/project2/multiagent/multiAgents.py<gh_stars>0 # multiAgents.py # <NAME> # aderbiqu # CSE571 Fall 2020 # -------------- # Licensing Information: You are free to use or extend these projects for # educational purposes provided that (1) you do not distribute or publish # solutions, (2) you retain this notice, and (3) you provide clear # attribution to UC Berkeley, including a link to http://ai.berkeley.edu. # # Attribution Information: The Pacman AI projects were developed at UC Berkeley. # The core projects and autograders were primarily created by <NAME> # (<EMAIL>) and <NAME> (<EMAIL>). # Student side autograding was added by <NAME>, <NAME>, and # <NAME> (<EMAIL>). from util import manhattanDistance from game import Directions import random, util from game import Agent class ReflexAgent(Agent): """ A reflex agent chooses an action at each choice point by examining its alternatives via a state evaluation function. The code below is provided as a guide. You are welcome to change it in any way you see fit, so long as you don't touch our method headers. """ def getAction(self, gameState): """ You do not need to change this method, but you're welcome to. getAction chooses among the best options according to the evaluation function. 
Just like in the previous project, getAction takes a GameState and returns some Directions.X for some X in the set {North, South, West, East, Stop} """ # Collect legal moves and successor states legalMoves = gameState.getLegalActions() # Choose one of the best actions scores = [self.evaluationFunction(gameState, action) for action in legalMoves] bestScore = max(scores) bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore] chosenIndex = random.choice(bestIndices) # Pick randomly among the best "Add more of your code here if you want to" return legalMoves[chosenIndex] def evaluationFunction(self, currentGameState, action): """ Evaluation function for pacman reflex agent """ new_gs = currentGameState.generatePacmanSuccessor(action) new_pos = new_gs.getPacmanPosition() # New agent position new_posX, new_posY = new_pos # Broken into coordinates curr_food = currentGameState.getFood() # Where the current food is new_food = new_gs.getFood() # Where the new food is new_ghosts = new_gs.getGhostStates() # New ghost states h,w = len(new_food[0][:]),len(new_food[:]) # dimensions of food grid score_escape_ghost = -1000 # Set weight score_eat_ghost = 1500 # Set weight score_eat_food = 100 # Set weight score_eat_cap = 100 # Set weight if action == "Stop": return -500 # We care more about Pacman being on the move eating dots than staying put. score_ghost = 0.0 # Manage life or death scenarios first for ghost in new_ghosts: # Iterate through ghosts dist = manhattanDistance(new_pos, ghost.getPosition())# Get distance to ghost if ghost.scaredTimer > 0: # Ghost is scared. Time to go chomp chomp if dist == 0: score_ghost += score_eat_ghost # We will eat ghost next turn here. Incentivize! elif dist < 3: score_ghost += score_eat_ghost/2 # Ghost is nearby, eat if possible; make half the effort to catch the ghost else: # Ghost dangerous! spooky if dist < 2: score_ghost += score_escape_ghost # Run away! :o Let's get out of here. Take away points. 
score_food = 0.0 # Prioritize eating food for x in range(w): # loop through food map width for y in range(h): # loop through food map height if(curr_food[x][y]): # If food dist = manhattanDistance(new_pos, (x,y)) # Check the distance if(dist == 0): score_food += score_eat_food # Looks like we'll eat some points! else: score_food += 1.0/(dist * dist * dist) # Inscentivize pacman to move towards food at diminished rate score_cap = 0.0 # Initialize Capsule Score for cap in currentGameState.getCapsules(): # Look for capsules dist = manhattanDistance(new_pos, cap) # Get distance to capsule if(dist == 0): score_cap += score_eat_cap # Looks like we can get the capsule next turn. Incentivize! else: score_cap += 1.0/dist # Diminishing inscentivizaztiony return score_ghost * 1 + score_food * 1 + score_cap * 1 # Return the calculated score def scoreEvaluationFunction(currentGameState): """ This default evaluation function just returns the score of the state. The score is the same one displayed in the Pacman GUI. This evaluation function is meant for use with adversarial search agents (not reflex agents). """ return currentGameState.getScore() class MultiAgentSearchAgent(Agent): """ This class provides some common elements to all of your multi-agent searchers. Any methods defined here will be available to the MinimaxPacmanAgent, AlphaBetaPacmanAgent & ExpectimaxPacmanAgent. You *do not* need to make any changes here, but you can if you want to add functionality to all your adversarial search agents. Please do not remove anything, however. Note: this is an abstract class: one that should not be instantiated. It's only partially specified, and designed to be extended. Agent (game.py) is another abstract class. 
""" def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'): self.index = 0 # Pacman is always agent index 0 self.evaluationFunction = util.lookup(evalFn, globals()) self.depth = int(depth) class MinimaxAgent(MultiAgentSearchAgent): """ Your minimax agent (question 2) """ def minimax(self, gameState, depth, agentIndex): """ Returns the minimax action from the current gameState using self.depth and self.evaluationFunction. """ POS_LARGE = 100000000 # A stupid large positive number NEG_LARGE = POS_LARGE * -1 # A stupid large negative number ba = 'null' # Initialize best action if gameState.isWin() or gameState.isLose(): return self.evaluationFunction(gameState) # Return immediately if game is over if agentIndex == 0: # checks index of agent. is zero bv = NEG_LARGE # Set this to very large neg vue actions = gameState.getLegalActions(agentIndex) # Obtain list of legal actions for state for action in actions: # Iterate through actions n = gameState.generateSuccessor(agentIndex, action) # create successors testing action v = self.minimax(n, depth, agentIndex + 1) # take minimum if (bv < v): # If best value < existing value bv, ba = v, action # Set new best value, action = existing ba, v if (depth == 1): return ba # base case else: # Otherwise bv = POS_LARGE # Initialize bv to large number agents, actions = gameState.getNumAgents(), gameState.getLegalActions(agentIndex) # get agents and actions for action in actions: # Iterate through allowable actions n = gameState.generateSuccessor(agentIndex, action) # Get successor game state node if agentIndex == agents - 1: # Check index if depth == self.depth: v = self.evaluationFunction(n) # If prev, value = evaluationFunction else: v = self.minimax(n, depth+1, 0) # Otherwise, continue down minimax else: v = self.minimax(n, depth, agentIndex+1) # Otherwise, continue to minimax if bv > v: bv, ba = v, action # If best value is better than existing value, set new max return bv # Return best value def getAction(self, 
gameState): """ Returns the minimax action from the current gameState using self.depth and self.evaluationFunction. """ return self.minimax(gameState, 1, 0) class AlphaBetaAgent(MultiAgentSearchAgent): """ Your minimax agent with alpha-beta pruning (question 3) """ def abpruning(self, gameState, depth, agentIndex,a,b): """ Returns the minimax action from the current gameState using self.depth and self.evaluationFunction. """ POS_LARGE = 100000000 NEG_LARGE = POS_LARGE * -1 ba = 'null' # Initialize best action if gameState.isWin() or gameState.isLose(): return self.evaluationFunction(gameState) # Return immediately if game is over if agentIndex == 0: # checks index of agent. is zero bv = NEG_LARGE # Set this to very large neg vue actions = gameState.getLegalActions(agentIndex) # Obtain list of legal actions for state for action in actions: # Iterate through actions n = gameState.generateSuccessor(agentIndex, action) # create successors testing action v = self.abpruning(n, depth, agentIndex + 1, a,b) # take minimum if v > b: return v # Read Minimax for documentation on tree if v > bv: bv, ba = v, action a = max(bv, a) if (depth == 1): return ba else: bv = POS_LARGE agents, actions = gameState.getNumAgents(), gameState.getLegalActions(agentIndex) for action in actions: n = gameState.generateSuccessor(agentIndex, action) if agentIndex == agents - 1: if depth == self.depth: v = self.evaluationFunction(n) else: v = self.abpruning(n, depth+1, 0,a,b) else: v = self.abpruning(n, depth, agentIndex+1,a,b) if v < a: return v if bv > v: bv, ba = v, action b = min(bv, b) return bv def getAction(self, gameState): """ Returns the minimax action using
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._enums import * __all__ = [ 'ObservabilityConfigurationTag', 'ObservabilityConfigurationTraceConfiguration', 'ServiceAuthenticationConfiguration', 'ServiceCodeConfiguration', 'ServiceCodeConfigurationValues', 'ServiceCodeRepository', 'ServiceEgressConfiguration', 'ServiceEncryptionConfiguration', 'ServiceHealthCheckConfiguration', 'ServiceImageConfiguration', 'ServiceImageRepository', 'ServiceInstanceConfiguration', 'ServiceKeyValuePair', 'ServiceNetworkConfiguration', 'ServiceObservabilityConfiguration', 'ServiceSourceCodeVersion', 'ServiceSourceConfiguration', 'ServiceTag', 'VpcConnectorTag', ] @pulumi.output_type class ObservabilityConfigurationTag(dict): def __init__(__self__, *, key: Optional[str] = None, value: Optional[str] = None): if key is not None: pulumi.set(__self__, "key", key) if value is not None: pulumi.set(__self__, "value", value) @property @pulumi.getter def key(self) -> Optional[str]: return pulumi.get(self, "key") @property @pulumi.getter def value(self) -> Optional[str]: return pulumi.get(self, "value") @pulumi.output_type class ObservabilityConfigurationTraceConfiguration(dict): """ Describes the configuration of the tracing feature within an AWS App Runner observability configuration. """ def __init__(__self__, *, vendor: 'ObservabilityConfigurationTraceConfigurationVendor'): """ Describes the configuration of the tracing feature within an AWS App Runner observability configuration. :param 'ObservabilityConfigurationTraceConfigurationVendor' vendor: The implementation provider chosen for tracing App Runner services. 
""" pulumi.set(__self__, "vendor", vendor) @property @pulumi.getter def vendor(self) -> 'ObservabilityConfigurationTraceConfigurationVendor': """ The implementation provider chosen for tracing App Runner services. """ return pulumi.get(self, "vendor") @pulumi.output_type class ServiceAuthenticationConfiguration(dict): """ Authentication Configuration """ @staticmethod def __key_warning(key: str): suggest = None if key == "accessRoleArn": suggest = "access_role_arn" elif key == "connectionArn": suggest = "connection_arn" if suggest: pulumi.log.warn(f"Key '{key}' not found in ServiceAuthenticationConfiguration. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ServiceAuthenticationConfiguration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ServiceAuthenticationConfiguration.__key_warning(key) return super().get(key, default) def __init__(__self__, *, access_role_arn: Optional[str] = None, connection_arn: Optional[str] = None): """ Authentication Configuration :param str access_role_arn: Access Role Arn :param str connection_arn: Connection Arn """ if access_role_arn is not None: pulumi.set(__self__, "access_role_arn", access_role_arn) if connection_arn is not None: pulumi.set(__self__, "connection_arn", connection_arn) @property @pulumi.getter(name="accessRoleArn") def access_role_arn(self) -> Optional[str]: """ Access Role Arn """ return pulumi.get(self, "access_role_arn") @property @pulumi.getter(name="connectionArn") def connection_arn(self) -> Optional[str]: """ Connection Arn """ return pulumi.get(self, "connection_arn") @pulumi.output_type class ServiceCodeConfiguration(dict): """ Code Configuration """ @staticmethod def __key_warning(key: str): suggest = None if key == "configurationSource": suggest = "configuration_source" elif key == "codeConfigurationValues": suggest = "code_configuration_values" if suggest: pulumi.log.warn(f"Key '{key}' not found in 
ServiceCodeConfiguration. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ServiceCodeConfiguration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ServiceCodeConfiguration.__key_warning(key) return super().get(key, default) def __init__(__self__, *, configuration_source: 'ServiceCodeConfigurationConfigurationSource', code_configuration_values: Optional['outputs.ServiceCodeConfigurationValues'] = None): """ Code Configuration :param 'ServiceCodeConfigurationConfigurationSource' configuration_source: Configuration Source """ pulumi.set(__self__, "configuration_source", configuration_source) if code_configuration_values is not None: pulumi.set(__self__, "code_configuration_values", code_configuration_values) @property @pulumi.getter(name="configurationSource") def configuration_source(self) -> 'ServiceCodeConfigurationConfigurationSource': """ Configuration Source """ return pulumi.get(self, "configuration_source") @property @pulumi.getter(name="codeConfigurationValues") def code_configuration_values(self) -> Optional['outputs.ServiceCodeConfigurationValues']: return pulumi.get(self, "code_configuration_values") @pulumi.output_type class ServiceCodeConfigurationValues(dict): """ Code Configuration Values """ @staticmethod def __key_warning(key: str): suggest = None if key == "buildCommand": suggest = "build_command" elif key == "runtimeEnvironmentVariables": suggest = "runtime_environment_variables" elif key == "startCommand": suggest = "start_command" if suggest: pulumi.log.warn(f"Key '{key}' not found in ServiceCodeConfigurationValues. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ServiceCodeConfigurationValues.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ServiceCodeConfigurationValues.__key_warning(key) return super().get(key, default) def __init__(__self__, *, runtime: 'ServiceCodeConfigurationValuesRuntime', build_command: Optional[str] = None, port: Optional[str] = None, runtime_environment_variables: Optional[Sequence['outputs.ServiceKeyValuePair']] = None, start_command: Optional[str] = None): """ Code Configuration Values :param 'ServiceCodeConfigurationValuesRuntime' runtime: Runtime :param str build_command: Build Command :param str port: Port :param str start_command: Start Command """ pulumi.set(__self__, "runtime", runtime) if build_command is not None: pulumi.set(__self__, "build_command", build_command) if port is not None: pulumi.set(__self__, "port", port) if runtime_environment_variables is not None: pulumi.set(__self__, "runtime_environment_variables", runtime_environment_variables) if start_command is not None: pulumi.set(__self__, "start_command", start_command) @property @pulumi.getter def runtime(self) -> 'ServiceCodeConfigurationValuesRuntime': """ Runtime """ return pulumi.get(self, "runtime") @property @pulumi.getter(name="buildCommand") def build_command(self) -> Optional[str]: """ Build Command """ return pulumi.get(self, "build_command") @property @pulumi.getter def port(self) -> Optional[str]: """ Port """ return pulumi.get(self, "port") @property @pulumi.getter(name="runtimeEnvironmentVariables") def runtime_environment_variables(self) -> Optional[Sequence['outputs.ServiceKeyValuePair']]: return pulumi.get(self, "runtime_environment_variables") @property @pulumi.getter(name="startCommand") def start_command(self) -> Optional[str]: """ Start Command """ return pulumi.get(self, "start_command") @pulumi.output_type class ServiceCodeRepository(dict): """ Source 
Code Repository """ @staticmethod def __key_warning(key: str): suggest = None if key == "repositoryUrl": suggest = "repository_url" elif key == "sourceCodeVersion": suggest = "source_code_version" elif key == "codeConfiguration": suggest = "code_configuration" if suggest: pulumi.log.warn(f"Key '{key}' not found in ServiceCodeRepository. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ServiceCodeRepository.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ServiceCodeRepository.__key_warning(key) return super().get(key, default) def __init__(__self__, *, repository_url: str, source_code_version: 'outputs.ServiceSourceCodeVersion', code_configuration: Optional['outputs.ServiceCodeConfiguration'] = None): """ Source Code Repository :param str repository_url: Repository Url """ pulumi.set(__self__, "repository_url", repository_url) pulumi.set(__self__, "source_code_version", source_code_version) if code_configuration is not None: pulumi.set(__self__, "code_configuration", code_configuration) @property @pulumi.getter(name="repositoryUrl") def repository_url(self) -> str: """ Repository Url """ return pulumi.get(self, "repository_url") @property @pulumi.getter(name="sourceCodeVersion") def source_code_version(self) -> 'outputs.ServiceSourceCodeVersion': return pulumi.get(self, "source_code_version") @property @pulumi.getter(name="codeConfiguration") def code_configuration(self) -> Optional['outputs.ServiceCodeConfiguration']: return pulumi.get(self, "code_configuration") @pulumi.output_type class ServiceEgressConfiguration(dict): """ Network egress configuration """ @staticmethod def __key_warning(key: str): suggest = None if key == "egressType": suggest = "egress_type" elif key == "vpcConnectorArn": suggest = "vpc_connector_arn" if suggest: pulumi.log.warn(f"Key '{key}' not found in ServiceEgressConfiguration. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ServiceEgressConfiguration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ServiceEgressConfiguration.__key_warning(key) return super().get(key, default) def __init__(__self__, *, egress_type: 'ServiceEgressConfigurationEgressType', vpc_connector_arn: Optional[str] = None): """ Network egress configuration :param 'ServiceEgressConfigurationEgressType' egress_type: Network egress type. :param str vpc_connector_arn: The Amazon Resource Name (ARN) of the App Runner VpcConnector. """ pulumi.set(__self__, "egress_type", egress_type) if vpc_connector_arn is not None: pulumi.set(__self__, "vpc_connector_arn", vpc_connector_arn) @property @pulumi.getter(name="egressType") def egress_type(self) -> 'ServiceEgressConfigurationEgressType': """ Network egress type. """ return pulumi.get(self, "egress_type") @property @pulumi.getter(name="vpcConnectorArn") def vpc_connector_arn(self) -> Optional[str]: """ The Amazon Resource Name (ARN) of the App Runner VpcConnector. """ return pulumi.get(self, "vpc_connector_arn") @pulumi.output_type class ServiceEncryptionConfiguration(dict): """ Encryption configuration (KMS key) """ @staticmethod def __key_warning(key: str): suggest = None if key == "kmsKey": suggest = "kms_key" if suggest: pulumi.log.warn(f"Key '{key}' not found in ServiceEncryptionConfiguration. 
Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ServiceEncryptionConfiguration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ServiceEncryptionConfiguration.__key_warning(key) return super().get(key, default) def __init__(__self__, *, kms_key: str): """ Encryption configuration (KMS key) :param str kms_key: The KMS Key """ pulumi.set(__self__, "kms_key", kms_key) @property @pulumi.getter(name="kmsKey") def kms_key(self) -> str: """ The KMS Key """ return pulumi.get(self, "kms_key") @pulumi.output_type class ServiceHealthCheckConfiguration(dict): """ Health check configuration """ @staticmethod def __key_warning(key: str): suggest = None if key == "healthyThreshold": suggest = "healthy_threshold" elif key == "unhealthyThreshold": suggest = "unhealthy_threshold" if suggest: pulumi.log.warn(f"Key '{key}' not found in ServiceHealthCheckConfiguration. Access the value via the '{suggest}' property getter instead.") def __getitem__(self, key: str) -> Any: ServiceHealthCheckConfiguration.__key_warning(key) return super().__getitem__(key) def get(self, key: str, default = None) -> Any: ServiceHealthCheckConfiguration.__key_warning(key) return super().get(key, default) def __init__(__self__, *, healthy_threshold: Optional[int] = None, interval: Optional[int] = None, path: Optional[str] = None, protocol: Optional['ServiceHealthCheckConfigurationProtocol'] = None, timeout: Optional[int] = None, unhealthy_threshold: Optional[int] = None): """ Health check configuration :param int healthy_threshold: Health check Healthy Threshold :param int interval: Health check Interval :param str path: Health check Path :param 'ServiceHealthCheckConfigurationProtocol' protocol: Health Check Protocol :param int timeout: Health check Timeout :param int unhealthy_threshold: Health check Unhealthy Threshold """ if healthy_threshold is not None: pulumi.set(__self__, "healthy_threshold", 
healthy_threshold) if interval is not None: pulumi.set(__self__, "interval", interval) if path is not None: pulumi.set(__self__, "path", path) if protocol is not None: pulumi.set(__self__, "protocol", protocol) if timeout is not None: pulumi.set(__self__, "timeout", timeout) if unhealthy_threshold is not None: pulumi.set(__self__, "unhealthy_threshold", unhealthy_threshold) @property @pulumi.getter(name="healthyThreshold") def healthy_threshold(self) -> Optional[int]: """ Health check Healthy Threshold """ return pulumi.get(self, "healthy_threshold") @property @pulumi.getter def interval(self) -> Optional[int]: """ Health check Interval """ return pulumi.get(self, "interval") @property @pulumi.getter def path(self) -> Optional[str]: """ Health check Path """ return pulumi.get(self, "path") @property @pulumi.getter def protocol(self) -> Optional['ServiceHealthCheckConfigurationProtocol']: """ Health Check Protocol """ return pulumi.get(self, "protocol") @property @pulumi.getter def timeout(self) -> Optional[int]: """ Health check Timeout
<gh_stars>10-100 # TALON: Techonology-Agnostic Long Read Analysis Pipeline # Author: <NAME> #------------------------------------------------------------------------------ import edge as Edge import edgetree as EdgeTree import sam_transcript as SamTranscript import transcript as Transcript import pdb class MatchTracker(object): """ Stores information to track full and partial matches to a query transcript. Attributes: n_edges: Number of edges i the query transcript. Full matches must contain exactly this number of edges. edge_matches: List of sets. The set at index i corresponds to the annotation edge IDs that matched to edge[i] transcript_matches: List of sets. The set at index i corresponds to the annotation transcript IDs that matched to edge_matches[i] full_matches: collection of transcript matches that satisfy the conditions to fully match the query (i.e. all edges match) partial_matches: collection of transcript IDs that were partial matches to the query transcript (i.e. some edges match) """ def __init__(self, query_transcript): self.query_transcript = query_transcript self.n_edges = len(query_transcript.exons) + len(query_transcript.introns) self.edge_matches = [[] for i in range(self.n_edges)] self.transcript_matches = [] self.full_matches = [] self.partial_matches = [] def match_all_edges(self, exon_tree, intron_tree): """ Iterates over provided edges and finds edge matches for each of them where possible. These matches are stored in the Tracker object. As we go, keep track of which transcripts could match each edge as well. 
""" query_transcript = self.query_transcript chromosome = query_transcript.chromosome strand = query_transcript.strand all_edges = query_transcript.get_all_edges() n_edges = len(all_edges) for i in range(0, n_edges): if i % 2 == 0: # exon edge_tree = exon_tree else: edge_tree = intron_tree q_edge = all_edges[i] cutoff_5, cutoff_3 = set_cutoffs_permissiveEnds(i, n_edges, strand) matches = get_edge_matches(q_edge, edge_tree, cutoff_5, cutoff_3) # Iterate over matching edges we found and pull transcript ID set # associated with each to add to the transcript match set transcript_matches = set() for match in matches: match_edge_id = match.obj_id transcript_ids = edge_tree.edges[match_edge_id].transcript_ids transcript_matches = transcript_matches.union(transcript_ids) self.edge_matches[i] = matches self.transcript_matches.append(transcript_matches) return def compute_match_sets(self, transcript_dict): """ Use the edge_matches field of the MatchTracker to figure out which transcript matches are full matches and which are partial. The transcript_dict is needed right now to check how many edges the match transcript has. """ if len(self.transcript_matches) != self.n_edges: raise ValueError('Cannot run compute_match_sets until all ' + \ ' query edges have been processed.') tmp_full_matches = set.intersection(*(self.transcript_matches)) full_matches = set() # Screen the full matches to make sure they have the correct number of # edges for match in tmp_full_matches: #if match not in transcript_dict: n_edges_match = len(transcript_dict[match].get_all_edges()) if self.n_edges == n_edges_match: full_matches.add(match) partial_matches = set.union(*(self.transcript_matches))^full_matches self.full_matches = list(full_matches) self.partial_matches = list(partial_matches) return def get_best_edge_matches(self): """ Iterates over each each edge and compares the edge matches in order to find the best one for each. This is done by computing the differences at the 3' and 5' ends. 
It isn't necessary to use different rules by edge context at this point because the edge matches were already selected under those rules. Logic: 1) If diff_3 = diff_5 = 0, that is the best. 2) Next best is diff_3 = 0. 3) Next best is diff_5 = 0. 4) After that, just minimize tot_diff This function returns a list, with each element consisting of an edge object (the best match), or None if there was no match. """ best_matches = [] for i in range(0,self.n_edges): curr_matches = self.edge_matches[i] if len(curr_matches) == 0: best_matches.append(None) continue best_match = None best_diff_3 = 1000000 best_diff_5 = 1000000 best_tot_diff = 1000000 match_tuples = [] for match in curr_matches: diff_5 = match.diff_5 diff_3 = match.diff_3 tot_diff = abs(diff_3) + abs(diff_5) match_diffs = (match.obj_id, diff_5, diff_3, tot_diff) match_tuples.append(match_diffs) # First, sort by 3' dist and secondarily, by 5' dist match_tuples.sort(key=lambda x: (abs(x[2]), abs(x[1]))) if match_tuples[0][2] == 0: best_match = match_tuples[0][0] best_matches.append(best_match) continue # Try sorting by 5' first and 3' second match_tuples.sort(key=lambda x: (abs(x[1]), abs(x[2]))) if match_tuples[0][1] == 0: best_match = match_tuples[0][0] best_matches.append(best_match) continue # That failing, sort by tot_dist match_tuples.sort(key=lambda x: abs(x[3])) best_match = match_tuples[0][0] best_matches.append(best_match) return best_matches def get_best_full_match(self, transcripts): """ Iterates over the full matches in the tracker and determines which one is the best fit to the query transcript. This is done by computing the differences at the 3' and 5' ends for each. Logic: 1) If diff_3 = diff_5 = 0, that is the best. 2) Next best is diff_3 = 0. 3) Next best is diff_5 = 0. 4) After that, just minimize tot_diff If there are no full matches, it returns None. Args: transcripts: dictionary mapping transcript_id -> transcript object. 
Necessary in order to get from the transcript ids stored in the match tracker to the objects themselves. """ query = self.query_transcript if len(self.full_matches) == 0: return None, ["NA", "NA"] best_match = None best_diff_3 = 1000000 best_diff_5 = 1000000 best_tot_diff = 1000000 query_pos = [query.start, query.end] strand = query.strand for match_id in self.full_matches: match = transcripts[match_id] match_pos = [match.start, match.end] diff_5, diff_3 = get_difference(query_pos, match_pos, strand) tot_diff = abs(diff_3) + abs(diff_5) if diff_5 == diff_3 == 0: return match, [diff_5, diff_3] elif diff_3 == 0: best_match = match best_diff_3 = diff_3 best_diff_5 = diff_5 best_tot_diff = tot_diff elif diff_5 == 0: if best_diff_3 != 0: best_match = match best_diff_3 = diff_3 best_diff_5 = diff_5 best_tot_diff = tot_diff elif tot_diff < best_tot_diff: best_match = match best_diff_3 = diff_3 best_diff_5 = diff_5 best_tot_diff = tot_diff return best_match, [best_diff_5, best_diff_3] def get_best_partial_match(self, transcripts): """ Iterates over the partial matches in the tracker and determines which one is the best fit to the query transcript. This is done by first computing the number of matching edges, and using differences at the 3' and 5' ends as a tiebreaker. The purpose of selecting a partial match is mainly to choose the best gene match for the query so that we know which gene to assign the novel transcript to. It is not intended to be the end all be all of transcript similarity since it doesn't consider the configuration of the edge matches. Best match: max number of matching edges Tiebreaker Logic: 1) If diff_3 = diff_5 = 0, that is the best. 2) Next best is diff_3 = 0. 3) Next best is diff_5 = 0. 4) After that, just minimize tot_diff If there are no partial matches, it returns None. Args: transcripts: dictionary mapping transcript_id -> transcript object. Necessary in order to get from the transcript ids stored in the match tracker to the objects themselves. 
""" query = self.query_transcript if len(self.partial_matches) == 0: return None best_match = None best_diff_3 = 1000000 best_diff_5 = 1000000 best_tot_diff = 1000000 best_n_edges = 0 query_pos = [query.start, query.end] strand = query.strand for match_id in self.partial_matches: # Count the number of edges that match between query & partial match match = transcripts[match_id] match_pos = [match.start, match.end] diff_5, diff_3 = get_difference(query_pos, match_pos, strand) tot_diff = abs(diff_3) + abs(diff_5) shared_edges = 0 for i in range(0,self.n_edges): if match_id in self.transcript_matches[i]: shared_edges += 1 if shared_edges > best_n_edges: best_match = match best_n_edges = shared_edges best_diff_3 = diff_3 best_diff_5 = diff_5 elif shared_edges == best_n_edges: # Apply tiebreaker rules tiebreakers = [ shared_edges > best_n_edges, (diff_5 == diff_3 == 0), (diff_3 == 0 and best_diff_3 != 0), (diff_5 == 0 and best_diff_5 != 0 and best_diff_3 != 0), (best_diff_3 != 0 and best_diff_5 != 0 and tot_diff < best_tot_diff) ] if any(tiebreakers): best_match = match best_n_edges = shared_edges best_diff_3 = diff_3 best_diff_5 = diff_5 return best_match class Match(object): """ Describes the relationship of a query interval to an annotated object such as an edge or transcript. Attributes: chromosome: Chromosome of query and match start: start position of query end: end position of query strand: strand of query and match obj_id: identifier associated with the annotated object diff_5: 5' end difference between
SlabEx("CRC Error in PC to Board link") if response != ACK: raise SlabEx("Unknown Board Response") ''' Start command Parameters: code : Code of command ''' def startCommand(code): startTx() startRx() sendByte(ord(code)) ''' Check Magic Check magic code in an opened serial connection returns 1 if board is correct or 0 otherwise ''' def checkMagic(): global ser # First we flush ser.flushInput() startCommand('M') # Magic request sendCRC() # End of command # Check that it responds if not Linux if not linux: time.sleep(0.1) if ser.inWaiting() < 5: return 0 read = getByte() if read != ACK: return 0 # Check all magic bytes for char in magic: # Obtain the byte value of received character read = getByte() # Exit if the magic does not match if read != char: return 0 # Check CRC checkCRC() # If we arrive here, magic is good return 1 ''' Open a serial connection with the given port Includes the Linux especific operations ''' def openSerial(com_port): global ser ser = serial.Serial(port=com_port,baudrate=BAUD_RATE) # Settings required for Linux in the Nucleo boards if linux: ser.setDTR(False) ser.setRTS(True) ''' Detect and open COM port Only returns if the board is detected ''' def detectCom(): global ser global com_port # Check if there is a saved last port try: with open(fprefix + LAST_COM_FILE,'rb') as f: com_port = pickle.load(f) except: pass else: try: message(1,"Trying last valid port " + str(com_port)) openSerial(com_port) except: pass else: if checkMagic(): message(1,"Board detected") return message(1,"Last used port is not valid") message(1,"Searching for port") # Get a list of ports to tests if sys.platform.startswith('win'): ports = ['COM%s' % (i + 1) for i in range(256)] elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'): # this excludes your current terminal "/dev/tty" ports = glob.glob('/dev/tty[A-Za-z]*') elif sys.platform.startswith('darwin'): ports = glob.glob('/dev/tty.*') else: raise SlabEx('Platform not supported in autodetect') # 
Test each port for p in ports: try: message(2,"Testing port " +str(p)) # Port to try openSerial(p) if checkMagic(): message(1,"Board detected at port " + str(p)) com_port = p return ser.close() except (OSError, serial.SerialException): pass raise SlabEx('COM Autodetect Fail') #################### PRIVATE FUNCTIONS ########################### ''' Convert a U16 value in float Truncates to the limits Parameter: value : U16 value between 0 an 65535 Returns float between 0.0 and 9.9998 ''' def u16toFloat(value): if value < 0: value = 0 if value > 65535: value = 65535 return value / 65536.0 ''' Convert a float value to U16 Truncates in the limits Parameter: value : Float value between 0.0 and 9.9998 Returns u16 between ''' def floatToU16(fvalue): value = int(fvalue*65536.0) if value < 0: value = 0 if value > 65535: value = 65535 return value ''' Send a message to screen Parameters: level : 1 Normal for verbose 2 2 Extensive for verbose 3 ''' def message(level,message): # Check level errors if level < 1 or level > 2: raise SlabEx("Internal Error : Bad message level") # Print message depending on level if level < verbose: print(message) ''' Calibrates one reading 0.0...1.0 Returns real ratiometric from read ADC ratiometric If data is outside of the calibration table, it returns the data without any calibration. Parameters: input : Float value to calibrate in table2 domain list1 : List of correct values list2 : List of incorrect values Returns the calibrated value ''' def dc_cal(input,list1,list2): if len(list1) < 2: return input prevx = -1.0 # Lower limit for x (calibrated) prevy = -1.0 # Lower limit for y (uncalibrated) for x,y in zip(list1,list2): if input <= y: # Locate upper limit if prevx == -1: # If out of table... 
return input # ...don't calibrate # Calbrate the value alpha = (input - prevy)/(y - prevy) value = prevx + alpha*(x - prevx) return value else: prevx = x # New lower limit prevy = y # Don't calibrate if we are out of the table return input ''' Get firmware string This command don't use CRC ''' def getFirmwareString(): startCommand('F') cad = "" # Check that it responds if not Linux if not linux: time.sleep(0.2); nchar = ser.inWaiting() if nchar < 1 : return "Unknown" for i in range(0,nchar): car = ser.read() if (not car == '\n') and (not car == '\r'): if PY3: cad = cad + car.decode("utf-8") else: cad = cad + str(car) return cad else: car = ser.read() while not car == '\n': cad = cad + str(car) car = ser.read() ser.read() # Flush '\r' return cad ''' Read one pin name ''' def readPinName(): name = "" while True: car = chr(getByte()) if car == '$': raise SlabEx("Unexpected end of pin list") if car == '|': return name name = name + str(car) ''' Identifies the connected board ''' def getBoardData(): global board_name global ndacs,nadcs,buff_size,max_sample,min_sample,vdd global dacPinList,adcPinList,dioPinList global maxSFfresponse global vref,dac_bits,adc_bits global ndio print("Getting board data") # Get firmware string board_name = getFirmwareString() message(1,"Connected to " + board_name) startCommand('I') # Get board capabilities sendCRC() # End of command # Check response time.sleep(0.1) flush = 0 if not ser.inWaiting() == 25: # Data size + 2 (ACK and CRC) message(1,"Unexpected Board Data") flush = 1 # Check ACK checkACK() # Get data ndacs = getByte() nadcs = getByte() buff_size = getU16() max_sample = getFloat() min_sample = getFloat() vdd = getFloat() maxSFfresponse = getFloat() vref = getFloat() dac_bits = getByte() adc_bits = getByte() ndio = getByte() rState = getByte() if flush: # Flush buffer ser.flushInput() else: checkCRC() if rState: message(1,"Board at reset state") else: message(1,"Board out of reset state") # Get pin list dacPinList=[] adcPinList=[] 
dioPinList=[] startCommand('L') sendCRC() # End of command checkACK() for i in range(0,ndacs): dacPinList.append(readPinName()) for i in range(0,nadcs): adcPinList.append(readPinName()) for i in range(0,ndio): dioPinList.append(readPinName()) # Get the final $ car = chr(getByte()) if car != '$': raise SlabEx("[P] Bad termination in pin list") checkCRC() # Flush buffer ser.flushInput() ''' Ratiometric read of one analog ADC channel Does not perform any calibration Parameters: n :Channel can be a number 1,2,3,4 Returns the ratiometric reading between 0.0 and 1.0 ''' def readChannel(n): if not opened: raise SlabEx("Not connected to board") if n > nadcs: raise SlabEx("Invalid ADC number") acum = 0.0 ''' # This code has been moved to the hardware board for i in range(0,dcroundings): startCommand('A') sendByte(n); sendCRC() # End of command checkACK() value = getU16() checkCRC() fvalue = u16toFloat(value) acum = acum + fvalue fvalue = acum / dcroundings ''' startCommand('A') sendByte(n); sendCRC() # End of command checkACK() value = getU16() checkCRC() fvalue = u16toFloat(value) return fvalue ''' Ratiometric write of one analog DAC channel Does not perform any calibration Parameters: n : DAC to set 1,2 (or 3 if three DACs) value : Value to set 0.0 to 1.0 ''' def writeChannel(n,value): if not opened: raise SlabEx("Not connected to board") if n > ndacs: raise SlabEx("Invalid DAC number") data = ratio2counts(value) startCommand('D') sendByte(n) sendU16(data) sendCRC() checkACK() checkCRC() ''' Convert ratiometric level to counts Generate exception if out of range Parameter: ratio : Value between 0.0 and 1.0 Returns uint 16 ''' def ratio2counts(ratio): if ratio < -0.001: raise SlabEx("Ratiometric value cannot be below 0.0") if ratio > 1.001: raise SlabEx("Ratiometric value cannot be above 1.0") data = floatToU16(ratio) return data ''' Convert voltage value to ratiometric value ''' def voltage2ratio(value): if value < -0.001: raise SlabEx("Voltage value cannot be below 0 V") 
if value > vref*1.001: raise SlabEx("Voltage value cannot be above Vref") return value/vref; ''' Convert voltage value to counts ''' def voltage2counts(value): return ratio2counts(voltage2ratio(value)) ''' Message when SciPy is not loaded and we cannot plot ''' def cannotPlot(exception=False): if not exception: message(1,"") message(1,"SciPy not loaded. Cannot plot") message(1,"") else: raise SlabEx("SciPy not loaded. Cannot plot") ''' Generate an exception if scipy is not loaded ''' def checkSciPy(): if not scipy: raise SlabEx("SciPy not loaded. Cannot execute") #################### HELP CODE ########################### ''' @help@ help(topic) Gives help information Optional parameters: topic
cons2, cons3, cons50, cons127, cons64, ) rule6656 = ReplacementRule(pattern6656, replacement6656) pattern6657 = Pattern( Integral( (x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1)) * acoth(S(1) / tan(x_ * WC("b", S(1)) + WC("a", S(0)))), x_, ), cons2, cons3, cons50, cons127, cons64, ) rule6657 = ReplacementRule(pattern6657, replacement6657) pattern6658 = Pattern( Integral( atanh( WC("c", S(0)) + WC("d", S(1)) * tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons1947, ) rule6658 = ReplacementRule(pattern6658, replacement6658) pattern6659 = Pattern( Integral( acoth( WC("c", S(0)) + WC("d", S(1)) * tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons1947, ) rule6659 = ReplacementRule(pattern6659, replacement6659) pattern6660 = Pattern( Integral( atanh( WC("c", S(0)) + WC("d", S(1)) / tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons1948, ) rule6660 = ReplacementRule(pattern6660, replacement6660) pattern6661 = Pattern( Integral( acoth( WC("c", S(0)) + WC("d", S(1)) / tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons1948, ) rule6661 = ReplacementRule(pattern6661, replacement6661) pattern6662 = Pattern( Integral( atanh( WC("c", S(0)) + WC("d", S(1)) * tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons1949, ) rule6662 = ReplacementRule(pattern6662, replacement6662) pattern6663 = Pattern( Integral( acoth( WC("c", S(0)) + WC("d", S(1)) * tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons1949, ) rule6663 = ReplacementRule(pattern6663, replacement6663) pattern6664 = Pattern( Integral( atanh( WC("c", S(0)) + WC("d", S(1)) / tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons1950, ) rule6664 = ReplacementRule(pattern6664, replacement6664) pattern6665 = Pattern( Integral( acoth( WC("c", S(0)) + WC("d", S(1)) / tan(x_ * WC("b", S(1)) + WC("a", S(0))) 
), x_, ), cons2, cons3, cons8, cons29, cons1950, ) rule6665 = ReplacementRule(pattern6665, replacement6665) pattern6666 = Pattern( Integral( (x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1)) * atanh( WC("c", S(0)) + WC("d", S(1)) * tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons50, cons127, cons64, cons1947, ) rule6666 = ReplacementRule(pattern6666, replacement6666) pattern6667 = Pattern( Integral( (x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1)) * acoth( WC("c", S(0)) + WC("d", S(1)) * tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons50, cons127, cons64, cons1947, ) rule6667 = ReplacementRule(pattern6667, replacement6667) pattern6668 = Pattern( Integral( (x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1)) * atanh( WC("c", S(0)) + WC("d", S(1)) / tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons50, cons127, cons64, cons1948, ) rule6668 = ReplacementRule(pattern6668, replacement6668) pattern6669 = Pattern( Integral( (x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1)) * acoth( WC("c", S(0)) + WC("d", S(1)) / tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons50, cons127, cons64, cons1948, ) rule6669 = ReplacementRule(pattern6669, replacement6669) pattern6670 = Pattern( Integral( (x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1)) * atanh( WC("c", S(0)) + WC("d", S(1)) * tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons50, cons127, cons64, cons1949, ) rule6670 = ReplacementRule(pattern6670, replacement6670) pattern6671 = Pattern( Integral( (x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1)) * acoth( WC("c", S(0)) + WC("d", S(1)) * tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons50, cons127, cons64, cons1949, ) rule6671 = ReplacementRule(pattern6671, replacement6671) pattern6672 = Pattern( Integral( (x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", 
S(1)) * atanh( WC("c", S(0)) + WC("d", S(1)) / tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons50, cons127, cons64, cons1950, ) rule6672 = ReplacementRule(pattern6672, replacement6672) pattern6673 = Pattern( Integral( (x_ * WC("f", S(1)) + WC("e", S(0))) ** WC("m", S(1)) * acoth( WC("c", S(0)) + WC("d", S(1)) / tan(x_ * WC("b", S(1)) + WC("a", S(0))) ), x_, ), cons2, cons3, cons8, cons29, cons50, cons127, cons64, cons1950, ) rule6673 = ReplacementRule(pattern6673, replacement6673) pattern6674 = Pattern(Integral(atanh(u_), x_), cons1232) rule6674 = ReplacementRule(pattern6674, replacement6674) pattern6675 = Pattern(Integral(acoth(u_), x_), cons1232) rule6675 = ReplacementRule(pattern6675, replacement6675) pattern6676 = Pattern( Integral( (x_ * WC("d", S(1)) + WC("c", S(0))) ** WC("m", S(1)) * (WC("a", S(0)) + WC("b", S(1)) * atanh(u_)), x_, ), cons2, cons3, cons8, cons29, cons19, cons68, cons1232, cons1772, cons1849, ) rule6676 = ReplacementRule(pattern6676, replacement6676) pattern6677 = Pattern( Integral( (x_ * WC("d", S(1)) + WC("c", S(0))) ** WC("m", S(1)) * (WC("a", S(0)) + WC("b", S(1)) * acoth(u_)), x_, ), cons2, cons3, cons8, cons29, cons19, cons68, cons1232, cons1772, cons1849, ) rule6677 = ReplacementRule(pattern6677, replacement6677) pattern6678 = Pattern( Integral(v_ * (WC("a", S(0)) + WC("b", S(1)) * atanh(u_)), x_), cons2, cons3, cons1232, cons1951, cons1952, CustomConstraint(With6678), ) rule6678 = ReplacementRule(pattern6678, replacement6678) pattern6679 = Pattern( Integral(v_ * (WC("a", S(0)) + WC("b", S(1)) * acoth(u_)), x_), cons2, cons3, cons1232, cons1953, cons1954, CustomConstraint(With6679), ) rule6679 = ReplacementRule(pattern6679, replacement6679) pattern6680 = Pattern(Integral(asech(x_ * WC("c", S(1))), x_), cons8, cons8) rule6680 = ReplacementRule(pattern6680, replacement6680) pattern6681 = Pattern(Integral(acsch(x_ * WC("c", S(1))), x_), cons8, cons8) rule6681 = ReplacementRule(pattern6681, 
replacement6681) pattern6682 = Pattern( Integral((WC("a", S(0)) + WC("b", S(1)) * asech(x_ * WC("c", S(1)))) ** n_, x_), cons2, cons3, cons8, cons4, cons1581, ) rule6682 = ReplacementRule(pattern6682, replacement6682) pattern6683 = Pattern( Integral((WC("a", S(0)) + WC("b", S(1)) * acsch(x_ * WC("c", S(1)))) ** n_, x_), cons2, cons3, cons8, cons4, cons1581, ) rule6683 = ReplacementRule(pattern6683, replacement6683) pattern6684 = Pattern( Integral((WC("a", S(0)) + WC("b", S(1)) * asech(x_ * WC("c", S(1)))) / x_, x_), cons2, cons3, cons8, cons14, ) rule6684 = ReplacementRule(pattern6684, replacement6684) pattern6685 = Pattern( Integral((WC("a", S(0)) + WC("b", S(1)) * acsch(x_ * WC("c", S(1)))) / x_, x_), cons2, cons3, cons8, cons14, ) rule6685 = ReplacementRule(pattern6685, replacement6685) pattern6686 = Pattern( Integral( x_ ** WC("m", S(1)) * (WC("a", S(0)) + WC("b", S(1)) * asech(x_ * WC("c", S(1)))), x_, ), cons2, cons3, cons8, cons19, cons68, ) rule6686 = ReplacementRule(pattern6686, replacement6686) pattern6687 = Pattern( Integral( x_ ** WC("m", S(1)) * (WC("a", S(0)) + WC("b", S(1)) * acsch(x_ * WC("c", S(1)))), x_, ), cons2, cons3, cons8, cons19, cons68, ) rule6687 = ReplacementRule(pattern6687, replacement6687) pattern6688 = Pattern( Integral( x_ ** WC("m", S(1)) * (WC("a", S(0)) + WC("b", S(1)) * asech(x_ * WC("c", S(1)))) ** n_, x_, ), cons2, cons3, cons8, cons4, cons20, ) rule6688 = ReplacementRule(pattern6688, replacement6688) pattern6689 = Pattern( Integral( x_ ** WC("m", S(1)) * (WC("a", S(0)) + WC("b", S(1)) * acsch(x_ * WC("c", S(1)))) ** n_, x_, ), cons2, cons3, cons8, cons4, cons20, ) rule6689 = ReplacementRule(pattern6689, replacement6689) pattern6690 = Pattern( Integral( x_ ** WC("m", S(1)) * (WC("a", S(0)) + WC("b", S(1)) * asech(x_ * WC("c", S(1)))) ** WC("n", S(1)), x_, ), cons2, cons3, cons8, cons19, cons4, cons1856, ) rule6690 = ReplacementRule(pattern6690, replacement6690) pattern6691 = Pattern( Integral( x_ ** WC("m", S(1)) * (WC("a", 
S(0)) + WC("b", S(1)) * acsch(x_ * WC("c", S(1)))) ** WC("n", S(1)), x_, ), cons2, cons3, cons8, cons19, cons4, cons1856, ) rule6691 = ReplacementRule(pattern6691, replacement6691) pattern6692 = Pattern( Integral( (x_ ** S(2) * WC("e", S(1)) + WC("d", S(0))) ** WC("p", S(1)) * (WC("a", S(0)) + WC("b", S(1)) * asech(x_ * WC("c", S(1)))), x_, ), cons2, cons3, cons8, cons29, cons50, cons1745, ) rule6692 = ReplacementRule(pattern6692, With6692) pattern6693 = Pattern( Integral( (x_ ** S(2) * WC("e", S(1)) + WC("d", S(0))) ** WC("p", S(1)) * (WC("a", S(0)) + WC("b", S(1)) * acsch(x_ * WC("c", S(1)))), x_, ), cons2, cons3, cons8, cons29, cons50, cons1745, ) rule6693 = ReplacementRule(pattern6693, With6693) pattern6694 = Pattern( Integral( (x_ ** S(2) * WC("e", S(1)) + WC("d", S(0))) ** WC("p", S(1)) * (WC("a", S(0)) + WC("b", S(1)) * asech(x_ * WC("c", S(1)))) ** WC("n", S(1)), x_, ), cons2, cons3, cons8, cons29, cons50, cons4, cons40, ) rule6694 = ReplacementRule(pattern6694, replacement6694) pattern6695 = Pattern( Integral( (x_ ** S(2) * WC("e", S(1)) + WC("d", S(0))) ** WC("p", S(1)) * (WC("a", S(0)) + WC("b", S(1)) * acsch(x_ * WC("c", S(1)))) ** WC("n", S(1)), x_, ), cons2, cons3,
in polygonObj.getSiblings()] # These should all be initialized through the .getSiblings method # Process this object and its siblings for polygonStruct in [polygonObj] + polygonSiblingObjs: # Get info on this polygon object's display list displayListLength, displayListPointer = polygonStruct.getValues()[4:6] determinedListLength = globalDatFile.getStructLength( displayListPointer ) / 0x20 # Check the current display list length (when disabling) to make sure the value can be properly switched back if clearDisplayList and displayListLength != determinedListLength: msg( 'Warning! The display list length of ' + polygonStruct.name + ' was not the expected calculated value; ' 'The current value is {}, while it was expected to be {}. '.format( displayListLength, determinedListLength ) + \ "This means if you want to be able to restore this value later, you'll need to write the current value " 'down, so you can restore it manually in the Structural Analysis tab.', 'Unexpected Display List Length' ) if clearDisplayList: globalDatFile.updateStructValue( polygonStruct, 4, 0 ) else: globalDatFile.updateStructValue( polygonStruct, 4, determinedListLength ) updateProgramStatus( 'Polygon Structs Updated' ) def opacityEntryUpdated( newValue ): """ Handles events from the transparency Entry widget, when its value is changed. This just validates the input, and updates the value on the slider. newValue will initially be a string of a float. """ # Validate the input and convert it from a string to a decimal integer try: newValue = float( newValue.replace( '%', '' ) ) except: if newValue == '': newValue = 0 else: return False if newValue < 0 or newValue > 100: return False # Set the slider to the current value newValue = newValue / 10 Gui.modelPropertiesPane.interior.opacityScale.set( newValue ) return True def opacityScaleUpdated( newValue ): """ Handles events from the transparency Slider widget, when its value is changed. 
The slider value ranges between 0 and 10, (so that it's intervals when clicking in the trough jump a decent amount). The purpose of this function is just to update the value in the Entry widget. 'newValue' will initially be a string of a float. """ newValue = round( float(newValue), 2 ) # If this is not the Entry widget causing a change in the value, update it too if Gui.root.focus_get() != Gui.modelPropertiesPane.interior.opacityEntry: # Set the entry widget to the current value (temporarily disable the validation function, so it's not called) Gui.modelPropertiesPane.interior.opacityEntry.configure( validate='none') Gui.modelPropertiesPane.interior.opacityEntry.delete( 0, 'end' ) Gui.modelPropertiesPane.interior.opacityEntry.insert( 0, str(newValue*10) + '%' ) Gui.modelPropertiesPane.interior.opacityEntry.configure( validate='key') def setModelTransparencyLevel(): """ Calling function of the "Set" button under the Model tab's Transparency Control. """ opacityValue = Gui.modelPropertiesPane.interior.opacityScale.get() / 10 # Update the transparency value, and set required flags for this in the Material Struct for materialStruct in Gui.modelPropertiesPane.interior.materialStructs: matColorsOffset = materialStruct.getValues( 'Material_Colors_Pointer' ) matColorsStruct = globalDatFile.initSpecificStruct( hsdStructures.MaterialColorObjDesc, matColorsOffset, materialStruct.offset ) if matColorsStruct: # If the Material Struct doesn't have its colors struct, we probably don't need to worry about modifying it # Change the transparency value within the struct values and file data, and record that the change was made globalDatFile.updateStructValue( matColorsStruct, -2, opacityValue ) if opacityValue < 1.0: # Set the required flags (RENDER_NO_ZUPDATE and RENDER_XLU; i.e. 
bits 29 and 30) globalDatFile.updateFlag( materialStruct, 1, 29, True ) # RENDER_NO_ZUPDATE globalDatFile.updateFlag( materialStruct, 1, 30, True ) # RENDER_XLU # else: # globalDatFile.updateFlag( materialStruct, 1, 29, False ) # globalDatFile.updateFlag( materialStruct, 1, 30, False ) if opacityValue < 1.0: # Set flags required for this in the Joint Struct(s) modifiedJoints = [] # Tracks which joint flags we've already updated, to reduce redundancy # Iterate over the display objects of this texture, get their parent joint objects, and modify their flag for displayObj in Gui.modelPropertiesPane.interior.displayObjects: parentJointOffsets = displayObj.getParents() for parentStructOffset in parentJointOffsets: jointStruct = globalDatFile.initSpecificStruct( hsdStructures.JointObjDesc, parentStructOffset ) if jointStruct and parentStructOffset not in modifiedJoints: # Change the bit within the struct values and file data, and record that the change was made globalDatFile.updateFlag( jointStruct, 1, 19, True ) # XLU #globalDatFile.updateFlag( jointStruct, 1, 28, True ) # ROOT_OPA globalDatFile.updateFlag( jointStruct, 1, 29, True ) # ROOT_XLU modifiedJoints.append( parentStructOffset ) updateProgramStatus( 'Transparency Updated' ) class EnumOptionMenu( ttk.OptionMenu ): def __init__( self, parent, structures, fieldIndex ): self.structures = structures self.fieldIndex = fieldIndex if type( structures ) == list: structure = structures[0] else: # It's just one structure object structure = structures # Get the current value of the enumeration self.currentEnum = structure.getValues()[fieldIndex] self.fieldName = structure.fields[fieldIndex] # Enumerations must be provided by the structure class self.enumerations = structure.enums[self.fieldName] # Retrieves a dictionary of the form key=enumInt, value=enumNameString self.optionNames = self.enumerations.values() defaultOption = self.enumerations[self.currentEnum] textVar = Tk.StringVar() # Required to init the optionmenu 
ttk.OptionMenu.__init__( self, parent, textVar, defaultOption, *self.optionNames, command=self.optionSelected ) def optionSelected( self, newOption ): # Convert the option name to the enumeration value newEnum = self.optionNames.index( newOption ) if newEnum == self.currentEnum: return # Nothing to do here # Replace the data in the file and structure for each one updateName = self.fieldName.replace( '\n', ' ' ) descriptionOfChange = updateName + ' modified in ' + globalDatFile.fileName if type( self.structures ) == list: for structure in self.structures: globalDatFile.updateStructValue( structure, self.fieldIndex, newEnum, descriptionOfChange ) else: # The offsets attribute is just a single struct (the usual case) globalDatFile.updateStructValue( self.structures, self.fieldIndex, newEnum, descriptionOfChange ) updateProgramStatus( updateName + ' Updated' ) def populateTexPropertiesTab( wraplength, width, height, thisImageType ): """ Populates the Properties tab of the DAT Texture Tree interface. At this point, the pane has already been cleared. """ propertiesPane = Gui.texturePropertiesPane.interior texStructs = Gui.modelPropertiesPane.interior.textureStructs matStructs = Gui.modelPropertiesPane.interior.materialStructs pixStructs = [] # Pixel Processing structures vertPadding = 10 # Make sure there are Texture Structs to edit if not texStructs: noTexStructText = ( 'No Texture Structs found; there are no editable properties. If this texture is part of ' 'a material animation, find the default texture for that animation and edit that instead.' ) ttk.Label( propertiesPane, text=noTexStructText, wraplength=wraplength ).pack( pady=vertPadding*2 ) return # Collect offsets that we'll need for the HexEditEntries. # Also, get the flags data, and check if they're the same across all tex structs for this texture. 
matFlagOffsets = [ matStruct.offset+4 for matStruct in matStructs ] texFlagFieldOffsets = [] pixelProcFlagOffsets = [] blendingOffsets = [] wrapModeSoffsets = [] wrapModeToffsets = [] reapeatSoffsets = [] reapeatToffsets = [] matFlagsData = set() texFlagsData = set() pixFlagsData = set() blendingData = set() wrapSData = set() wrapTData = set() repeatSData = set() repeatTData = set() # Populate the above lists with the actual hex data from the file for texStruct in texStructs: texFlagFieldOffsets.append( texStruct.offset + 0x40 ) wrapModeSoffsets.append( texStruct.offset + 0x34 ) wrapModeToffsets.append( texStruct.offset + 0x38 ) reapeatSoffsets.append( texStruct.offset + 0x3C ) reapeatToffsets.append( texStruct.offset + 0x3D ) texFlagsData.add( hexlify(texStruct.data[0x40:0x44]) ) wrapSData.add( hexlify(texStruct.data[0x34:0x38]) ) wrapTData.add( hexlify(texStruct.data[0x38:0x3C]) ) repeatSData.add( hexlify(texStruct.data[0x3C:0x3D]) ) repeatTData.add( hexlify(texStruct.data[0x3D:0x3E]) ) for matStructure in matStructs: matFlagsData.add( hexlify(matStructure.data[0x4:0x8]) ) # Check if there's a valid pointer to a Pixel Proc. 
structure, and get flags from it if there is if matStructure.offset + 0x14 in globalDatFile.pointerOffsets: pixelProcStructOffset = matStructure.getValues()[-1] pixProcStruct = globalDatFile.initSpecificStruct( hsdStructures.PixelProcObjDesc, pixelProcStructOffset, matStructure.offset ) if pixProcStruct: pixStructs.append( pixProcStruct ) pixelProcFlagOffsets.append( pixelProcStructOffset ) pixFlagsData.add( hexlify(globalDatFile.getData(pixelProcStructOffset, 1)) ) blendingOffsets.append( pixelProcStructOffset + 4 ) blendingData.add( ord(globalDatFile.getData(pixelProcStructOffset+4, 1)) ) displayDifferingDataWarning = False # Describe the number of Texture Structs found if len( texStructs ) == 1: texCountLabel = ttk.Label( propertiesPane, text='These controls will edit 1 set of structures.', wraplength=wraplength ) else: texCountLabelText = 'These controls will edit {} sets of structures.\nTo edit individual structs, use the Structural Analysis tab.'.format( len(texStructs) ) texCountLabel = ttk.Label( propertiesPane, text=texCountLabelText, wraplength=wraplength ) texCountLabel.pack( pady=(vertPadding*2, 0) ) ttk.Separator( propertiesPane, orient='horizontal' ).pack( fill='x', padx=24, pady=(vertPadding*2, 0) ) flagsFrame = Tk.Frame( propertiesPane ) if len( pixFlagsData ) > 0: # Add blending options ttk.Label( flagsFrame, text='Blending Mode:' ).grid( column=0, row=0, sticky='e' ) if len( blendingData ) > 1: # Add a 2 px border around the widget using a Frame (the widget itself doesn't support a border) optionMenuBorderFrame = Tk.Frame( flagsFrame, background='orange' ) blendingMenu = EnumOptionMenu( optionMenuBorderFrame, pixStructs, 4 ) blendingMenu.pack( padx=2, pady=2 ) optionMenuBorderFrame.grid( column=1, row=0, columnspan=2, padx=7 ) displayDifferingDataWarning = True else: blendingMenu = EnumOptionMenu( flagsFrame, pixStructs[0], 4 ) blendingMenu.grid( column=1, row=0, columnspan=2, padx=7 ) # Add widgets for the Pixel Processing Flags label, hex edit 
Entry, and Flags 'Decode' button ttk.Label( flagsFrame, text='Pixel Processing Flags:' ).grid( column=0, row=1, sticky='e' ) hexEntry = HexEditEntry( flagsFrame, pixelProcFlagOffsets, 1, 'B', 'Pixel Processing Flags' ) hexEntry.insert( 0, next(iter(pixFlagsData)).upper() ) hexEntry.bind( '<Return>', updateEntryHex ) hexEntry.grid( column=1, row=1, padx=7, pady=1 ) Gui.texturePropertiesPane.flagWidgets.append( hexEntry ) if len( pixFlagsData ) > 1: hexEntry['highlightbackground'] = 'orange' hexEntry['highlightthickness'] = 2 displayDifferingDataWarning = True flagsLabel = ttk.Label( flagsFrame, text='Decode', foreground='#00F', cursor='hand2' ) flagsLabel.grid( column=2, row=1, pady=0 ) flagsLabel.bind( '<1>', lambda e, s=pixStructs[0], fO=pixelProcFlagOffsets: FlagDecoder(s, fO, 0) ) else: ttk.Label( flagsFrame, text='Pixel Processing is not used on this texture.', wraplength=wraplength ).grid( column=0, row=0, columnspan=3, pady=(0, vertPadding) ) # Add widgets for the Render Mode Flags label, hex edit Entry, and Flags 'Decode' button ttk.Label( flagsFrame, text='Render Mode Flags:' ).grid( column=0, row=2, sticky='e' ) hexEntry = HexEditEntry( flagsFrame, matFlagOffsets, 4, 'I', 'Render Mode Flags' ) hexEntry.grid( column=1, row=2, padx=7, pady=1 ) Gui.texturePropertiesPane.flagWidgets.append( hexEntry ) if len( matFlagsData ) == 0: hexEntry['state'] = 'disabled' else: hexEntry.insert( 0, next(iter(matFlagsData)).upper() ) hexEntry.bind( '<Return>', updateEntryHex ) flagsLabel = ttk.Label( flagsFrame, text='Decode',
some stations didnt have a solution (e.g insufficient cross-correlations) it will assign a correction of zero. Meaning than in the worst case scenario the data will stay the same as at the beginning. params ------- method: Can be "lstsq" for performing a least-squares inversion. Or "weighted_lstsq" for doing a weighted least-squares inversion. The weighting is done based on the station separation. Returns ------- None. But the results are saved within the Clock_Drift object. The a's and b's for each station are now stored in the corresponding station objects. """ try: A_dum = self.matrix_A.copy() Tobs_dum = self.df["t_app[s]"].copy() except BaseException: self.build_matrices() A_dum = self.matrix_A.copy() Tobs_dum = self.df["t_app[s]"].copy() if method == "lstsq": print("Calculating a and b for each station.") x, _, rank, _, = np.linalg.lstsq(A_dum, Tobs_dum, rcond=rcond) elif method == "weighted_lstsq": print("Calculating a and b for each station.") print("The weighting is done based on the station separation.") # Now we define the data weighting vector W W = [] for i in range(len(self.matrix_A.index)): station1_code = self.matrix_A.index[i].split("_")[0] station2_code = self.matrix_A.index[i].split("_")[1] station1 = self.get_station(station1_code) station2 = self.get_station(station2_code) # Station separation. # Great circle distance in m using WGS84 ellipsoid. cpl_dist = gps2dist_azimuth( station1.latitude, station1.longitude, station2.latitude, station2.longitude, )[0] W.append(cpl_dist) W = np.array(W) A_dum = self.matrix_A Tobs_dum = self.df["t_app[s]"].copy() # Aw = np.dot(W, A_dum) # * np.sqrt(W[:,np.newaxis]) # Bw = np.dot(W, Tobs_dum) # * np.sqrt(W) W = np.sqrt(np.diag(W)) Aw = np.dot(W, A_dum) Bw = np.dot(Tobs_dum, W) x, _, rank, _, = np.linalg.lstsq(Aw, Bw, rcond=rcond) else: msg = "You have to choose an inversion method that can be 'lstsq'" msg += "for least squares inversion or 'weighted_lstsq' for " msg += "weighted least squares inversion." 
raise Exception(msg) column_names = [] for i in self.matrix_A.columns: column_names.append(i.replace("*t_{N_lps}", "")) sol = pd.DataFrame(columns=column_names) sol.loc["values"] = x # This list will be used to verify that all stations have solutions. stations_with_solutions = [] for value, header in zip(x, column_names): if "a" in header: station_code = header.replace("a (", "").replace(")", "") stations_with_solutions.append(station_code) station = self.get_station(station_code) station.a.append(value) continue if "b" in header: station_code = header.replace("b (", "").replace(")", "") station = self.get_station(station_code) station.b.append(value) self.solution = sol self.iteration += 1 # Make the correction be equal to zero for stations without # measurements. for station in self.stations: if station.needs_correction: if station.code not in stations_with_solutions: station.a.append(0) station.b.append(0) def _solve_eq(self, rcond=None): """ It inverts the matrix and creates a dataframe containing the stations with solutions. If after the inversion some stations didnt have a solution (e.g insufficient cross-correlations) it will assign a correction of zero. Meaning than in the worst case scenario the data will stay the same as at the beginning Returns ------- None. """ try: A_dum = self.matrix_A.copy() Tobs_dum = self.df["t_app[s]"].copy() except BaseException: self.build_matrices() A_dum = self.matrix_A.copy() Tobs_dum = self.df["t_app[s]"].copy() print("Inverting the matrix and calculating a and b for each station.") x = np.linalg.lstsq(A_dum, Tobs_dum, rcond=rcond)[0] column_names = [] for i in self.matrix_A.columns: column_names.append(i.replace("*t_{N_lps}", "")) sol = pd.DataFrame(columns=column_names) sol.loc["values"] = x # This list will be used to verify that all stations have solutions. 
stations_with_solutions = [] for value, header in zip(x, column_names): if "a" in header: station_code = header.replace("a (", "").replace(")", "") stations_with_solutions.append(station_code) station = self.get_station(station_code) station.a.append(value) continue if "b" in header: station_code = header.replace("b (", "").replace(")", "") station = self.get_station(station_code) station.b.append(value) self.solution = sol self.iteration += 1 # Make the correction be equal to zero for stations without # measurements. for station in self.stations: if station.needs_correction: if station.code not in stations_with_solutions: station.a.append(0) station.b.append(0) # 3 # TODO: Add next step iteration to double check that steps are not # repeated. ########################################################################## def calculate_estimated_shift(self, correlation, iteration=-1): """ Method to calculate the estimated time shift using the a and b values of a given station. Parameters ---------- correlation: TYPE DESCRIPTION. iteration: TYPE, optional DESCRIPTION. The default is -1. Returns ------- estimate: TYPE DESCRIPTION. """ station1 = self.get_station(correlation.station1_code) station2 = self.get_station(correlation.station2_code) a_val_sta1, a_val_sta2 = 0, 0 b_val_sta1, b_val_sta2 = 0, 0 if station1.needs_correction: a_val_sta1 = float(station1.a[iteration]) b_val_sta1 = float(station1.b[iteration]) if station2.needs_correction: a_val_sta2 = float(station2.a[iteration]) b_val_sta2 = float(station2.b[iteration]) dt_ins_station1 = a_val_sta1 * correlation.t_N_lps + b_val_sta1 dt_ins_station2 = a_val_sta2 * correlation.t_N_lps + b_val_sta2 estimate = 2 * (dt_ins_station1 - dt_ins_station2) correlation.estimated_shift = estimate return estimate def no_corr_per_avg_date(self, station, days_apart=60, plot=True): """ Function to calculated how many t_apps could be observed of the different cross-correlations for a given station. 
def no_corr_per_avg_date(self, station, days_apart=60, plot=True):
    """
    Count how many cross-correlations of a station share (roughly) the same
    average date.

    Average dates closer together than ``days_apart`` days are merged into
    one cluster, counted under the first date seen for that cluster. The
    counts are stored in ``station.no_corr_per_avg_date`` (dict mapping a
    representative date to the number of correlations). When ``plot`` is
    True, bar charts compare the clustered counts of correlations with and
    without an observed t_app.

    Parameters
    ----------
    station : ocloc.Station
        Station whose correlations are counted.
    days_apart : int, optional
        Dates within this many days are considered the same correlation
        period. The default is 60.
    plot : bool, optional
        Whether to plot the results. The default is True.

    Returns
    -------
    None. Results are saved in ``station.no_corr_per_avg_date``.
    """
    obs_t = obspy.UTCDateTime

    def _cluster(dates):
        # Count dates, merging any date that lies within days_apart of an
        # already-seen representative into that representative's count.
        # (This de-duplicates the loop that was previously written out
        # twice, once per date list.)
        clusters = {dates[0]: 1}
        for date in dates[1:]:
            # Exact repeat of a known date: just bump its count.
            if clusters.get(date) is not None:
                clusters[date] += 1
                continue
            for representative in clusters.keys():
                if abs(obs_t(representative) - obs_t(date)) < days_apart * 86400:
                    clusters[representative] += 1
                    break
            else:  # No nearby representative found: start a new cluster.
                clusters[date] = 1
        return clusters

    avg_dates = sorted(
        dates_of_correlations_with_t_app(self, station.code)
    )
    if len(avg_dates) == 0:
        station.no_corr_per_avg_date = {"No available correlations": 0}
        return

    dates_unique = _cluster(avg_dates)
    station.no_corr_per_avg_date = dates_unique

    # #################################3 Plot results#####################
    if not plot:
        return

    # Cluster the correlations without an observed t_app for comparison.
    avg_dates_witout_t_app = sorted(
        dates_of_correlations_without_t_app(self, station.code)
    )
    if len(avg_dates_witout_t_app) > 0:
        dates_witout_t_app = _cluster(avg_dates_witout_t_app)

        if len(dates_witout_t_app) == len(dates_unique):
            # Same number of clusters on both sides: grouped bars, one axes.
            w = 0.15
            X = np.arange(len(dates_witout_t_app.keys()))
            plt.figure(dpi=300)
            plt.bar(
                X - w,
                dates_unique.values(),
                label="Dates with t_app",
                width=0.25,
            )
            plt.bar(
                X + w,
                dates_witout_t_app.values(),
                label="Dates without t_app",
                width=0.25,
            )
            plt.xticks(X, list(dates_witout_t_app.keys()))
            plt.title(station.code)
            plt.xlabel("Average date +/- " + str(days_apart) + " days")
            plt.ylabel("Number of cross-correlations")
            plt.legend()
            plt.show()
        else:
            # Cluster counts differ: show two stacked subplots instead.
            f, axs = plt.subplots(2, 1, sharey=True, dpi=300)
            X = np.arange(len(dates_unique.keys()))
            axs[0].bar(
                X,
                dates_unique.values(),
                label="Dates with t_app",
                width=0.25,
            )
            axs[0].set_xticks(X, list(dates_unique.keys()))
            X = np.arange(len(dates_witout_t_app.keys()))
            axs[1].bar(
                X,
                dates_witout_t_app.values(),
                label="Dates without t_app",
                width=0.25,
            )
            axs[1].set_xticks(X, list(dates_witout_t_app.keys()))
            axs[0].set_title(station.code)
            for ax in axs:
                ax.set_xlabel("Average date +/- " + str(days_apart) + " days")
                ax.set_ylabel("# of crosscorrelations")
                ax.legend()
            plt.show()
    else:
        # BUG FIX: the original code referenced the never-assigned
        # dates_witout_t_app in this branch, raising NameError. With no
        # t_app-less correlations there is nothing to compare against, so
        # plot only the counts of the dates that do have a t_app.
        X = np.arange(len(dates_unique.keys()))
        plt.figure(dpi=300)
        plt.bar(
            X,
            dates_unique.values(),
            label="Dates with t_app",
            width=0.25,
        )
        plt.xticks(X, list(dates_unique.keys()))
        plt.title(station.code)
        plt.xlabel("Average date +/- " + str(days_apart) + " days")
        plt.ylabel("Number of cross-correlations")
        plt.legend()
        plt.show()
''' Read input from cryspy.in ''' import configparser import os from . import io_stat def readin(): # ---------- read cryspy.in if not os.path.isfile('cryspy.in'): raise IOError('Could not find cryspy.in file') config = configparser.ConfigParser() config.read('cryspy.in') # ---------- basic # ------ global declaration global algo, calc_code, tot_struc, natot global atype, nat, nstage, njob, jobcmd, jobfile # ------ read intput variables algo = config.get('basic', 'algo') if algo not in ['RS', 'BO', 'LAQA', 'EA']: raise NotImplementedError('algo must be RS, BO, LAQA, or EA') calc_code = config.get('basic', 'calc_code') if algo == 'LAQA': if not calc_code == 'VASP': raise NotImplementedError('LAQA: only VASP for now') if calc_code not in ['VASP', 'QE', 'soiap', 'LAMMPS']: raise NotImplementedError( 'calc_code must be VASP, QE, soiap, or LAMMPS') tot_struc = config.getint('basic', 'tot_struc') if tot_struc <= 0: raise ValueError('tot_struc <= 0, check tot_struc') natot = config.getint('basic', 'natot') if natot <= 0: raise ValueError('natot <= 0, check natot') atype = config.get('basic', 'atype') atype = [a for a in atype.split()] # list nat = config.get('basic', 'nat') nat = [int(x) for x in nat.split()] # character --> integer if not len(nat) == len(atype): raise ValueError('not len(nat) == len(atype), check atype and nat') if not sum(nat) == natot: raise ValueError('not sum(nat) == natot, check natot and nat') nstage = config.getint('basic', 'nstage') if nstage <= 0: raise ValueError('nstage <= 0, check nstage') if algo == 'LAQA': if not nstage == 1: raise ValueError('nstage shoud be 1 in LAQA') njob = config.getint('basic', 'njob') if njob <= 0: raise ValueError('njob <= 0, check njob') jobcmd = config.get('basic', 'jobcmd') jobfile = config.get('basic', 'jobfile') # ---------- BO if algo == 'BO': # ------ global declaration global nselect_bo, score, num_rand_basis, cdev, dscrpt global fp_rmin, fp_rmax, fp_npoints, fp_sigma global max_select_bo, manual_select_bo # 
------ read intput variables nselect_bo = config.getint('BO', 'nselect_bo') if nselect_bo <= 0: raise ValueError('nselect_bo <= 0, check nselect_bo') elif tot_struc < nselect_bo: raise ValueError('tot_struc < nselect_bo, check nselect_bo') score = config.get('BO', 'score') if score == 'TS' or score == 'EI' or score == 'PI': pass else: raise ValueError('score must be TS, EI, or PI, check score') try: num_rand_basis = config.getint('BO', 'num_rand_basis') except configparser.NoOptionError: num_rand_basis = 0 try: cdev = config.getfloat('BO', 'cdev') except configparser.NoOptionError: cdev = 0.001 dscrpt = config.get('BO', 'dscrpt') if dscrpt == 'FP': pass else: raise NotImplementedError('Now FP only') # -- parameters for f-fingerprint try: fp_rmin = config.getfloat('BO', 'fp_rmin') except configparser.NoOptionError: fp_rmin = 0.5 try: fp_rmax = config.getfloat('BO', 'fp_rmax') except configparser.NoOptionError: fp_rmax = 5.0 if fp_rmin < 0.0: raise ValueError('fp_rmin < 0, check fp_rmin') if fp_rmax < fp_rmin: raise ValueError('fp_rmax < fp_rmin, check fp_rmin and fp_rmax') try: fp_npoints = config.getint('BO', 'fp_npoints') except configparser.NoOptionError: fp_npoints = 50 if fp_npoints <= 0: raise ValueError('fp_npoints <= 0, check fp_npoints') try: fp_sigma = config.getfloat('BO', 'fp_sigma') except configparser.NoOptionError: fp_sigma = 0.2 if fp_sigma < 0: raise ValueError('fp_sigma < 0, check fp_sigma') # -- BO option try: max_select_bo = config.getint('BO', 'max_select_bo') except configparser.NoOptionError: max_select_bo = 0 if max_select_bo < 0: raise ValueError('max_select_bo must be non-negative int') try: manual_select_bo = config.get('BO', 'manual_select_bo') manual_select_bo = [int(x) for x in manual_select_bo.split()] except configparser.NoOptionError: manual_select_bo = [] if manual_select_bo: for i in manual_select_bo: if not 0 <= i < tot_struc: raise ValueError('manual_select_bo must be' ' non-negative int' ' and less than tot_struc') # ---------- 
LAQA if algo == 'LAQA': # ------ global declaration global nselect_laqa, weight_laqa # ------ read intput variables nselect_laqa = config.getint('LAQA', 'nselect_laqa') try: weight_laqa = config.getfloat('LAQA', 'weight_laqa') except configparser.NoOptionError: weight_laqa = 1.0 # ---------- EA # EA part is written below option section # ---------- lattice # ------ global declaration global minlen, maxlen, dangle, mindist # ------ read intput variables minlen = config.getfloat('lattice', 'minlen') maxlen = config.getfloat('lattice', 'maxlen') dangle = config.getfloat('lattice', 'dangle') if minlen <= 0.0: raise ValueError('minlen must be positive') if minlen > maxlen: raise ValueError('minlen > maxlen') if dangle <= 0.0: raise ValueError('dangle < 0.0, dangle must be positive') mindist = [] for i in range(len(atype)): tmp = config.get('lattice', 'mindist_{}'.format(i+1)) tmp = [float(x) for x in tmp.split()] # character --> float if not len(tmp) == len(atype): raise ValueError('not len(mindist_{}) == len(atype)'.format(i+1)) mindist.append(tmp) # -- check symmetric matrix for i in range(len(mindist)): for j in range(len(mindist)): if i < j: if not mindist[i][j] == mindist[j][i]: raise ValueError('mindist is not symmetric. 
({}, {}) -->' ' {}, ({}, {}) --> {}'.format( i, j, mindist[i][j], j, i, mindist[j][i])) # ---------- global declaration for comman part in calc_code global kppvol, kpt_flag, force_gamma # ---------- VASP if calc_code == 'VASP': # ------ read intput variables kpt_flag = True kppvol = config.get('VASP', 'kppvol') kppvol = [int(x) for x in kppvol.split()] # character --> int if not len(kppvol) == nstage: raise ValueError('not len(kppvol) == nstage,' ' check kppvol and nstage') try: force_gamma = config.getboolean('VASP', 'force_gamma') except configparser.NoOptionError: force_gamma = False # ---------- QE elif calc_code == 'QE': # ------ global declaration global qe_infile, qe_outfile # ------ read intput variables kpt_flag = True qe_infile = config.get('QE', 'qe_infile') qe_outfile = config.get('QE', 'qe_outfile') kppvol = config.get('QE', 'kppvol') kppvol = [int(x) for x in kppvol.split()] # character --> int if not len(kppvol) == nstage: raise ValueError('not len(kppvol) == nstage,' ' check kppvol and nstage') try: force_gamma = config.getboolean('QE', 'force_gamma') except configparser.NoOptionError: force_gamma = False # ---------- soiap elif calc_code == 'soiap': # ------ global declaration global soiap_infile, soiap_outfile, soiap_cif # ------ read intput variables soiap_infile = config.get('soiap', 'soiap_infile') soiap_outfile = config.get('soiap', 'soiap_outfile') soiap_cif = config.get('soiap', 'soiap_cif') kpt_flag = False force_gamma = False # ---------- lammps elif calc_code == 'LAMMPS': # ------ global declaration global lammps_infile, lammps_outfile, lammps_potential, lammps_data # ------ read intput variables lammps_infile = config.get('LAMMPS', 'lammps_infile') lammps_outfile = config.get('LAMMPS', 'lammps_outfile') try: lammps_potential = config.get('LAMMPS', 'lammps_potential') lammps_potential = lammps_potential.split() except configparser.NoOptionError: lammps_potential = None lammps_data = config.get('LAMMPS', 'lammps_data') kpt_flag = False 
force_gamma = False else: raise NotImplementedError('calc_code must be VASP, QE, soiap,' ' or LAMMPS') # ---------- option # ------ global declaration global maxcnt, stop_chkpt, symprec, spgnum global load_struc_flag, stop_next_struc, recalc global append_struc_ea global energy_step_flag, struc_step_flag, fs_step_flag # ------ read intput variables try: maxcnt = config.getint('option', 'maxcnt') except (configparser.NoOptionError, configparser.NoSectionError): maxcnt = 50 try: stop_chkpt = config.getint('option', 'stop_chkpt') except (configparser.NoOptionError, configparser.NoSectionError): stop_chkpt = 0 try: symprec = config.getfloat('option', 'symprec') except (configparser.NoOptionError, configparser.NoSectionError): symprec = 0.001 try: spgnum = config.get('option', 'spgnum') except (configparser.NoOptionError, configparser.NoSectionError): spgnum = 'all' if spgnum == '0': spgnum = 0 elif spgnum == 'all': pass else: spgnum = spglist(spgnum) try: load_struc_flag = config.getboolean('option', 'load_struc_flag') except (configparser.NoOptionError, configparser.NoSectionError): load_struc_flag = False try: stop_next_struc = config.getboolean('option', 'stop_next_struc') except (configparser.NoOptionError, configparser.NoSectionError): stop_next_struc = False try: recalc = config.get('option', 'recalc') recalc = [int(x) for x in recalc.split()] # character --> integer except (configparser.NoOptionError, configparser.NoSectionError): recalc = [] if recalc: for i in recalc: if not 0 <= i < tot_struc: raise ValueError('recalc must be non-negative int' ' and less than tot_struc') try: append_struc_ea = config.getboolean('option', 'append_struc_ea') except (configparser.NoOptionError, configparser.NoSectionError): append_struc_ea = False try: energy_step_flag = config.getboolean('option', 'energy_step_flag') # -- only VASP or QE for now if calc_code in ['soiap', 'LAMMPS']: energy_step_flag = False except (configparser.NoOptionError, configparser.NoSectionError): 
energy_step_flag = False try: struc_step_flag = config.getboolean('option', 'struc_step_flag') # -- only VASP or QE for now if calc_code in ['soiap', 'LAMMPS']: struc_step_flag = False except (configparser.NoOptionError, configparser.NoSectionError): struc_step_flag = False try: fs_step_flag = config.getboolean('option', 'fs_step_flag') # -- only VASP or QE for now if calc_code in ['soiap', 'LAMMPS']: fs_step_flag = False except (configparser.NoOptionError, configparser.NoSectionError): fs_step_flag = False if algo == 'LAQA': fs_step_flag = True # ---------- EA if algo == 'EA' or append_struc_ea: # ------ global declaration global n_pop, n_crsov, n_perm, n_strain, n_rand, n_elite global fit_reverse, n_fittest global slct_func, t_size, a_rlt, b_rlt global crs_lat, crs_func, nat_diff_tole, ntimes, sigma_st, maxcnt_ea global maxgen_ea # global restart_gen # ------ read intput variables # -- number of structures n_pop = config.getint('EA', 'n_pop') if n_pop <= 0: raise ValueError('n_pop must be positive int') n_crsov = config.getint('EA', 'n_crsov') if n_crsov < 0: raise ValueError('n_crsov must be zero or positive int') n_perm = config.getint('EA', 'n_perm') if n_perm < 0: raise ValueError('n_perm must be zero or positive int')
import numpy as np import numpy.linalg as la import torch import torch.nn.functional as F import torchvision import json import time from matplotlib import pyplot as plt #from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm, trange from lietorch import SE3, LieGroupParameter from scipy.spatial.transform import Rotation as R import cv2 from nerf import (get_ray_bundle, run_one_iter_of_nerf) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def mahalanobis(u, v, cov): delta = u - v m = torch.dot(delta, torch.matmul(torch.inverse(cov), delta)) return m rot_x = lambda phi: torch.tensor([ [1., 0., 0.], [0., torch.cos(phi), -torch.sin(phi)], [0., torch.sin(phi), torch.cos(phi)]], dtype=torch.float32) rot_x_np = lambda phi: np.array([ [1., 0., 0.], [0., np.cos(phi), -np.sin(phi)], [0., np.sin(phi), np.cos(phi)]], dtype=np.float32) rot_psi = lambda phi: np.array([ [1, 0, 0, 0], [0, np.cos(phi), -np.sin(phi), 0], [0, np.sin(phi), np.cos(phi), 0], [0, 0, 0, 1]]) rot_theta = lambda th: np.array([ [np.cos(th), 0, -np.sin(th), 0], [0, 1, 0, 0], [np.sin(th), 0, np.cos(th), 0], [0, 0, 0, 1]]) rot_phi = lambda psi: np.array([ [np.cos(psi), -np.sin(psi), 0, 0], [np.sin(psi), np.cos(psi), 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]) trans_t = lambda t: np.array([ [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, t], [0, 0, 0, 1]]) def SE3_to_trans_and_quat(data): rot = data[:3, :3] trans = data[:3, 3] r = R.from_matrix(rot) quat = r.as_quat() return np.concatenate([trans, quat]) def find_POI(img_rgb, DEBUG=False): # img - RGB image in range 0...255 img = np.copy(img_rgb) #img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY) #sift = cv2.SIFT_create() #keypoints = sift.detect(img, None) # Initiate ORB detector orb = cv2.ORB_create() # find the keypoints with ORB keypoints2 = orb.detect(img,None) #if DEBUG: # img = cv2.drawKeypoints(img_gray, keypoints, img) #keypoints = keypoints + keypoints2 keypoints = keypoints2 xy = [keypoint.pt for keypoint in keypoints] xy = 
def nearestPD(A):
    """Return the nearest positive-definite matrix to the input matrix A.

    Python/Numpy port of the `nearestSPD` MATLAB routine, which implements
    Higham's 1988 method ("Computing a nearest symmetric positive
    semidefinite matrix", https://doi.org/10.1016/0024-3795(88)90223-6):
    symmetrize, take the positive part via the polar factor from an SVD,
    then, if necessary, nudge the spectrum upward until a Cholesky
    factorization succeeds.
    """
    # Symmetrize, then form the symmetric polar factor from the SVD.
    sym = (A + A.T) / 2
    _, sing_vals, Vt = la.svd(sym)
    polar = Vt.T @ np.diag(sing_vals) @ Vt

    # Average with the polar factor and re-symmetrize to kill round-off.
    candidate = (sym + polar) / 2
    candidate = (candidate + candidate.T) / 2

    if isPD(candidate):
        return candidate

    # Cholesky can still fail on an exact zero eigenvalue; bump the diagonal
    # by a growing multiple of the matrix's floating-point spacing until it
    # succeeds. This spacing is far larger than MATLAB's eps(mineig), but in
    # practice both choices converge.
    eps = np.spacing(la.norm(A))
    identity = np.eye(A.shape[0])
    k = 1
    while not isPD(candidate):
        smallest_eig = np.min(np.real(la.eigvals(candidate)))
        candidate += identity * (-smallest_eig * k ** 2 + eps)
        k += 1

    return candidate


def isPD(B):
    """Return True when B is positive-definite (Cholesky succeeds)."""
    try:
        la.cholesky(B)
    except la.LinAlgError:
        return False
    return True
image POI = find_POI(obs_img_noised, False) # xy pixel coordinates of points of interest (N x 2) ### IF FEATURE DETECTION CANT FIND POINTS, RETURN INITIAL if len(POI.shape) == 1: self.pixel_losses[f'{self.iteration}'] = [] self.dyn_losses[f'{self.iteration}'] = [] self.states[f'{self.iteration}'] = [] return start_state.clone().detach(), False obs_img_noised = (np.array(obs_img_noised) / 255.).astype(np.float32) obs_img_noised = torch.tensor(obs_img_noised).cuda() #sensor_image[POI[:, 1], POI[:, 0]] = [0, 255, 0] # create meshgrid from the observed image coords = np.asarray(np.stack(np.meshgrid(np.linspace(0, W_obs - 1, W_obs), np.linspace(0, H_obs - 1, H_obs)), -1), dtype=int) # create sampling mask for interest region sampling strategy interest_regions = np.zeros((H_obs, W_obs, ), dtype=np.uint8) interest_regions[POI[:,1], POI[:,0]] = 1 I = self.dil_iter interest_regions = cv2.dilate(interest_regions, np.ones((self.kernel_size, self.kernel_size), np.uint8), iterations=I) interest_regions = np.array(interest_regions, dtype=bool) interest_regions = coords[interest_regions] # not_POI contains all points except of POI coords = coords.reshape(H_obs * W_obs, 2) #not_POI = set(tuple(point) for point in coords) - set(tuple(point) for point in POI) #not_POI = np.array([list(point) for point in not_POI]).astype(int) #Break up state into components start_trans = start_state[:3].reshape((3, 1)) ### IMPORTANT: ROTATION MATRIX IS ROTATED BY SOME AMOUNT TO ACCOUNT FOR CAMERA ORIENTATION start_rot = rot_x_np(np.pi/2) @ start_state[6:15].reshape((3, 3)) start_pose = np.concatenate((start_rot, start_trans), axis=1) start_vel = torch.tensor(start_state[3:6]).cuda() start_omega = torch.tensor(start_state[15:]).cuda() # Create pose transformation model start_pose = SE3_to_trans_and_quat(start_pose) starting_pose = SE3(torch.from_numpy(start_pose).float().cuda()) starting_pose = LieGroupParameter(starting_pose).cuda() #print('Start pose', start_pose, start_vel, start_omega) # Add 
velocities, omegas, and pose object to optimizer if self.is_filter is True: optimizer = torch.optim.Adam(params=[starting_pose, start_vel, start_omega], lr=self.lrate, betas=(0.9, 0.999)) else: optimizer = torch.optim.Adam(params=[starting_pose], lr=self.lrate, betas=(0.9, 0.999)) # calculate angles and translation of the observed image's pose if b_print_comparison_metrics: phi_ref = np.arctan2(obs_img_pose[1,0], obs_img_pose[0,0])*180/np.pi theta_ref = np.arctan2(-obs_img_pose[2, 0], np.sqrt(obs_img_pose[2, 1]**2 + obs_img_pose[2, 2]**2))*180/np.pi psi_ref = np.arctan2(obs_img_pose[2, 1], obs_img_pose[2, 2])*180/np.pi translation_ref = np.sqrt(obs_img_pose[0,3]**2 + obs_img_pose[1,3]**2 + obs_img_pose[2,3]**2) #Store data pix_losses = [] dyn_losses = [] states = [] for k in range(self.iter): model_coarse.eval() if model_fine: model_fine.eval() rgb_coarse, rgb_fine = None, None # TODO: IMPLEMENT INERF WITH USE_CACHED DATSET!!! rand_inds = np.random.choice(interest_regions.shape[0], size=self.batch_size, replace=False) batch = interest_regions[rand_inds] target_s = obs_img_noised[batch[:, 1], batch[:, 0]] #target_s = torch.Tensor(target_s).to(device) pose = starting_pose.retr().matrix()[:3, :4] ray_origins, ray_directions = get_ray_bundle(self.H, self.W, self.focal, pose) # (H, W, 3), (H, W, 3) #with torch.no_grad(): # r_o, r_d = ray_origins, ray_directions #print('Ray origins cuda', ray_origins.is_cuda) ray_origins = ray_origins[batch[:, 1], batch[:, 0], :] ray_directions = ray_directions[batch[:, 1], batch[:, 0], :] then = time.time() rgb_coarse, _, _, rgb_fine, _, _ = run_one_iter_of_nerf( self.H, self.W, self.focal, model_coarse, model_fine, ray_origins, ray_directions, cfg, mode="validation", encode_position_fn=encode_position_fn, encode_direction_fn=encode_direction_fn, ) #target_ray_values = target_s #print(time.time() - then) ### OUTLIER REJECTION threshold = self.reject_thresh with torch.no_grad(): coarse_sample_loss = torch.sum(torch.abs(rgb_coarse[..., 
:3] - target_s[..., :3]), 1)/3 fine_sample_loss = torch.sum(torch.abs(rgb_fine[..., :3] - target_s[..., :3]), 1)/3 csl = F.relu(-(coarse_sample_loss-threshold)) fsl = F.relu(-(fine_sample_loss-threshold)) coarse_ind = torch.nonzero(csl) fine_ind = torch.nonzero(fsl) ### ---------------- ### coarse_loss = torch.nn.functional.mse_loss( rgb_coarse[coarse_ind, :3], target_s[coarse_ind, :3] ) fine_loss = None if rgb_fine is not None: fine_loss = torch.nn.functional.mse_loss( rgb_fine[fine_ind, :3], target_s[fine_ind, :3] ) loss = coarse_loss + (fine_loss if fine_loss is not None else 0.0) pix_losses.append(loss.clone().cpu().detach().numpy().tolist()) #Add dynamics loss state = torch.cat((pose[:3, 3], start_vel, (rot_x(torch.tensor(-np.pi/2)) @ pose[:3, :3]).reshape(-1), start_omega), dim=0) dyn_loss = mahalanobis(state, torch.tensor(start_state), sig) states.append(state.clone().cpu().detach().numpy().tolist()) dyn_losses.append(dyn_loss.clone().cpu().detach().numpy().tolist()) if self.is_filter is True: loss += dyn_loss loss.backward() optimizer.step() optimizer.zero_grad() new_lrate = self.lrate * (0.8 ** ((k + 1) / 100)) #new_lrate = extra_arg_dict['lrate'] * np.exp(-(k)/1000) for param_group in optimizer.param_groups: param_group['lr'] = new_lrate # print results periodically if b_print_comparison_metrics and ((k + 1) % 20 == 0 or k == 0): print('Step: ', k) print('Loss: ', loss) with torch.no_grad(): pose_dummy = starting_pose.retr().matrix().clone().cpu().detach().numpy() # calculate angles and translation of the optimized pose phi = np.arctan2(pose_dummy[1, 0], pose_dummy[0, 0]) * 180 / np.pi theta = np.arctan2(-pose_dummy[2, 0], np.sqrt(pose_dummy[2, 1] ** 2 + pose_dummy[2, 2] ** 2)) * 180 / np.pi psi = np.arctan2(pose_dummy[2, 1], pose_dummy[2, 2]) * 180 / np.pi translation = np.sqrt(pose_dummy[0,3]**2 + pose_dummy[1,3]**2 + pose_dummy[2,3]**2) #translation
    # overrides
    def hook_internal(self, route: "Route") -> None:
        """Intentional no-op: this driver keeps no per-route state.

        Diagnostics reacts to alarm/metric signals (validated in
        ``hook_internal_signal``); plain internal data routes carry nothing
        this driver needs to track.
        """
        pass
Problematic metric: {metric_signal!r}" ) metric_periods[metric_signal.alias] = metric_signal.domain_spec.dimension_filter_spec.find_dimension_by_name( MetricDimension.PERIOD ).value if len(set(metric_periods.values())) > 1: raise ValueError( f"All metrics in the alarm {signal.alias!r} should have the same period!\n" f"Metric periods: \n{metric_periods!r}" ) for expression in alarm_params.metric_expressions: if not expression.alias or not expression.alias[0].islower(): raise ValueError( f"Alias {expression.alias!r} for intermediate metric expression of alarm {signal.alias!r} should not be empty and its " f"first character must be lower case. Problematic expression: {alarm_params.target_metric_or_expression!r}" ) @classmethod def _generate_cw_alarm_to_sns_publish_permissin_label(cls, alarm_acc_id: str, alarm_region_id: str, alarm_name: str): return generate_statement_id(f"{alarm_acc_id}_{alarm_region_id}_{alarm_name}_external_alarm_publish") # overrides def _process_raw_external_alarms(self, new_signals: Set[Signal], current_signals: Set[Signal]) -> None: """Register own topic as an action to this alarm from the same or an different account. Skips upstream external alarms. Processor should be able to register them directly. We are avoiding an extra hop in connection from upstream down to the Processor. And that direct connection is guaranteed to work. Normally Processor can register to both but: - Processor registration to an alarm is not guaranteed (ex: Lambda registration for CW composite alarms) - And there is too much alarming tech related details involved for which Diagnostics impl would be the right place for encapsulation. So for external (non IF-governed) Alarms, Diagnostics driver is taking the responsibility. 
""" processed_resources: Set[str] = {s.resource_access_spec.path_format for s in current_signals} new_processed_resources: Set[str] = {s.resource_access_spec.path_format for s in new_signals} resources_to_be_deleted = processed_resources - new_processed_resources # 1 - REMOVALS for ext_signal in current_signals: removed_alarm = ext_signal.resource_access_spec.path_format if removed_alarm in resources_to_be_deleted: cw = self._get_session_for(ext_signal, self._dev_platform).client( service_name="cloudwatch", region_name=ext_signal.resource_access_spec.region_id ) # 1- Remove this SNS from external alarm's action lists try: # best effort (auto-disconnection) action against external alarm (not owned by current plat/app) cw_alarm_resp = exponential_retry(cw.describe_alarms, {}, AlarmNames=[ext_signal.resource_access_spec.name]) alarm_type, alarms = next(iter(cw_alarm_resp.items())) alarm = alarms[0] # remove self._topic_arn from action lists. alarm_actions = alarm["AlarmActions"] ok_actions = alarm["OKActions"] insuffficient_data_actions = alarm["InsufficientDataActions"] changed = False if self._topic_arn in alarm_actions: alarm_actions.remove(self._topic_arn) changed = True if self._topic_arn in insuffficient_data_actions: insuffficient_data_actions.remove(self._topic_arn) changed = True if self._topic_arn in ok_actions: ok_actions.remove(self._topic_arn) changed = True if changed: if alarm_type == "CompositeAlarms": exponential_retry(cw.put_composite_alarm, {}, **alarm) elif alarm_type == "MetricAlarms": exponential_retry(cw.put_metric_alarm, {}, **alarm) except Exception as err: # swallow so that the following actions can be taken by the developer to unblock the app. module_logger.critical( f"RheocerOS could not update alarm actions for the removal of SNS: {self._topic_arn}" f" due to {str(err)}. Please manually make sure that external alarm {ext_signal.resource_access_spec.arn}" f" does not have this topic in its alarm actions." 
) # 2- Remove external alarm from SNS policy (remove publish permission) permission_label: str = self._generate_cw_alarm_to_sns_publish_permissin_label( ext_signal.resource_access_spec.account_id, ext_signal.resource_access_spec.region_id, ext_signal.name ) try: exponential_retry( self._sns.remove_permission, {"InternalErrorException"}, TopicArn=self._topic_arn, Label=permission_label ) except self._sns.exceptions.NotFoundException: pass # 2 - SURVIVORS + NEW ONES (always doing the check/update even against the existing [common] signals) for ext_signal in new_signals: # 1- add permission to publish to this SNS topic # TODO replace the following (acc based) permission control code with a more granular policy based version. # similar to what we do in '_setup_event_channel' using 'self._topic_root_policy', and then # calling 'self._sns.set_topic_attributes' # AWSAccountId based permission won't work since we need to authorize the service: # "Principal": {"Service": "cloudwatch.amazonaws.com"}, # again similar to what we do in _setup_event_channel. # So in stead of manipulating permissions in this method, we can move extract the policy update logic from # _setup_event_channel and add external (processed) signals to the policy at the end of this method. permission_label: str = self._generate_cw_alarm_to_sns_publish_permissin_label( ext_signal.resource_access_spec.account_id, ext_signal.resource_access_spec.region_id, ext_signal.name ) try: exponential_retry(self._sns.remove_permission, {"InternalErrorException"}, TopicArn=self._topic_arn, Label=permission_label) except self._sns.exceptions.NotFoundException: pass exponential_retry( self._sns.add_permission, {"InternalErrorException"}, TopicArn=self._topic_arn, Label=permission_label, AWSAccountId=[ext_signal.resource_access_spec.account_id], ActionName=["Publish"], ) # 2- register this topic to external raw alarm's lists. 
cw = self._get_session_for(ext_signal, self._dev_platform).client( service_name="cloudwatch", region_name=ext_signal.resource_access_spec.region_id ) try: cw_alarm_resp = exponential_retry(cw.describe_alarms, {}, AlarmNames=[ext_signal.resource_access_spec.name]) alarm_type, alarms = next(iter(cw_alarm_resp.items())) alarm = alarms[0] # add self._topic_arn to action lists. alarm_actions = alarm["AlarmActions"] ok_actions = alarm["OKActions"] insuffficient_data_actions = alarm["InsufficientDataActions"] changed = False if self._topic_arn not in alarm_actions: alarm_actions.append(self._topic_arn) changed = True if self._topic_arn not in insuffficient_data_actions: insuffficient_data_actions.append(self._topic_arn) changed = True if self._topic_arn not in ok_actions: ok_actions.append(self._topic_arn) changed = True if changed: if alarm_type == "CompositeAlarms": # - add self._topic_arn to action lists. exponential_retry(put_composite_alarm, {}, cw, **alarm) elif alarm_type == "MetricAlarms": exponential_retry(cw.put_metric_alarm, {}, **alarm) except Exception as err: # swallow so that the following actions can be taken by the developer to unblock the app. module_logger.critical( f"RheocerOS could update alarm actions for alarm {ext_signal.resource_access_spec.arn} to " f"register SNS topic {self._topic_arn} due to {str(err)}. Please make sure that this external " f"alarm contains this topic in its alarm actions." ) # overrides def _process_internal(self, new_routes: Set[Route], current_routes: Set[Route]) -> None: # for connections, it is expected that other constructs should send a conn request # to this Diagnostics impl (see _process_construct_connections). # according to the needs of their underlying (AWS) resources. # # But for this Diagnostics impl, we don't need to track internal routes. 
# pass def _remove_internal_alarm(self, int_signal: Signal, is_terminating=False) -> None: # 1- Remove the alarm from account internal_alarm_name = self.get_unique_internal_alarm_name(int_signal.resource_access_spec.alarm_id) try: exponential_retry(self._cw.delete_alarms, {}, AlarmNames=[internal_alarm_name]) except ClientError as error: if error.response["Error"]["Code"] not in ["ResourceNotFound", "ResourceNotFoundException"]: raise # 2- Remove external alarm from SNS policy (remove publish permission) permission_label: str = self._generate_cw_alarm_to_sns_publish_permissin_label(self.account_id, self.region, internal_alarm_name) if not is_terminating: # OPTIMIZATION: ignore the state in the topic since it must also be going down as part of the termination. try: exponential_retry(self._sns.remove_permission, {"InternalErrorException"}, TopicArn=self._topic_arn, Label=permission_label) except self._sns.exceptions.NotFoundException: pass # overrides def _process_internal_alarms( self, new_alarms: Set[Signal], current_alarms: Set[Signal], resource_paths_to_be_deleted: Set[str] ) -> None: # no need to check alarm inputs type (metric for metric alarms and alarms for composite alarms) # and their materialization. Application layer makes sure that compilation/front-end takes care of # necessary validations. # Logic: # - create/update alarms as CW metric alarms or composite alarms. 
# - register self._topic_arn to their action lists # 1 - REMOVALS # 1.1- Remove Composite alarms first (hard condition in case they might depend on some of the metric alarms # which are also to be deleted) for int_signal in current_alarms: if ( int_signal.resource_access_spec.path_format in resource_paths_to_be_deleted and int_signal.resource_access_spec.source == SignalSourceType.INTERNAL_COMPOSITE_ALARM ): self._remove_internal_alarm(int_signal) # 1.2- Remove metric alarms for int_signal in current_alarms: if ( int_signal.resource_access_spec.path_format in resource_paths_to_be_deleted and int_signal.resource_access_spec.source == SignalSourceType.INTERNAL_ALARM ): self._remove_internal_alarm(int_signal) # 2 - SURVIVORS + NEW ONES (always doing the check/update even against the existing [common] signals) for int_signal in new_alarms: internal_alarm_name = self.get_unique_internal_alarm_name(int_signal.resource_access_spec.alarm_id) # 1- add permission to publish to this SNS topic # TODO replace the following (acc based) permission control code with a more granular policy based version. # similar
def rogue_extract(rpc_connection, game_txid, pubkey):
    """Call the rogue CC ``extract`` RPC for the given game txid and pubkey."""
    # cclib expects a quoted, %22-escaped JSON array literal.
    payload = '"[%22{}%22,%22{}%22]"'.format(game_txid, pubkey)
    return rpc_connection.cclib("extract", "17", payload)


def rogue_keystrokes(rpc_connection, game_txid, keystroke):
    """Broadcast recorded ``keystrokes`` for a game via the rogue CC RPC."""
    payload = '"[%22{}%22,%22{}%22]"'.format(game_txid, keystroke)
    return rpc_connection.cclib("keystrokes", "17", payload)
is_refresh == "E": print("\n") break else: print("\nPlease choose R or E\n") def rogue_newgame_singleplayer(rpc_connection): try: new_game_txid = rpc_connection.cclib("newgame", "17", "[1]")["txid"] print("New singleplayer training game succesfully created. txid: " + new_game_txid) while True: mempool = rpc_connection.getrawmempool() if new_game_txid in mempool: print(colorize("Waiting for game transaction to be mined", "blue")) time.sleep(5) else: print(colorize("Game transaction is mined", "green")) break players_list = rogue_players_list(rpc_connection) if len(players_list["playerdata"]) > 0: print_players_list(rpc_connection) while True: is_choice_needed = input("Do you want to choose a player for this game? [y/n] ") if is_choice_needed == "y": player_txid = input("Please input player txid: ") newgame_regisration_txid = rogue_game_register(rpc_connection, new_game_txid, player_txid)["txid"] break elif is_choice_needed == "n": set_warriors_name(rpc_connection) newgame_regisration_txid = rogue_game_register(rpc_connection, new_game_txid)["txid"] break else: print("Please choose y or n !") else: print("No players available to select") input("Press [Enter] to continue...") newgame_regisration_txid = rogue_game_register(rpc_connection, new_game_txid)["txid"] while True: mempool = rpc_connection.getrawmempool() if newgame_regisration_txid in mempool: print(colorize("Waiting for registration transaction to be mined", "blue")) time.sleep(5) else: print(colorize("Registration transaction is mined", "green")) break game_info = rogue_game_info(rpc_connection, new_game_txid) start_time = time.time() while True: subprocess.call(["cc/rogue/rogue", str(game_info["seed"]), str(game_info["gametxid"])]) time_elapsed = time.time() - start_time if time_elapsed > 1: break else: print("Game less than 1 second. 
Trying to start again") time.sleep(1) game_end_height = int(rpc_connection.getinfo()["blocks"]) while True: current_height = int(rpc_connection.getinfo()["blocks"]) height_difference = current_height - game_end_height if height_difference == 0: print(current_height) print(game_end_height) print(colorize("Waiting for next block before bailout", "blue")) time.sleep(5) else: break #print("\nKeystrokes of this game:\n") #time.sleep(0.5) while True: keystrokes_rpc_responses = find_game_keystrokes_in_log(new_game_txid)[1::2] if len(keystrokes_rpc_responses) < 1: print("No keystrokes broadcasted yet. Let's wait 5 seconds") time.sleep(5) else: break #print(keystrokes_rpc_responses) for keystroke in keystrokes_rpc_responses: json_keystroke = json.loads(keystroke)["result"] if "status" in json_keystroke.keys() and json_keystroke["status"] == "error": while True: print("Trying to re-brodcast keystroke") keystroke_rebroadcast = rogue_keystrokes(rpc_connection, json_keystroke["gametxid"], json_keystroke["keystrokes"]) if "txid" in keystroke_rebroadcast.keys(): print("Keystroke broadcasted! txid: " + keystroke_rebroadcast["txid"]) break else: print("Let's try again in 5 seconds") time.sleep(5) # waiting for last keystroke confirmation here last_keystroke_json = json.loads(keystrokes_rpc_responses[-1]) while True: while True: try: rpc_connection.sendrawtransaction(last_keystroke_json["result"]["hex"]) except Exception as e: pass try: confirmations_amount = rpc_connection.getrawtransaction(last_keystroke_json["result"]["txid"], 1)["confirmations"] break except Exception as e: print(e) print("Let's wait a little bit more") time.sleep(5) pass if confirmations_amount < 2: print("Last keystroke not confirmed yet! 
Let's wait a little") time.sleep(10) else: print("Last keystroke confirmed!") break while True: print("\nExtraction info:\n") extraction_info = rogue_extract(rpc_connection, new_game_txid, rpc_connection.getinfo()["pubkey"]) if extraction_info["status"] == "error": print(colorize("Your warrior died or no any information about game was saved on blockchain", "red")) print("If warrior was alive - try to wait a little (choose n to wait for a next block). If he is dead - you can bailout now (choose y).") else: print("Current game state:") print("Game txid: " + extraction_info["gametxid"]) print("Information about game saved on chain: " + extraction_info["extracted"]) print("\n") is_bailout_needed = input("Do you want to make bailout now [y] or wait for one more block [n]? [y/n]: ") if is_bailout_needed == "y": while True: bailout_info = rogue_bailout(rpc_connection, new_game_txid) if "hex" in bailout_info.keys(): break else: print("bailout not broadcasted yet by some reason. Let's wait...") time.sleep(5) break elif is_bailout_needed == "n": game_end_height = int(rpc_connection.getinfo()["blocks"]) while True: current_height = int(rpc_connection.getinfo()["blocks"]) height_difference = current_height - game_end_height if height_difference == 0: print(current_height) print(game_end_height) print(colorize("Waiting for next block before bailout", "blue")) time.sleep(5) else: break else: print("Please choose y or n !") print(bailout_info) print("\nGame is finished!\n") bailout_txid = bailout_info["txid"] input("Press [Enter] to continue...") except Exception as e: print("Something went wrong.") print(e) input("Press [Enter] to continue...") def play_multiplayer_game(rpc_connection): # printing list of user active multiplayer games active_games_list = rpc_connection.cclib("games", "17")["games"] active_multiplayer_games_list = [] for game in active_games_list: gameinfo = rogue_game_info(rpc_connection, game) if gameinfo["maxplayers"] > 1: 
active_multiplayer_games_list.append(gameinfo) games_counter = 0 for active_multiplayer_game in active_multiplayer_games_list: games_counter = games_counter + 1 is_ready_to_start = False try: active_multiplayer_game["seed"] is_ready_to_start = True except Exception as e: pass print(colorize("\n================================\n", "green")) print("Game txid: " + active_multiplayer_game["gametxid"]) print("Game buyin: " + str(active_multiplayer_game["buyin"])) if is_ready_to_start: print(colorize("Ready for start!", "green")) else: print(colorize("Not ready for start yet, wait until start height!", "red")) print("Game height: " + str(active_multiplayer_game["gameheight"])) print("Start height: " + str(active_multiplayer_game["start"])) print("Alive players: " + str(active_multiplayer_game["alive"])) print("Registered players: " + str(active_multiplayer_game["numplayers"])) print("Max players: " + str(active_multiplayer_game["maxplayers"])) print(colorize("\n***\n", "blue")) print("Players in game:") for player in active_multiplayer_game["players"]: print("Slot: " + str(player["slot"])) print("Baton: " + str(player["baton"])) print("Tokenid: " + str(player["tokenid"])) print("Is mine?: " + str(player["ismine"])) # asking user if he want to start any of them while True: start_game = input("\nDo you want to start any of your pendning multiplayer games?[y/n]: ") if start_game == "y": new_game_txid = input("Input txid of game which you want to start: ") game_info = rogue_game_info(rpc_connection, new_game_txid) try: start_time = time.time() while True: subprocess.call(["cc/rogue/rogue", str(game_info["seed"]), str(game_info["gametxid"])]) time_elapsed = time.time() - start_time if time_elapsed > 1: break else: print("Game less than 1 second. 
Trying to start again") time.sleep(1) except Exception as e: print("Maybe game isn't ready for start yet or your input was not correct, sorry.") input("Press [Enter] to continue...") break game_end_height = int(rpc_connection.getinfo()["blocks"]) while True: current_height = int(rpc_connection.getinfo()["blocks"]) height_difference = current_height - game_end_height if height_difference == 0: print(current_height) print(game_end_height) print(colorize("Waiting for next block before bailout or highlander", "blue")) time.sleep(5) else: break while True: keystrokes_rpc_responses = find_game_keystrokes_in_log(new_game_txid)[1::2] if len(keystrokes_rpc_responses) < 1: print("No keystrokes broadcasted yet. Let's wait 5 seconds") time.sleep(5) else: break for keystroke in keystrokes_rpc_responses: json_keystroke = json.loads(keystroke)["result"] if "status" in json_keystroke.keys() and json_keystroke["status"] == "error": while True: print("Trying to re-brodcast keystroke") keystroke_rebroadcast = rogue_keystrokes(rpc_connection, json_keystroke["gametxid"], json_keystroke["keystrokes"]) if "txid" in keystroke_rebroadcast.keys(): print("Keystroke broadcasted! txid: " + keystroke_rebroadcast["txid"]) break else: print("Let's try again in 5 seconds") time.sleep(5) last_keystroke_json = json.loads(keystrokes_rpc_responses[-1]) while True: while True: try: confirmations_amount = rpc_connection.getrawtransaction(last_keystroke_json["result"]["txid"], 1)["confirmations"] break except Exception as e: print(e) print("Let's wait a little bit more") rpc_connection.sendrawtransaction(last_keystroke_json["result"]["hex"]) time.sleep(5) pass if confirmations_amount < 2: print("Last keystroke not confirmed yet! 
Let's wait a little") time.sleep(10) else: print("Last keystroke confirmed!") break while True: print("\nExtraction info:\n") extraction_info = rogue_extract(rpc_connection, new_game_txid, rpc_connection.getinfo()["pubkey"]) if extraction_info["status"] == "error": print(colorize("Your warrior died or no any information about game was saved on blockchain", "red")) print("If warrior was alive - try to wait a little (choose n to wait for a next block). If he is dead - you can bailout now (choose y).") else: print("Current game state:") print("Game txid: " + extraction_info["gametxid"]) print("Information about game saved on chain: " + extraction_info["extracted"]) print("\n") is_bailout_needed = input( "Do you want to make bailout now [y] or wait for one more block [n]? [y/n]: ") if is_bailout_needed == "y": if game_info["alive"] > 1: bailout_info = rogue_bailout(rpc_connection, new_game_txid) try: bailout_txid = bailout_info["txid"] print(bailout_info) print("\nGame is finished!\n") input("Press [Enter] to continue...") break except Exception: highlander_info = rogue_highlander(rpc_connection, new_game_txid) highlander_info = highlander_info["txid"] print(highlander_info) print("\nGame is finished!\n") input("Press [Enter] to continue...") break else: highlander_info = rogue_highlander(rpc_connection, new_game_txid) if 'error' in highlander_info.keys() and highlander_info["error"] == 'numplayers != maxplayers': bailout_info = rogue_bailout(rpc_connection, new_game_txid) print(bailout_info) print("\nGame is finished!\n") input("Press [Enter] to continue...") break else: print(highlander_info) print("\nGame is finished!\n") input("Press [Enter] to continue...") break elif
# Regression tests for deltapv.solver on a simple two-layer homojunction cell.
# NOTE(review): the expected values below are hard-coded regression fixtures —
# presumably captured from a known-good run; confirm against upstream before editing.
import pytest,warnings
import os,sys
# Make the deltapv package importable when tests run from this directory.
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
warnings.resetwarnings()
# Silence FutureWarning/DeprecationWarning noise (e.g. from JAX/NumPy) during runs.
warnings.simplefilter('ignore', FutureWarning)
warnings.simplefilter('ignore', DeprecationWarning)
from jax import numpy as jnp, lax, vmap
from deltapv import simulator,materials,sun,bcond,solver,physics,scales

# Convenience aliases mirroring deltapv's internal util type aliases.
Array = jnp.ndarray#util.Array
f64 = jnp.float64#util.f64
i64 = jnp.int64#util.i64

# Device geometry: total length L and junction depth J — units not shown here;
# presumably cm, as is conventional in deltapv — TODO confirm.
L = 3e-4
J = 5e-6
# One material shared by both layers (homojunction); only the doping differs below.
material = materials.create_material(Chi=3.9, Eg=1.5, eps=9.4, Nc=8e17, Nv=1.8e19, mn=100, mp=100, Et=0, tn=1e-8, tp=1e-8, A=1e4)
# Two-layer design: emitter of width J doped +1e17, base of width L-J doped -1e15.
des = simulator.make_design(n_points=500, Ls=[J, L-J], mats=[material, material], Ns=[1e17, -1e15], Snl=1e7, Snr=0, Spl=0, Spr=1e7)
ls = simulator.incident_light(Lambda=sun.Lambda_eff, P_in = sun.P_in_eff)
# Shared module-level fixtures: the cell with and without the optical model.
cell = simulator.init_cell(des,ls)
cell_optics = simulator.init_cell(des,ls,optics=True)
bound_eq = bcond.boundary_eq(cell)

def aeq(val0,val1,tol=1e-7):
    """Elementwise approximate-equality predicate used by all assertions below."""
    return lax.abs(val0-val1)<tol

def test_vincr():
    # Voltage increment chosen by the solver for this cell; fixed regression value.
    dv = solver.vincr(cell)
    assert aeq(dv,1.9340863535916804)

def test_ooe_guess():
    # The out-of-equilibrium initial guess must reuse the equilibrium potential
    # and derive the quasi-Fermi potentials from Chi / Chi+Eg band offsets.
    pot_eq = simulator.equilibrium(des,ls)
    pot_ooe_guess = solver.ooe_guess(cell_optics,pot_eq)
    assert aeq(pot_ooe_guess.phi,pot_eq.phi).all()
    assert aeq(pot_ooe_guess.phi_n, - cell.Chi - pot_eq.phi).all()
    assert aeq(pot_ooe_guess.phi_p, - cell.Chi - cell.Eg - pot_eq.phi).all()
    #print(pot_ooe_guess)
    #print(scales.energy)

def test_logdamp():
    # Per the fixture: logdamp leaves values up to 1 unchanged and damps values above.
    vec = jnp.arange(0,3,0.2)
    damped_vec_true = jnp.array([0.,0.2,0.4,0.6,0.8,1., 1.11972125,1.22612561,1.32228903,1.41001089,1.49065438,1.56527702, 1.63471572,1.69964418,1.76061274])
    assert aeq(solver.logdamp(vec),damped_vec_true).all()

def test_scaleddamp():
    # scaledamp must cap at 50.0 while remaining strictly increasing over the input.
    vec = jnp.arange(0,120,10)
    damp_vec = solver.scaledamp(vec)
    assert aeq(jnp.max(damp_vec),50.0)
    for i in range(len(vec)-1):
        assert damp_vec[i]<damp_vec[i+1]

def test_to_be_pytested():
    # Placeholder recording which solver helpers still lack dedicated coverage.
    print('\nThe functions of solver.pot2vec,vec2pot,modify,residnorm,linesearch,fwdlnsrch,linguess,genlinguess,quadguess have to be tested.')

def test_eq_guess():
pot_ini = solver.eq_guess(cell, bound_eq) phi_guess = pot_ini.phi N = len(cell.Eg) assert vmap(aeq)(phi_guess[:9],-156.05169243*jnp.ones(9,f64)).all() assert vmap(aeq)(phi_guess[9:],-202.19671446*jnp.ones(491,f64)).all() assert vmap(aeq)(pot_ini.phi_n,jnp.zeros(N)).all() assert vmap(aeq)(pot_ini.phi_p,jnp.zeros(N)).all() def test_step_eq(): pot_ini = solver.eq_guess(cell, bound_eq) pot, stats = solver.step_eq(cell,bound_eq,pot_ini) #print(pot) #print(stats) #print(pot) phi_true = jnp.array( [-154.20241177, -155.02610195, -157.21444183, -158.08702838, -158.58799572, -158.95269887, -159.24627598, -159.49626315, -159.71691432, -198.68004819, -198.90390713, -199.12241736, -199.33445325, -199.53875364, -199.73395405, -199.91864266, -200.09144102, -200.25110509, -200.3966351 , -200.52737657, -200.64309252, -200.74399081, -200.83070012, -200.90420029, -200.96572261, -201.01663993, -201.05836463, -201.09226631, -201.11961422, -201.14154351, -201.15904135, -201.17294732, -201.18396295, -201.19266632, -201.20215447, -201.21064419, -201.21728604, -201.22248221, -201.22654739, -201.22972774, -201.23221585, -201.2341624 , -201.23568526, -201.23687666, -201.23780873, -201.23853793, -201.23910841, -201.23955473, -201.23990389, -201.24017706, -201.24039077, -201.24055796, -201.24068876, -201.24079109, -201.24087115, -201.24093378, -201.24098278, -201.24102112, -201.24105111, -201.24107457, -201.24109293, -201.24110729, -201.24111852, -201.24112731, -201.24113419, -201.24113957, -201.24114378, -201.24114707, -201.24114965, -201.24115166, -201.24115324, -201.24115447, -201.24115544, -201.24115619, -201.24115678, -201.24115724, -201.24115761, -201.24115789, -201.24115811, -201.24115828, -201.24115842, -201.24115852, -201.24115861, -201.24115867, -201.24115872, -201.24115876, -201.24115879, -201.24115882, -201.24115884, -201.24115885, -201.24115886, -201.24115887, -201.24115888, -201.24115888, -201.24115889, -201.24115889, -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , 
-201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , 
-201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , 
-201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.2411589 , -201.24115889, -201.24115889, -201.24115889, -201.24115888, -201.24115888, -201.24115887, -201.24115886, -201.24115885, -201.24115883, -201.24115881, -201.24115878, -201.24115875, -201.24115871, -201.24115865, -201.24115858, -201.24115849, -201.24115837, -201.24115823, -201.24115804, -201.2411578 , -201.24115749, -201.2411571 , -201.24115659, -201.24115595, -201.24115513, -201.24115407, -201.24115273, -201.24115101, -201.24114882, -201.24114601, -201.24114242, -201.24113784, -201.24113198, -201.24112448, -201.24111491, -201.24110267, -201.24108702, -201.24106702, -201.24104145, -201.24100878, -201.24096701, -201.24091362, -201.24084538, -201.24075815, -201.24064665, -201.24050414, -201.24032197, -201.24008912, -201.23979148, -201.23941104, -201.23892476, -201.23830318, -201.23750867, -201.23649311, -201.235195 , -201.23353574, -201.23141484, -201.22870388, -201.22523867, -201.22080939, -201.2151478 , -201.20791106, -201.19866093, -201.18985613, -201.18040338, -201.16844937, -201.15337471, -201.13443109, -201.1107281 , -201.08122626, -201.04474107, -200.99996324, -200.94550038, -200.87994337, -200.80195652, -200.71038403, -200.60435856, -200.48339265, 
-200.34743379]) assert vmap(aeq)(pot.phi,phi_true).all() def test_solver_eq(): pot_ini = solver.eq_guess(cell, bound_eq) pot = solver.solve_eq(cell,bound_eq,pot_ini) #print(pot) phi_true = jnp.array( [-152.93817712, -152.96207861, -152.99233673, -153.03678375, -153.10650344, -153.21791909, -153.39501105, -153.67080119, -154.08637116, -154.68570866, -155.23546933, -155.75548157, -156.25671665, -156.74551585, -157.22564675, -157.69938832, -158.1681359 , -158.63275115, -159.09376954, -159.5515255 , -160.00622877, -160.45801134, -160.90695656, -161.35311721, -161.79652683, -162.23720675, -162.67517063, -163.11042719, -163.54298205, -163.97283882, -164.39999983, -164.82446659, -165.24624008, -165.66532094, -166.08170958, -166.49540628, -166.90641121, -167.31472449, -167.7203462 , -168.12327639, -168.52351509, -168.92106233, -169.31591812, -169.70808248, -170.0975554 , -170.48433689, -170.86842696, -171.24982561, -171.62853284, -172.00454865, -172.37787304, -172.74850601, -173.11644756, -173.4816977 , -173.84425642, -174.20412372, -174.5612996 , -174.91578406, -175.26757711, -175.61667874, -175.96308895, -176.30680774, -176.64783512, -176.98617107, -177.32181561, -177.65476874, -177.98503044, -178.31260073, -178.6374796 , -178.95966705, -179.27916309, -179.59596771, -179.91008091, -180.22150269, -180.53023305, -180.836272 , -181.13961953, -181.44027564, -181.73824034, -182.03351361, -182.32609547, -182.61598592, -182.90318494, -183.18769255, -183.46950874, -183.74863351, -184.02506687, -184.29880881, -184.56985933, -184.83821844, -185.10388613, -185.3668624 , -185.62714726, -185.88474071, -186.13964275, -186.39185337, -186.64137258, -186.88820039, -187.13233679, -187.3737818 , -187.6125354 , -187.84859762, -188.08196846, -188.31264792, -188.54063602, -188.76593278, -188.98853821, -189.20845233, -189.42567517, -189.64020676, -189.85204716, -190.06119639, -190.26765454, -190.47142166, -190.67249786, -190.87088324, -191.06657794, -191.2595821 , -191.44989592, -191.63751963, 
-191.82245349, -192.00469783, -192.18425301, -192.3611195 , -192.53529781, -192.70678855, -192.87559246, -193.04171037, -193.20514327, -193.36589228, -193.52395873, -193.67934412, -193.83205021, -193.98207898, -194.12943273, -194.27411406, -194.41612591, -194.55547165, -194.69215505, -194.82618037, -194.9575524 , -195.08627649, -195.2123586 , -195.33580539, -195.45662423, -195.57482325, -195.69041146, -195.80339872, -195.91379585, -196.02161468, -196.12686809, -196.22957007, -196.32973574, -196.42738146, -196.52252481, -196.61518466, -196.7053812 , -196.79313595, -196.87847183, -196.96141309, -197.04198542, -197.12021587, -197.19613288, -197.26976626, -197.34114718, -197.41030811, -197.47728281, -197.54210628, -197.60481468, -197.66544533, -197.72403658, -197.78062777, -197.83525916, -197.88797182, -197.93880758, -197.98780892, -198.03501888, -198.08048096, -198.12423906, -198.16633733, -198.20682014, -198.24573194, -198.28311717, -198.31902023, -198.3534853 , -198.38655634, -198.41827698, -198.44869043, -198.47783945, -198.50576623, -198.53251239, -198.55811888, -198.58262594, -198.60607308, -198.628499 , -198.64994158, -198.67043784, -198.69002392, -198.70873504, -198.72660552, -198.74366873, -198.75995709, -198.77550208, -198.79033422, -198.80448306, -198.81797723, -198.8308444 , -198.84311129, -198.85480373, -198.86594661, -198.87656392, -198.88667879, -198.89631347, -198.90548936, -198.91422706, -198.92254633, -198.93046615, -198.93800476, -198.94517964, -198.95200754, -198.95850454, -198.96468601, -198.97056668, -198.97616067, -198.98148145, -198.98654193, -198.99135444, -198.99593077, -199.00028218, -199.00441943, -199.00835279, -199.01209206, -199.0156466 , -199.01902532, -199.02223676, -199.02528902, -199.02818985, -199.03094662, -199.03356637, -199.0360558 , -199.03842129, -199.04066892, -199.04280449, -199.04483351, -199.04676122, -199.04859263, -199.05033249, -199.05198533,
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from .constant import Constant __NR_osf_syscall = Constant('__NR_osf_syscall',0) __NR_exit = Constant('__NR_exit',1) __NR_fork = Constant('__NR_fork',2) __NR_read = Constant('__NR_read',3) __NR_write = Constant('__NR_write',4) __NR_osf_old_open = Constant('__NR_osf_old_open',5) __NR_close = Constant('__NR_close',6) __NR_osf_wait4 = Constant('__NR_osf_wait4',7) __NR_osf_old_creat = Constant('__NR_osf_old_creat',8) __NR_link = Constant('__NR_link',9) __NR_unlink = Constant('__NR_unlink',10) __NR_osf_execve = Constant('__NR_osf_execve',11) __NR_chdir = Constant('__NR_chdir',12) __NR_fchdir = Constant('__NR_fchdir',13) __NR_mknod = Constant('__NR_mknod',14) __NR_chmod = Constant('__NR_chmod',15) __NR_chown = Constant('__NR_chown',16) __NR_brk = Constant('__NR_brk',17) __NR_osf_getfsstat = Constant('__NR_osf_getfsstat',18) __NR_lseek = Constant('__NR_lseek',19) __NR_getxpid = Constant('__NR_getxpid',20) __NR_osf_mount = Constant('__NR_osf_mount',21) __NR_umount = Constant('__NR_umount',22) __NR_setuid = Constant('__NR_setuid',23) __NR_getxuid = Constant('__NR_getxuid',24) __NR_exec_with_loader = Constant('__NR_exec_with_loader',25) __NR_ptrace = Constant('__NR_ptrace',26) __NR_osf_nrecvmsg = Constant('__NR_osf_nrecvmsg',27) __NR_osf_nsendmsg = Constant('__NR_osf_nsendmsg',28) __NR_osf_nrecvfrom = Constant('__NR_osf_nrecvfrom',29) __NR_osf_naccept = Constant('__NR_osf_naccept',30) __NR_osf_ngetpeername = Constant('__NR_osf_ngetpeername',31) __NR_osf_ngetsockname = Constant('__NR_osf_ngetsockname',32) __NR_access = Constant('__NR_access',33) __NR_osf_chflags = Constant('__NR_osf_chflags',34) __NR_osf_fchflags = Constant('__NR_osf_fchflags',35) __NR_sync = Constant('__NR_sync',36) __NR_kill = Constant('__NR_kill',37) __NR_osf_old_stat = Constant('__NR_osf_old_stat',38) __NR_setpgid 
= Constant('__NR_setpgid',39) __NR_osf_old_lstat = Constant('__NR_osf_old_lstat',40) __NR_dup = Constant('__NR_dup',41) __NR_pipe = Constant('__NR_pipe',42) __NR_osf_set_program_attributes = Constant('__NR_osf_set_program_attributes',43) __NR_osf_profil = Constant('__NR_osf_profil',44) __NR_open = Constant('__NR_open',45) __NR_osf_old_sigaction = Constant('__NR_osf_old_sigaction',46) __NR_getxgid = Constant('__NR_getxgid',47) __NR_osf_sigprocmask = Constant('__NR_osf_sigprocmask',48) __NR_osf_getlogin = Constant('__NR_osf_getlogin',49) __NR_osf_setlogin = Constant('__NR_osf_setlogin',50) __NR_acct = Constant('__NR_acct',51) __NR_sigpending = Constant('__NR_sigpending',52) __NR_ioctl = Constant('__NR_ioctl',54) __NR_osf_reboot = Constant('__NR_osf_reboot',55) __NR_osf_revoke = Constant('__NR_osf_revoke',56) __NR_symlink = Constant('__NR_symlink',57) __NR_readlink = Constant('__NR_readlink',58) __NR_execve = Constant('__NR_execve',59) __NR_umask = Constant('__NR_umask',60) __NR_chroot = Constant('__NR_chroot',61) __NR_osf_old_fstat = Constant('__NR_osf_old_fstat',62) __NR_getpgrp = Constant('__NR_getpgrp',63) __NR_getpagesize = Constant('__NR_getpagesize',64) __NR_osf_mremap = Constant('__NR_osf_mremap',65) __NR_vfork = Constant('__NR_vfork',66) __NR_stat = Constant('__NR_stat',67) __NR_lstat = Constant('__NR_lstat',68) __NR_osf_sbrk = Constant('__NR_osf_sbrk',69) __NR_osf_sstk = Constant('__NR_osf_sstk',70) __NR_mmap = Constant('__NR_mmap',71) __NR_osf_old_vadvise = Constant('__NR_osf_old_vadvise',72) __NR_munmap = Constant('__NR_munmap',73) __NR_mprotect = Constant('__NR_mprotect',74) __NR_madvise = Constant('__NR_madvise',75) __NR_vhangup = Constant('__NR_vhangup',76) __NR_osf_kmodcall = Constant('__NR_osf_kmodcall',77) __NR_osf_mincore = Constant('__NR_osf_mincore',78) __NR_getgroups = Constant('__NR_getgroups',79) __NR_setgroups = Constant('__NR_setgroups',80) __NR_osf_old_getpgrp = Constant('__NR_osf_old_getpgrp',81) __NR_setpgrp = Constant('__NR_setpgrp',82) 
__NR_osf_setitimer = Constant('__NR_osf_setitimer',83) __NR_osf_old_wait = Constant('__NR_osf_old_wait',84) __NR_osf_table = Constant('__NR_osf_table',85) __NR_osf_getitimer = Constant('__NR_osf_getitimer',86) __NR_gethostname = Constant('__NR_gethostname',87) __NR_sethostname = Constant('__NR_sethostname',88) __NR_getdtablesize = Constant('__NR_getdtablesize',89) __NR_dup2 = Constant('__NR_dup2',90) __NR_fstat = Constant('__NR_fstat',91) __NR_fcntl = Constant('__NR_fcntl',92) __NR_osf_select = Constant('__NR_osf_select',93) __NR_poll = Constant('__NR_poll',94) __NR_fsync = Constant('__NR_fsync',95) __NR_setpriority = Constant('__NR_setpriority',96) __NR_socket = Constant('__NR_socket',97) __NR_connect = Constant('__NR_connect',98) __NR_accept = Constant('__NR_accept',99) __NR_getpriority = Constant('__NR_getpriority',100) __NR_send = Constant('__NR_send',101) __NR_recv = Constant('__NR_recv',102) __NR_sigreturn = Constant('__NR_sigreturn',103) __NR_bind = Constant('__NR_bind',104) __NR_setsockopt = Constant('__NR_setsockopt',105) __NR_listen = Constant('__NR_listen',106) __NR_osf_plock = Constant('__NR_osf_plock',107) __NR_osf_old_sigvec = Constant('__NR_osf_old_sigvec',108) __NR_osf_old_sigblock = Constant('__NR_osf_old_sigblock',109) __NR_osf_old_sigsetmask = Constant('__NR_osf_old_sigsetmask',110) __NR_sigsuspend = Constant('__NR_sigsuspend',111) __NR_osf_sigstack = Constant('__NR_osf_sigstack',112) __NR_recvmsg = Constant('__NR_recvmsg',113) __NR_sendmsg = Constant('__NR_sendmsg',114) __NR_osf_old_vtrace = Constant('__NR_osf_old_vtrace',115) __NR_osf_gettimeofday = Constant('__NR_osf_gettimeofday',116) __NR_osf_getrusage = Constant('__NR_osf_getrusage',117) __NR_getsockopt = Constant('__NR_getsockopt',118) __NR_readv = Constant('__NR_readv',120) __NR_writev = Constant('__NR_writev',121) __NR_osf_settimeofday = Constant('__NR_osf_settimeofday',122) __NR_fchown = Constant('__NR_fchown',123) __NR_fchmod = Constant('__NR_fchmod',124) __NR_recvfrom = 
Constant('__NR_recvfrom',125) __NR_setreuid = Constant('__NR_setreuid',126) __NR_setregid = Constant('__NR_setregid',127) __NR_rename = Constant('__NR_rename',128) __NR_truncate = Constant('__NR_truncate',129) __NR_ftruncate = Constant('__NR_ftruncate',130) __NR_flock = Constant('__NR_flock',131) __NR_setgid = Constant('__NR_setgid',132) __NR_sendto = Constant('__NR_sendto',133) __NR_shutdown = Constant('__NR_shutdown',134) __NR_socketpair = Constant('__NR_socketpair',135) __NR_mkdir = Constant('__NR_mkdir',136) __NR_rmdir = Constant('__NR_rmdir',137) __NR_osf_utimes = Constant('__NR_osf_utimes',138) __NR_osf_old_sigreturn = Constant('__NR_osf_old_sigreturn',139) __NR_osf_adjtime = Constant('__NR_osf_adjtime',140) __NR_getpeername = Constant('__NR_getpeername',141) __NR_osf_gethostid = Constant('__NR_osf_gethostid',142) __NR_osf_sethostid = Constant('__NR_osf_sethostid',143) __NR_getrlimit = Constant('__NR_getrlimit',144) __NR_setrlimit = Constant('__NR_setrlimit',145) __NR_osf_old_killpg = Constant('__NR_osf_old_killpg',146) __NR_setsid = Constant('__NR_setsid',147) __NR_quotactl = Constant('__NR_quotactl',148) __NR_osf_oldquota = Constant('__NR_osf_oldquota',149) __NR_getsockname = Constant('__NR_getsockname',150) __NR_osf_pid_block = Constant('__NR_osf_pid_block',153) __NR_osf_pid_unblock = Constant('__NR_osf_pid_unblock',154) __NR_sigaction = Constant('__NR_sigaction',156) __NR_osf_sigwaitprim = Constant('__NR_osf_sigwaitprim',157) __NR_osf_nfssvc = Constant('__NR_osf_nfssvc',158) __NR_osf_getdirentries = Constant('__NR_osf_getdirentries',159) __NR_osf_statfs = Constant('__NR_osf_statfs',160) __NR_osf_fstatfs = Constant('__NR_osf_fstatfs',161) __NR_osf_asynch_daemon = Constant('__NR_osf_asynch_daemon',163) __NR_osf_getfh = Constant('__NR_osf_getfh',164) __NR_osf_getdomainname = Constant('__NR_osf_getdomainname',165) __NR_setdomainname = Constant('__NR_setdomainname',166) __NR_osf_exportfs = Constant('__NR_osf_exportfs',169) __NR_osf_alt_plock = 
Constant('__NR_osf_alt_plock',181) __NR_osf_getmnt = Constant('__NR_osf_getmnt',184) __NR_osf_alt_sigpending = Constant('__NR_osf_alt_sigpending',187) __NR_osf_alt_setsid = Constant('__NR_osf_alt_setsid',188) __NR_osf_swapon = Constant('__NR_osf_swapon',199) __NR_msgctl = Constant('__NR_msgctl',200) __NR_msgget = Constant('__NR_msgget',201) __NR_msgrcv = Constant('__NR_msgrcv',202) __NR_msgsnd = Constant('__NR_msgsnd',203) __NR_semctl = Constant('__NR_semctl',204) __NR_semget = Constant('__NR_semget',205) __NR_semop = Constant('__NR_semop',206) __NR_osf_utsname = Constant('__NR_osf_utsname',207) __NR_lchown = Constant('__NR_lchown',208) __NR_osf_shmat = Constant('__NR_osf_shmat',209) __NR_shmctl = Constant('__NR_shmctl',210) __NR_shmdt = Constant('__NR_shmdt',211) __NR_shmget = Constant('__NR_shmget',212) __NR_osf_mvalid = Constant('__NR_osf_mvalid',213) __NR_osf_getaddressconf = Constant('__NR_osf_getaddressconf',214) __NR_osf_msleep = Constant('__NR_osf_msleep',215) __NR_osf_mwakeup = Constant('__NR_osf_mwakeup',216) __NR_msync = Constant('__NR_msync',217) __NR_osf_signal = Constant('__NR_osf_signal',218) __NR_osf_utc_gettime = Constant('__NR_osf_utc_gettime',219) __NR_osf_utc_adjtime = Constant('__NR_osf_utc_adjtime',220) __NR_osf_security = Constant('__NR_osf_security',222) __NR_osf_kloadcall = Constant('__NR_osf_kloadcall',223) __NR_getpgid = Constant('__NR_getpgid',233) __NR_getsid = Constant('__NR_getsid',234) __NR_sigaltstack = Constant('__NR_sigaltstack',235) __NR_osf_waitid = Constant('__NR_osf_waitid',236) __NR_osf_priocntlset = Constant('__NR_osf_priocntlset',237) __NR_osf_sigsendset = Constant('__NR_osf_sigsendset',238) __NR_osf_set_speculative = Constant('__NR_osf_set_speculative',239) __NR_osf_msfs_syscall = Constant('__NR_osf_msfs_syscall',240) __NR_osf_sysinfo = Constant('__NR_osf_sysinfo',241) __NR_osf_uadmin = Constant('__NR_osf_uadmin',242) __NR_osf_fuser = Constant('__NR_osf_fuser',243) __NR_osf_proplist_syscall = 
Constant('__NR_osf_proplist_syscall',244) __NR_osf_ntp_adjtime = Constant('__NR_osf_ntp_adjtime',245) __NR_osf_ntp_gettime = Constant('__NR_osf_ntp_gettime',246) __NR_osf_pathconf = Constant('__NR_osf_pathconf',247) __NR_osf_fpathconf = Constant('__NR_osf_fpathconf',248) __NR_osf_uswitch = Constant('__NR_osf_uswitch',250) __NR_osf_usleep_thread = Constant('__NR_osf_usleep_thread',251) __NR_osf_audcntl = Constant('__NR_osf_audcntl',252) __NR_osf_audgen = Constant('__NR_osf_audgen',253) __NR_sysfs = Constant('__NR_sysfs',254) __NR_osf_subsys_info = Constant('__NR_osf_subsys_info',255) __NR_osf_getsysinfo = Constant('__NR_osf_getsysinfo',256) __NR_osf_setsysinfo = Constant('__NR_osf_setsysinfo',257) __NR_osf_afs_syscall = Constant('__NR_osf_afs_syscall',258) __NR_osf_swapctl = Constant('__NR_osf_swapctl',259) __NR_osf_memcntl = Constant('__NR_osf_memcntl',260) __NR_osf_fdatasync = Constant('__NR_osf_fdatasync',261) __NR_bdflush = Constant('__NR_bdflush',300) __NR_sethae = Constant('__NR_sethae',301) __NR_mount = Constant('__NR_mount',302) __NR_old_adjtimex = Constant('__NR_old_adjtimex',303) __NR_swapoff = Constant('__NR_swapoff',304) __NR_getdents = Constant('__NR_getdents',305) __NR_create_module = Constant('__NR_create_module',306) __NR_init_module = Constant('__NR_init_module',307) __NR_delete_module = Constant('__NR_delete_module',308) __NR_get_kernel_syms = Constant('__NR_get_kernel_syms',309) __NR_syslog = Constant('__NR_syslog',310) __NR_reboot = Constant('__NR_reboot',311) __NR_clone = Constant('__NR_clone',312) __NR_uselib = Constant('__NR_uselib',313) __NR_mlock = Constant('__NR_mlock',314) __NR_munlock = Constant('__NR_munlock',315) __NR_mlockall = Constant('__NR_mlockall',316) __NR_munlockall = Constant('__NR_munlockall',317) __NR_sysinfo = Constant('__NR_sysinfo',318) __NR__sysctl = Constant('__NR__sysctl',319) __NR_oldumount = Constant('__NR_oldumount',321) __NR_swapon = Constant('__NR_swapon',322) __NR_times = Constant('__NR_times',323) 
__NR_personality = Constant('__NR_personality',324) __NR_setfsuid = Constant('__NR_setfsuid',325) __NR_setfsgid = Constant('__NR_setfsgid',326) __NR_ustat = Constant('__NR_ustat',327) __NR_statfs = Constant('__NR_statfs',328) __NR_fstatfs = Constant('__NR_fstatfs',329) __NR_sched_setparam = Constant('__NR_sched_setparam',330) __NR_sched_getparam = Constant('__NR_sched_getparam',331) __NR_sched_setscheduler = Constant('__NR_sched_setscheduler',332) __NR_sched_getscheduler = Constant('__NR_sched_getscheduler',333) __NR_sched_yield = Constant('__NR_sched_yield',334) __NR_sched_get_priority_max = Constant('__NR_sched_get_priority_max',335) __NR_sched_get_priority_min = Constant('__NR_sched_get_priority_min',336) __NR_sched_rr_get_interval = Constant('__NR_sched_rr_get_interval',337) __NR_afs_syscall = Constant('__NR_afs_syscall',338) __NR_uname = Constant('__NR_uname',339) __NR_nanosleep = Constant('__NR_nanosleep',340) __NR_mremap = Constant('__NR_mremap',341) __NR_nfsservctl = Constant('__NR_nfsservctl',342) __NR_setresuid = Constant('__NR_setresuid',343) __NR_getresuid = Constant('__NR_getresuid',344) __NR_pciconfig_read = Constant('__NR_pciconfig_read',345) __NR_pciconfig_write = Constant('__NR_pciconfig_write',346) __NR_query_module = Constant('__NR_query_module',347) __NR_prctl = Constant('__NR_prctl',348) __NR_pread = Constant('__NR_pread',349) __NR_pwrite = Constant('__NR_pwrite',350) __NR_rt_sigreturn = Constant('__NR_rt_sigreturn',351) __NR_rt_sigaction = Constant('__NR_rt_sigaction',352) __NR_rt_sigprocmask = Constant('__NR_rt_sigprocmask',353) __NR_rt_sigpending = Constant('__NR_rt_sigpending',354) __NR_rt_sigtimedwait = Constant('__NR_rt_sigtimedwait',355) __NR_rt_sigqueueinfo = Constant('__NR_rt_sigqueueinfo',356) __NR_rt_sigsuspend = Constant('__NR_rt_sigsuspend',357) __NR_select = Constant('__NR_select',358) __NR_gettimeofday = Constant('__NR_gettimeofday',359) __NR_settimeofday = Constant('__NR_settimeofday',360) __NR_getitimer = 
Constant('__NR_getitimer',361) __NR_setitimer = Constant('__NR_setitimer',362) __NR_utimes = Constant('__NR_utimes',363) __NR_getrusage = Constant('__NR_getrusage',364) __NR_wait4 = Constant('__NR_wait4',365) __NR_adjtimex = Constant('__NR_adjtimex',366) __NR_getcwd = Constant('__NR_getcwd',367) __NR_capget = Constant('__NR_capget',368) __NR_capset = Constant('__NR_capset',369) __NR_sendfile = Constant('__NR_sendfile',370) __NR_setresgid = Constant('__NR_setresgid',371) __NR_getresgid = Constant('__NR_getresgid',372) __NR_dipc = Constant('__NR_dipc',373) __NR_pivot_root = Constant('__NR_pivot_root',374) __NR_mincore = Constant('__NR_mincore',375) __NR_pciconfig_iobase = Constant('__NR_pciconfig_iobase',376) __NR_getdents64 = Constant('__NR_getdents64',377) __NR_gettid = Constant('__NR_gettid',378) __NR_readahead = Constant('__NR_readahead',379) __NR_tkill = Constant('__NR_tkill',381) __NR_setxattr = Constant('__NR_setxattr',382) __NR_lsetxattr = Constant('__NR_lsetxattr',383) __NR_fsetxattr = Constant('__NR_fsetxattr',384) __NR_getxattr = Constant('__NR_getxattr',385) __NR_lgetxattr = Constant('__NR_lgetxattr',386) __NR_fgetxattr = Constant('__NR_fgetxattr',387) __NR_listxattr = Constant('__NR_listxattr',388) __NR_llistxattr = Constant('__NR_llistxattr',389) __NR_flistxattr = Constant('__NR_flistxattr',390) __NR_removexattr = Constant('__NR_removexattr',391) __NR_lremovexattr = Constant('__NR_lremovexattr',392) __NR_fremovexattr = Constant('__NR_fremovexattr',393) __NR_futex = Constant('__NR_futex',394) __NR_sched_setaffinity = Constant('__NR_sched_setaffinity',395) __NR_sched_getaffinity = Constant('__NR_sched_getaffinity',396) __NR_tuxcall = Constant('__NR_tuxcall',397) __NR_io_setup = Constant('__NR_io_setup',398) __NR_io_destroy = Constant('__NR_io_destroy',399) __NR_io_getevents = Constant('__NR_io_getevents',400) __NR_io_submit = Constant('__NR_io_submit',401) __NR_io_cancel = Constant('__NR_io_cancel',402) __NR_exit_group = Constant('__NR_exit_group',405) 
# NOTE(review): auto-generated syscall-number constant table (the `SYS_osf_*`
# entries indicate the Linux/Alpha (OSF) syscall ABI — TODO confirm arch).
# Newlines were stripped from this chunk, collapsing many assignments onto a
# few lines; content is preserved byte-for-byte because each numeric value is
# ABI-significant. The final assignment (`SYS_osf_sigsendset =`) is cut off —
# its value lies beyond this chunk.
__NR_lookup_dcookie = Constant('__NR_lookup_dcookie',406) __NR_sys_epoll_create = Constant('__NR_sys_epoll_create',407) __NR_sys_epoll_ctl = Constant('__NR_sys_epoll_ctl',408) __NR_sys_epoll_wait = Constant('__NR_sys_epoll_wait',409) __NR_remap_file_pages = Constant('__NR_remap_file_pages',410) __NR_set_tid_address = Constant('__NR_set_tid_address',411) __NR_restart_syscall = Constant('__NR_restart_syscall',412) __NR_fadvise64 = Constant('__NR_fadvise64',413) __NR_timer_create = Constant('__NR_timer_create',414) __NR_timer_settime = Constant('__NR_timer_settime',415) __NR_timer_gettime = Constant('__NR_timer_gettime',416) __NR_timer_getoverrun = Constant('__NR_timer_getoverrun',417) __NR_timer_delete = Constant('__NR_timer_delete',418) __NR_clock_settime = Constant('__NR_clock_settime',419) __NR_clock_gettime = Constant('__NR_clock_gettime',420) __NR_clock_getres = Constant('__NR_clock_getres',421) __NR_clock_nanosleep = Constant('__NR_clock_nanosleep',422) __NR_semtimedop = Constant('__NR_semtimedop',423) __NR_tgkill = Constant('__NR_tgkill',424) __NR_stat64 = Constant('__NR_stat64',425) __NR_lstat64 = Constant('__NR_lstat64',426) __NR_fstat64 = Constant('__NR_fstat64',427) __NR_vserver = Constant('__NR_vserver',428) __NR_mbind = Constant('__NR_mbind',429) __NR_get_mempolicy = Constant('__NR_get_mempolicy',430) __NR_set_mempolicy = Constant('__NR_set_mempolicy',431) __NR_mq_open = Constant('__NR_mq_open',432) __NR_mq_unlink = Constant('__NR_mq_unlink',433) __NR_mq_timedsend = Constant('__NR_mq_timedsend',434) __NR_mq_timedreceive = Constant('__NR_mq_timedreceive',435) __NR_mq_notify = Constant('__NR_mq_notify',436) __NR_mq_getsetattr = Constant('__NR_mq_getsetattr',437) __NR_waitid = Constant('__NR_waitid',438) __NR_add_key = Constant('__NR_add_key',439) __NR_request_key = Constant('__NR_request_key',440) __NR_keyctl = Constant('__NR_keyctl',441) __NR_ioprio_set = Constant('__NR_ioprio_set',442) __NR_ioprio_get = Constant('__NR_ioprio_get',443) __NR_inotify_init = 
Constant('__NR_inotify_init',444) __NR_inotify_add_watch = Constant('__NR_inotify_add_watch',445) __NR_inotify_rm_watch = Constant('__NR_inotify_rm_watch',446) __NR_fdatasync = Constant('__NR_fdatasync',447) __NR_kexec_load = Constant('__NR_kexec_load',448) __NR_migrate_pages = Constant('__NR_migrate_pages',449) __NR_openat = Constant('__NR_openat',450) __NR_mkdirat = Constant('__NR_mkdirat',451) __NR_mknodat = Constant('__NR_mknodat',452) __NR_fchownat = Constant('__NR_fchownat',453) __NR_futimesat = Constant('__NR_futimesat',454) __NR_fstatat64 = Constant('__NR_fstatat64',455) __NR_unlinkat = Constant('__NR_unlinkat',456) __NR_renameat = Constant('__NR_renameat',457) __NR_linkat = Constant('__NR_linkat',458) __NR_symlinkat = Constant('__NR_symlinkat',459) __NR_readlinkat = Constant('__NR_readlinkat',460) __NR_fchmodat = Constant('__NR_fchmodat',461) __NR_faccessat = Constant('__NR_faccessat',462) __NR_pselect6 = Constant('__NR_pselect6',463) __NR_ppoll = Constant('__NR_ppoll',464) __NR_unshare = Constant('__NR_unshare',465) __NR_set_robust_list = Constant('__NR_set_robust_list',466) __NR_get_robust_list = Constant('__NR_get_robust_list',467) __NR_splice = Constant('__NR_splice',468) __NR_sync_file_range = Constant('__NR_sync_file_range',469) __NR_tee = Constant('__NR_tee',470) __NR_vmsplice = Constant('__NR_vmsplice',471) __NR_move_pages = Constant('__NR_move_pages',472) __NR_getcpu = Constant('__NR_getcpu',473) __NR_epoll_pwait = Constant('__NR_epoll_pwait',474) __NR_utimensat = Constant('__NR_utimensat',475) __NR_signalfd = Constant('__NR_signalfd',476) __NR_timerfd = Constant('__NR_timerfd',477) __NR_eventfd = Constant('__NR_eventfd',478) __SYS_NERR = Constant('__SYS_NERR',((135) + 1)) _SYS_TIME_H = Constant('_SYS_TIME_H',1) SYS_accept = Constant('SYS_accept',99) SYS_access = Constant('SYS_access',33) SYS_acct = Constant('SYS_acct',51) SYS_add_key = Constant('SYS_add_key',439) SYS_adjtimex = Constant('SYS_adjtimex',366) SYS_afs_syscall = 
Constant('SYS_afs_syscall',338) SYS_bdflush = Constant('SYS_bdflush',300) SYS_bind = Constant('SYS_bind',104) SYS_brk = Constant('SYS_brk',17) SYS_capget = Constant('SYS_capget',368) SYS_capset = Constant('SYS_capset',369) SYS_chdir = Constant('SYS_chdir',12) SYS_chmod = Constant('SYS_chmod',15) SYS_chown = Constant('SYS_chown',16) SYS_chroot = Constant('SYS_chroot',61) SYS_clock_getres = Constant('SYS_clock_getres',421) SYS_clock_gettime = Constant('SYS_clock_gettime',420) SYS_clock_nanosleep = Constant('SYS_clock_nanosleep',422) SYS_clock_settime = Constant('SYS_clock_settime',419) SYS_clone = Constant('SYS_clone',312) SYS_close = Constant('SYS_close',6) SYS_connect = Constant('SYS_connect',98) SYS_create_module = Constant('SYS_create_module',306) SYS_delete_module = Constant('SYS_delete_module',308) SYS_dipc = Constant('SYS_dipc',373) SYS_dup = Constant('SYS_dup',41) SYS_dup2 = Constant('SYS_dup2',90) SYS_epoll_pwait = Constant('SYS_epoll_pwait',474) SYS_eventfd = Constant('SYS_eventfd',478) SYS_execve = Constant('SYS_execve',59) SYS_exec_with_loader = Constant('SYS_exec_with_loader',25) SYS_exit = Constant('SYS_exit',1) SYS_exit_group = Constant('SYS_exit_group',405) SYS_faccessat = Constant('SYS_faccessat',462) SYS_fadvise64 = Constant('SYS_fadvise64',413) SYS_fchdir = Constant('SYS_fchdir',13) SYS_fchmod = Constant('SYS_fchmod',124) SYS_fchmodat = Constant('SYS_fchmodat',461) SYS_fchown = Constant('SYS_fchown',123) SYS_fchownat = Constant('SYS_fchownat',453) SYS_fcntl = Constant('SYS_fcntl',92) SYS_fdatasync = Constant('SYS_fdatasync',447) SYS_fgetxattr = Constant('SYS_fgetxattr',387) SYS_flistxattr = Constant('SYS_flistxattr',390) SYS_flock = Constant('SYS_flock',131) SYS_fork = Constant('SYS_fork',2) SYS_fremovexattr = Constant('SYS_fremovexattr',393) SYS_fsetxattr = Constant('SYS_fsetxattr',384) SYS_fstat = Constant('SYS_fstat',91) SYS_fstat64 = Constant('SYS_fstat64',427) SYS_fstatat64 = Constant('SYS_fstatat64',455) SYS_fstatfs = 
Constant('SYS_fstatfs',329) SYS_fsync = Constant('SYS_fsync',95) SYS_ftruncate = Constant('SYS_ftruncate',130) SYS_futex = Constant('SYS_futex',394) SYS_futimesat = Constant('SYS_futimesat',454) SYS_getcpu = Constant('SYS_getcpu',473) SYS_getcwd = Constant('SYS_getcwd',367) SYS_getdents = Constant('SYS_getdents',305) SYS_getdents64 = Constant('SYS_getdents64',377) SYS_getdtablesize = Constant('SYS_getdtablesize',89) SYS_getgroups = Constant('SYS_getgroups',79) SYS_gethostname = Constant('SYS_gethostname',87) SYS_getitimer = Constant('SYS_getitimer',361) SYS_get_kernel_syms = Constant('SYS_get_kernel_syms',309) SYS_get_mempolicy = Constant('SYS_get_mempolicy',430) SYS_getpagesize = Constant('SYS_getpagesize',64) SYS_getpeername = Constant('SYS_getpeername',141) SYS_getpgid = Constant('SYS_getpgid',233) SYS_getpgrp = Constant('SYS_getpgrp',63) SYS_getpriority = Constant('SYS_getpriority',100) SYS_getresgid = Constant('SYS_getresgid',372) SYS_getresuid = Constant('SYS_getresuid',344) SYS_getrlimit = Constant('SYS_getrlimit',144) SYS_get_robust_list = Constant('SYS_get_robust_list',467) SYS_getrusage = Constant('SYS_getrusage',364) SYS_getsid = Constant('SYS_getsid',234) SYS_getsockname = Constant('SYS_getsockname',150) SYS_getsockopt = Constant('SYS_getsockopt',118) SYS_gettid = Constant('SYS_gettid',378) SYS_gettimeofday = Constant('SYS_gettimeofday',359) SYS_getxattr = Constant('SYS_getxattr',385) SYS_getxgid = Constant('SYS_getxgid',47) SYS_getxpid = Constant('SYS_getxpid',20) SYS_getxuid = Constant('SYS_getxuid',24) SYS_init_module = Constant('SYS_init_module',307) SYS_inotify_add_watch = Constant('SYS_inotify_add_watch',445) SYS_inotify_init = Constant('SYS_inotify_init',444) SYS_inotify_rm_watch = Constant('SYS_inotify_rm_watch',446) SYS_io_cancel = Constant('SYS_io_cancel',402) SYS_ioctl = Constant('SYS_ioctl',54) SYS_io_destroy = Constant('SYS_io_destroy',399) SYS_io_getevents = Constant('SYS_io_getevents',400) SYS_ioprio_get = Constant('SYS_ioprio_get',443) 
SYS_ioprio_set = Constant('SYS_ioprio_set',442) SYS_io_setup = Constant('SYS_io_setup',398) SYS_io_submit = Constant('SYS_io_submit',401) SYS_kexec_load = Constant('SYS_kexec_load',448) SYS_keyctl = Constant('SYS_keyctl',441) SYS_kill = Constant('SYS_kill',37) SYS_lchown = Constant('SYS_lchown',208) SYS_lgetxattr = Constant('SYS_lgetxattr',386) SYS_link = Constant('SYS_link',9) SYS_linkat = Constant('SYS_linkat',458) SYS_listen = Constant('SYS_listen',106) SYS_listxattr = Constant('SYS_listxattr',388) SYS_llistxattr = Constant('SYS_llistxattr',389) SYS_lookup_dcookie = Constant('SYS_lookup_dcookie',406) SYS_lremovexattr = Constant('SYS_lremovexattr',392) SYS_lseek = Constant('SYS_lseek',19) SYS_lsetxattr = Constant('SYS_lsetxattr',383) SYS_lstat = Constant('SYS_lstat',68) SYS_lstat64 = Constant('SYS_lstat64',426) SYS_madvise = Constant('SYS_madvise',75) SYS_mbind = Constant('SYS_mbind',429) SYS_migrate_pages = Constant('SYS_migrate_pages',449) SYS_mincore = Constant('SYS_mincore',375) SYS_mkdir = Constant('SYS_mkdir',136) SYS_mkdirat = Constant('SYS_mkdirat',451) SYS_mknod = Constant('SYS_mknod',14) SYS_mknodat = Constant('SYS_mknodat',452) SYS_mlock = Constant('SYS_mlock',314) SYS_mlockall = Constant('SYS_mlockall',316) SYS_mmap = Constant('SYS_mmap',71) SYS_mount = Constant('SYS_mount',302) SYS_move_pages = Constant('SYS_move_pages',472) SYS_mprotect = Constant('SYS_mprotect',74) SYS_mq_getsetattr = Constant('SYS_mq_getsetattr',437) SYS_mq_notify = Constant('SYS_mq_notify',436) SYS_mq_open = Constant('SYS_mq_open',432) SYS_mq_timedreceive = Constant('SYS_mq_timedreceive',435) SYS_mq_timedsend = Constant('SYS_mq_timedsend',434) SYS_mq_unlink = Constant('SYS_mq_unlink',433) SYS_mremap = Constant('SYS_mremap',341) SYS_msgctl = Constant('SYS_msgctl',200) SYS_msgget = Constant('SYS_msgget',201) SYS_msgrcv = Constant('SYS_msgrcv',202) SYS_msgsnd = Constant('SYS_msgsnd',203) SYS_msync = Constant('SYS_msync',217) SYS_munlock = Constant('SYS_munlock',315) SYS_munlockall = 
Constant('SYS_munlockall',317) SYS_munmap = Constant('SYS_munmap',73) SYS_nanosleep = Constant('SYS_nanosleep',340) SYS_nfsservctl = Constant('SYS_nfsservctl',342) SYS_old_adjtimex = Constant('SYS_old_adjtimex',303) SYS_oldumount = Constant('SYS_oldumount',321) SYS_open = Constant('SYS_open',45) SYS_openat = Constant('SYS_openat',450) SYS_osf_adjtime = Constant('SYS_osf_adjtime',140) SYS_osf_afs_syscall = Constant('SYS_osf_afs_syscall',258) SYS_osf_alt_plock = Constant('SYS_osf_alt_plock',181) SYS_osf_alt_setsid = Constant('SYS_osf_alt_setsid',188) SYS_osf_alt_sigpending = Constant('SYS_osf_alt_sigpending',187) SYS_osf_asynch_daemon = Constant('SYS_osf_asynch_daemon',163) SYS_osf_audcntl = Constant('SYS_osf_audcntl',252) SYS_osf_audgen = Constant('SYS_osf_audgen',253) SYS_osf_chflags = Constant('SYS_osf_chflags',34) SYS_osf_execve = Constant('SYS_osf_execve',11) SYS_osf_exportfs = Constant('SYS_osf_exportfs',169) SYS_osf_fchflags = Constant('SYS_osf_fchflags',35) SYS_osf_fdatasync = Constant('SYS_osf_fdatasync',261) SYS_osf_fpathconf = Constant('SYS_osf_fpathconf',248) SYS_osf_fstatfs = Constant('SYS_osf_fstatfs',161) SYS_osf_fuser = Constant('SYS_osf_fuser',243) SYS_osf_getaddressconf = Constant('SYS_osf_getaddressconf',214) SYS_osf_getdirentries = Constant('SYS_osf_getdirentries',159) SYS_osf_getdomainname = Constant('SYS_osf_getdomainname',165) SYS_osf_getfh = Constant('SYS_osf_getfh',164) SYS_osf_getfsstat = Constant('SYS_osf_getfsstat',18) SYS_osf_gethostid = Constant('SYS_osf_gethostid',142) SYS_osf_getitimer = Constant('SYS_osf_getitimer',86) SYS_osf_getlogin = Constant('SYS_osf_getlogin',49) SYS_osf_getmnt = Constant('SYS_osf_getmnt',184) SYS_osf_getrusage = Constant('SYS_osf_getrusage',117) SYS_osf_getsysinfo = Constant('SYS_osf_getsysinfo',256) SYS_osf_gettimeofday = Constant('SYS_osf_gettimeofday',116) SYS_osf_kloadcall = Constant('SYS_osf_kloadcall',223) SYS_osf_kmodcall = Constant('SYS_osf_kmodcall',77) SYS_osf_memcntl = Constant('SYS_osf_memcntl',260) 
SYS_osf_mincore = Constant('SYS_osf_mincore',78) SYS_osf_mount = Constant('SYS_osf_mount',21) SYS_osf_mremap = Constant('SYS_osf_mremap',65) SYS_osf_msfs_syscall = Constant('SYS_osf_msfs_syscall',240) SYS_osf_msleep = Constant('SYS_osf_msleep',215) SYS_osf_mvalid = Constant('SYS_osf_mvalid',213) SYS_osf_mwakeup = Constant('SYS_osf_mwakeup',216) SYS_osf_naccept = Constant('SYS_osf_naccept',30) SYS_osf_nfssvc = Constant('SYS_osf_nfssvc',158) SYS_osf_ngetpeername = Constant('SYS_osf_ngetpeername',31) SYS_osf_ngetsockname = Constant('SYS_osf_ngetsockname',32) SYS_osf_nrecvfrom = Constant('SYS_osf_nrecvfrom',29) SYS_osf_nrecvmsg = Constant('SYS_osf_nrecvmsg',27) SYS_osf_nsendmsg = Constant('SYS_osf_nsendmsg',28) SYS_osf_ntp_adjtime = Constant('SYS_osf_ntp_adjtime',245) SYS_osf_ntp_gettime = Constant('SYS_osf_ntp_gettime',246) SYS_osf_old_creat = Constant('SYS_osf_old_creat',8) SYS_osf_old_fstat = Constant('SYS_osf_old_fstat',62) SYS_osf_old_getpgrp = Constant('SYS_osf_old_getpgrp',81) SYS_osf_old_killpg = Constant('SYS_osf_old_killpg',146) SYS_osf_old_lstat = Constant('SYS_osf_old_lstat',40) SYS_osf_old_open = Constant('SYS_osf_old_open',5) SYS_osf_oldquota = Constant('SYS_osf_oldquota',149) SYS_osf_old_sigaction = Constant('SYS_osf_old_sigaction',46) SYS_osf_old_sigblock = Constant('SYS_osf_old_sigblock',109) SYS_osf_old_sigreturn = Constant('SYS_osf_old_sigreturn',139) SYS_osf_old_sigsetmask = Constant('SYS_osf_old_sigsetmask',110) SYS_osf_old_sigvec = Constant('SYS_osf_old_sigvec',108) SYS_osf_old_stat = Constant('SYS_osf_old_stat',38) SYS_osf_old_vadvise = Constant('SYS_osf_old_vadvise',72) SYS_osf_old_vtrace = Constant('SYS_osf_old_vtrace',115) SYS_osf_old_wait = Constant('SYS_osf_old_wait',84) SYS_osf_pathconf = Constant('SYS_osf_pathconf',247) SYS_osf_pid_block = Constant('SYS_osf_pid_block',153) SYS_osf_pid_unblock = Constant('SYS_osf_pid_unblock',154) SYS_osf_plock = Constant('SYS_osf_plock',107) SYS_osf_priocntlset = Constant('SYS_osf_priocntlset',237) 
SYS_osf_profil = Constant('SYS_osf_profil',44) SYS_osf_proplist_syscall = Constant('SYS_osf_proplist_syscall',244) SYS_osf_reboot = Constant('SYS_osf_reboot',55) SYS_osf_revoke = Constant('SYS_osf_revoke',56) SYS_osf_sbrk = Constant('SYS_osf_sbrk',69) SYS_osf_security = Constant('SYS_osf_security',222) SYS_osf_select = Constant('SYS_osf_select',93) SYS_osf_sethostid = Constant('SYS_osf_sethostid',143) SYS_osf_setitimer = Constant('SYS_osf_setitimer',83) SYS_osf_setlogin = Constant('SYS_osf_setlogin',50) SYS_osf_set_program_attributes = Constant('SYS_osf_set_program_attributes',43) SYS_osf_set_speculative = Constant('SYS_osf_set_speculative',239) SYS_osf_setsysinfo = Constant('SYS_osf_setsysinfo',257) SYS_osf_settimeofday = Constant('SYS_osf_settimeofday',122) SYS_osf_shmat = Constant('SYS_osf_shmat',209) SYS_osf_signal = Constant('SYS_osf_signal',218) SYS_osf_sigprocmask = Constant('SYS_osf_sigprocmask',48) SYS_osf_sigsendset =
in zero field in a dictionary where the keys are the element symbols and the values are the numpy force array for all atoms of that element. for_1 : dict Ionic forces in applied efield but with clamped ions in a dictionary formatted like for_0. z_exp : dict Expected born effective charge for each element type from a matrix-only calculation. Keys are element symbols, and values are expected BECs. e_ext : float, optional, default: 0.001 The magnitude of the applied electric field (au). e_field_direction : list, optional, default: [0,0,1] The 3D vector direction of the efield. Ex: [0,0,1] is an electric field in the positive z-direction. """ e_loc = {} e_field_direction = np.array(e_field_direction) / np.linalg.norm(np.array(e_field_direction)) # make sure the parsed forces have matching elements if set(for_0.keys()) != set(for_1.keys()): raise ValueError('Different elements present in the two provided files.') # get the Born Effective Charge using the finite difference between 0 field and clamped ion for key in for_0: if len(for_0[key]) != len(for_1[key]): raise ValueError('Provided files have different number of {} atoms'.format(key)) e_loc[key] = (for_1[key].dot(e_field_direction) - for_0[key].dot(e_field_direction)) / z_exp[key] - e_ext return e_loc def infer_e_field(for_0, for_1, z_exp, e_field_direction=[0, 0, 1]): """ Calculate the born effective charges for an array of ions. Parameters ---------- for_0 : dict Ionic forces in zero field in a dictionary where the keys are the element symbols and the values are the numpy force array for all atoms of that element. for_1 : dict Ionic forces in applied efield but with clamped ions in a dictionary formatted like for_0. z_exp : dict Expected born effective charge for each element type from a matrix-only calculation. Keys are element symbols, and values are expected BECs. e_field_direction : list, optional, default: [0,0,1] The 3D vector direction of the efield. 
Ex: [0,0,1] is an electric field in the positive z-direction. """ e_loc = {} e_field_direction = np.array(e_field_direction) / np.linalg.norm(np.array(e_field_direction)) # make sure the parsed forces have matching elements if set(for_0.keys()) != set(for_1.keys()): raise ValueError('Different elements present in the two provided files.') # get the Born Effective Charge using the finite difference between 0 field and clamped ion for key in for_0: if len(for_0[key]) != len(for_1[key]): raise ValueError('Provided files have different number of {} atoms'.format(key)) e_loc[key] = (for_1[key].dot(e_field_direction) - for_0[key].dot(e_field_direction)) / z_exp[key] return e_loc def get_field_along_d(field_dict, sub_mean_field=False, e_field=0.25, e_field_direction=[0, 0, 1]): """ Calculate the electric field along a specific direction from the FE results. Parameters ---------- field_dict : dict Electric field at atomic locations in a dictionary where the keys are the element symbols and the values are the numpy array of the electric field for all atoms of that element. sub_mean_field : bool, optional If set, the external applied field is subtracted from the calculated fields, meaning that only the local field disturbance caused by the inclusion will be plotted. Defaults to False. e_field : float The magnitude of the applied electric field in V/m. e_field_direction : list The 3D vector direction of the efield. Ex: [0,0,1] is an electric field in the positive z-direction. Returns ------- field : dict Electric field magnitude along the specified direction at atomic locations in a dictionary with same format as field_dict. 
""" field = {} e_field_direction = np.array(e_field_direction) / np.linalg.norm(np.array(e_field_direction)) for key in field_dict: if sub_mean_field: field_dict[key] = field_dict[key] - (e_field * e_field_direction) field[key] = field_dict[key].dot(e_field_direction.T) return field def to_Bohr(coords): """Convert a coordinate dictionary from Angstroms to Bohr""" for key in coords: coords[key] = coords[key] / A_TO_B return coords def get_dipole_field(coords, dipole_loc=[0, 0, 0], p_vec=[0, 0, 1], p=1, is_angstrom=True): """ Returns the electric field from a point dipole. Parameters ---------- coords : dict Dictionary of atomic coordinates, where the keys are the element symbols, and the values are the numpy coordinate array for all atoms of that element. dipole_loc : list or numpy.ndarray The 3D coordinates of the dipole location. Ex: dipole_loc=get_centroid(coords, key='Ag') dipole_loc : list or numpy.ndarray The 3D coordinates of the dipole location. Ex: dipole_loc=get_centroid(coords, key='Ag') is_angstrom : bool, optional, default : True Indicates whether the input atomic coordinates are in Angstroms (if False, Bohr is assumed) Returns ------- field : dict Electric field at atomic locations in a dictionary with same format as coords. 
""" dipole_loc = np.array(dipole_loc) p_vec = np.array(p_vec) # verify that it is normalized first p_vec = p_vec / np.linalg.norm(p_vec) p_vec = p * p_vec # make sure everything is in atomic units if is_angstrom: coords = to_Bohr(coords) dipole_loc = dipole_loc / A_TO_B ions = utils.as_dataframe(coords) field = {} for key in coords: r_vec = ions.loc[ions['element'] == key][['X', 'Y', 'Z']].values - dipole_loc r_mag = np.linalg.norm(r_vec, axis=1).reshape(-1, 1) r_unit = r_vec / r_mag field[key] = 1 / r_mag ** 3 * (np.dot(r_unit, 3 * p_vec.T).reshape(-1, 1) * r_unit - p_vec) return field def get_dipole_field_displaced(coords, dipole_loc=[0, 0, 0], p_vec=[0, 0, 1], q=1, d=0.1, is_angstrom=True): """ Returns the electric field from a point dipole. Parameters ---------- coords : dict Dictionary of atomic coordinates, where the keys are the element symbols, and the values are the numpy coordinate array for all atoms of that element. dipole_loc : list or numpy.ndarray The 3D coordinates of the dipole location. Ex: dipole_loc=get_centroid(coords, key='Ag') is_angstrom : bool, optional, default : True Indicates whether the input atomic coordinates are in Angstroms (if False, Bohr is assumed) Returns ------- field : dict Electric field at atomic locations in a dictionary with same format as coords. 
""" dipole_loc = np.array(dipole_loc) p_vec = np.array(p_vec) # verify that it is normalized first p_vec = p_vec / np.linalg.norm(p_vec) # make sure everything is in atomic units if is_angstrom: coords = to_Bohr(coords) dipole_loc = dipole_loc / A_TO_B ions = utils.as_dataframe(coords) field = {} for key in coords: ppos = dipole_loc + d / 2 * p_vec pneg = dipole_loc - d / 2 * p_vec r_pos = ions.loc[ions['element'] == key][['X', 'Y', 'Z']].values - ppos r_pos_mag = np.linalg.norm(r_pos, axis=1).reshape(-1, 1) r_neg = ions.loc[ions['element'] == key][['X', 'Y', 'Z']].values - pneg r_neg_mag = np.linalg.norm(r_neg, axis=1).reshape(-1, 1) r_pos_unit = r_pos / r_pos_mag r_neg_unit = r_neg / r_neg_mag pos_field = q / r_pos_mag ** 2 * r_pos_unit neg_field = -q / r_neg_mag ** 2 * r_neg_unit field[key] = pos_field + neg_field return field def gen_BEC_df(no_efield, clamped_ion, xyz, e_field=0.001, e_field_direction=[0, 0, 1], add_forces=False): """ Generate a pandas dataframe containing the Born Effective Charges of the ions in the unit cell Parameters ---------- no_efield : string File path of QE output file containing the polarization with no applied electric field clamped_ion : string File path of QE output file containing the polarization with applied electric field but ions clamped in place xyz : string File path of .xyz - formatted coordinates of unit cell e_field : float, optional, default=0.001 Electric field strength in au. e_field_direction : list, optional, default=[0, 0, 1] Vector of the electric field direction. In positive z-direction by default add_forces : bool, optional, default=False If True, include the forces on the ions before and after electric field applied to the output dataframe Returns ------- pandas.Dataframe Dataframe of ionic coordinates and Born effective charges. Columns are ["Element", "X", "Y", "Z", "BEC"], with optional "Force0", "Force1". 
""" # parse coordinates coords = parsers.get_coordinates(xyz) # parse forces for_0, for_1 = parsers.get_converged_forces(no_efield), parsers.get_converged_forces(clamped_ion) # calculate Born Effective Charges BEC = get_BECs(for_0, for_1, e_field, e_field_direction) if add_forces: return utils.as_dataframe(coords, BEC, for_0, for_1) else: return utils.as_dataframe(coords, BEC) def ave_BEC_dict(elements, BECs): """ Create a dictionary where the keys are the element symbols and the values are the average Born Effective Charges for that element. """ elements, BECs = (list(elements), list(BECs)) BEC_dict = {el: [] for el in elements} for element, BEC in zip(elements, BECs): BEC_dict[element].append(BEC) for key in BEC_dict: BEC_dict[key] = np.mean(BEC_dict[key]) return BEC_dict def point_in_hull(point, hull, tolerance=1e-12, inc=False): """https://stackoverflow.com/questions/16750618/whats-an-efficient-way-to-find-if-a-point-lies-in-the-convex-hull-of-a-point-cl/42165596#42165596""" if not isinstance(hull, ConvexHull): hull = ConvexHull(hull) if inc: return all((np.dot(eq[:-1], point) + eq[-1] <= tolerance) for eq in hull.equations) else: # don't include points that lie within a tolerance of a facet
# NOTE(review): whitespace-mangled OceanBase inner-table schema definitions;
# bytes preserved because column specs are schema-significant.  This line:
# tail of the __all_backup_log_archive_status columns (its def started before
# this chunk), __all_tenant_object_type (+ history table 284), and the head
# of __all_backup_validation_job (table 285).
'int', 'false', '0'), ('finish_macro_block_count', 'int', 'false', '0'), ('partition_count', 'int', 'false', '0'), ('finish_partition_count', 'int', 'false', '0'), ('restore_info', 'varchar:OB_INNER_TABLE_DEFAULT_VALUE_LENTH'), ], columns_with_tenant_id = [], ) def_table_schema(**all_backup_log_archive_status_def) def_table_schema(**gen_history_table_def(282, all_table_v2_def)) all_tenant_object_type_def = dict( table_name = '__all_tenant_object_type', table_id = '283', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('tenant_id', 'int', 'false'), ('object_type_id', 'int', 'false'), ('type', 'int', 'false'), ], rs_restart_related = True, in_tenant_space = True, normal_columns = [ ('schema_version', 'int'), ('properties', 'int'), ('charset_id', 'int'), ('charset_form', 'int'), ('length', 'int'), ('number_precision', 'int'), ('scale', 'int'), ('zero_fill', 'int'), ('coll_type', 'int'), ('database_id', 'int'), ('flag', 'int', 'false'), ('owner_id', 'int', 'false'), ('comp_flag', 'int', 'true'), ('object_name', 'varchar:OB_MAX_TABLE_TYPE_LENGTH', 'false'), ('exec_env', 'varchar:OB_MAX_PROC_ENV_LENGTH', 'true'), ('source', 'longtext', 'true'), ('comment', 'varchar:MAX_TENANT_COMMENT_LENGTH', 'true'), ('route_sql', 'longtext', 'true') ], columns_with_tenant_id = ['database_id', 'owner_id', 'object_type_id'], ) def_table_schema(**all_tenant_object_type_def) def_table_schema(**gen_history_table_def(284, all_tenant_object_type_def)) all_backup_validation_job_def = dict( table_name = '__all_backup_validation_job', table_id = '285', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('job_id', 'int'), ('tenant_id', 'int'), ('incarnation', 'int'), ('backup_set_id', 'int'), ], in_tenant_space = False, rs_restart_related = False, is_cluster_private = True, is_backup_private = True, normal_columns = [ ('tenant_name', 'varchar:OB_MAX_TENANT_NAME_LENGTH_STORE'), ('progress_percent', 'int'), 
# NOTE(review): tail of __all_backup_validation_job (285),
# __all_backup_validation_job_history (286),
# __all_tenant_backup_validation_task (287), and the head of
# __all_backup_validation_task_history (288).  Bytes preserved.
('status', 'varchar:OB_DEFAULT_STATUS_LENTH'), ], columns_with_tenant_id = [], ) def_table_schema(**all_backup_validation_job_def) all_backup_validation_job_history_def = dict( table_name = '__all_backup_validation_job_history', table_id = '286', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('job_id', 'int'), ('tenant_id', 'int'), ('incarnation', 'int'), ('backup_set_id', 'int'), ], in_tenant_space = False, rs_restart_related = False, is_cluster_private = True, is_backup_private = True, normal_columns = [ ('tenant_name', 'varchar:OB_MAX_TENANT_NAME_LENGTH_STORE'), ('progress_percent', 'int'), ('status', 'varchar:OB_DEFAULT_STATUS_LENTH'), ], columns_with_tenant_id = [], ) def_table_schema(**all_backup_validation_job_history_def) all_tenant_backup_validation_task_def = dict( table_name = '__all_tenant_backup_validation_task', table_id = '287', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('tenant_id', 'int'), ('job_id', 'int'), ('task_id', 'int'), ('incarnation', 'int'), ('backup_set_id', 'int'), ], in_tenant_space = True, rs_restart_related = False, is_cluster_private = True, is_backup_private = True, normal_columns = [ ('status', 'varchar:OB_DEFAULT_STATUS_LENTH'), ('backup_dest', 'varchar:OB_MAX_BACKUP_DEST_LENGTH', 'true'), ('start_time', 'timestamp'), ('end_time', 'timestamp'), ('total_pg_count', 'int'), ('finish_pg_count', 'int'), ('total_partition_count', 'int'), ('finish_partition_count', 'int'), ('total_macro_block_count', 'int'), ('finish_macro_block_count', 'int'), ("log_size", 'int'), ('result', 'int'), ('comment', 'varchar:MAX_TABLE_COMMENT_LENGTH'), ], columns_with_tenant_id = [], ) def_table_schema(**all_tenant_backup_validation_task_def) all_backup_validation_task_history_def = dict( table_name = '__all_backup_validation_task_history', table_id = '288', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ 
# NOTE(review): tail of __all_backup_validation_task_history (288),
# __all_tenant_pg_backup_validation_task (289), and the head of the
# __all_tenant_time_zone (290) def_table_schema call.  Bytes preserved.
('tenant_id', 'int'), ('job_id', 'int'), ('task_id', 'int'), ('incarnation', 'int'), ('backup_set_id', 'int'), ], in_tenant_space = False, rs_restart_related = False, is_cluster_private = True, is_backup_private = True, normal_columns = [ ('status', 'varchar:OB_DEFAULT_STATUS_LENTH'), ('backup_dest', 'varchar:OB_MAX_BACKUP_DEST_LENGTH', 'true'), ('start_time', 'timestamp'), ('end_time', 'timestamp'), ('total_pg_count', 'int'), ('finish_pg_count', 'int'), ('total_partition_count', 'int'), ('finish_partition_count', 'int'), ('total_macro_block_count', 'int'), ('finish_macro_block_count', 'int'), ("log_size", 'int'), ('result', 'int'), ('comment', 'varchar:MAX_TABLE_COMMENT_LENGTH'), ], columns_with_tenant_id = [], ) def_table_schema(**all_backup_validation_task_history_def) all_tenant_pg_backup_validation_task_def = dict( table_name = '__all_tenant_pg_backup_validation_task', table_id = '289', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('tenant_id', 'int'), ('job_id', 'int'), ('task_id', 'int'), ('incarnation', 'int'), ('backup_set_id', 'int'), ('table_id', 'int'), ('partition_id', 'int'), ], in_tenant_space = True, rs_restart_related = False, is_cluster_private = True, is_backup_private = True, normal_columns = [ ('archive_round', 'int'), ('status', 'varchar:OB_DEFAULT_STATUS_LENTH'), ('trace_id', 'varchar:OB_MAX_TRACE_ID_BUFFER_SIZE', 'true'), ('svr_ip', 'varchar:MAX_IP_ADDR_LENGTH'), ('svr_port', 'int'), ('total_partition_count', 'int'), ('finish_partition_count', 'int'), ('total_macro_block_count', 'int'), ('finish_macro_block_count', 'int'), ('log_info', 'varchar:64'), ("log_size", 'int'), ('result', 'int'), ('comment', 'varchar:MAX_TABLE_COMMENT_LENGTH'), ], columns_with_tenant_id = [], ) def_table_schema(**all_tenant_pg_backup_validation_task_def) def_table_schema( table_name = '__all_tenant_time_zone', table_id = '290', table_type = 'SYSTEM_TABLE', gm_columns = [], rowkey_columns = [ ('tenant_id', 'int', 
# NOTE(review): tail of __all_tenant_time_zone (290),
# __all_tenant_time_zone_name (291), __all_tenant_time_zone_transition (292),
# __all_tenant_time_zone_transition_type (293), and the head of
# __all_tenant_constraint_column (294).  Bytes preserved.
'false', '-1'), ('time_zone_id', 'int', 'false', 'NULL') ], in_tenant_space = True, is_cluster_private = False, is_backup_private = False, normal_columns = [ ('use_leap_seconds', 'varchar:8', 'false', 'N'), ('version', 'int', 'true'), ], columns_with_tenant_id = [], ) def_table_schema( table_name = '__all_tenant_time_zone_name', table_id = '291', table_type = 'SYSTEM_TABLE', gm_columns = [], rowkey_columns = [ ('tenant_id', 'int', 'false', '-1'), ('name', 'varchar:64', 'false', 'NULL') ], in_tenant_space = True, is_cluster_private = False, is_backup_private = False, normal_columns = [ ('time_zone_id', 'int', 'false', 'NULL'), ('version', 'int', 'true'), ], columns_with_tenant_id = [], ) def_table_schema( table_name = '__all_tenant_time_zone_transition', table_id = '292', table_type = 'SYSTEM_TABLE', gm_columns = [], rowkey_columns = [ ('tenant_id', 'int', 'false', '-1'), ('time_zone_id', 'int', 'false', 'NULL'), ('transition_time', 'int', 'false', 'NULL') ], in_tenant_space = True, is_cluster_private = False, is_backup_private = False, normal_columns = [ ('transition_type_id', 'int', 'false', 'NULL'), ('version', 'int', 'true'), ], columns_with_tenant_id = [], ) def_table_schema( table_name = '__all_tenant_time_zone_transition_type', table_id = '293', table_type = 'SYSTEM_TABLE', gm_columns = [], rowkey_columns = [ ('tenant_id', 'int', 'false', '-1'), ('time_zone_id', 'int', 'false', 'NULL'), ('transition_type_id', 'int', 'false', 'NULL') ], in_tenant_space = True, is_cluster_private = False, is_backup_private = False, normal_columns = [ ('offset', 'int', 'false', '0'), ('is_dst', 'int', 'false', '0'), ('abbreviation', 'varchar:8', 'false', ''), ('version', 'int', 'true'), ], columns_with_tenant_id = [], ) all_tenant_constraint_column_def = dict( table_name = '__all_tenant_constraint_column', table_id = '294', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('tenant_id', 'int', 'false'), ('table_id', 'int', 'false'), 
# NOTE(review): tail of __all_tenant_constraint_column (294, + history 295),
# __all_tenant_global_transaction (296), __all_tenant_dependency (297), and
# the head of __all_backup_backupset_job (298).  Bytes preserved.
('constraint_id', 'int', 'false'), ('column_id', 'int', 'false'), ], rs_restart_related = True, in_tenant_space = True, normal_columns = [ ('schema_version', 'int', 'false'), ], columns_with_tenant_id = ['table_id'], ) def_table_schema(**all_tenant_constraint_column_def) def_table_schema(**gen_history_table_def(295, all_tenant_constraint_column_def)) def_table_schema( table_name = '__all_tenant_global_transaction', table_id = '296', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('gtrid', 'varbinary:128'), ('bqual', 'varbinary:128'), ('format_id', 'int', 'false', '1'), ], in_tenant_space = True, is_cluster_private = False, is_backup_private = True, normal_columns = [ ('trans_id', 'varchar:512'), ('coordinator', 'varchar:128', 'true'), ('scheduler_ip', 'varchar:OB_MAX_SERVER_ADDR_SIZE'), ('scheduler_port', 'int'), ('is_readonly', 'bool', 'false', '0'), ('state', 'int'), ('end_flag', 'int'), ], columns_with_tenant_id = [], ) all_tenant_dependency_def = dict( table_name = '__all_tenant_dependency', table_id = '297', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('tenant_id', 'int', 'false'), ('dep_obj_type', 'int'), ('dep_obj_id', 'int'), ('dep_order', 'int'), ], rs_restart_related = False, is_cluster_private = True, is_backup_private = True, in_tenant_space = True, normal_columns = [ ('schema_version', 'int'), ('dep_timestamp', 'int'), ('ref_obj_type', 'int'), ('ref_obj_id', 'int'), ('ref_timestamp', 'int'), ('dep_obj_owner_id', 'int', 'true'), ('property', 'int'), ('dep_attrs', 'varbinary:OB_MAX_ORACLE_RAW_SQL_COL_LENGTH', 'true'), ('dep_reason', 'varbinary:OB_MAX_ORACLE_RAW_SQL_COL_LENGTH', 'true'), ('ref_obj_name', 'varchar:OB_MAX_TABLE_NAME_LENGTH', 'true') ], columns_with_tenant_id = ['dep_obj_id', 'ref_obj_id', 'dep_obj_owner_id'], ) def_table_schema(**all_tenant_dependency_def) all_backup_backupset_job_def = dict( table_name = '__all_backup_backupset_job', table_id = 
# NOTE(review): tail of __all_backup_backupset_job (298),
# __all_backup_backupset_job_history (299), and the head of
# __all_tenant_backup_backupset_task (300).  Bytes preserved.
'298', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('job_id', 'int'), ('tenant_id', 'int'), ('incarnation', 'int'), ('backup_set_id', 'int'), ('copy_id', 'int'), ], in_tenant_space = False, rs_restart_related = False, is_cluster_private = True, is_backup_private = True, normal_columns = [ ('backup_backupset_type', 'varchar:OB_INNER_TABLE_BACKUP_TYPE_LENTH'), ('tenant_name', 'varchar:OB_MAX_TENANT_NAME_LENGTH_STORE'), ('status', 'varchar:OB_DEFAULT_STATUS_LENTH'), ], columns_with_tenant_id = [], ) def_table_schema(**all_backup_backupset_job_def) all_backup_backupset_job_history_def = dict( table_name = '__all_backup_backupset_job_history', table_id = '299', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('job_id', 'int'), ('tenant_id', 'int'), ('incarnation', 'int'), ('backup_set_id', 'int'), ('copy_id', 'int'), ], in_tenant_space = False, rs_restart_related = False, is_cluster_private = True, is_backup_private = True, normal_columns = [ ('backup_backupset_type', 'varchar:OB_INNER_TABLE_BACKUP_TYPE_LENTH'), ('tenant_name', 'varchar:OB_MAX_TENANT_NAME_LENGTH_STORE'), ('status', 'varchar:OB_DEFAULT_STATUS_LENTH'), ], columns_with_tenant_id = [], ) def_table_schema(**all_backup_backupset_job_history_def) all_tenant_backup_backupset_task_def = dict( table_name = '__all_tenant_backup_backupset_task', table_id = '300', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('tenant_id', 'int'), ('job_id', 'int'), ('incarnation', 'int'), ('backup_set_id', 'int'), ('copy_id', 'int'), ], in_tenant_space = True, rs_restart_related = False, is_cluster_private = True, is_backup_private = True, normal_columns = [ ('backup_type', 'varchar:OB_INNER_TABLE_BACKUP_TYPE_LENTH'), ('snapshot_version', 'int'), ('prev_full_backup_set_id', 'int'), ('prev_inc_backup_set_id', 'int'), ('prev_backup_data_version', 'int'), ('input_bytes', 'int'), ('output_bytes', 
'int'), ('start_time', 'timestamp'), ('end_time', 'timestamp'), ('compatible', 'int'), ('cluster_id', 'int', 'true'), ('cluster_version', 'int'), ('cluster_version_display', 'varchar:OB_INNER_TABLE_BACKUP_TASK_CLUSTER_FORMAT_LENGTH', 'true'), ('status', 'varchar:OB_DEFAULT_STATUS_LENTH'), ('src_backup_dest', 'varchar:OB_MAX_BACKUP_DEST_LENGTH', 'true'), ('dst_backup_dest', 'varchar:OB_MAX_BACKUP_DEST_LENGTH', 'true'), ('src_device_type', 'varchar:OB_DEFAULT_OUTPUT_DEVICE_TYPE_LENTH'), ('dst_device_type', 'varchar:OB_DEFAULT_OUTPUT_DEVICE_TYPE_LENTH'), ('backup_data_version', 'int', 'true'), ('backup_schema_version', 'int', 'true'), ('total_pg_count', 'int'), ('finish_pg_count', 'int'), ('total_partition_count', 'int'), ('finish_partition_count', 'int'), ('total_macro_block_count', 'int'), ('finish_macro_block_count', 'int'), ('result', 'int'), ('encryption_mode', 'varchar:OB_MAX_ENCRYPTION_MODE_LENGTH', 'false', 'None'), ('passwd', '<PASSWORD>', '<PASSWORD>', ''), ], columns_with_tenant_id = [], ) def_table_schema(**all_tenant_backup_backupset_task_def) all_backup_backupset_task_history_def = dict( table_name = '__all_backup_backupset_task_history', table_id = '301', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('tenant_id', 'int'), ('job_id', 'int'), ('incarnation', 'int'), ('backup_set_id', 'int'), ('copy_id', 'int'), ], in_tenant_space = False, rs_restart_related = False, is_cluster_private = True, is_backup_private = True, normal_columns = [ ('backup_type', 'varchar:OB_INNER_TABLE_BACKUP_TYPE_LENTH'), ('snapshot_version', 'int'), ('prev_full_backup_set_id', 'int'), ('prev_inc_backup_set_id', 'int'), ('prev_backup_data_version', 'int'), ('input_bytes', 'int'), ('output_bytes', 'int'), ('start_time', 'timestamp'), ('end_time', 'timestamp'), ('compatible', 'int'), ('cluster_id', 'int', 'true'), ('cluster_version', 'int'), ('cluster_version_display', 'varchar:OB_INNER_TABLE_BACKUP_TASK_CLUSTER_FORMAT_LENGTH', 'true'), 
('status', 'varchar:OB_DEFAULT_STATUS_LENTH'), ('src_backup_dest', 'varchar:OB_MAX_BACKUP_DEST_LENGTH', 'true'), ('dst_backup_dest', 'varchar:OB_MAX_BACKUP_DEST_LENGTH', 'true'), ('src_device_type', 'varchar:OB_DEFAULT_OUTPUT_DEVICE_TYPE_LENTH'), ('dst_device_type', 'varchar:OB_DEFAULT_OUTPUT_DEVICE_TYPE_LENTH'), ('backup_data_version', 'int', 'true'), ('backup_schema_version', 'int', 'true'), ('total_pg_count', 'int'), ('finish_pg_count', 'int'), ('total_partition_count', 'int'), ('finish_partition_count', 'int'), ('total_macro_block_count', 'int'), ('finish_macro_block_count', 'int'), ('result', 'int'), ('encryption_mode', 'varchar:OB_MAX_ENCRYPTION_MODE_LENGTH', 'false', 'None'), ('passwd', '<PASSWORD>', '<PASSWORD>', ''), ('is_mark_deleted', 'bool', 'true'), ], columns_with_tenant_id = [], ) def_table_schema(**all_backup_backupset_task_history_def) all_tenant_pg_backup_backupset_task_def = dict( table_name = '__all_tenant_pg_backup_backupset_task', table_id = '302', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('tenant_id', 'int'), ('job_id', 'int'), ('incarnation', 'int'), ('backup_set_id', 'int'), ('copy_id', 'int'), ('table_id', 'int'), ('partition_id', 'int'), ], in_tenant_space = True, rs_restart_related = False, is_cluster_private = True, is_backup_private = True, normal_columns = [ ('status', 'varchar:OB_DEFAULT_STATUS_LENTH'), ('trace_id', 'varchar:OB_MAX_TRACE_ID_BUFFER_SIZE', 'true'), ('svr_ip', 'varchar:OB_MAX_SERVER_ADDR_SIZE'), ('svr_port', 'int'), ('total_partition_count', 'int'), ('finish_partition_count', 'int'), ('total_macro_block_count', 'int'), ('finish_macro_block_count', 'int'), ('result', 'int'), ('comment', 'varchar:MAX_TABLE_COMMENT_LENGTH'), ], columns_with_tenant_id = [], ) def_table_schema(**all_tenant_pg_backup_backupset_task_def) # __all_tenant_backup_backup_log_archive_status all_tenant_backup_backup_log_archive_status_def = dict( table_name = 
'__all_tenant_backup_backup_log_archive_status', table_id = '303', table_type = 'SYSTEM_TABLE', gm_columns = ['gmt_create', 'gmt_modified'], rowkey_columns = [ ('tenant_id', 'int'), ('incarnation', 'int'), ('log_archive_round', 'int'), ('copy_id', 'int'), ], in_tenant_space = True, rs_restart_related = False, is_cluster_private = True, is_backup_private
# <gh_stars>0  -- scrape artifact from the dataset; not valid Python, kept as a comment.
import requests
import requests_cache
from bs4 import BeautifulSoup
import json
from lxml import html
import pdb
import re
import sys
import logging
import datetime
import time
# import winsound
from jinja2 import Environment, FileSystemLoader
import math
import itertools
from playsound import playsound


class Planet:
    """A candidate minor-planet (NEO) pulled from the MPC confirmation page.

    Parses one line of the MPC NEOCP summary table, fetches ephemerides for
    observatory code L01 and decides (``self.discard``) whether the object is
    worth observing tonight.
    """

    # Planet's current location prediction could be scattered throughout the sky.
    # Maximum (maxRa, maxDec) spread (in arc seconds) before we discard the planet.
    maxScatteredness = (1500, 1000)
    # Warn when object is scattered (but don't flag it as discarded).
    maxScatterednessWarning = (1000, 800)
    # Min score for planet to be worth observing.
    minScore = 25
    # Min magnitude (fainter objects are discarded).
    minMagnitude = 22
    # Max days since last observation before the object is discarded.
    maxNotSeenDays = 4

    def __init__(self, info):
        """Parse one whitespace-separated NEOCP summary line.

        NOTE(review): the field indices below assume the fixed NEOCP column
        layout (score at 1, RA/Dec at 5/6, V at 7, #obs at 12, arc and
        not-seen at the tail) — confirm against the MPC page format.
        """
        parts = info.split()
        self.name = parts[0]
        self.score = int(parts[1])
        self.numObservations = int(parts[12])
        self.arc = float(parts[-3])
        self.notSeenDays = float(parts[-1])
        # Rectascension (right ascension).
        self.ra = float(parts[5])
        # Declination.
        self.dec = float(parts[6])
        self.magnitude = float(parts[7])
        # Object not good for observing.
        self.discard = False
        # Set by getEphemerides() when the MPC page offers an uncertainty map.
        self.scatterednessUrl = False

    def analyzePlanet(self):
        """Fetch ephemerides and apply all discard filters to this object.

        Side effects: sets ``self.discard``, ``self.scatteredness``,
        ``self.maxAltitudeEphemeride``, ``self.nearestToNowEphemeride`` and
        ``self.currentInterpolatedEphemeride``; logs the reason for every
        discard decision.
        """
        # pdb.set_trace()
        print("\n" + str(datetime.datetime.utcnow()) + " Working on: " + self.name)
        self.getEphemerides()
        if self.haveWeObserved():
            self.discard = True
            logging.warning('Planet ' + self.name + ' discarded. Reason: we have observed it already before')
        if self.score < Planet.minScore:
            self.discard = True
            logging.warning('Planet ' + self.name + ' discarded. Reason: score too low (' + str(self.score) + ')')
        if self.scatterednessUrl:
            self.scatteredness = self.getScatteredness()
            if self.scatteredness[0] > Planet.maxScatteredness[0] or self.scatteredness[1] > Planet.maxScatteredness[1]:
                self.discard = True
                logging.warning('Planet ' + self.name + ' discarded. Reason: predicted locations too scattered (' + str(self.scatteredness[0]) + ', ' + str(self.scatteredness[1]) + ')')
            elif self.scatteredness[0] > Planet.maxScatterednessWarning[0] or self.scatteredness[1] > Planet.maxScatterednessWarning[1]:
                logging.warning('Location of planet ' + self.name + ' is very scattered! (' + str(self.scatteredness[0]) + ', ' + str(self.scatteredness[1]) + ')')
        # pdb.set_trace()
        # filter not seen > 1.2 days
        if self.notSeenDays > Planet.maxNotSeenDays:
            self.discard = True
            logging.warning('Planet ' + self.name + ' discarded. Reason: too long not seen (' + str(self.notSeenDays) + ' days)')
        # Get Max Altitude
        # TODO - do something with maximum altitude
        if len(self.ephemerides) > 0:
            self.maxAltitudeEphemeride = self.maxAlt()
            if self.maxAltitudeEphemeride:
                pass
                # print("Max Altitude Date: " + self.maxAltitudeEphemeride.date)
                if self.maxAltitudeEphemeride.effMagnitude > Planet.minMagnitude:
                    self.discard = True
                    logging.warning('Planet ' + self.name + ' discarded. Reason: effective magnitude too low (' + str(self.maxAltitudeEphemeride.effMagnitude) + ')' + ' Magnitude (' + str(self.maxAltitudeEphemeride.magnitude) + ')')
            else:
                self.discard = True
                logging.warning('Planet ' + self.name + ' discarded. Reason: no maximum altitude obtained')
            self.nearestToNow()
            self.currentEphemerideInterpolation()
        else:
            self.discard = True
            logging.warning('Planet ' + self.name + ' discarded. Reason: no ephemerides available')
        if not self.discard:
            logging.warning('PLANET OK: ' + self.name)

    def getEphemerides(self):
        """Query the MPC confirmation-ephemeris CGI and parse the result.

        Populates ``self.ephemerides`` (valid ``Ephemeride`` objects only),
        and, when present on the page, ``self.scatterednessUrl``,
        ``self.mapLink`` and ``self.observationsUrl``.
        """
        url = "https://cgi.minorplanetcenter.net/cgi-bin/confirmeph2.cgi"
        # print(self.name)
        # Form fields mirror the MPC "Get ephemerides" form; obscode L01 is
        # the requesting observatory.
        resp = requests.post(url, data={"mb": -30, "mf": 30, "dl": -90, "du": +90,
                                        "nl": 0, "nu": 100, "sort": "d", "W": "j",
                                        "obj": self.name, "Parallax": 1,
                                        "obscode": "L01", "long": None,
                                        "lat": None, "alt": None, "int": 1,
                                        "start": 0, "raty": "a", "mot": "m",
                                        "dmot": "p", "out": "f", "sun": "x",
                                        "oalt": 20})
        resp1 = resp.text
        page = BeautifulSoup(resp1, "html5lib")
        links = page.find("pre")
        lines = (links.text).split("\n")
        # Drop the two header lines and the trailing empty line.
        lines = lines[2:-1]
        lines = [l for l in lines if "<suppressed>" not in l]
        # if self.name == 'ZTF00Wh':
        #     pdb.set_trace()
        # if html.find("pre").find_all('a')[2]['href']
        # Second anchor inside <pre> links to the uncertainty ("scatteredness") page.
        if len(page.find("pre").find_all('a')) > 1 and page.find("pre").find_all('a')[1]['href']:
            self.scatterednessUrl = page.find("pre").find_all('a')[1]['href']
        tree = html.fromstring(resp.content)
        mapLinks = tree.xpath("//pre/a[text()='Map']/@href")
        if len(mapLinks) > 0:
            self.mapLink = mapLinks[0]
        if len(tree.xpath("//a[text()='observations']/@href")) > 0:
            self.observationsUrl = tree.xpath("//a[text()='observations']/@href")[0]
        self.ephemerides = []
        ephReport = {}
        for l in lines:
            eph = Ephemeride(l)
            if eph.isValid():
                self.ephemerides.append(eph)
                ephReport["OK"] = ephReport["OK"] + 1 if "OK" in ephReport else 1
            else:
                # Tally discard reasons so the printed report shows why
                # ephemerides were rejected.
                ephReport[eph.discardReason] = ephReport[eph.discardReason] + 1 if eph.discardReason in ephReport else 1
        if len(ephReport):
            print("Ephemerides report: " + json.dumps(ephReport))
        # print(ephDropReasons)

    def maxAlt(self):
        """Return the ephemeride with the highest altitude, or None.

        Side effect: sets ``self.discard`` when no ephemeride exists.
        """
        maxAlt = float("-inf")
        index = None
        # logging.warning('Obtaining efemeride for: ' + self.name)
        for i, eph in enumerate(self.ephemerides):
            # logging.warning('Eph.alt: ' + str(eph.alt))
            if eph.alt > maxAlt:
                maxAlt = eph.alt
                index = i
        if index is None:
            self.discard = True
            return None
        return self.ephemerides[index]

    def nearestToNow(self):
        """Find the ephemeride closest to (now + 600 s).

        Returns its index into ``self.ephemerides`` (and caches the object in
        ``self.nearestToNowEphemeride``), or None when the list is empty.
        """
        secondsFromNow = float("inf")
        index = None
        for i, eph in enumerate(self.ephemerides):
            # Hoisted: call secondsFromNowPlus600() once per ephemeride; the
            # original called it twice, re-reading the clock each time.
            delta = eph.secondsFromNowPlus600()
            if delta < secondsFromNow:
                secondsFromNow = delta
                index = i
        if isinstance(index, int):
            self.nearestToNowEphemeride = self.ephemerides[index]
            return index
        return None

    def currentEphemerideInterpolation(self):
        """Linearly interpolate azimuth/altitude for (now + 600 s).

        Interpolates between the nearest ephemeride and its neighbour; stores
        the result in ``self.currentInterpolatedEphemeride`` and returns it
        (or the nearest raw ephemeride when interpolation is not possible).

        BUGFIX: the original indexed ``self.ephemerides[index]`` *before*
        checking ``index`` (TypeError when ``nearestToNow()`` returned None)
        and used truthiness (``if index:``), so a perfectly valid index 0 was
        treated like "no ephemeride". Both are guarded/compared explicitly now.
        """
        index = self.nearestToNow()
        if index is None:
            self.currentInterpolatedEphemeride = None
            return None
        interpolatedEph = Ephemeride(self.ephemerides[index].line)
        eph = Ephemeride(self.ephemerides[index].line)
        if eph.secondsFromNowPlus600() > 0:
            if len(self.ephemerides) > index + 1:
                currentEph = eph
                nextEph = self.ephemerides[index + 1]
            else:
                # Nearest is the last entry: nothing to interpolate towards.
                self.currentInterpolatedEphemeride = eph
                return eph
        elif eph.secondsFromNowPlus600() == 0:
            # Exact match: no interpolation needed.
            self.currentInterpolatedEphemeride = eph
            return eph
        else:
            if index > 0:
                currentEph = self.ephemerides[index - 1]
                nextEph = eph
            else:
                self.currentInterpolatedEphemeride = eph
                return eph
        timeInterval = nextEph.dateUnix - currentEph.dateUnix
        # dt = seconds from the earlier ephemeride to (now + 600 s).
        dt = time.mktime(datetime.datetime.utcnow().timetuple()) + 600 - currentEph.dateUnix
        dtPerc = dt / timeInterval
        interpolatedEph.azimuth = currentEph.azimuth + ((nextEph.azimuth - currentEph.azimuth) * dtPerc)
        interpolatedEph.alt = currentEph.alt + ((nextEph.alt - currentEph.alt) * dtPerc)
        interpolatedEph.dateUnix = currentEph.dateUnix + dt
        interpolatedEph.updateLineFromData()
        # print('Interpolated Ephemeride: ')
        # print(interpolatedEph.line)
        self.currentInterpolatedEphemeride = interpolatedEph
        return interpolatedEph

    # Have we observed the planet before
    def haveWeObserved(self):
        """Return True when observatory code L01 appears on the MPC observations page."""
        resp = requests.get(self.observationsUrl)
        tree = html.fromstring(resp.content)
        text = tree.xpath('//pre/text()')
        # pdb.set_trace()
        if re.search("L01\n", text[0]):
            return True
        return False

    # scatteredness of results
    def getScatteredness(self):
        """Return the (RA, Dec) spread in arc seconds of the predicted positions.

        Parses the offsets from the MPC uncertainty page and returns the
        bounding-box extents relative to the nominal (0, 0) prediction.
        """
        resp = requests.get(self.scatterednessUrl).text
        # Renamed from ``html`` to avoid shadowing the ``lxml.html`` module import.
        soup = BeautifulSoup(resp, "html5lib")
        links = soup.find("pre")
        observationPoints = re.findall(r'([+-][0-9]+) +([+-][0-9]+).*Ephemeris # [0-9]+$', links.text, re.M)
        minRa, maxRa, minDec, maxDec = 0, 0, 0, 0
        for point in observationPoints:
            if int(point[0]) < minRa:
                minRa = int(point[0])
            elif int(point[0]) > maxRa:
                maxRa = int(point[0])
            if int(point[1]) < minDec:
                minDec = int(point[1])
            elif int(point[1]) > maxDec:
                maxDec = int(point[1])
        return (maxRa - minRa, maxDec - minDec)


# planet1 = Planet()


class Ephemeride:
    """One ephemeris line (time, position, brightness) for a candidate object."""

    # Maximum sun altitude (otherwise we can't observe the planet).
    maxSunAlt = -15
    # Minimum altitude of object (below can't be seen due to horizon or clouds).
    minAlt = 15
    # Minimum distance of object from the Moon.
    minMoonDistance = 20
    # Minimum motion (speed "/min).
    minMotion = 2.5
    # Why did the ephemeride get discarded (if at all).
    discardReason = ''

    def __init__(self, info):
        """Parse one whitespace-separated MPC ephemeris line.

        Column layout (whitespace-split indices assume this exact format):
        # Date       UT      R.A. (J2000) Decl.  Elong.  V       Motion     Object    Sun   Moon
        #            h m                                        "/min  P.A. Azi. Alt. Alt.  Phase Dist. Alt.
        # 2018 10 12 1900   23 26 39.1 +30 55 48 146.2  22.0    0.22  129.4  289  +62  -28   0.15  114  -03
        """
        self.line = info
        parts = self.line.split()
        self.date = parts[0] + ' ' + parts[1] + ' ' + parts[2] + ' ' + parts[3]
        self.dateUnix = time.mktime(datetime.datetime.strptime(self.date, "%Y %m %d %H%M").timetuple())
        # Azimuth of object at that time.
        self.azimuth = float(parts[14])
        # Altitude of object (above horizon) at that time.
        self.alt = float(parts[15])
        # Altitude of sun at the time.
        self.sunAlt = float(parts[16])
        # Distance from the moon.
        self.moonDistance = float(parts[18])
        self.magnitude = float(parts[11])
        # Effective magnitude - magnitude that takes into account atmospheric
        # extinction due to (low) altitude of planet.
        self.effMagnitude = self.getEffectiveMagnitude()
        self.motion = float(parts[12])
        # Observation time needed (in minutes) - approximates the imaging time
        # needed to get a good picture.
        self.observationTime = self.getObservationTime()
        # pdb.set_trace()
        # logging.warning('Magnitude vs Effective Magnitude: ' + str(self.magnitude) + " : " + str(self.effMagnitude))

    def isValid(self):
        """Return True when this ephemeride is observable; else set discardReason."""
        if self.sunAlt > Ephemeride.maxSunAlt:
            self.discardReason = 'nearSun'
            return False
        if self.alt < Ephemeride.minAlt:
            self.discardReason = 'altLow'
            return False
        if self.moonDistance < Ephemeride.minMoonDistance:
            self.discardReason = 'nearMoon'
            return False
        # NOTE(review): ``Main`` is defined elsewhere in this file (outside
        # this chunk); endObservationTimestamp bounds the observing session.
        if self.dateUnix > Main.endObservationTimestamp:
            self.discardReason = 'tooLate'
            return False
        if self.motion < Ephemeride.minMotion:
            self.discardReason = 'tooSlow'
            return False
        return True

    def getEffectiveMagnitude(self):
        """Magnitude penalized by 0.1 mag per degree below 40 deg altitude."""
        if self.alt < 40:
            return self.magnitude + ((self.alt - 40) * 0.1)
        else:
            return self.magnitude

    def getObservationTime(self):
        """Estimated imaging time in minutes (10 min at eff. mag 18, +5 min/mag)."""
        return round(10 + (self.effMagnitude - 18) * 5, 2)

    def secondsFromNowPlus600(self):
        """Absolute number of seconds between this ephemeride and (now + 600 s)."""
        currentTimePlus600 = time.mktime(datetime.datetime.utcnow().timetuple()) + 600
        return math.fabs(self.dateUnix - currentTimePlus600)

    def updateLineFromData(self):
        """Write the (possibly interpolated) date, azimuth and altitude back into ``self.line``.

        NOTE(review): indices 22/24 assume the original single-space-split
        column positions of the raw MPC line — confirm before reformatting.
        """
        line = self.line.split(' ')
        line[0] = datetime.datetime.fromtimestamp(self.dateUnix).strftime("%Y")
        line[1] = datetime.datetime.fromtimestamp(self.dateUnix).strftime("%m")
        line[2] = datetime.datetime.fromtimestamp(self.dateUnix).strftime("%d")
        line[3] = datetime.datetime.fromtimestamp(self.dateUnix).strftime("%H%M")
        # Azimuth & Altitude.
        line[22] = str(round(self.azimuth)).zfill(3)
        line[24] = str(round(self.alt)) if self.alt < 0 else ('+' + str(round(self.alt)))
        self.line = ' '.join(line)


class Map:
    # NOTE(review): this constructor is truncated in this chunk of the file;
    # the remainder of the body (and the rest of the class) lives beyond the
    # visible region, so only the visible statements are reproduced here.
    def __init__(self, planets):
        renderPlanets = []
        for planet in planets:
            if not planet.discard and planet.currentInterpolatedEphemeride:
                # pdb.set_trace()
                renderDict = {}
                renderDict["name"] = planet.name
                renderDict["magnitude"]
this; b = w.__number__ == 0x04 ? w : new $long(w); if (x === null || typeof x == 'undefined') { c = null; } else { c = x.__number__ == 0x04 ? x : new $long(x); } if (b.ob_size < 0) { if (c !== null) { throw pyjslib['TypeError']("pow() 2nd argument cannot be negative when 3rd argument specified"); } return Math.pow(v.valueOf(), w.valueOf()); } if (c !== null) { if (c.ob_size == 0) { throw pyjslib['ValueError']("pow() 3rd argument cannot be 0"); } if (c.ob_size < 0) { negativeOutput = 1; temp = $pow_temp_c; temp.ob_digit = c.ob_digit.slice(0); temp.ob_size = -c.ob_size; c = temp; } if (c.ob_size == 1 && c.ob_digit[0] == 1) { return $const_long_0; } if (a.ob_size < 0) { temp = $pow_temp_a; l_divmod(a, c, null, temp); a = temp; } } z = new $long(1); temp = $pow_temp_z; if (b.ob_size <= FIVEARY_CUTOFF) { for (i = b.ob_size - 1; i >= 0; --i) { bi = b.ob_digit[i]; for (j = 1 << (PyLong_SHIFT-1); j != 0; j >>>= 1) { z = z.__mul(z); if (c !== null) { l_divmod(z, c, null, temp); z.ob_digit = temp.ob_digit.slice(0); z.ob_size = temp.ob_size; } if (bi & j) { z = z.__mul(a); if (c !== null) { l_divmod(z, c, null, temp); z.ob_digit = temp.ob_digit.slice(0); z.ob_size = temp.ob_size; } } } } } else { table[0] = z; for (i = 1; i < 32; ++i) { table[i] = table[i-1].__mul(a); if (c !== null) { l_divmod(table[i], c, null, temp); table[i].ob_digit = temp.ob_digit.slice(0); table[i].ob_size = temp.ob_size; } } for (i = b.ob_size - 1; i >= 0; --i) { bi = b.ob_digit[i]; for (j = PyLong_SHIFT - 5; j >= 0; j -= 5) { var index = (bi >>> j) & 0x1f; for (k = 0; k < 5; ++k) { z = z.__mul(z); if (c !== null) { l_divmod(z, c, null, temp); z.ob_digit = temp.ob_digit.slice(0); z.ob_size = temp.ob_size; } } if (index) { z = z.__mul(table[index]); if (c !== null) { l_divmod(z, c, null, temp); z.ob_digit = temp.ob_digit.slice(0); z.ob_size = temp.ob_size; } } } } } if ((c !== null) && negativeOutput && (z.ob_size != 0) && (c.ob_size != 0)) { z = z.__sub__(c); } return z; }; $long.__pow__ = function 
(y, z) { switch (y.__number__) { case 0x02: if (typeof z == 'undefined') return this.__pow(new $long(y.__v), null); switch (z.__number) { case 0x02: return this.__pow(new $long(y.__v), new $long(z)); case 0x04: return this.__pow(new $long(y.__v), z); } break; case 0x04: if (typeof z == 'undefined') return this.__pow(y, null); switch (z.__number) { case 0x02: return this.__pow(y, new $long(z)); case 0x04: return this.__pow(y, z); } break; } return pyjslib['NotImplemented']; }; var $const_long_0 = new $long(0), $const_long_1 = new $long(1); // Since javascript is single threaded: var $l_divmod_div = new $long(0), $l_divmod_mod = new $long(0), $x_divrem_v = new $long(0), $x_divrem_w = new $long(0), $pow_temp_a = new $long(0), $pow_temp_c = new $long(0), $pow_temp_z = new $long(0); })(); """) """@CONSTANT_DECLARATION@""" class NotImplementedType(object): def __repr__(self): return "<type 'NotImplementedType'>" def __str__(self): self.__repr__() def toString(self): self.__repr__() NotImplemented = NotImplementedType() JS(""" var $iter_array = function (l) { this.__array = l; this.i = -1; }; $iter_array.prototype.next = function (noStop) { if (++this.i == this.__array.length) { if (noStop === true) { return; } throw pyjslib.StopIteration; } return this.__array[this.i]; }; $iter_array.prototype.__iter__ = function ( ) { return this; }; var $reversed_iter_array = function (l) { this.___array = l; this.i = l.length; }; $reversed_iter_array.prototype.next = function (noStop) { if (--this.i == -1) { if (noStop === true) { return; } throw pyjslib.StopIteration; } return this.___array[this.i]; }; $reversed_iter_array.prototype.__iter__ = function ( ) { return this; }; //$reversed_iter_array.prototype.$genfunc = $reversed_iter_array.prototype.next; var $enumerate_array = function (l) { this.array = l; this.i = -1; this.tuple = """) tuple([0, ""]) JS(""" this.tl = this.tuple.__array; }; $enumerate_array.prototype.next = function (noStop, reuseTuple) { if (++this.i == 
this.array.length) { if (noStop === true) { return; } throw pyjslib.StopIteration; } this.tl[1] = this.array[this.i]; if (this.tl[0].__number__ == 0x01) { this.tl[0] = this.i; } else { this.tl[0] = new pyjslib['int'](this.i); } return reuseTuple === true ? this.tuple : pyjslib.tuple(this.tl); }; $enumerate_array.prototype.__iter__ = function ( ) { return this; }; $enumerate_array.prototype.$genfunc = $enumerate_array.prototype.next; """) # NOTE: $genfunc is defined to enable faster loop code class list: def __init__(self, data=JS("[]")): # Basically the same as extend, but to save expensive function calls... JS(""" if (data === null) { throw pyjslib['TypeError']("'NoneType' is not iterable"); } if (data.constructor === Array) { self.__array = data.slice(); return null; } if (typeof data.__iter__ == 'function') { if (typeof data.__array == 'object') { self.__array = data.__array.slice(); return null; } var iter = data.__iter__(); if (typeof iter.__array == 'object') { self.__array = iter.__array.slice(); return null; } data = []; var item, i = 0; if (typeof iter.$genfunc == 'function') { while (typeof (item=iter.next(true)) != 'undefined') { data[i++] = item; } } else { try { while (true) { data[i++] = iter.next(); } } catch (e) { if (e.__name__ != 'StopIteration') throw e; } } self.__array = data; return null; } throw pyjslib['TypeError']("'" + pyjslib['repr'](data) + "' is not iterable"); """) def __hash__(self): raise TypeError("list objects are unhashable") def append(self, item): JS("""self.__array[self.__array.length] = item;""") # extend in place, just in case there's somewhere a shortcut to self.__array def extend(self, data): # Transform data into an array and append to self.__array JS(""" if (data === null) { throw pyjslib['TypeError']("'NoneType' is not iterable"); } if (data.constructor === Array) { } else if (typeof data.__iter__ == 'function') { if (typeof data.__array == 'object') { data = data.__array; } else { var iter = data.__iter__(); if (typeof 
iter.__array == 'object') { data = iter.__array; } data = []; var item, i = 0; if (typeof iter.$genfunc == 'function') { while (typeof (item=iter.next(true)) != 'undefined') { data[i++] = item; } } else { try { while (true) { data[i++] = iter.next(); } } catch (e) { if (e.__name__ != 'StopIteration') throw e; } } } } else { throw pyjslib['TypeError']("'" + pyjslib['repr'](data) + "' is not iterable"); } var l = self.__array; var j = self.__array.length; var n = data.length, i = 0; while (i < n) { l[j++] = data[i++]; } """) def remove(self, value): JS(""" var index=self.index(value); if (index<0) { throw pyjslib.ValueError("list.remove(x): x not in list"); } self.__array.splice(index, 1); return true; """) def index(self, value, start=0): JS(""" start = start.valueOf(); if (typeof value == 'number' || typeof value == 'string') { start = self.__array.indexOf(value, start); if (start >= 0) return start; } else { var len = self.__array.length >>> 0; start = (start < 0) ? Math.ceil(start) : Math.floor(start); if (start < 0) start += len; for (; start < len; start++) { if (start in self.__array && pyjslib.cmp(self.__array[start], value) == 0) return start; } } """) raise ValueError("list.index(x): x not in list") def insert(self, index, value): JS(""" var a = self.__array; self.__array=a.slice(0, index).concat(value, a.slice(index));""") def pop(self, index = -1): JS(""" index = index.valueOf(); if (index<0) index += self.__array.length; if (index < 0 || index >= self.__array.length) { if (self.__array.length == 0) { throw pyjslib.IndexError("pop from empty list"); } throw pyjslib.IndexError("pop index out of range"); } var a = self.__array[index]; self.__array.splice(index, 1); return a; """) def __cmp__(self, l): if not isinstance(l, list): return -1 JS(""" var n1 = self.__array.length, n2 = l.__array.length, a1 = self.__array, a2 = l.__array, n, c; n = (n1 < n2 ? 
n1 : n2); for (var i = 0; i < n; i++) { c = pyjslib.cmp(a1[i], a2[i]); if (c) return c; } if (n1 < n2) return -1; if (n1 > n2) return 1; return 0;""") def __getslice__(self, lower, upper): JS(""" if (upper==null) return pyjslib.list(self.__array.slice(lower)); return pyjslib.list(self.__array.slice(lower, upper)); """) def __delslice__(self, lower, upper): JS(""" var n = upper - lower; if (upper==null) { n = self.__array.length; } if (!lower) lower = 0; if (n > 0) self.__array.splice(lower, n); """) return None def __setslice__(self, lower, upper, data): self.__delslice__(lower, upper) tail = self.__getslice__(lower, None) self.__delslice__(lower, None) self.extend(data) self.extend(tail) return None def __getitem__(self, index): JS(""" index = index.valueOf(); if (index < 0) index += self.__array.length; if (index < 0 ||
+= s * dx if nx == nxL: iL += 1 return iL def func_001b15acc807450c90cda8803e3be4d6(nx, nxU, nxL): if nx == nxL: iL += 1 if nx == nxU: iU += 1 return iU def func_149e53146763406c98f7ec2894c4f17b(nx, nxU, nxL): if nx == nxL: iL += 1 if nx == nxU: iU += 1 return iL def func_a6a9c6e6681d40079297ea0784389a3a(s, w, nx): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx return dx def func_e087955e3cb1427f925f59e75935116a(s, w, nx): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx return x def func_facd685633274e8f886c574a2fb26f02(s, w, nx): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx return a def func_23cf03d331954170beab89f9388b922a(dx, s, nx): a += 2 * w * dx + s * dx * dx x = nx w += s * dx return x def func_60b73c787d4742f4badf9e0b01692bf0(dx, s, nx): a += 2 * w * dx + s * dx * dx x = nx w += s * dx return a def func_6b648157b67745bab64d0f2288188186(dx, s, nx): a += 2 * w * dx + s * dx * dx x = nx w += s * dx return w def func_1bdcbbd08369422bacbf768433cd2494(dx, s, nx, nxL): x = nx w += s * dx if nx == nxL: iL += 1 return iL def func_8b78240618d24b8080d790c2c724408d(dx, s, nx, nxL): x = nx w += s * dx if nx == nxL: iL += 1 return w def func_ac941b4b8d0a475abea414e92628e7eb(dx, s, nx, nxL): x = nx w += s * dx if nx == nxL: iL += 1 return x def func_83730b8a0b644eec9a69cd2d4488ddce(dx, s, nx, nxU, nxL): w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return iU def func_86f722bf9a4a49b1b9b73ecff0faee74(dx, s, nx, nxU, nxL): w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return iL def func_08e1237d21d5438e9646a54610d315db(dx, s, nx, nxU, nxL): w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return w def func_3db720c419ba4d309bbae8ca5d7cde87(s, nx): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx w += s * dx return dx def func_07f202c6e4764f4f8356700a9a3fa551(s, nx): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx w += s * dx return a def func_ee459e91a0814cb5b514b9a2fdaf7637(s, nx): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx w += s * 
dx return x def func_a75f3df798ce4911928cceb34fd2d225(s, nx): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx w += s * dx return w def func_b9138d3a6376461fac83865987068f5f(dx, s, nx, nxL): a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 return a def func_481910dba3b44bf3a37c594e466b198b(dx, s, nx, nxL): a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 return w def func_ad1e3a41c23d49f1b26c1f0d9c2fa268(dx, s, nx, nxL): a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 return x def func_b976871a854f4ee2b9832ed2e75997df(dx, s, nx, nxL): a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 return iL def func_1db14cde499a4849b576f4e1904f74d1(dx, s, nx, nxU, nxL): x = nx w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return iL def func_b8d2f116271644a78369a0d34f9163f3(dx, s, nx, nxU, nxL): x = nx w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return iU def func_16a5441b74354b74bbdc193a1445759f(dx, s, nx, nxU, nxL): x = nx w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return w def func_ba2fc1b6fa774294b3de4129be80767b(dx, s, nx, nxU, nxL): x = nx w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return x def func_097b25d4a9604f63b59f257925b16786(s, nx, nxL): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 return x def func_edc38b0b9b4745eeb035757b491f4b02(s, nx, nxL): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 return iL def func_9c2b20b06f2e490da49a92673318fff3(s, nx, nxL): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 return w def func_38ff479074614bab85c99bb8dc07403c(s, nx, nxL): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 return a def func_390f5f0c78d14e68adf182c354da9823(s, nx, nxL): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 return dx def 
func_da5eb4d1d2874397a92c51db9201e52d(dx, s, nx, nxU, nxL): a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return a def func_936ba88f593f49e1a1d9bc987a57c260(dx, s, nx, nxU, nxL): a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return w def func_945b62ddb3934150ac5aefc32b9e7f63(dx, s, nx, nxU, nxL): a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return iU def func_e51b6ad5c3a34a7694ec2c15d41d8775(dx, s, nx, nxU, nxL): a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return x def func_f3b035d02711483fbf54fef405e42099(dx, s, nx, nxU, nxL): a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return iL def func_a600b523889049aba2519a3f7e8d189e(s, nx, nxU, nxL): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return dx def func_1c035c9f413140329959f5107ce9547f(s, nx, nxU, nxL): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return a def func_cbdfc4dec6c34e868d00cc48ffc862c9(s, nx, nxU, nxL): dx = nx - x a += 2 * w * dx + s * dx * dx x = nx w += s * dx if nx == nxL: iL += 1 if nx == nxU: iU += 1 return iU def func_81f76ffc8298404fb4a4267009bfd9d9(s, nx, nxU, nxL): dx = nx - x a += 2 * w * dx + s * dx * dx
from pathlib import Path
from typing import Any, Callable, List, Tuple, Union

from PyQt5.QtCore import Qt
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QWidget

from src.database_commander import DB_COMMANDER
from src.dialog_handler import DialogHandler, UI_LANGUAGE
from src.models import Cocktail, Ingredient
from src.config_manager import shared
from src import MAX_SUPPORTED_BOTTLES

# Folder containing the .css theme files consumed by inject_stylesheet()
STYLE_FOLDER = Path(__file__).parents[0].absolute() / "ui" / "styles"


class DisplayController(DialogHandler):
    """Controller class to read values from and write values to the UI.

    Split into two halves: "extract" methods that pull user input out of
    Qt widgets, and "manipulate" methods that push state back into them.
    """

    ########################
    # UI "EXTRACT" METHODS #
    ########################
    def get_current_combobox_items(self, combobox_list: List[Any]) -> List[str]:
        """Get a list of the current combobox items"""
        return [combobox.currentText() for combobox in combobox_list]

    def get_toggle_status(self, button_list: List[Any]) -> List[bool]:
        """Get a list of if the buttons are checked"""
        return [button.isChecked() for button in button_list]

    def get_lineedit_text(self, lineedit_list: List[Any]) -> List[str]:
        """Get a list of the (stripped) text of the lineedits"""
        return [lineedit.text().strip() for lineedit in lineedit_list]

    def get_list_widget_selection(self, list_widget) -> str:
        """Return the currently selected item of the list widget, or "" if none."""
        if not list_widget.selectedItems():
            return ""
        return list_widget.currentItem().text()

    def get_ingredient_data(self, lineedit_list: List[Any], checkbox, list_widget):
        """Build an Ingredient object from the ingredient data fields.

        lineedit_list is expected to hold exactly [name, alcohol level, volume];
        alcohol level and volume are converted with int() and will raise
        ValueError for non-numeric input (validate_ingredient_data guards this).
        """
        ingredient_name, alcohollevel, volume = self.get_lineedit_text(lineedit_list)
        hand_add = checkbox.isChecked()
        selected_ingredient = ""
        if list_widget.selectedItems():
            selected_ingredient = list_widget.currentItem().text()
        return Ingredient(None, ingredient_name, int(alcohollevel), int(volume), None, hand_add, selected=selected_ingredient)

    def get_cocktail_data(self, w) -> Tuple[str, int, float]:
        """Return (name, volume, alcohol factor) from the maker tab."""
        cocktail_volume = int(w.LCustomMenge.text())
        # when pulling, the slider can reach every integer value (eg, 1,2,...)
        # but we only want a stepsize of 5 -> therefore it ranges from -5 to 5 and we
        # multiply by 5 to get an effective range from -25 to 25 with a stepsize of 5
        alcohol_faktor = 1 + (w.HSIntensity.value() * 5 / 100)
        cocktailname = ""
        if w.LWMaker.selectedItems():
            cocktailname = w.LWMaker.currentItem().text()
        return cocktailname, cocktail_volume, alcohol_faktor

    def get_recipe_field_data(self, w) -> Tuple[str, str, List[str], List[str], int, str]:
        """Return (name, selected, [ingredients], [volumes], enabled, comment)."""
        recipe_name = w.LECocktail.text().strip()
        selected_recipe = self.get_list_widget_selection(w.LWRezepte)
        # this is also a str, because user may type non int char into box
        ingredient_volumes = self.get_lineedit_text(self.get_lineedits_recipe(w))
        ingredient_names = self.get_current_combobox_items(self.get_comboboxes_recipes(w))
        enabled = int(w.CHBenabled.isChecked())
        comment = w.LEKommentar.text()
        return recipe_name, selected_recipe, ingredient_names, ingredient_volumes, enabled, comment

    def validate_ingredient_data(self, lineedit_list) -> bool:
        """Validate the data from the ingredient window.

        Shows the matching dialog (inherited from DialogHandler) and returns
        False on the first failed check; True when all checks pass.
        """
        if self.__lineedit_is_missing(lineedit_list):
            self.say_some_value_missing()
            return False
        _, ingredient_percentage, ingredient_volume = lineedit_list
        if self.__lineedit_is_no_int([ingredient_percentage, ingredient_volume]):
            self.say_needs_to_be_int()
            return False
        if int(ingredient_percentage.text()) > 100:
            self.say_alcohollevel_max_limit()
            return False
        return True

    def get_ingredient_window_data(self, w) -> Tuple[str, int]:
        """Return (ingredient name, volume) from the ingredient window."""
        ingredient_name = w.CBingredient.currentText()
        volume = int(w.LAmount.text())
        return ingredient_name, volume

    def __check_password(self, lineedit) -> bool:
        """Compare the given lineedit's text to the master password.

        Clears the lineedit as a side effect before comparing, so the
        password is never left visible/readable in the widget.
        """
        password = lineedit.text()
        lineedit.setText("")
        if password == self.UI_MASTERPASSWORD:
            return True
        return False

    def check_recipe_password(self, w):
        """Checks if the password in the recipe window is right"""
        return self.__check_password(w.LEpw)

    def check_bottles_password(self, w):
        """Checks if the password in the bottle window is right"""
        return self.__check_password(w.LECleanMachine)

    def check_ingredient_password(self, w):
        """Checks if the password in the ingredient window is right"""
        # FIXME: "<PASSWORD>" is a data-scrubbing artifact — the real widget
        # attribute name (a lineedit on the ingredient tab, analogous to
        # w.LEpw above) was redacted and must be restored from the .ui file.
        return self.__check_password(w.<PASSWORD>)

    def __lineedit_is_missing(self, lineedit_list) -> bool:
        """Return True if any lineedit in the list is empty (whitespace-only counts)."""
        for lineedit in lineedit_list:
            if lineedit.text().strip() == "":
                return True
        return False

    def __lineedit_is_no_int(self, lineedits) -> bool:
        """Return True if any lineedit's text is not a valid int."""
        for lineedit in lineedits:
            try:
                int(lineedit.text())
            except ValueError:
                return True
        return False

    ###########################
    # UI "MANIPULATE" METHODS #
    ###########################
    # Misc
    def plusminus(self, label, operator: str, minimal=0, maximal=1000, delta=10, side_effect: Union[Callable, None] = None):
        """Increase or decrease the label's value by delta within the boundaries.

        operator: '+' or '-'
        The value is snapped onto the delta grid and clamped to
        [minimal, maximal]; non-numeric label text jumps straight to the
        relevant boundary. Also executes a side-effect function, if one is given.
        """
        try:
            value_ = int(label.text())
            value_ = value_ + (delta if operator == "+" else -delta)
            value_ = min(maximal, max(minimal, (value_ // delta) * delta))
        except ValueError:
            value_ = maximal if operator == "+" else minimal
        label.setText(str(value_))
        if side_effect is not None:
            side_effect()

    def set_display_settings(self, window_object: QWidget, resize=True):
        """Check dev environment, adjust cursor and resize accordingly, if resize is wished."""
        if not self.UI_DEVENVIRONMENT:
            window_object.setCursor(Qt.BlankCursor)
        if resize:
            window_object.setFixedSize(self.UI_WIDTH, self.UI_HEIGHT)
            window_object.resize(self.UI_WIDTH, self.UI_HEIGHT)

    def inject_stylesheet(self, window_object: QWidget):
        """Apply the central theme stylesheet (MAKER_THEME.css) to the given widget."""
        style_file = f"{self.MAKER_THEME}.css"
        with open(STYLE_FOLDER / style_file, "r", encoding="utf-8") as filehandler:
            window_object.setStyleSheet(filehandler.read())

    def set_tab_width(self, mainscreen):
        """Hack to set tabs to full screen width, inheritance of custom tabBars dont work.

        This is incredibly painful, since all the CSS from the ui needs to be copied
        here; it will overwrite the whole class sheet and missing parts will not be
        used. Any changes to the .ui file for the tab need to be applied here as well.
        """
        total_width = mainscreen.frameGeometry().width()
        # four tabs share the width; -10 leaves room for borders/margins
        width = round(total_width / 4, 0) - 10
        mainscreen.tabWidget.setStyleSheet(
            "QTabBar::tab {" +
            "background-color: rgb(97, 97, 97);" +
            "color: rgb(255, 255, 255);" +
            "border-width: 1px;" +
            "border-color: rgb(255, 255, 255);" +
            "border-style: solid;" +
            "border-top-left-radius: 10px;" +
            "border-top-right-radius: 10px;" +
            "padding: 5px 0px 5px 0px;" +
            f"width: {width}px;" +
            "}" +
            "QTabBar::tab:selected {" +
            "color: rgb(255, 255, 255); " +
            "background-color: rgb(0, 123, 255)};"
        )

    # TabWidget
    def set_tabwidget_tab(self, w, tab: str):
        """Sets the tabwidget to the given tab.

        tab: ['maker', 'ingredients', 'recipes', 'bottles']
        Unknown names fall back to the maker tab (index 0).
        """
        tabs = {
            "maker": 0,
            "ingredients": 1,
            "recipes": 2,
            "bottles": 3
        }
        w.tabWidget.setCurrentIndex(tabs.get(tab, 0))

    # Slider
    def __set_slider_value(self, slider, value):
        # thin wrapper so callers don't touch Qt API directly
        slider.setValue(value)

    def reset_alcohol_slider(self, w):
        """Set the alcohol slider back to its default (100%) position (raw value 0)."""
        self.__set_slider_value(w.HSIntensity, 0)

    # LineEdit
    def clean_multiple_lineedit(self, lineedit_list: List[Any]):
        """Clear a list of line edits"""
        for lineedit in lineedit_list:
            lineedit.clear()

    def fill_multiple_lineedit(self, lineedit_list: List[Any], text_list: List[Union[str, int]]):
        """Fill a list of line edits pairwise with the given texts."""
        for lineedit, text in zip(lineedit_list, text_list):
            lineedit.setText(str(text))

    # Combobox
    def fill_single_combobox(self, combobox, itemlist: List[str], clear_first=False, sort_items=True, first_empty=True):
        """Fill a combobox with given items, with the option to sort and to insert
        an empty element as first element (only when the box is still empty)."""
        if clear_first:
            combobox.clear()
        if combobox.count() == 0 and first_empty:
            combobox.addItem("")
        combobox.addItems(itemlist)
        if sort_items:
            combobox.model().sort(0)

    def fill_multiple_combobox(self, combobox_list: List[Any], itemlist: List[str], clear_first=False, sort_items=True, first_empty=True):
        """Fill multiple comboboxes with identical items, can sort and insert filler as first item."""
        for combobox in combobox_list:
            self.fill_single_combobox(combobox, itemlist, clear_first, sort_items, first_empty)

    def fill_multiple_combobox_individually(self, combobox_list: List[Any], list_of_itemlist: List[List[str]], clear_first=False, sort_items=True, first_empty=True):
        """Fill multiple comboboxes with different items, can sort and insert filler as first item."""
        for combobox, itemlist in zip(combobox_list, list_of_itemlist):
            self.fill_single_combobox(combobox, itemlist, clear_first, sort_items, first_empty)

    def delete_single_combobox_item(self, combobox, item: str):
        """Delete the given item from a combobox (no-op if not present)."""
        index = combobox.findText(item, Qt.MatchFixedString)
        if index >= 0:
            combobox.removeItem(index)

    # This seems to be currently unused
    def delete_multiple_combobox_item(self, combobox, itemlist: List[str]):
        """Delete the given items from a combobox"""
        for item in itemlist:
            self.delete_single_combobox_item(combobox, item)

    def delete_item_in_multiple_combobox(self, combobox_list: List[Any], item: str):
        """Delete the given item from multiple comboboxes"""
        for combobox in combobox_list:
            self.delete_single_combobox_item(combobox, item)

    def set_multiple_combobox_to_top_item(self, combobox_list: List[Any]):
        """Set the list of comboboxes to the top item"""
        for combobox in combobox_list:
            combobox.setCurrentIndex(0)

    def set_multiple_combobox_items(self, combobox_list: List[Any], items_to_set: List[str]):
        """Set a list of comboboxes pairwise to the according items."""
        for combobox, item in zip(combobox_list, items_to_set):
            self.set_combobox_item(combobox, item)

    def set_combobox_item(self, combobox, item: str):
        """Set the combobox to the given item"""
        index = combobox.findText(item, Qt.MatchFixedString)
        combobox.setCurrentIndex(index)

    def adjust_bottle_comboboxes(self, combobox_list: List[Any], old_item: str, new_item: str):
        """Swap availability of bottle items across the given comboboxes, sorting afterwards.

        NOTE(review): despite the original wording ("remove the old itemname and
        add new one"), the code ADDS old_item back to the other boxes (it became
        available again) and REMOVES new_item from boxes where it is not the
        current selection (it is now in use) — presumably intended; confirm.
        """
        for combobox in combobox_list:
            if (old_item != "") and (combobox.findText(old_item, Qt.MatchFixedString) < 0):
                combobox.addItem(old_item)
            if (new_item != "") and (new_item != combobox.currentText()):
                self.delete_single_combobox_item(combobox, new_item)
            combobox.model().sort(0)

    def rename_single_combobox(self, combobox, old_item: str, new_item: str):
        """Rename the old item to the new one in the given box (no-op if absent)."""
        index = combobox.findText(old_item, Qt.MatchFixedString)
        if index >= 0:
            combobox.setItemText(index, new_item)
            combobox.model().sort(0)

    def rename_multiple_combobox(self, combobox_list: List[Any], old_item: str, new_item: str):
        """Renames an item in multiple comboboxes"""
        for combobox in combobox_list:
            self.rename_single_combobox(combobox, old_item, new_item)

    # buttons / togglebuttons
    def untoggle_buttons(self, button_list: List[Any]):
        """Set toggle to false in given button list"""
        for button in button_list:
            button.setChecked(False)

    # progress bars
    def set_progress_bar_values(self, progress_bar_list: List[Any], value_list: List[int]):
        """Set values of progress bars pairwise to the given values."""
        for progress_bar, value in zip(progress_bar_list, value_list):
            progress_bar.setValue(value)

    # listwidget
    def unselect_list_widget_items(self, list_widget: Any):
        """Unselect all items in the list widget"""
        for i in range(list_widget.count()):
            list_widget.item(i).setSelected(False)

    def delete_list_widget_item(self, list_widget: Any, item: str):
        """Deletes an item in the list widget"""
        index_to_delete = list_widget.findItems(item, Qt.MatchExactly)
        if len(index_to_delete) > 0:
            for index in index_to_delete:
                list_widget.takeItem(list_widget.row(index))

    # NOTE: the source chunk is truncated here — the remainder of this
    # method (and possibly further methods) is not visible in this view.
    def fill_list_widget(self,
# dataset artifact (not program text): <reponame>nkran/malariagen-data-python
"""Integration tests for the malariagen_data Ag3 data-access API.

These tests read real release data from Google Cloud Storage (cached locally
under ``gcs_cache`` via fsspec's ``simplecache``) and therefore require
network access on first run.
"""
import os
import random
import shutil

import dask.array as da
import numpy as np
import pandas as pd
import pytest
import scipy.stats
import xarray as xr
import zarr
from numpy.testing import assert_allclose, assert_array_equal
from pandas.testing import assert_frame_equal

from malariagen_data import Ag3, Region
from malariagen_data.ag3 import _cn_mode
from malariagen_data.util import locate_region, resolve_region

# Taxon call values produced by the legacy (pre-2022) species analyses.
expected_species_legacy = {
    "gambiae",
    "coluzzii",
    "arabiensis",
    "intermediate_arabiensis_gambiae",
    "intermediate_gambiae_coluzzii",
}

# Taxon call values produced by the current AIM species analysis.
expected_species = {
    "gambiae",
    "coluzzii",
    "arabiensis",
    "intermediate_gambcolu_arabiensis",
    "intermediate_gambiae_coluzzii",
}

contigs = "2R", "2L", "3R", "3L", "X"

# Cohort metadata columns expected to be joined onto sample metadata.
cohort_cols = (
    "country_iso",
    "admin1_name",
    "admin1_iso",
    "admin2_name",
    "taxon",
    "cohort_admin1_year",
    "cohort_admin1_month",
    "cohort_admin2_year",
    "cohort_admin2_month",
)


def setup_ag3(url="simplecache::gs://vo_agam_release/", **kwargs):
    """Construct an Ag3 client for tests; ``url=None`` exercises the default URL."""
    kwargs.setdefault("check_location", False)
    kwargs.setdefault("show_progress", False)
    if url is None:
        # test default URL
        return Ag3(**kwargs)
    if url.startswith("simplecache::"):
        # configure the directory on the local file system to cache data
        kwargs["simplecache"] = dict(cache_storage="gcs_cache")
    return Ag3(url, **kwargs)


@pytest.mark.parametrize(
    "url",
    [
        None,
        "gs://vo_agam_release/",
        "gcs://vo_agam_release/",
        "gs://vo_agam_release",
        "gcs://vo_agam_release",
        "simplecache::gs://vo_agam_release/",
        "simplecache::gcs://vo_agam_release/",
    ],
)
def test_sample_sets(url):
    """sample_sets() returns the expected frame for every supported URL form."""
    ag3 = setup_ag3(url)
    df_sample_sets_v3 = ag3.sample_sets(release="3.0")
    assert isinstance(df_sample_sets_v3, pd.DataFrame)
    assert len(df_sample_sets_v3) == 28
    assert tuple(df_sample_sets_v3.columns) == ("sample_set", "sample_count", "release")

    # test duplicates not allowed
    with pytest.raises(ValueError):
        ag3.sample_sets(release=["3.0", "3.0"])

    # test default is all public releases
    df_default = ag3.sample_sets()
    df_all = ag3.sample_sets(release=ag3.releases)
    assert_frame_equal(df_default, df_all)


def test_releases():
    """releases is a tuple; pre=True exposes additional pre-releases."""
    ag3 = setup_ag3()
    assert isinstance(ag3.releases, tuple)
    assert ag3.releases == ("3.0",)

    ag3 = setup_ag3(pre=True)
    assert isinstance(ag3.releases, tuple)
    assert len(ag3.releases) > 1
    assert all([r.startswith("3.") for r in ag3.releases])


def test_sample_metadata():
    """sample_metadata() column order and row counts for various sample-set selections."""
    ag3 = setup_ag3()
    df_sample_sets_v3 = ag3.sample_sets(release="3.0")

    expected_cols = (
        "sample_id",
        "partner_sample_id",
        "contributor",
        "country",
        "location",
        "year",
        "month",
        "latitude",
        "longitude",
        "sex_call",
        "sample_set",
        "release",
    )

    # all v3
    df_samples_v3 = ag3.sample_metadata(sample_sets="3.0")
    assert tuple(df_samples_v3.columns[: len(expected_cols)]) == expected_cols
    expected_len = df_sample_sets_v3["sample_count"].sum()
    assert len(df_samples_v3) == expected_len

    # single sample set
    df_samples_x = ag3.sample_metadata(sample_sets="AG1000G-X")
    assert tuple(df_samples_x.columns[: len(expected_cols)]) == expected_cols
    expected_len = df_sample_sets_v3.query("sample_set == 'AG1000G-X'")[
        "sample_count"
    ].sum()
    assert len(df_samples_x) == expected_len

    # multiple sample sets
    sample_sets = ["AG1000G-BF-A", "AG1000G-BF-B", "AG1000G-BF-C"]
    df_samples_bf = ag3.sample_metadata(sample_sets=sample_sets)
    assert tuple(df_samples_bf.columns[: len(expected_cols)]) == expected_cols
    loc_sample_sets = df_sample_sets_v3["sample_set"].isin(sample_sets)
    expected_len = df_sample_sets_v3.loc[loc_sample_sets]["sample_count"].sum()
    assert len(df_samples_bf) == expected_len

    # duplicate sample sets
    with pytest.raises(ValueError):
        ag3.sample_metadata(sample_sets=["3.0", "3.0"])
    with pytest.raises(ValueError):
        ag3.sample_metadata(sample_sets=["AG1000G-UG", "AG1000G-UG"])
    with pytest.raises(ValueError):
        ag3.sample_metadata(sample_sets=["AG1000G-UG", "3.0"])

    # default is all public releases
    df_default = ag3.sample_metadata()
    df_all = ag3.sample_metadata(sample_sets=ag3.releases)
    assert_frame_equal(df_default, df_all)


def test_sample_metadata_with_aim_species():
    """Current AIM analysis columns are joined onto sample metadata by default."""
    ag3 = setup_ag3(species_analysis="aim_20220528")
    expected_cols = (
        "sample_id",
        "partner_sample_id",
        "contributor",
        "country",
        "location",
        "year",
        "month",
        "latitude",
        "longitude",
        "sex_call",
        "sample_set",
        "release",
        "aim_species_fraction_arab",
        "aim_species_fraction_colu",
        "aim_species_fraction_colu_no2l",
        "aim_species_gambcolu_arabiensis",
        "aim_species_gambiae_coluzzii",
        "aim_species",
    )

    # AIM species calls, included by default
    df_samples_aim = ag3.sample_metadata(sample_sets="3.0")
    assert tuple(df_samples_aim.columns[: len(expected_cols)]) == expected_cols
    assert set(df_samples_aim["aim_species"].dropna()) == expected_species


def test_sample_metadata_with_aim_species_legacy():
    """Legacy AIM analysis columns; kept for backwards compatibility."""
    # TODO this is legacy, deprecate at some point
    ag3 = setup_ag3(species_analysis="aim_20200422")
    expected_cols = (
        "sample_id",
        "partner_sample_id",
        "contributor",
        "country",
        "location",
        "year",
        "month",
        "latitude",
        "longitude",
        "sex_call",
        "sample_set",
        "release",
        "aim_species_fraction_colu",
        "aim_species_fraction_arab",
        "aim_species_gambcolu_arabiensis",
        "aim_species_gambiae_coluzzii",
        "aim_species",
    )

    # AIM species calls, included by default
    df_samples_aim = ag3.sample_metadata(sample_sets="3.0")
    assert tuple(df_samples_aim.columns[: len(expected_cols)]) == expected_cols
    assert set(df_samples_aim["aim_species"].dropna()) == expected_species_legacy


def test_sample_metadata_with_pca_species():
    """Legacy PCA analysis columns; kept for backwards compatibility."""
    # TODO this is legacy, deprecate at some point
    ag3 = setup_ag3(species_analysis="pca_20200422")
    expected_cols = (
        "sample_id",
        "partner_sample_id",
        "contributor",
        "country",
        "location",
        "year",
        "month",
        "latitude",
        "longitude",
        "sex_call",
        "sample_set",
        "release",
        "pca_species_pc1",
        "pca_species_pc2",
        "pca_species_gambcolu_arabiensis",
        "pca_species_gambiae_coluzzii",
        "pca_species",
    )

    # PCA species calls
    df_samples_pca = ag3.sample_metadata(sample_sets="3.0")
    assert tuple(df_samples_pca.columns[: len(expected_cols)]) == expected_cols
    assert (
        set(df_samples_pca["pca_species"].dropna()).difference(expected_species_legacy)
        == set()
    )


def test_sample_metadata_with_cohorts():
    """Cohort columns are present when cohort data is available."""
    ag3 = setup_ag3()
    df_samples_coh = ag3.sample_metadata(sample_sets="3.0")
    for c in cohort_cols:
        assert c in df_samples_coh


def test_sample_metadata_without_cohorts():
    """Against a local fixture lacking cohort files, cohort columns exist but are all null."""
    working_dir = os.path.dirname(os.path.abspath(__file__))
    test_data_path = os.path.join(working_dir, "anopheles_test_data")
    ag3 = Ag3(test_data_path)
    df_samples_coh = ag3.sample_metadata(sample_sets="3.0")
    for c in cohort_cols:
        assert c in df_samples_coh
        assert df_samples_coh[c].isnull().all()


@pytest.mark.parametrize(
    "sample_sets",
    [
        "AG1000G-AO",
        "AG1000G-X",
        ["AG1000G-BF-A", "AG1000G-BF-B"],
        "3.0",
        None,
    ],
)
@pytest.mark.parametrize("analysis", ["aim_20220528", "aim_20200422", "pca_20200422"])
def test_species_calls(sample_sets, analysis):
    """species_calls() aligns with sample metadata and yields only known taxa."""
    ag3 = setup_ag3(species_analysis=analysis)
    df_samples = ag3.sample_metadata(sample_sets=sample_sets)
    df_species = ag3.species_calls(sample_sets=sample_sets)
    assert len(df_species) == len(df_samples)
    assert_array_equal(df_samples["sample_id"], df_species["sample_id"])
    if analysis == "aim_20220528":
        assert (
            set(df_species["aim_species"].dropna()).difference(expected_species)
            == set()
        )
    if analysis == "aim_20200422":
        assert (
            set(df_species["aim_species"].dropna()).difference(expected_species_legacy)
            == set()
        )
    if analysis == "pca_20200422":
        assert (
            set(df_species["pca_species"].dropna()).difference(expected_species_legacy)
            == set()
        )


@pytest.mark.parametrize("mask", ["gamb_colu_arab", "gamb_colu", "arab"])
def test_open_site_filters(mask):
    """Site-filter zarr opens directly and has one group per contig."""
    # check can open the zarr directly
    ag3 = setup_ag3()
    root = ag3.open_site_filters(mask=mask)
    assert isinstance(root, zarr.hierarchy.Group)
    for contig in ag3.contigs:
        assert contig in root


@pytest.mark.parametrize("mask", ["gamb_colu_arab", "gamb_colu", "arab"])
@pytest.mark.parametrize(
    "region", ["2R", ["3R", "3L", "2R:48,714,463-48,715,355", "AGAP007280"]]
)
def test_site_filters(mask, region):
    """site_filters() returns a 1-D boolean dask array for contigs, spans and genes."""
    ag3 = setup_ag3()
    filter_pass = ag3.site_filters(region=region, mask=mask)
    assert isinstance(filter_pass, da.Array)
    assert filter_pass.ndim == 1
    assert filter_pass.dtype == bool


def test_open_snp_sites():
    """SNP-sites zarr opens directly and has one group per contig."""
    ag3 = setup_ag3()
    root = ag3.open_snp_sites()
    assert isinstance(root, zarr.hierarchy.Group)
    for contig in ag3.contigs:
        assert contig in root


@pytest.mark.parametrize("chunks", ["auto", "native"])
@pytest.mark.parametrize(
    "region", ["2R", ["3R", "2R:48,714,463-48,715,355", "AGAP007280"]]
)
def test_snp_sites(chunks, region):
    """snp_sites() field dtypes/shapes, with and without a site mask applied."""
    ag3 = setup_ag3()

    pos = ag3.snp_sites(region=region, field="POS", chunks=chunks)
    ref = ag3.snp_sites(region=region, field="REF", chunks=chunks)
    alt = ag3.snp_sites(region=region, field="ALT", chunks=chunks)
    assert isinstance(pos, da.Array)
    assert pos.ndim == 1
    assert pos.dtype == "i4"
    assert isinstance(ref, da.Array)
    assert ref.ndim == 1
    assert ref.dtype == "S1"
    assert isinstance(alt, da.Array)
    assert alt.ndim == 2
    assert alt.dtype == "S1"
    assert pos.shape[0] == ref.shape[0] == alt.shape[0]

    # apply site mask
    filter_pass = ag3.site_filters(region=region, mask="gamb_colu_arab").compute()
    n_pass = np.count_nonzero(filter_pass)
    pos_pass = ag3.snp_sites(
        region=region, field="POS", site_mask="gamb_colu_arab", chunks=chunks
    )
    assert isinstance(pos_pass, da.Array)
    assert pos_pass.ndim == 1
    assert pos_pass.dtype == "i4"
    assert pos_pass.shape[0] == n_pass
    assert pos_pass.compute().shape == pos_pass.shape
    for f in "POS", "REF", "ALT":
        d = ag3.snp_sites(
            region=region, site_mask="gamb_colu_arab", field=f, chunks=chunks
        )
        assert isinstance(d, da.Array)
        assert d.shape[0] == n_pass
        assert d.shape == d.compute().shape


def test_open_snp_genotypes():
    """SNP-genotypes zarr opens directly and has one group per contig."""
    # check can open the zarr directly
    ag3 = setup_ag3()
    root = ag3.open_snp_genotypes(sample_set="AG1000G-AO")
    assert isinstance(root, zarr.hierarchy.Group)
    for contig in ag3.contigs:
        assert contig in root


@pytest.mark.parametrize("chunks", ["auto", "native"])
@pytest.mark.parametrize(
    "sample_sets",
    [None, "AG1000G-X", ["AG1000G-BF-A", "AG1000G-BF-B"], "3.0"],
)
@pytest.mark.parametrize(
    "region", ["2R", ["3R", "2R:48,714,463-48,715,355", "AGAP007280"]]
)
def test_snp_genotypes(chunks, sample_sets, region):
    """snp_genotypes() field dtypes/shapes, with and without a site mask applied."""
    ag3 = setup_ag3()
    df_samples = ag3.sample_metadata(sample_sets=sample_sets)
    gt = ag3.snp_genotypes(region=region, sample_sets=sample_sets, chunks=chunks)
    assert isinstance(gt, da.Array)
    assert gt.ndim == 3
    assert gt.dtype == "i1"
    assert gt.shape[1] == len(df_samples)

    # specific fields
    x = ag3.snp_genotypes(
        region=region, sample_sets=sample_sets, field="GT", chunks=chunks
    )
    assert isinstance(x, da.Array)
    assert x.ndim == 3
    assert x.dtype == "i1"
    x = ag3.snp_genotypes(
        region=region, sample_sets=sample_sets, field="GQ", chunks=chunks
    )
    assert isinstance(x, da.Array)
    assert x.ndim == 2
    assert x.dtype == "i2"
    x = ag3.snp_genotypes(
        region=region, sample_sets=sample_sets, field="MQ", chunks=chunks
    )
    assert isinstance(x, da.Array)
    assert x.ndim == 2
    assert x.dtype == "i2"
    x = ag3.snp_genotypes(
        region=region, sample_sets=sample_sets, field="AD", chunks=chunks
    )
    assert isinstance(x, da.Array)
    assert x.ndim == 3
    assert x.dtype == "i2"

    # site mask
    filter_pass = ag3.site_filters(region=region, mask="gamb_colu_arab").compute()
    gt_pass = ag3.snp_genotypes(
        region=region,
        sample_sets=sample_sets,
        site_mask="gamb_colu_arab",
        chunks=chunks,
    )
    assert isinstance(gt_pass, da.Array)
    assert gt_pass.ndim == 3
    assert gt_pass.dtype == "i1"
    assert gt_pass.shape[0] == np.count_nonzero(filter_pass)
    assert gt_pass.shape[1] == len(df_samples)
    assert gt_pass.shape[2] == 2


@pytest.mark.parametrize(
    "sample_sets",
    [None, "AG1000G-X", ["AG1000G-BF-A", "AG1000G-BF-B"], "3.0"],
)
@pytest.mark.parametrize(
    "region", ["2R", ["3R", "2R:48,714,463-48,715,355", "AGAP007280"]]
)
def test_snp_genotypes_chunks(sample_sets, region):
    """'native', 'auto' and explicit chunking all differ and are honoured."""
    ag3 = setup_ag3()
    gt_native = ag3.snp_genotypes(
        region=region, sample_sets=sample_sets, chunks="native"
    )
    gt_auto = ag3.snp_genotypes(region=region, sample_sets=sample_sets, chunks="auto")
    gt_manual = ag3.snp_genotypes(
        region=region, sample_sets=sample_sets, chunks=(100_000, 10, 2)
    )

    assert gt_native.chunks != gt_auto.chunks
    assert gt_auto.chunks != gt_manual.chunks
    assert gt_manual.chunks != gt_native.chunks
    assert gt_manual.chunks[0][0] == 100_000
    assert gt_manual.chunks[1][0] == 10
    assert gt_manual.chunks[2][0] == 2


def test_genome():
    """Reference genome opens as zarr and per-contig sequences are S1 dask arrays."""
    ag3 = setup_ag3()

    # test the open_genome() method to access as zarr
    genome = ag3.open_genome()
    assert isinstance(genome, zarr.hierarchy.Group)
    for contig in ag3.contigs:
        assert contig in genome
        assert genome[contig].dtype == "S1"

    # test the genome_sequence() method to access sequences
    for contig in ag3.contigs:
        seq = ag3.genome_sequence(contig)
        assert isinstance(seq, da.Array)
        assert seq.dtype == "S1"


def test_geneset():
    """geneset() returns a GFF3 frame; attributes are unpacked unless attributes=None."""
    ag3 = setup_ag3()

    # default
    df = ag3.geneset()
    assert isinstance(df, pd.DataFrame)
    gff3_cols = [
        "contig",
        "source",
        "type",
        "start",
        "end",
        "score",
        "strand",
        "phase",
    ]
    expected_cols = gff3_cols + ["ID", "Parent", "Name", "description"]
    assert df.columns.tolist() == expected_cols

    # don't unpack attributes
    df = ag3.geneset(attributes=None)
    assert isinstance(df, pd.DataFrame)
    expected_cols = gff3_cols + ["attributes"]
    assert df.columns.tolist() == expected_cols


@pytest.mark.parametrize(
    "region",
    ["AGAP007280", "3R:28,000,000-29,000,000", "2R", "X", ["3R", "3L"]],
)
def test_geneset_region(region):
    """geneset(region=...) restricts rows to features overlapping the region."""
    ag3 = setup_ag3()

    df = ag3.geneset(region=region)
    assert isinstance(df, pd.DataFrame)
    gff3_cols = [
        "contig",
        "source",
        "type",
        "start",
        "end",
        "score",
        "strand",
        "phase",
    ]
    expected_cols = gff3_cols + ["ID", "Parent", "Name", "description"]
    assert df.columns.tolist() == expected_cols
    assert len(df) > 0

    # check region
    region = ag3.resolve_region(region)
    if isinstance(region, Region):
        assert np.all(df["contig"].values == region.contig)
        if region.start and region.end:
            assert np.all(df.eval(f"start <= {region.end} and end >= {region.start}"))


@pytest.mark.parametrize(
    "region",
    ["AGAP007280", "2R:48714463-48715355", "2R", "X"],
)
@pytest.mark.parametrize("mask", ["gamb_colu_arab", "gamb_colu", "arab"])
def test_is_accessible(region, mask):
    """is_accessible() returns a 1-D bool array matching the genome sequence length."""
    ag3 = setup_ag3()
    # run a couple of tests
    is_accessible = ag3.is_accessible(region=region, site_mask=mask)
    assert isinstance(is_accessible, np.ndarray)
    assert is_accessible.ndim == 1
    assert is_accessible.shape[0] == ag3.genome_sequence(region).shape[0]


def test_cross_metadata():
    """cross_metadata() columns, sample membership in AG1000G-X, and value domains."""
    ag3 = setup_ag3()
    df_crosses = ag3.cross_metadata()
    assert isinstance(df_crosses, pd.DataFrame)
    expected_cols = ["cross", "sample_id", "father_id", "mother_id", "sex", "role"]
    assert df_crosses.columns.tolist() == expected_cols

    # check samples are in AG1000G-X
    df_samples = ag3.sample_metadata(sample_sets="AG1000G-X")
    assert set(df_crosses["sample_id"]) == set(df_samples["sample_id"])

    # check values
    expected_role_values = ["parent", "progeny"]
    assert df_crosses["role"].unique().tolist() == expected_role_values
    expected_sex_values = ["F", "M"]
    assert df_crosses["sex"].unique().tolist() == expected_sex_values


# NOTE: the source chunk is truncated here — the remainder of this test
# (and possibly further tests) is not visible in this view.
def test_site_annotations():
    ag3 = setup_ag3()
    # test access as zarr
    root =
# dataset artifact (not program text): <filename>plgx-esp-ui/polylogyx/blueprints/v1/hosts.py
"""REST endpoints for host (node) listing, detail, export and configuration.

NOTE: several names used below (``requestparse``, ``marshal``,
``prepare_response``, ``db``, ``Node``, ``Alerts``, ``BytesIO``, ``csv``,
``send_file``, ``current_app``) come from the star import of
``polylogyx.blueprints.v1.utils``.
"""
from flask_restplus import Namespace, Resource, inputs

from polylogyx.blueprints.v1.utils import *
from polylogyx.utils import assemble_configuration, assemble_additional_configuration
from polylogyx.dao.v1 import hosts_dao, tags_dao, common_dao
from polylogyx.wrappers.v1 import host_wrappers, parent_wrappers, config_wrappers
from polylogyx.tasks import celery
from polylogyx.authorize import admin_required

ns = Namespace('hosts', description='nodes related operations')


@ns.route('', endpoint='hosts_list')
class HostsList(Resource):
    """ List all Nodes Filtered """
    parser = requestparse(['status', 'platform', 'searchterm', 'start', 'limit', 'enabled', 'alerts_count'],
                          [bool, str, str, int, int, inputs.boolean, inputs.boolean],
                          ['status(true/false)', 'platform(windows/linux/darwin)', 'searchterm', 'start', 'limit',
                           'enabled(true/false)', 'alerts_count(true/false)'],
                          [False, False, False, False, False, False, False],
                          [None, ["windows", "linux", "darwin"], None, None, None, None, None],
                          [None, None, "", None, None, True, True])

    @ns.expect(parser)
    def post(self):
        """Return a paginated, filtered host list.

        When ``alerts_count`` is true the DAO yields (node, alert_count)
        pairs; otherwise it yields plain node rows.
        """
        args = self.parser.parse_args()
        query_set = hosts_dao.get_hosts_paginated(args['status'], args['platform'], args['searchterm'],
                                                  args['enabled'], args['alerts_count']) \
            .offset(args['start']).limit(args['limit']).all()
        total_count = hosts_dao.get_hosts_total_count(args['status'], args['platform'], args['enabled'])
        if query_set:
            results = []
            for node_alert_count_pair in query_set:
                if args['alerts_count']:
                    node_dict = node_alert_count_pair[0].get_dict()
                    node_dict['alerts_count'] = node_alert_count_pair[1]
                else:
                    node_dict = node_alert_count_pair.get_node_dict()
                results.append(node_dict)
            # NOTE(review): 'count' re-runs the full filtered query a second
            # time just for .count() — consider reusing the first query.
            data = {'results': results,
                    'count': hosts_dao.get_hosts_paginated(args['status'], args['platform'], args['searchterm'],
                                                           args['enabled'], args['alerts_count']).count(),
                    'total_count': total_count}
        else:
            data = {'results': [], 'count': 0, 'total_count': total_count}
        status = "success"
        message = "Successfully fetched the hosts details"
        return marshal(prepare_response(message, status, data), parent_wrappers.common_response_wrapper,
                       skip_none=True)


@ns.route('/export')
class NodesCSV(Resource):
    """ Returns a csv file object with nodes info as data """

    def get(self):
        """Export all non-removed/non-deleted nodes (with alert counts) as a CSV attachment."""
        from sqlalchemy import desc, and_, or_
        # left-join unresolved alerts so hosts with no alerts still appear with count 0
        record_query = db.session.query(Node, db.func.count(Alerts.id))\
            .filter(and_(Node.state != Node.REMOVED, Node.state != Node.DELETED))\
            .outerjoin(Alerts, and_(Alerts.node_id == Node.id,
                                    or_(Alerts.status == None, Alerts.status != Alerts.RESOLVED)))\
            .group_by(Node.id).order_by(desc(Node.id)).all()
        results = []
        for node, alerts_count in record_query:
            res = {}
            res['ID'] = node.id
            res['Host Name'] = node.display_name
            res['Host Identifier'] = node.host_identifier
            if node.node_is_active():
                res['State'] = "online"
            else:
                res['State'] = "offline"
            if alerts_count:
                res['Health'] = 'Unsafe'
            else:
                res['Health'] = 'Safe'
            res['Alerts Count'] = alerts_count
            if node.os_info:
                res['Operating System'] = node.os_info['name']
            else:
                res['Operating System'] = node.platform
            res['Last IP'] = node.last_ip
            res['Tags'] = [tag.to_dict() for tag in node.tags]
            res['Platform'] = node.platform
            results.append(res)
        # FIXME(review): results[0] raises IndexError when there are no nodes
        # at all — confirm whether an empty CSV / 404 should be returned instead.
        first_record = results[0]
        headers = []
        for key in first_record.keys():
            headers.append(key)
        bio = BytesIO()
        writer = csv.writer(bio)
        writer.writerow(headers)
        for data in results:
            row = []
            row.extend([data.get(column, '') for column in headers])
            writer.writerow(row)
        bio.seek(0)
        file_data = send_file(
            bio,
            mimetype='text/csv',
            as_attachment=True,
            attachment_filename='nodes.csv'
        )
        return file_data


@ns.route('/<string:host_identifier>', endpoint='node_details')
@ns.route('/<int:node_id>', endpoint='node_details_by_id')
class NodeDetailsList(Resource):
    """ List a Node Details """

    def get(self, host_identifier=None, node_id=None):
        """Fetch one node by numeric id (preferred) or by host identifier."""
        data = None
        if node_id:
            queryset = hosts_dao.get_node_by_id(node_id)
        elif host_identifier:
            queryset = hosts_dao.get_node_by_host_identifier(host_identifier)
        else:
            queryset = None
        db.session.commit()
        if not queryset:
            message = "There is no host exists with this host identifier or node id given!"
            status = "failure"
        else:
            data = marshal(queryset, host_wrappers.nodewrapper)
            if not data:
                data = {}
            message = "Node details are fetched successfully"
            status = "success"
        return marshal(prepare_response(message, status, data), parent_wrappers.common_response_wrapper,
                       skip_none=True)


@ns.route('/<string:host_identifier>/alerts/distribution', endpoint='host_alerts_count_for_host_identifier')
@ns.route('/<int:node_id>/alerts/distribution', endpoint='host_alerts_count_for_node_id')
class HostAlertsDistribution(Resource):
    """ List a Node Details """

    def get(self, host_identifier=None, node_id=None):
        """Return per-source and per-rule alert counts for one node."""
        if node_id:
            node = hosts_dao.get_node_by_id(node_id)
        elif host_identifier:
            node = hosts_dao.get_node_by_host_identifier(host_identifier)
        else:
            node = None
        if not node:
            data = None
            message = "There is no host exists with this host identifier or node id given!"
            status = "failure"
        else:
            data = {}
            data['sources'] = hosts_dao.host_alerts_distribution_by_source(node)
            data['rules'] = [{"name": rule_count_pair[0], "count": rule_count_pair[1]}
                             for rule_count_pair in hosts_dao.host_alerts_distribution_by_rule(node)]
            message = "Alerts distribution details are fetched for the host"
            status = "success"
        return marshal(prepare_response(message, status, data), parent_wrappers.common_response_wrapper,
                       skip_none=True)


@ns.route('/count', endpoint='nodes_related_count')
class NodeCountList(Resource):
    """ Lists all Nodes Filtered count """

    def get(self):
        """Return node counts broken down by status and platform."""
        data = hosts_dao.get_hosts_filtered_status_platform_count()
        return marshal(prepare_response("Successfully fetched the nodes status count", 'success', data),
                       parent_wrappers.common_response_wrapper, skip_none=True)


@ns.route('/status_logs', endpoint='node_status_logs')
class HostStatusLogs(Resource):
    """ Host Status Logs """
    parser = requestparse(['host_identifier', 'node_id', 'start', 'limit', 'searchterm'],
                          [str, int, int, int, str],
                          ["host identifier of the node", "id of the node", 'start', 'limit', 'searchterm'],
                          [False, False, False, False, False],
                          [None, None, None, None, None],
                          [None, None, None, None, ''])

    @ns.expect(parser)
    def post(self):
        """Return paginated status logs for a node identified by id or host identifier."""
        args = self.parser.parse_args()
        data = None
        status = "failure"
        if args['node_id'] is not None or args['host_identifier'] is not None:
            if args['host_identifier'] is not None:
                qs = hosts_dao.get_node_by_host_identifier(args['host_identifier'])
            else:
                node_id = args['node_id']
                qs = hosts_dao.get_node_by_id(node_id)
            if qs:
                data = {'results': marshal(hosts_dao.get_status_logs_of_a_node(qs, args['searchterm'])
                                           .offset(args['start']).limit(args['limit']).all(),
                                           host_wrappers.node_status_log_wrapper),
                        'count': hosts_dao.get_status_logs_of_a_node(qs, args['searchterm']).count(),
                        'total_count': hosts_dao.get_status_logs_total_count(qs)}
                message = "Successfully fetched the host's status logs"
                status = "success"
            else:
                message = "Host identifier or node id passed is not correct!"
        else:
            message = "Please pass one of node id or host identifier!"
        return marshal(prepare_response(message, status, data), parent_wrappers.common_response_wrapper,
                       skip_none=True)


@ns.route('/additional_config', endpoint='node_additional_config')
class HostAdditionalConfig(Resource):
    """ Additional Config of a Node """
    parser = requestparse(['host_identifier', 'node_id'], [str, int],
                          ["host identifier of the node", "id of the node"], [False, False])

    @ns.expect(parser)
    def post(self):
        """Assemble and return the additional osquery config for one node.

        NOTE: the source chunk is truncated here — the final ``return``
        statement of this method is not visible in this view.
        """
        args = self.parser.parse_args()
        config = None
        status = "failure"
        if args['node_id'] is not None or args['host_identifier'] is not None:
            if args['host_identifier'] is not None:
                node = hosts_dao.get_node_by_host_identifier(args['host_identifier'])
            else:
                node_id = args['node_id']
                node = hosts_dao.get_node_by_id(node_id)
            if node:
                config = assemble_additional_configuration(node)
                current_app.logger.debug("Additional config of Node '{0}' is:\n{1}".format(node, config))
                status = "success"
                message = "Successfully fetched additional config of the node for the host identifier passed"
            else:
                message = "Host identifier or node id passed is not correct!"
        else:
            message = "At least one of host identifier or node id should be given!"
return marshal(prepare_response(message, status, config), parent_wrappers.common_response_wrapper, skip_none=True) @ns.route('/config', endpoint='node_full_config') class HostFullConfig(Resource): """ Full Config of a Node """ parser = requestparse(['host_identifier', 'node_id'], [str, int], ["host identifier of the node", "id of the node"], [False, False]) @ns.expect(parser) def post(self): args = self.parser.parse_args() config = None config_details = None status = "failure" if args['node_id'] is not None or args['host_identifier'] is not None: if args['host_identifier'] is not None: node = hosts_dao.get_node_by_host_identifier(args['host_identifier']) else: node_id = args['node_id'] node = hosts_dao.get_node_by_id(node_id) if node: config = assemble_configuration(node) config_details = config[1] config = config[0] current_app.logger.debug("Full config of Node '{0}' is:\n{1}".format(node, config_details)) status = "success" message = "Successfully fetched full config of the node for the host identifier passed" else: message = "Host identifier or node id passed is not correct!" else: message = "At least one of host identifier or node id should be given!" 
return marshal({'status': status, 'message': message, 'data': config, 'config': config_details}, config_wrappers.node_config, skip_none=True) @ns.route('/recent_activity/count', endpoint='node_recent_activity_count') class RecentActivityCount(Resource): """ Recent Activity count of a Node """ parser = requestparse(['host_identifier', 'node_id'], [str, int], ["host identifier of the node", "id of the node"], [False, False]) @ns.expect(parser) def post(self): args = self.parser.parse_args() status = "failure" data = None if args['node_id'] is not None or args['host_identifier'] is not None: if args['host_identifier'] is not None: node = hosts_dao.get_node_by_host_identifier(args['host_identifier']) if node: node_id = node.id else: node_id = None else: node_id = args['node_id'] if not node_id: message = "Please pass correct host identifier or node id to get the results" else: data = [{'name': query[0], 'count': query[1]} for query in hosts_dao.get_result_log_count(node_id)] status = "success" message = "Successfully fetched the count of schedule query results count of host identifier passed" else: message = "At least one of host identifier or node id should be given!" 
return marshal(prepare_response(message, status, data), parent_wrappers.common_response_wrapper, skip_none=True) @ns.route('/recent_activity', endpoint='node_recent_activity_results') class RecentActivityResults(Resource): """ Recent Activity results of a query of a Node """ parser = requestparse(['host_identifier', 'node_id', 'query_name', 'start', 'limit', 'searchterm', 'column_name', 'column_value'], [str, int, str, int, int, str, str, str], ["host identifier of the node", "node_id", "query", "start id", "limit", "searchterm", "column_name", "column_value"], [False, False, True, False, False, False, False, False], [None, None, None, None, None, None, None, None], [None, None, None, 0, 10, "", None, None]) @ns.expect(parser) def post(self): args = self.parser.parse_args() status = "failure" data = {} if args['column_value']: column_values = tuple([x.strip() for x in str(args['column_value']).split(',') if x]) else: column_values = None if args['node_id'] is not None or args['host_identifier'] is not None: if args['host_identifier'] is not None: node = hosts_dao.get_node_by_host_identifier(args['host_identifier']) if node: node_id = node.id else: node_id = None else: node_id = args['node_id'] if not node_id: message = "Please pass correct host identifier or node id to get the results" else: try: qs = hosts_dao.get_result_log_of_a_query_opt(node_id, args['query_name'], args['start'], args['limit'], args['searchterm'], args['column_name'], column_values) data = {'count': qs[0], 'total_count': qs[2], 'categorized_count': qs[3], 'results': [ {'id': list_ele[0], 'timestamp': list_ele[1].strftime('%m/%d/%Y %H/%M/%S'), 'action': list_ele[2], 'columns': list_ele[3]} for list_ele in qs[1]]} status = "success" message = "Successfully fetched the count of schedule query results count of host identifier passed" except Exception as e: message = "Unable to fetch scheduled query results for the node '<Node {0}>' " \
# -- Imports ------------------------------------------------------------------ # base import json import gc # third party import fastText as ft import pandas as pd # project from lib.data import Loader from lib.clustering import Clusterer, ClusterConstructor from lib.labelling import EditDistance, WordGram, CharGram, Hypernyms from lib.utils import Gatekeeper, KNN # -- Load Configs ------------------------------------------------------------- class Optimus: def __init__(self, config_path='', **kw): """ Constructor for the Optimus pipeline object This initialised the main access point to the Optimus pipeline. Note: ----- The constructor accepts keyword arguments that will overwrite the default settings of Optimus. For example distance=2 will overwrite the distance parameter for the object. Example ---------- o = Optimus(config_path='here/lies/my/config.json', distance = 2, cutoff = 10, stepsize = 3, ... ) Parameters ---------- config_path : str a path to a custom config file created by the user. It has to be a a .json file following the structure of the default config.json file found in the etc folder Returns ------- Optimus object """ self.kw = kw # perform a check and raise a warning to the user if they haven't # provided a config file path. if not config_path: print( """ WARNING: A path to the config.json file is not specified in the config_path parameter of the Optimus object. The default file located in the 'etc' folder will be used and may lead to undexpected results and issues. """) self.config_path = config_path def load_config(self, path='', default_config='./etc/config.json'): """ Load the provided config file into memory and assign it to the Optimus object as the config attribute. If no path is provided upon construction of the Optimus object, this method will default to the './etc/config.json' file for the settings. It will also pick up any keyword arguments passed to the __init__ function. 
Parameters ---------- path : str a path string to the config file. under normal circumstances this will be picked up from the __init__ function default_config : str a default config path to use for base configs. (default='./etc/config.json') Returns ------- dict a config dictionary containing all the required configurations for the rest of the pipeline """ # load the default configs try: config = json.load(open(default_config)) except FileNotFoundError: raise FileNotFoundError( f'Default configs failed to load from {default_config}') # update config with user config from file if path: try: user_config = json.load(open(path)) except FileNotFoundError: print( '-- WARNING: User config file failed to load.\n', '-- WARNING: Please check location provided') else: for key, value in user_config.items(): config[key] = value else: self.vprint("-- Custom user config were not found.") self.vprint("-- Using defaults as base configs.") # update config with user configs from kwargs if self.kw: self.vprint('-- Following valid configs were passed manually:') for key, value in self.kw.items(): if key in config.keys(): self.vprint(f' - {key}: {value}') config[key] = value return config def catch_input(self, input): """ Catch the input provided by the user and ensure it is a pandas Series object. If this is the case then reformat it into a list of form [[str]] Parameters ---------- input : pandas.core.series.Series the input column that the function will check and transform Returns ------- list a list of the form [[str]] """ # check if the type is correct if type(input) == pd.core.series.Series: return [[str(w)] for w in input] # process it into the right shape else: if hasattr(input, "__iter__"): # check if input is iterable raise ValueError( 'Non pandas Series iterables are not supported') else: return None def handle_output(self, prowl, save_csv=False, full=False, out_col=-1): """ The function that handles how the output of the pipeline is presented to the user. 
If the user does not set the full parameter to True, only the last column from prowl will be returned. Parameters ---------- prowl : pandas.core.frame.DataFrame a pandas dataframe that is constructed during the runing of the pipeline save_csv : bool if true will save the full prowl to a csv file (default=False) full : bool if true will return the whole of prowl to the user, otherwise only the last column of prowl (the last depth iteration results) will be returned to the user (default=False) Returns ------- pd.core.series.Series / pd.core.frame.DataFrame full or partial results of the pipeline """ # return the encoded words to their original stat if self.config['decode_output']: def decoder(s): """Decode all the encoded trouble words in the output""" s = str(s) # ensuring that we don't get errors if we get a string d = self.config['trouble'] # the reason its sorted is because if a label contains HZ and # there is a H and a HZ to decode the H could get decoded first # leaving just capital Zs for key in sorted(d, key=lambda k: len(d[k]), reverse=True): s = s.replace(d[key], key) return s prowl.iloc[:,1:] = prowl.iloc[:,1:].applymap(decoder) if save_csv: prowl.to_csv('optimus_results.csv', header=True, index=False) if full: return prowl else: return prowl.iloc[:, out_col] def vprint(self, string): """ A very simple function which will either print or not depending on the verbosity setting of the Optimus object Parameters ---------- string : str a string to print if self.verbose == True Returns ------- None """ if self.verbose: print(string) def __call__(self, data=None, save_csv=False, full=False, verbose=True, runKNN=False): """ By calling this function the user will start the processing of the pipeline. If no data is provided to this function under the data parameter it will take the path provided in the config['data'] entry, load it and use it. 
It is useful as a fallback option, however it is expected that as part of the integration of Optimus into a pipeline, some data will be passed to this function call. Parameters ---------- data : pd.core.series.Series a series object containing the strings that need to be processed (default=None) save_csv : bool this dictates if the full prowl will be saved as a csv (default=False) full : bool this dictates if the data returned to the user in the form of a full dataframe or just a series of predicted labels (default=False) verbose : bool this parameter dictates how much will be printed. if false only a few lines will be output. (default=True) runKNN : bool this parameter dictates if the K Nearest Neighbour algorythm will be applied to the labels that are not picked up in the normal run of optimus Returns ------- pd.core.series.Series / pd.core.frame.DataFrame depending on the full setting this will return the output of the last depth or a full dataframe with outputs from each iteration """ # set the verbosity setting self.verbose = verbose # notes self.vprint('-- Performing setup') self.vprint('_' * 79) # load config before each run self.config = self.load_config(self.config_path) # reformat provided series into accepted format data = self.catch_input(data) # build looping mechanism, adding 1 to the depth of # ratchet and changing the dataset passing through the classes # free text loading self.vprint("-- Loading descriptions") if data: self.vprint("-- Ingesting provided series") L = Loader(self.config, data) else: self.vprint("-- No custom data provided, using data from config") L = Loader(self.config) # start a dataframe that will track the labels at each level prowl = pd.DataFrame.from_dict(L.linked, orient='index') prowl = prowl.reset_index() prowl.columns = ['original', 'current_labels'] # embed the words using fastText if hasattr(self, "matrix"): self.vprint("-- Model already loaded") else: self.vprint("-- Loading model") self.matrix = 
ft.load_model(self.config['model']) self.vprint("-- Embedding") clusterer = Clusterer(L, self.matrix, self.config) # clustering self.vprint("-- Clustering") CC = ClusterConstructor(clusterer, self.config) # start the loop for each depth self.vprint('_' * 79) # some decoration while CC.iterate: self.vprint(f"-- Depth: {CC.distance}") # some decoration self.vprint('_' * 79) # some decoration # edit distance based metrics ED = EditDistance(CC, self.config) # push the rejected clusters back to the ClusterConstructor # for the next phase CC.clusters = ED.rejected self.vprint( f" ** | Edit Distance | classified: {len(ED.accepted)}") # class for character and word n-gram and scoring WG = WordGram(CC, self.config) # push the rejected clusters back to CC for the next phase CC.clusters = WG.rejected self.vprint( f" ** | Word Grams | classified: {len(WG.accepted)}") # class for character and word n-gram and scoring CG = CharGram(CC, self.config) # push
Reason] 1) Result - FAIL if there is any exception in the operation or pool state does not change to expected state in given time else PASS 2) Reason - Reason for failure""" return validate_state(api_client, self, state, timeout, interval) @staticmethod def state_check_function(objects, state): return str(objects[0].state).lower().decode("string_escape") == str(state).lower() class Network(BaseAbstract): """Manage Network pools""" def __init__(self, items): super(Network, self).__init__(items) @classmethod def create(cls, api_client, services=None, accountid=None, domainid=None, networkofferingid=None, projectid=None, subdomainaccess=None, zoneid=None, gateway=None, netmask=None, cidr=None, vpcid=None, aclid=None, vlan=None, ipexclusionlist=None, domain=None, account=None, vpc=None, zone=None, acl=None, data=None): """Create Network for account""" if data: services = data cmd = {'name': services["name"], 'displaytext': services["displaytext"]} if networkofferingid: cmd['networkofferingid'] = networkofferingid elif "networkoffering" in services: cmd['networkofferingid'] = services["networkoffering"] elif "networkofferingname" in services: networkoffering = get_network_offering(api_client, services["networkofferingname"]) cmd['networkofferingid'] = networkoffering.id if zoneid: cmd['zoneid'] = zoneid elif "zoneid" in services: cmd['zoneid'] = services["zoneid"] elif zone: cmd['zoneid'] = zone.id if ipexclusionlist: cmd['ipexclusionlist'] = ipexclusionlist elif "ipexclusionlist" in services: cmd['ipexclusionlist'] = services["ipexclusionlist"] if subdomainaccess is not None: cmd['subdomainaccess'] = subdomainaccess if gateway: cmd['gateway'] = gateway elif "gateway" in services: cmd['gateway'] = services["gateway"] if netmask: cmd['netmask'] = netmask elif "netmask" in services: cmd['netmask'] = services["netmask"] if cidr: cmd['cidr'] = cidr elif "cidr" in services: cmd['cidr'] = services["cidr"] if "startip" in services: cmd['startip'] = services["startip"] if 
"endip" in services: cmd['endip'] = services["endip"] if vlan: cmd['vlan'] = vlan elif "vlan" in services: cmd['vlan'] = services["vlan"] if "acltype" in services: cmd['acltype'] = services["acltype"] if accountid: cmd['account'] = accountid elif account: cmd['account'] = account.name elif vpc: cmd['account'] = vpc.account elif account: cmd['account'] = account.name if domainid: cmd['domainid'] = domainid elif domain: cmd['domainid'] = domain.id elif vpc: cmd['domainid'] = vpc.domainid elif account: cmd['domainid'] = account.domainid if projectid: cmd['projectid'] = projectid if vpcid: cmd['vpcid'] = vpcid elif vpc: cmd['vpcid'] = vpc.id if aclid: cmd['aclid'] = aclid elif acl: cmd['aclid'] = acl.id elif "aclname" in services: acl = get_network_acl(api_client, services['aclname']) cmd['aclid'] = acl.id network_resp = api_client.createNetwork(**cmd) return Network(network_resp['network']) def delete(self, api_client): """Delete Account""" cmd = {'id': self.id, 'fetch_result': True} api_client.deleteNetwork(**cmd) def update(self, api_client, **kwargs): """Updates network with parameters passed""" cmd = {'id': self.id} cmd.update(kwargs) return api_client.updateNetwork(**cmd) def restart(self, api_client, cleanup=None): """Restarts the network""" cmd = {'id': self.id} if cleanup: cmd['cleanup'] = cleanup return api_client.restartNetwork(**cmd) @classmethod def list(cls, api_client, **kwargs): """List all Networks matching criteria""" cmd = {} cmd.update(kwargs) if 'account' in kwargs.keys() and 'domainid' in kwargs.keys(): cmd['listall'] = True return api_client.listNetworks(**cmd) class NetworkACL(BaseAbstract): """Manage Network ACL lifecycle""" def __init__(self, items): super(NetworkACL, self).__init__(items) @classmethod def create(cls, api_client, services=None, networkid=None, protocol=None, number=None, aclid=None, action='Allow', traffictype=None, cidrlist=None, acl=None, data=None): """Create network ACL rules(Ingress/Egress)""" if data: services = data if 
cidrlist is None: cidrlist = [] cmd = {} if "networkid" in services: cmd['networkid'] = services["networkid"] elif networkid: cmd['networkid'] = networkid if "protocol" in services: cmd['protocol'] = services["protocol"] if services["protocol"] == 'ICMP': cmd['icmptype'] = -1 cmd['icmpcode'] = -1 elif protocol: cmd['protocol'] = protocol if "startport" in services: cmd['startport'] = services["startport"] if "endport" in services: cmd['endport'] = services["endport"] if "cidrlist" in services: cmd['cidrlist'] = services["cidrlist"] elif cidrlist: cmd['cidrlist'] = cidrlist if "traffictype" in services: cmd['traffictype'] = services["traffictype"] elif traffictype: cmd['traffictype'] = traffictype if "action" in services: cmd['action'] = services["action"] elif action: cmd['action'] = action if "number" in services: cmd['number'] = services["number"] elif number: cmd['number'] = number if "aclid" in services: cmd['aclid'] = services["aclid"] elif aclid: cmd['aclid'] = aclid elif acl: cmd['aclid'] = acl.id # Defaulted to Ingress nwacl_resp = NetworkACL(api_client.createNetworkACL(**cmd)) waitforjob(api_client, nwacl_resp.jobid) return cls.list(api_client, id=nwacl_resp.id)[0] def delete(self, api_client): """Delete network acl""" cmd = {'id': self.id} return api_client.deleteNetworkACL(**cmd) @classmethod def list(cls, api_client, **kwargs): """List Network ACLs""" cmd = {} cmd.update(kwargs) if 'account' in kwargs.keys() and 'domainid' in kwargs.keys(): cmd['listall'] = True return super(NetworkACL, cls).list(api_client.listNetworkACLs(**cmd)['networkacl']) class NetworkACLList(BaseAbstract): """Manage Network ACL lists lifecycle""" def __init__(self, items): super(NetworkACLList, self).__init__(items) @classmethod def create( cls, api_client, services=None, name=None, description=None, vpcid=None, vpc=None, data=None): """Create network ACL container list""" if data: services = data cmd = {} if "name" in services: cmd['name'] = services["name"] elif name: 
cmd['name'] = name if "description" in services: cmd['description'] = services["description"] elif description: cmd['description'] = description if "vpcid" in services: cmd['vpcid'] = services["vpcid"] elif vpcid: cmd['vpcid'] = vpcid elif vpc: cmd['vpcid'] = vpc.id response = NetworkACLList(api_client.createNetworkACLList(**cmd)) waitforjob(api_client, response.jobid) return cls.list(api_client, id=response.id)[0] def delete(self, api_client): """Delete network acl list""" cmd = {'id': self.id} return api_client.deleteNetworkACLList(**cmd) @classmethod def list(cls, api_client, **kwargs): """List Network ACL lists""" cmd = {} cmd.update(kwargs) if 'account' in kwargs.keys() and 'domainid' in kwargs.keys(): cmd['listall'] = True return super(NetworkACLList, cls).list(api_client.listNetworkACLLists(**cmd)['networkacllist']) def attach(self, api_client, network=None): cmd = {'aclid': self.id} if network: cmd['networkid'] = network.id return api_client.replaceNetworkACLList(**cmd) class Vpn(BaseAbstract): """Manage VPN life cycle""" def __init__(self, items): super(Vpn, self).__init__(items) @classmethod def create(cls, api_client, publicipid, account=None, domainid=None, projectid=None, networkid=None, vpcid=None, openfirewall=None, iprange=None, fordisplay=False): """Create VPN for Public IP address""" cmd = {'publicipid': publicipid} if account: cmd['account'] = account if domainid: cmd['domainid'] = domainid if projectid: cmd['projectid'] = projectid if networkid: cmd['networkid'] = networkid if vpcid: cmd['vpcid'] = vpcid if iprange: cmd['iprange'] = iprange if openfirewall: cmd['openfirewall'] = openfirewall cmd['fordisplay'] = fordisplay cmd['fetch_result'] = True return Vpn(api_client.createRemoteAccessVpn(**cmd).get('remoteaccessvpn')) @classmethod def createVpnGateway(cls, api_client, vpcid=None, vpc=None): """Create VPN Gateway """ cmd = {} if vpcid: cmd['vpcid'] = vpcid elif vpc: cmd['vpcid'] = vpc.id cmd['fetch_result'] = True return 
Vpn(api_client.createVpnGateway(**cmd).get('vpngateway')) @classmethod def createVpnConnection(cls, api_client, s2scustomergatewayid, s2svpngatewayid, passive=False): """Create VPN Connection """ cmd = {'s2scustomergatewayid': s2scustomergatewayid, 's2svpngatewayid': s2svpngatewayid, 'fetch_result': True} if passive: cmd['passive'] = passive return api_client.createVpnConnection(**cmd).get('vpnconnection') @classmethod def resetVpnConnection(cls, api_client, id): """Reset VPN Connection """ cmd = {'id': id} cmd['fetch_result'] = True return api_client.resetVpnConnection(**cmd) @classmethod def deleteVpnConnection(cls, api_client, id): """Delete VPN Connection """ cmd = {'id': id} cmd['fetch_result'] = True return api_client.deleteVpnConnection(**cmd) @classmethod def listVpnGateway(cls, api_client, **kwargs): """List all VPN Gateways matching criteria""" cmd = {} cmd.update(kwargs) return super(Vpn, cls).list(api_client.listVpnGateways(**cmd)) @classmethod def listVpnConnection(cls, api_client, **kwargs): """List all VPN Connections matching criteria""" cmd = {} cmd.update(kwargs) return super(Vpn, cls).list(api_client.listVpnConnections(**cmd).get('vpnconnection', [])) def delete(self, api_client): """Delete remote VPN access""" cmd = {'publicipid': self.publicipid} api_client.deleteRemoteAccessVpn(**cmd) @classmethod def list(cls, api_client, **kwargs): """List all VPN matching criteria""" cmd = {} cmd.update(kwargs) if 'account' in kwargs.keys() and 'domainid' in kwargs.keys(): cmd['listall'] = True return super(Vpn, cls).list(api_client.listRemoteAccessVpns(**cmd).get('remoteaccessvpn')) class VpnUser(BaseAbstract): """Manage VPN user""" def __init__(self, items): super(VpnUser, self).__init__(items) @classmethod def create(cls, api_client, username, password, account=None, domainid=None, projectid=None, rand_name=True): """Create VPN user""" cmd = {'username': "-".join([username, random_gen()]) if rand_name else username, 'password': password} if account: 
cmd['account'] = account if domainid: cmd['domainid'] = domainid if projectid: cmd['projectid'] = projectid cmd['fetch_result'] = True return VpnUser(api_client.addVpnUser(**cmd).get('vpnuser')) def delete(self, api_client, projectid=None): """Remove VPN user""" cmd = {'username': self.username} if projectid: cmd['projectid'] = projectid else: cmd['account'] = self.account cmd['domainid'] = self.domainid api_client.removeVpnUser(**cmd) @classmethod def list(cls, api_client, **kwargs): """List all VPN Users matching criteria""" cmd = {} cmd.update(kwargs) if 'account' in kwargs.keys() and 'domainid' in kwargs.keys(): cmd['listall'] = True return super(VpnUser, cls).list(api_client.listVpnUsers(**cmd).get('vpnuser')) class Zone(BaseAbstract): """Manage Zone""" def __init__(self, items): super(Zone, self).__init__(items) @classmethod def create(cls, api_client, services, domainid=None): """Create zone""" cmd = {'dns1': services["dns1"], 'internaldns1': services["internaldns1"], 'name': services["name"], 'networktype': services["networktype"]} if "dns2" in services: cmd['dns2'] = services["dns2"] if "internaldns2" in services: cmd['internaldns2'] = services["internaldns2"] if domainid: cmd['domainid'] = domainid return Zone(api_client.createZone(**cmd).get('zone')) def delete(self, api_client): """Delete Zone""" cmd = {'id': self.id} api_client.deleteZone(**cmd) def update(self, api_client, **kwargs): """Update the zone""" cmd = {'id': self.id} cmd.update(kwargs) return api_client.updateZone(**cmd) @classmethod def list(cls, api_client, **kwargs): """List all Zones matching criteria""" cmd = {} cmd.update(kwargs) if 'account' in kwargs.keys() and 'domainid' in kwargs.keys(): cmd['listall'] = True return super(Zone, cls).list(api_client.listZones(**cmd).get('zone')) class Pod: """Manage Pod""" def __init__(self, items): self.__dict__.update(items) @classmethod def create(cls, api_client, services): """Create Pod""" cmd = {'gateway': services["gateway"], 'netmask': 
services["netmask"], 'name': services["name"], 'startip': services["startip"], 'endip': services["endip"], 'zoneid': services["zoneid"]} return Pod(api_client.createPod(**cmd)) def delete(self, api_client): """Delete Pod""" cmd = {'id': self.id} api_client.deletePod(**cmd) @classmethod def list(cls, api_client, **kwargs): """Returns a default pod for specified zone""" cmd = {} cmd.update(kwargs) if 'account' in kwargs.keys() and 'domainid' in kwargs.keys(): cmd['listall'] = True return api_client.listPods(**cmd) @classmethod def update(cls, api_client, **kwargs): """Update the pod""" cmd = {} cmd.update(kwargs) return api_client.updatePod(**cmd) class PublicIpRange: """Manage VlanIpRange""" def __init__(self, items): self.__dict__.update(items) @classmethod def create(cls, api_client, services, account=None, domainid=None): """Create VlanIpRange""" cmd = {'gateway': services["gateway"], 'netmask': services["netmask"], 'forvirtualnetwork': services["forvirtualnetwork"], 'startip': services["startip"],
is ok except Exception as e: logger.warning('removing invalid xml_cachefile %s : %s' % (xml_cachefile, str(e))) os.unlink(xml_cachefile) root = None if root is not None: cache_status = True else: # request not cached try: xmlout = requests.get(urlapi, auth=(user, password), params=params) except: raise_from(ConnectionError("Unable to connect to %s" % urlapi), None) try: root = remove_dom(etree.fromstring(xmlout.content)) except Exception as e: content = nice_string(xmlout.content) if 'Timeout occured while waiting response from server' in content: retry -= 1 logger.warning('Timeout while processing request : %s' % str_query) logger.warning('left retry : %s' % retry) if retry == 0: warnings.warn('Giving up trying to connect %s ' % urlapi, ScihubError) break continue logger.critical("Error while parsing xml answer") logger.critical("query was: %s" % str_query) logger.critical("answer is: \n {}".format(content)) warnings.warn('Schihub query error %s ' % urlapi, ScihubError) if xml_cachefile is not None: try: int(root.find(".//totalResults").text) # this should enought to test the xml is ok try: with open(xml_cachefile, 'w') as f: f.write(nice_string(root)) except Exception as e: logger.warning('unable to write xml_cachefile %s : %s' % (xml_cachefile, str(e))) except: logger.warning('not writing corrupted xml cachefile') # <opensearch:totalResults>442</opensearch:totalResults>\n try: count = int(root.find(".//totalResults").text) except: # there was an error in request logger.error('response was:\n {}'.format(nice_string(root))) if xml_cachefile is not None and os.path.exists(xml_cachefile): os.unlink(xml_cachefile) warnings.warn('invalid request %s ' % str_query, ScihubError) break # reset retry since last request is ok retry = retry_init # logger.debug("totalResults : %s" % root.find(".//totalResults").text ) logger.debug("%s" % root.find(".//subtitle").text) # logger.debug("got %d entry starting at %d" % (len(root.findall(".//entry")),start)) if 
len(root.findall(".//entry")) > 0: chunk_safes_df = pd.DataFrame(columns=answer_fields) t = time.time() for field in answer_fields: if field.startswith('url'): if field == 'url': elts = [d for d in root.xpath(".//entry/link") if 'rel' not in d.attrib] else: rel = field.split('_')[1] elts = root.xpath(".//entry/link[@rel='%s']" % rel) tag = 'str' values = [d.attrib['href'] for d in elts] else: elts = root.xpath(".//entry/*[@name='%s']" % field) if len(elts) != 0: tag = elts[0].tag # ie str,int,date .. values = [d.text for d in elts] else: tag = None values = [] logger.debug("Ignoring field %s (not found)." % field) if len(values) >= 1: chunk_safes_df[field] = values if tag in decode_tags: chunk_safes_df[field] = chunk_safes_df[field].apply(decode_tags[tag]) try: shp_footprints = chunk_safes_df['footprint'].apply(wkt.loads) except: pass chunk_safes_df['footprint'] = shp_footprints chunk_safes = gpd.GeoDataFrame(chunk_safes_df, geometry='footprint', crs=scihub_crs) chunk_safes['footprint'] = chunk_safes.buffer(0) start += len(chunk_safes) logger.debug("xml parsed in %.2f secs" % (time.time() - t)) # remove cachefile if some safes are recents if xml_cachefile is not None and os.path.exists(xml_cachefile): dateage = (datetime.datetime.utcnow().replace(tzinfo=pytz.UTC) - chunk_safes[ 'beginposition'].max()) # used for cache age if dateage < cacherefreshrecent: logger.debug("To recent answer. 
Removing cachefile %s" % xml_cachefile) os.unlink(xml_cachefile) # sort by sensing date safes = safes.append(chunk_safes, ignore_index=True, sort=False) safes = safes.sort_values('beginposition') safes.reset_index(drop=True, inplace=True) safes = safes.set_geometry('footprint') # safes['footprint'] = gpd.GeoSeries(safes['footprint']) safes.crs = scihub_crs if return_cache_status: return safes, cache_status else: return safes def _colocalize(safes, gdf, crs=scihub_crs, coloc=[geopandas_coloc.colocalize_loop], progress=False): """colocalize safes and gdf if crs is default and 'geometry_east' and 'geometry_west' exists in gdf, they will be used instead of .geometry (scihub mode) if crs is not default the crs will be used on .geometry for the coloc. the returned safes will be returned in scihub crs (ie 4326 : not the user specified) """ # initialise an empty index for both gdf idx_safes = safes.index.delete(slice(None)) idx_gdf = gdf.index.delete(slice(None)) logger.info('========= safes : %s',safes) if len(safes) == 0: # set same index as gdf, even if empty, to not throw an error on possible merge later safes.index = idx_gdf return safes gdf = gdf.copy() gdf['geometry'] = gdf.geometry gdf['startdate'] = gdf['beginposition'] gdf['stopdate'] = gdf['endposition'] safes['startdate'] = safes['beginposition'] safes['stopdate'] = safes['endposition'] safes_coloc = safes.iloc[0:0, :].copy() safes_coloc.crs = scihub_crs scihub_mode = False safes_crs = safes.copy() geometry_list = ['geometry'] if is_geographic(crs) and 'geometry_east' in gdf and 'geometry_west' in gdf: # never reached. 
replaced with 'scihub_geometry_east_list' raise DeprecationWarning('This should be deprecated') scihub_mode = True # remove unused geometry old_geometry = gdf.geometry.name gdf.set_geometry('geometry_east', inplace=True) gdf.drop(labels=[old_geometry], inplace=True, axis=1) geometry_list = ['geometry_east', 'geometry_west'] elif not is_geographic(crs): gdf.set_geometry('geometry', inplace=True) gdf.to_crs(crs, inplace=True) safes_crs.to_crs(crs, inplace=True) safes_coloc.to_crs(crs, inplace=True) for geometry in geometry_list: t = time.time() idx_safes_cur, idx_gdf_cur = coloc[0](safes_crs, gdf.set_geometry(geometry), progress=progress) logger.debug('sub coloc %s done in %ds' % (coloc[0].__name__, time.time() - t)) idx_safes = idx_safes.append(idx_safes_cur) idx_gdf = idx_gdf.append(idx_gdf_cur) for imethod in range(1, len(coloc)): # check with other coloc method t = time.time() idx_safes_cur_check, idx_gdf_cur_check = coloc[imethod](safes_crs, gdf.set_geometry(geometry), progress=progress) logger.debug('sub coloc %s done in %.1fs' % (coloc[imethod].__name__, time.time() - t)) if not (idx_gdf_cur_check.sort_values().equals( idx_gdf_cur.sort_values()) and idx_safes_cur_check.sort_values().equals( idx_safes_cur.sort_values())): raise RuntimeError('difference between colocation method') safes_coloc = safes.loc[idx_safes] safes_coloc.index = idx_gdf return safes_coloc.drop(['startdate', 'stopdate'], axis=1).to_crs(crs=scihub_crs) def remove_duplicates(safes_ori, keep_list=[]): """ Remove duplicate safe (ie same footprint with same date, but different prodid) """ safes = safes_ori.copy() if not safes.empty: # remove duplicate safes # add a temporary col with filename radic safes['__filename_radic'] = [f[0:62] for f in safes['filename']] uniques_radic = safes['__filename_radic'].unique() for filename_radic in uniques_radic: sames_safes = safes[safes['__filename_radic'] == filename_radic] if len(sames_safes['filename'].unique()) > 1: logger.debug("prodid count > 1: %s" % 
([s for s in sames_safes['filename'].unique()])) force_keep = list(set(sames_safes['filename']).intersection(keep_list)) to_keep = sames_safes[ 'ingestiondate'].max() # warning : may induce late reprocessing (ODL link) . min() is safer, but not the best quality if force_keep: _to_keep = sames_safes[sames_safes['filename'] == force_keep[0]]['ingestiondate'].iloc[0] if _to_keep != to_keep: logger.warning('remove_duplicate : force keep safe %s' % force_keep[0]) to_keep = _to_keep logger.debug("only keep : %s " % set([f for f in safes[safes['ingestiondate'] == to_keep]['filename']])) safes = safes[(safes['ingestiondate'] == to_keep) | (safes['__filename_radic'] != filename_radic)] safes.drop('__filename_radic', axis=1, inplace=True) return safes def get_datatakes(safes, datatake=0, user=None, password=None, cachedir=None, cacherefreshrecent=None): # get default keywords values if user is None: user = default_user if password is None: password = <PASSWORD> if cachedir is None: cachedir = default_cachedir if cacherefreshrecent is None: cacherefreshrecent = default_cacherefreshrecent safes['datatake_index'] = 0 for safe in list(safes['filename']): safe_index = safes[safes['filename'] == safe].index[0] takeid = safe.split('_')[-2] safe_rad = "_".join(safe.split('_')[0:4]) safes_datatake = scihubQuery_raw('filename:%s_*_*_*_%s_*' % (safe_rad, takeid), user=user, password=password, cachedir=cachedir, cacherefreshrecent=cacherefreshrecent) # FIXME duplicate are removed, even if duplicate=True safes_datatake = remove_duplicates(safes_datatake, keep_list=[safe]) try: ifather = safes_datatake[safes_datatake['filename'] == safe].index[0] except: logger.warn('Father safe was not the most recent one (scihub bug ?)') # ifather=safes_datatake.index.get_loc(father) # convert index to iloc safes_datatake['datatake_index'] = safes_datatake.index - ifather # get adjacent safes safes_datatake = safes_datatake[abs(safes_datatake['datatake_index']) <= datatake] # set same index as father 
safe safes_datatake.set_index(pd.Index([safe_index] * len(safes_datatake)), inplace=True) # remove datatake allready in safes (ie father and allready colocated ) for safe_datatake in safes_datatake['filename']: if (safes['filename'] == safe_datatake).any(): # FIXME take the lowest abs(datatake_index) safes_datatake = safes_datatake[safes_datatake['filename'] != safe_datatake] safes = safes.append(safes_datatake, sort=False) return safes def normalize_gdf(gdf, startdate=None, stopdate=None, date=None, dtime=None, timedelta_slice=None, progress=False): """ return a normalized gdf list start/stop date name will be 'beginposition' and 'endposition' """ t = time.time() if timedelta_slice is None: timedelta_slice = default_timedelta_slice if gdf is not None: if not gdf.index.is_unique: raise IndexError("Index must be unique. Duplicate founds : %s" % list( gdf.index[gdf.index.duplicated(keep=False)].unique())) if len(gdf) == 0: return [] norm_gdf = gdf.copy() else: norm_gdf = gpd.GeoDataFrame({ 'beginposition': startdate, 'endposition': stopdate, 'geometry': Polygon() }, geometry='geometry', index=[0], crs=scihub_crs) # no slicing timedelta_slice = None # convert naives dates to utc for date_col in norm_gdf.select_dtypes(include=['datetime64']).columns: try: norm_gdf[date_col] = norm_gdf[date_col].dt.tz_localize('UTC') logger.warning("Assuming UTC date on col %s" % date_col) except TypeError: # already localized pass # check valid input geometry if not all(norm_gdf.is_valid): raise ValueError("Invalid geometries found. Check them with gdf.is_valid") norm_gdf['wrap_dlon'] = False crs_ori = norm_gdf.crs if crs_ori is None: logger.warning('no crs provided. 
assuming lon/lat with greenwich/antimeridian handling') norm_gdf['wrap_dlon'] = norm_gdf.geometry.apply(lambda s: not hasattr(s, '__iter__')) norm_gdf.geometry = norm_gdf.geometry.apply(geoshp.smallest_dlon) norm_gdf.crs = scihub_crs # scihub requests are enlarged/simplified if is_geographic(norm_gdf.crs): buff = 2 simp = 1.9 else: # assume meters buff = 200 * 1000 simp = 190 * 1000 with warnings.catch_warnings(): # disable geographic warning warnings.simplefilter("ignore") norm_gdf['scihub_geometry'] = norm_gdf.geometry.buffer(buff).simplify(simp) if crs_ori is None: # re apply smallest dlon if needed norm_gdf['scihub_geometry'] = norm_gdf.set_geometry('scihub_geometry').apply( lambda row: geoshp.smallest_dlon(row['scihub_geometry']) if row['wrap_dlon'] else GeometryCollection( [row['scihub_geometry']]), axis=1) if not is_geographic(norm_gdf.crs): # convert scihub geometry to lon/lat (original geometry untouched !) norm_gdf_ori = norm_gdf.copy() crs_ori = norm_gdf.crs norm_gdf['scihub_geometry'] = norm_gdf.set_geometry('scihub_geometry').geometry.apply( lambda s: geoshp.split_shape_crs(s, crs=norm_gdf.crs)) norm_gdf['scihub_geometry'] = norm_gdf.set_geometry('scihub_geometry').geometry.to_crs(scihub_crs) # norm_gdf['scihub_geometry'] = # check valid output geometry if not all(norm_gdf.set_geometry('scihub_geometry').geometry.is_valid): raise NotImplementedError("Internal error
import nn


class PerceptronModel(object):
    """A binary perceptron classifying points as +1 or -1."""

    def __init__(self, dimensions):
        """
        Initialize a new Perceptron instance.

        A perceptron classifies data points as either belonging to a particular
        class (+1) or not (-1). `dimensions` is the dimensionality of the data.
        For example, dimensions=2 would mean that the perceptron must classify
        2D points.
        """
        self.w = nn.Parameter(1, dimensions)

    def get_weights(self):
        """Return a Parameter instance with the current weights of the perceptron."""
        return self.w

    def run(self, x):
        """Return the perceptron score (a scalar node) for data point `x` (1 x dimensions)."""
        return nn.DotProduct(x, self.w)

    def get_prediction(self, x):
        """Return the predicted class, 1 or -1, for a single data point `x`."""
        return 1 if nn.as_scalar(nn.DotProduct(self.w, x)) >= 0 else -1

    def train(self, dataset):
        """Train with the perceptron update rule until a full pass makes no mistakes."""
        converged = False
        while not converged:
            converged = True
            for x, y in dataset.iterate_once(1):
                if nn.as_scalar(y) != self.get_prediction(x):
                    converged = False
                    self.w.update(x, nn.as_scalar(y))


class RegressionModel(object):
    """
    A neural network model for approximating a function that maps from real
    numbers to real numbers. The network should be sufficiently large to be able
    to approximate sin(x) on the interval [-2pi, 2pi] to reasonable precision.
    """

    def __init__(self):
        # One hidden layer of 80 ReLU units trained by minibatch gradient descent.
        self.batch_size = 50
        self.w0 = nn.Parameter(1, 80)
        self.b0 = nn.Parameter(1, 80)
        self.w1 = nn.Parameter(80, 1)
        self.b1 = nn.Parameter(1, 1)
        self.alpha = 0.005  # learning rate

    def run(self, x):
        """Return predicted y-values (batch_size x 1) for inputs `x` (batch_size x 1)."""
        hidden = nn.ReLU(nn.AddBias(nn.Linear(x, self.w0), self.b0))
        return nn.AddBias(nn.Linear(hidden, self.w1), self.b1)

    def get_loss(self, x, y):
        """Return the square loss between predictions for `x` and targets `y`."""
        return nn.SquareLoss(self.run(x), y)

    def train(self, dataset):
        """Run gradient descent until the whole-dataset loss drops below 0.01."""
        params = [self.w0, self.w1, self.b0, self.b1]
        while True:
            for x, y in dataset.iterate_once(self.batch_size):
                grads = nn.gradients(self.get_loss(x, y), params)
                # grads are aligned with params, so zip keeps the pairing explicit.
                for param, grad in zip(params, grads):
                    param.update(grad, -self.alpha)
            # Convergence check on the full dataset once per epoch.
            # NOTE(review): the collapsed original is ambiguous about whether this
            # check ran per-batch or per-epoch; per-epoch is assumed — confirm.
            full_loss = self.get_loss(nn.Constant(dataset.x), nn.Constant(dataset.y))
            if nn.as_scalar(full_loss) < 0.01:
                return


class DigitClassificationModel(object):
    """
    A model for handwritten digit classification using the MNIST dataset.

    Each handwritten digit is a 28x28 pixel grayscale image, which is flattened
    into a 784-dimensional vector for the purposes of this model. Each entry in
    the vector is a floating point number between 0 and 1. The goal is to sort
    each digit into one of 10 classes (number 0 through 9).
    """

    def __init__(self):
        # One hidden layer of 100 ReLU units.
        self.batch_size = 5
        self.w0 = nn.Parameter(784, 100)
        self.b0 = nn.Parameter(1, 100)
        self.w1 = nn.Parameter(100, 10)
        self.b1 = nn.Parameter(1, 10)
        self.multiplier = -0.004  # negative learning rate passed directly to update()

    def run(self, x):
        """Return class scores/logits (batch_size x 10) for images `x` (batch_size x 784)."""
        hidden = nn.ReLU(nn.AddBias(nn.Linear(x, self.w0), self.b0))
        return nn.AddBias(nn.Linear(hidden, self.w1), self.b1)

    def get_loss(self, x, y):
        """Return the softmax loss for images `x` against one-hot labels `y` (batch_size x 10)."""
        return nn.SoftmaxLoss(self.run(x), y)

    def train(self, dataset):
        """Train until validation accuracy reaches 97.4%.

        FIX: removed leftover debug printing of the validation accuracy and the
        resulting duplicate get_validation_accuracy() call per check.
        """
        params = [self.w0, self.w1, self.b0, self.b1]
        while True:
            for x, y in dataset.iterate_once(self.batch_size):
                grads = nn.gradients(self.get_loss(x, y), params)
                for param, grad in zip(params, grads):
                    param.update(grad, self.multiplier)
            if dataset.get_validation_accuracy() >= 0.974:
                return


class LanguageIDModel(object):
    """
    A model for language identification at a single-word granularity, using a
    simple recurrent network over one-hot character encodings.
    """

    def __init__(self):
        # Our dataset contains words from five different languages, and the
        # combined alphabets of the five languages contain a total of 47 unique
        # characters.
        self.num_chars = 47
        self.languages = ["English", "Spanish", "Finnish", "Dutch", "Polish"]
        self.batch_size = 10
        self.alpha = 0.01  # learning rate
        self.w0 = nn.Parameter(self.num_chars, 300)  # input -> hidden
        self.b0 = nn.Parameter(1, 300)
        self.w1 = nn.Parameter(300, 300)  # hidden -> hidden (recurrent)
        self.b1 = nn.Parameter(1, 300)
        self.wf = nn.Parameter(300, 5)  # hidden -> per-language scores
        self.bf = nn.Parameter(1, 5)

    def run(self, xs):
        """
        Return language scores (batch_size x 5) for a word given as a list `xs`
        of L nodes, each one-hot encoded with shape (batch_size x num_chars).
        """
        state = nn.ReLU(nn.AddBias(nn.Linear(xs[0], self.w0), self.b0))
        for ch in xs[1:]:
            # Two-stage recurrent update, kept exactly as originally written:
            # a biased combination followed by an unbiased one feeding on it.
            state = nn.Add(nn.ReLU(nn.AddBias(nn.Linear(state, self.w1), self.b1)),
                           nn.ReLU(nn.AddBias(nn.Linear(ch, self.w0), self.b0)))
            state = nn.Add(nn.ReLU(nn.Linear(state, self.w1)),
                           nn.ReLU(nn.Linear(ch, self.w0)))
        return nn.AddBias(nn.Linear(state, self.wf), self.bf)

    def get_loss(self, xs, y):
        """Return softmax loss for word `xs` against one-hot labels `y` (batch_size x 5)."""
        return nn.SoftmaxLoss(self.run(xs), y)

    def train(self, dataset):
        """Train the model. (Body truncated at the end of this chunk.)"""
        while True:
            for x, y in dataset.iterate_once(self.batch_size):
                loss = self.get_loss(x, y)
                grad
"unknown") == "inactive": logg.warning("the service is already down once") return True for cmd in conf.getlist("Service", "ExecStop", []): exe, newcmd = self.exec_newcmd(cmd, env, conf) logg.info("%s stop %s", runs, shell_cmd(newcmd)) forkpid = os.fork() if not forkpid: self.execve_from(conf, newcmd, env) # pragma: no cover run = subprocess_waitpid(forkpid) if run.returncode and exe.check: returncode = run.returncode service_result = "failed" break if True: if returncode: self.set_status_from(conf, "ExecStopCode", strE(returncode)) self.write_status_from(conf, AS="failed") else: self.clean_status_from(conf) # "inactive" ### fallback Stop => Kill for ["simple","notify","forking"] elif not conf.getlist("Service", "ExecStop", []): logg.info("no ExecStop => systemctl kill") if True: self.do_kill_unit_from(conf) self.clean_pid_file_from(conf) self.clean_status_from(conf) # "inactive" elif runs in [ "simple", "notify", "idle" ]: status_file = self.get_status_file_from(conf) size = os.path.exists(status_file) and os.path.getsize(status_file) logg.info("STATUS %s %s", status_file, size) pid = 0 for cmd in conf.getlist("Service", "ExecStop", []): env["MAINPID"] = strE(self.read_mainpid_from(conf)) exe, newcmd = self.exec_newcmd(cmd, env, conf) logg.info("%s stop %s", runs, shell_cmd(newcmd)) forkpid = os.fork() if not forkpid: self.execve_from(conf, newcmd, env) # pragma: no cover run = subprocess_waitpid(forkpid) run = must_have_failed(run, newcmd) # TODO: a workaround # self.write_status_from(conf, MainPID=run.pid) # no ExecStop if run.returncode and exe.check: returncode = run.returncode service_result = "failed" break pid = to_intN(env.get("MAINPID")) if pid: if self.wait_vanished_pid(pid, timeout): self.clean_pid_file_from(conf) self.clean_status_from(conf) # "inactive" else: logg.info("%s sleep as no PID was found on Stop", runs) time.sleep(MinimumTimeoutStopSec) pid = self.read_mainpid_from(conf) if not pid or not pid_exists(pid) or pid_zombie(pid): 
self.clean_pid_file_from(conf) self.clean_status_from(conf) # "inactive" elif runs in [ "forking" ]: status_file = self.get_status_file_from(conf) pid_file = self.pid_file_from(conf) for cmd in conf.getlist("Service", "ExecStop", []): # active = self.is_active_from(conf) if pid_file: new_pid = self.read_mainpid_from(conf) if new_pid: env["MAINPID"] = strE(new_pid) exe, newcmd = self.exec_newcmd(cmd, env, conf) logg.info("fork stop %s", shell_cmd(newcmd)) forkpid = os.fork() if not forkpid: self.execve_from(conf, newcmd, env) # pragma: no cover run = subprocess_waitpid(forkpid) if run.returncode and exe.check: returncode = run.returncode service_result = "failed" break pid = to_intN(env.get("MAINPID")) if pid: if self.wait_vanished_pid(pid, timeout): self.clean_pid_file_from(conf) else: logg.info("%s sleep as no PID was found on Stop", runs) time.sleep(MinimumTimeoutStopSec) pid = self.read_mainpid_from(conf) if not pid or not pid_exists(pid) or pid_zombie(pid): self.clean_pid_file_from(conf) if returncode: if os.path.isfile(status_file): self.set_status_from(conf, "ExecStopCode", strE(returncode)) self.write_status_from(conf, AS="failed") else: self.clean_status_from(conf) # "inactive" else: logg.error("unsupported run type '%s'", runs) return False # POST sequence if not self.is_active_from(conf): env["SERVICE_RESULT"] = service_result for cmd in conf.getlist("Service", "ExecStopPost", []): exe, newcmd = self.exec_newcmd(cmd, env, conf) logg.info("post-stop %s", shell_cmd(newcmd)) forkpid = os.fork() if not forkpid: self.execve_from(conf, newcmd, env) # pragma: no cover run = subprocess_waitpid(forkpid) logg.debug("post-stop done (%s) <-%s>", run.returncode or "OK", run.signal or "") if _what_kind not in ["none", "keep"]: self.remove_service_directories(conf) return service_result == "success" def do_stop_socket_from(self, conf): runs = "socket" timeout = self.get_SocketTimeoutSec(conf) accept = conf.getbool("Socket", "Accept", "no") service_unit = 
self.get_socket_service_from(conf) service_conf = self.load_unit_conf(service_unit) if service_conf is None: logg.debug("unit could not be loaded (%s)", service_unit) logg.error("Unit %s not found.", service_unit) return False env = self.get_env(conf) if not self._quiet: okee = self.exec_check_unit(conf, env, "Socket", "ExecStop") if not okee and _no_reload: return False if not accept: # we do not listen but have the service started right away done = self.do_stop_service_from(service_conf) service_result = done and "success" or "failed" else: done = self.do_stop_service_from(service_conf) service_result = done and "success" or "failed" # service_directories = self.env_service_directories(conf) # env.update(service_directories) # POST sequence if not self.is_active_from(conf): env["SERVICE_RESULT"] = service_result for cmd in conf.getlist("Socket", "ExecStopPost", []): exe, newcmd = self.exec_newcmd(cmd, env, conf) logg.info("post-stop %s", shell_cmd(newcmd)) forkpid = os.fork() if not forkpid: self.execve_from(conf, newcmd, env) # pragma: no cover run = subprocess_waitpid(forkpid) logg.debug("post-stop done (%s) <-%s>", run.returncode or "OK", run.signal or "") return service_result == "success" def wait_vanished_pid(self, pid, timeout): if not pid: return True if not self.is_active_pid(pid): return True logg.info("wait for PID %s to vanish (%ss)", pid, timeout) for x in xrange(int(timeout)): time.sleep(1) # until TimeoutStopSec if not self.is_active_pid(pid): logg.info("wait for PID %s is done (%s.)", pid, x) return True logg.info("wait for PID %s failed (%s.)", pid, timeout) return False def reload_modules(self, *modules): """ [UNIT]... 
-- reload these units """ self.wait_system() found_all = True units = [] for module in modules: matched = self.match_units(to_list(module)) if not matched: logg.error("Unit %s not found.", unit_of(module)) self.error |= NOT_FOUND found_all = False continue for unit in matched: if unit not in units: units += [ unit ] return self.reload_units(units) and found_all def reload_units(self, units): """ fails if any unit fails to reload """ self.wait_system() done = True for unit in self.sortedAfter(units): if not self.reload_unit(unit): done = False return done def reload_unit(self, unit): conf = self.load_unit_conf(unit) if conf is None: logg.error("Unit %s not found.", unit) return False if self.not_user_conf(conf): logg.error("Unit %s not for --user mode", unit) return False return self.reload_unit_from(conf) def reload_unit_from(self, conf): if not conf: return False if self.syntax_check(conf) > 100: return False with waitlock(conf): logg.info(" reload unit %s => %s", conf.name(), strQ(conf.filename())) return self.do_reload_unit_from(conf) def do_reload_unit_from(self, conf): if conf.name().endswith(".service"): return self.do_reload_service_from(conf) elif conf.name().endswith(".socket"): service_unit = self.get_socket_service_from(conf) service_conf = self.load_unit_conf(service_unit) if service_conf: return self.do_reload_service_from(service_conf) else: logg.error("no %s found for unit type: %s", service_unit, conf.name()) return False elif conf.name().endswith(".target"): return self.do_reload_target_from(conf) else: logg.error("reload not implemented for unit type: %s", conf.name()) return False def do_reload_service_from(self, conf): runs = conf.get("Service", "Type", "simple").lower() env = self.get_env(conf) if not self._quiet: okee = self.exec_check_unit(conf, env, "Service", "ExecReload") if not okee and _no_reload: return False initscript = conf.filename() if self.is_sysv_file(initscript): status_file = self.get_status_file_from(conf) if initscript: 
newcmd = [initscript, "reload"] env["SYSTEMCTL_SKIP_REDIRECT"] = "yes" logg.info("%s reload %s", runs, shell_cmd(newcmd)) forkpid = os.fork() if not forkpid: self.execve_from(conf, newcmd, env) # pragma: nocover run = subprocess_waitpid(forkpid) self.set_status_from(conf, "ExecReloadCode", run.returncode) if run.returncode: self.write_status_from(conf, AS="failed") return False else: self.write_status_from(conf, AS="active") return True service_directories = self.env_service_directories(conf) env.update(service_directories) if runs in [ "simple", "notify", "forking", "idle" ]: if not self.is_active_from(conf): logg.info("no reload on inactive service %s", conf.name()) return True for cmd in conf.getlist("Service", "ExecReload", []): env["MAINPID"] = strE(self.read_mainpid_from(conf)) exe, newcmd = self.exec_newcmd(cmd, env, conf) logg.info("%s reload %s", runs, shell_cmd(newcmd)) forkpid = os.fork() if not forkpid: self.execve_from(conf, newcmd, env) # pragma: no cover run = subprocess_waitpid(forkpid) if run.returncode and exe.check: logg.error("Job for %s failed because the control process exited with error code. (%s)", conf.name(), run.returncode) return False time.sleep(MinimumYield) return True elif runs in [ "oneshot" ]: logg.debug("ignored run type '%s' for reload", runs) return True else: logg.error("unsupported run type '%s'", runs) return False def restart_modules(self, *modules): """ [UNIT]... 
-- restart these units """ found_all = True units = [] for module in modules: matched = self.match_units(to_list(module)) if not matched: logg.error("Unit %s not found.", unit_of(module)) self.error |= NOT_FOUND found_all = False continue for unit in matched: if unit not in units: units += [ unit ] return self.restart_units(units) and found_all def restart_units(self, units): """ fails if any unit fails to restart """ self.wait_system() done = True for unit in self.sortedAfter(units): if not self.restart_unit(unit): done = False return done def restart_unit(self, unit): conf = self.load_unit_conf(unit) if conf is None: logg.error("Unit %s not found.", unit) return False if self.not_user_conf(conf): logg.error("Unit %s not for --user mode", unit) return False return self.restart_unit_from(conf) def restart_unit_from(self, conf): if not conf: return False if self.syntax_check(conf) > 100: return False with waitlock(conf): if conf.name().endswith(".service"): logg.info(" restart service %s => %s", conf.name(), strQ(conf.filename())) if not self.is_active_from(conf): return self.do_start_unit_from(conf) else: return self.do_restart_unit_from(conf) else: return self.do_restart_unit_from(conf) def do_restart_unit_from(self, conf): logg.info("(restart) => stop/start %s", conf.name()) self.do_stop_unit_from(conf) return self.do_start_unit_from(conf) def try_restart_modules(self, *modules): """ [UNIT]... 
-- try-restart these units """ found_all = True units = [] for module in modules: matched = self.match_units(to_list(module)) if not matched: logg.error("Unit %s not found.", unit_of(module)) self.error |= NOT_FOUND found_all = False continue for unit in matched: if unit not in units: units += [ unit ] return self.try_restart_units(units) and found_all def try_restart_units(self, units): """ fails if any module fails to try-restart """ self.wait_system() done = True for unit in self.sortedAfter(units): if not self.try_restart_unit(unit): done = False return done def try_restart_unit(self, unit): """ only do 'restart' if 'active' """ conf = self.load_unit_conf(unit) if conf is None: logg.error("Unit %s not found.", unit) return False if self.not_user_conf(conf): logg.error("Unit %s not for --user mode", unit) return False with waitlock(conf): logg.info(" try-restart unit %s => %s", conf.name(), strQ(conf.filename())) if self.is_active_from(conf): return
self.__dict__ == other.__dict__ def __ne__(self, other: 'TableBodyCells') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other class TableCellKey(): """ A key in a key-value pair. :attr str cell_id: (optional) The unique ID of the key in the table. :attr TableElementLocation location: (optional) The numeric location of the identified element in the document, represented with two integers labeled `begin` and `end`. :attr str text: (optional) The text content of the table cell without HTML markup. """ def __init__(self, *, cell_id: str = None, location: 'TableElementLocation' = None, text: str = None) -> None: """ Initialize a TableCellKey object. :param str cell_id: (optional) The unique ID of the key in the table. :param TableElementLocation location: (optional) The numeric location of the identified element in the document, represented with two integers labeled `begin` and `end`. :param str text: (optional) The text content of the table cell without HTML markup. 
        """
        # Attributes are stored as-is; unknown-key validation happens in from_dict.
        self.cell_id = cell_id
        self.location = location
        self.text = text

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableCellKey':
        """Initialize a TableCellKey object from a json dictionary."""
        args = {}
        valid_keys = ['cell_id', 'location', 'text']
        # Reject unrecognized keys so schema drift surfaces as an error.
        bad_keys = set(_dict.keys()) - set(valid_keys)
        if bad_keys:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableCellKey: '
                + ', '.join(bad_keys))
        if 'cell_id' in _dict:
            args['cell_id'] = _dict.get('cell_id')
        if 'location' in _dict:
            # Nested model: delegate deserialization to the child class.
            args['location'] = TableElementLocation._from_dict(
                _dict.get('location'))
        if 'text' in _dict:
            args['text'] = _dict.get('text')
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableCellKey object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        # Only attributes that are present and non-None are serialized.
        _dict = {}
        if hasattr(self, 'cell_id') and self.cell_id is not None:
            _dict['cell_id'] = self.cell_id
        if hasattr(self, 'location') and self.location is not None:
            _dict['location'] = self.location._to_dict()
        if hasattr(self, 'text') and self.text is not None:
            _dict['text'] = self.text
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableCellKey object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableCellKey') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableCellKey') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other


class TableCellValues():
    """
    A value in a key-value pair.

    :attr str cell_id: (optional) The unique ID of the value in the table.
    :attr TableElementLocation location: (optional) The numeric location of the
          identified element in the document, represented with two integers
          labeled `begin` and `end`.
    :attr str text: (optional) The text content of the table cell without HTML
          markup.
    """

    def __init__(self,
                 *,
                 cell_id: str = None,
                 location: 'TableElementLocation' = None,
                 text: str = None) -> None:
        """
        Initialize a TableCellValues object.

        :param str cell_id: (optional) The unique ID of the value in the table.
        :param TableElementLocation location: (optional) The numeric location
               of the identified element in the document, represented with two
               integers labeled `begin` and `end`.
        :param str text: (optional) The text content of the table cell without
               HTML markup.
        """
        self.cell_id = cell_id
        self.location = location
        self.text = text

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableCellValues':
        """Initialize a TableCellValues object from a json dictionary."""
        args = {}
        valid_keys = ['cell_id', 'location', 'text']
        bad_keys = set(_dict.keys()) - set(valid_keys)
        if bad_keys:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableCellValues: '
                + ', '.join(bad_keys))
        if 'cell_id' in _dict:
            args['cell_id'] = _dict.get('cell_id')
        if 'location' in _dict:
            args['location'] = TableElementLocation._from_dict(
                _dict.get('location'))
        if 'text' in _dict:
            args['text'] = _dict.get('text')
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableCellValues object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self, 'cell_id') and self.cell_id is not None:
            _dict['cell_id'] = self.cell_id
        if hasattr(self, 'location') and self.location is not None:
            _dict['location'] = self.location._to_dict()
        if hasattr(self, 'text') and self.text is not None:
            _dict['text'] = self.text
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableCellValues object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableCellValues') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableCellValues') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other


class TableColumnHeaderIds():
    """
    An array of values, each being the `id` value of a column header that is
    applicable to the current cell.

    :attr str id: (optional) The `id` value of a column header.
    """

    def __init__(self, *, id: str = None) -> None:
        """
        Initialize a TableColumnHeaderIds object.

        :param str id: (optional) The `id` value of a column header.
        """
        self.id = id

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableColumnHeaderIds':
        """Initialize a TableColumnHeaderIds object from a json dictionary."""
        args = {}
        valid_keys = ['id']
        bad_keys = set(_dict.keys()) - set(valid_keys)
        if bad_keys:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableColumnHeaderIds: '
                + ', '.join(bad_keys))
        if 'id' in _dict:
            args['id'] = _dict.get('id')
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableColumnHeaderIds object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self, 'id') and self.id is not None:
            _dict['id'] = self.id
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableColumnHeaderIds object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableColumnHeaderIds') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableColumnHeaderIds') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other


class TableColumnHeaderTexts():
    """
    An array of values, each being the `text` value of a column header that is
    applicable to the current cell.

    :attr str text: (optional) The `text` value of a column header.
    """

    def __init__(self, *, text: str = None) -> None:
        """
        Initialize a TableColumnHeaderTexts object.

        :param str text: (optional) The `text` value of a column header.
        """
        self.text = text

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableColumnHeaderTexts':
        """Initialize a TableColumnHeaderTexts object from a json dictionary."""
        args = {}
        valid_keys = ['text']
        bad_keys = set(_dict.keys()) - set(valid_keys)
        if bad_keys:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableColumnHeaderTexts: '
                + ', '.join(bad_keys))
        if 'text' in _dict:
            args['text'] = _dict.get('text')
        return cls(**args)

    @classmethod
    def _from_dict(cls, _dict):
        """Initialize a TableColumnHeaderTexts object from a json dictionary."""
        return cls.from_dict(_dict)

    def to_dict(self) -> Dict:
        """Return a json dictionary representing this model."""
        _dict = {}
        if hasattr(self, 'text') and self.text is not None:
            _dict['text'] = self.text
        return _dict

    def _to_dict(self):
        """Return a json dictionary representing this model."""
        return self.to_dict()

    def __str__(self) -> str:
        """Return a `str` version of this TableColumnHeaderTexts object."""
        return json.dumps(self._to_dict(), indent=2)

    def __eq__(self, other: 'TableColumnHeaderTexts') -> bool:
        """Return `true` when self and other are equal, false otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other: 'TableColumnHeaderTexts') -> bool:
        """Return `true` when self and other are not equal, false otherwise."""
        return not self == other


class TableColumnHeaderTextsNormalized():
    """
    If you provide customization input, the normalized version of the column
    header texts according to the customization; otherwise, the same value as
    `column_header_texts`.

    :attr str text_normalized: (optional) The normalized version of a column
          header text.
    """

    def __init__(self, *, text_normalized: str = None) -> None:
        """
        Initialize a TableColumnHeaderTextsNormalized object.

        :param str text_normalized: (optional) The normalized version of a
               column header text.
        """
        self.text_normalized = text_normalized

    @classmethod
    def from_dict(cls, _dict: Dict) -> 'TableColumnHeaderTextsNormalized':
        """Initialize a TableColumnHeaderTextsNormalized object from a json dictionary."""
        args = {}
        valid_keys = ['text_normalized']
        bad_keys = set(_dict.keys()) - set(valid_keys)
        if bad_keys:
            raise ValueError(
                'Unrecognized keys detected in dictionary for class TableColumnHeaderTextsNormalized: '
                + ',
"""
Store and visualise the webclient status in the browser.

This module keeps track of whether user is logged in, displays the
LED status buttons for server, RFID reader status etc.
"""

import typing

import qailib.common.base as base
import qailib.common.serversocketbase as serversocketbase
import qailib.transcryptlib.websocket as websocket
import qailib.transcryptlib.serversocket as serversock
import qailib.transcryptlib.genutils as genutils
import qailib.transcryptlib.htmlelements as html
import qailib.transcryptlib.forms as forms
import qailib.transcryptlib.simpletable as simpletable

from webclient.commonmsg import CommonMSG

# Local aliases for the RFID state constants used by set_RFID_state().
RFID_ON = CommonMSG.RFID_ON
RFID_OFF = CommonMSG.RFID_OFF
RFID_TIMEOUT = CommonMSG.RFID_TIMEOUT

log = genutils.log

# NOTE: May not import wccontroller or wcviews

STARATTR_ONCLICK = html.base_element.STARATTR_ONCLICK

# Command strings dispatched via button-click messages in rcvMsg().
CMD_LOGOUT = 'logout'
CMD_TRY_RFID_SERVER = 'try_rfid_server'


class WCstatus(base.base_obj):
    """Visualise and store

    * the webclient's bluetooth status
    * logged-in status to QAIChangedct
    * the stocky webserver status over websocket.

    Also store the stock information on the webclient that was sent
    from the stocky server.
    """
    # the size of the spinners in pixels
    SPIN_SZ_PIXELS = 30

    # Status table dimensions: one row per indicator, LED + info columns.
    NUM_ROW = 3
    NUM_COL = 2
    # SRV: status of comms to stocky server
    # RFID: status of RFID reader
    # QAI: whether logged in to QAI
    SRV_ROW = 0
    RFID_ROW = 1
    QAI_ROW = 2

    LED_COL = 0
    INFO_COL = 1
    # set this to INFO_COL..
    QAI_UPD_COL = 1

    LOC_NOSEL_ID = "NOSEL_ID"
    LOC_NOSEL_NAME = "No Defined Location"

    def __init__(self, idstr: str,
                 ws: serversocketbase.base_server_socket,
                 msg_listener: base.base_obj,
                 login_popup: forms.modaldiv) -> None:
        """Initialise the webclient status bar.

        Args:
           idstr: the instance's name
           ws: the websocket used for communication with the stocky server.
           msg_listener: the object that should receive messages from the
              RFID websocket.
           login_popup: the popup to be used to log a user in.

        Note:
           All visual HTML elements of this class are built into a
           predefined div in the DOM called state-div. If this div is
           missing in the DOM, then nothing is built.
        """
        super().__init__(idstr)
        self._rfid_ws: typing.Optional[serversock.JSONserver_socket] = None
        self._server_ws = ws
        self._msg_listener = msg_listener
        # Observe server websocket up/down transitions (handled in rcvMsg).
        ws.addObserver(self, base.MSGD_COMMS_ARE_UP)
        ws.addObserver(self, base.MSGD_COMMS_ARE_DOWN)
        self._stat_is_loggedin = False
        self._stat_WS_isup = False
        # empty stock information.. these are set in _setstockdata
        self._stockloc_lst: typing.List[dict] = []
        self._locid_item_dct: dict = {}
        self._ritemdct: dict = {}
        # empty locmut data..
        self.locmut_hash = "bla"
        self.locmut_dct = {}
        self.srv_config_data: typing.Optional[typing.Dict[str, str]] = None
        #
        self.login_popup = login_popup
        # All widgets are built inside the 'state-div' DOM element.
        self.statediv = statediv = html.getPyElementById("state-div")
        if statediv is None:
            log('STATE DIV MISSSING')
            return
        else:
            log("STATE DIV OK")
        tabattrdct: typing.Dict[str, str] = {}
        mytab = self.mytab = simpletable.simpletable(statediv, "statetable",
                                                     tabattrdct,
                                                     WCstatus.NUM_ROW,
                                                     WCstatus.NUM_COL)
        # One LED per status row; all start RED (down / logged out).
        self.ledlst: typing.List[html.LEDElement] = []
        for title, rownum in [("Stocky Server Status", WCstatus.SRV_ROW),
                              ("RFID Scanner Status", WCstatus.RFID_ROW),
                              ("Click to log in to QAI", WCstatus.QAI_ROW)]:
            ledattrdct = {"title": title}
            cell = mytab.getcell(rownum, WCstatus.LED_COL)
            if cell is not None:
                newled = html.LEDElement(cell,
                                         'statusled',
                                         ledattrdct,
                                         None,
                                         html.LEDElement.RED)
                self.ledlst.append(newled)
            else:
                log("cell table error 1")
                return
            # mytab.set_alignment(rownum, WCstatus.INFO_COL, "center")
        # Clicking the RFID LED triggers a server probe (see rcvMsg).
        rfid_led = self.ledlst[WCstatus.RFID_ROW]
        rfid_led.setAttribute(STARATTR_ONCLICK, {'cmd': CMD_TRY_RFID_SERVER})
        rfid_led.addObserver(self, base.MSGD_BUTTON_CLICK)
        # the login led is an opener for the login form
        # login_popup.attach_opener(self.ledlst[WCstatus.QAI_ROW])

        # Set up the information for the QAI user name
        cell = mytab.getcell(WCstatus.QAI_ROW, WCstatus.INFO_COL)
        if cell is not None:
            self.uname_text = html.spantext(cell,
                                            "unametext",
                                            {'class': "w3-tag w3-red",
                                             "title": "Click here to log in to QAI"},
                                            "not logged in")
            # the txt is opener for the login form
            # login_popup.attach_opener(txt)
        else:
            log("cell table error 2")
            # self.uname_text = None
            return
        # install a general purpose busy spinner
        cell = mytab.getcell(WCstatus.SRV_ROW, WCstatus.QAI_UPD_COL)
        if cell is not None:
            spin_attrdct = {'title': "Server activity"}
            self.spinner = forms.spinner(cell, "busyspinner",
                                         spin_attrdct,
                                         forms.spinner.SPN_SPINNER,
                                         WCstatus.SPIN_SZ_PIXELS)
        else:
            log("cell table error 2a")
            return
        # Set up the QAI last update tag
        cell = mytab.getcell(WCstatus.QAI_ROW, WCstatus.QAI_UPD_COL)
        if cell is not None:
            ustr = "The time of last QAI Stock list download. (log in and download stock list to update)"
            self.qai_upd_text = html.spantext(cell,
                                              "unametext",
                                              {'class': "w3-tag w3-red",
                                               "title": ustr},
                                              "unknown")
        else:
            log("cell table error 2b")
            return
        # set up the RFID activity spinner
        cell = mytab.getcell(WCstatus.RFID_ROW, WCstatus.INFO_COL)
        if cell is not None:
            self.actspinner = forms.spinner(cell, "rfidspin",
                                            {"title": "RFID Scanner Activity"},
                                            forms.spinner.SPN_COG,
                                            WCstatus.SPIN_SZ_PIXELS)
        else:
            # self.actspinner = None
            log("cell table error 3")
            return

    def send_WS_msg(self, msg: CommonMSG) -> None:
        """Send a message to the server via websocket."""
        # Messages are silently dropped (with a console note) when the
        # websocket is down.
        if self.is_WS_up():
            self._server_ws.send(msg.as_dict())
        else:
            print("SEND IS NOOOT HAPPENING")

    def set_login_response(self, resdct: dict) -> None:
        """Set the visual QAI logged in status according to resdct.

        Args:
           resdct: expects an 'ok' boolean; on success, an optional
              'username' string is displayed.
        """
        statusled = self.ledlst[WCstatus.QAI_ROW]
        self._stat_is_loggedin = is_logged_in = resdct['ok']
        in_col = "w3-green"
        out_col = "w3-red"
        txt = self.uname_text
        if is_logged_in:
            # success:
            uname = labtext = resdct.get('username', 'unknown')
            statusled.setcolour(html.LEDElement.GREEN)
            statusled.setAttribute("title",
                                   "Not '{}'? Click here to log in".format(uname))
            txthelptext = "Logged in to QAI. Click here to log out"
            txt.removeClass(out_col)
            txt.addClass(in_col)
            # Clicking the name now issues a logout command instead of
            # opening the login popup.
            txt.setAttribute(STARATTR_ONCLICK, {'cmd': CMD_LOGOUT})
            # the username text is NOT an opener for the login form
            # self.login_popup.remove_opener(txt)
            txt.addObserver(self, base.MSGD_BUTTON_CLICK)
        else:
            # error:
            labtext = "not logged in"
            txthelptext = "Click here to log in to QAI"
            statusled.setcolour(html.LEDElement.RED)
            statusled.setAttribute("title", txthelptext)
            txt.removeClass(in_col)
            txt.addClass(out_col)
            txt.setAttribute(STARATTR_ONCLICK,
                             dict(msg=forms.modaldiv._OPN_MSG))
            # the username text is an opener for the login form
            self.login_popup.attach_opener(txt)
            txt.remObserver(self, base.MSGD_BUTTON_CLICK)
        txt.set_text(labtext)
        txt.setAttribute("title", txthelptext)

    def set_logout_status(self) -> None:
        """Set the visual status to 'logged out'"""
        self.set_login_response(dict(ok=False))

    def is_QAI_logged_in(self) -> bool:
        """Query the logged in status.

        Returns:
           True iff the user is logged in to QAI.
        """
        return self._stat_is_loggedin

    def set_RFID_state(self, newstate: int) -> None:
        """Set the visual RFID LED state.

        The LED colour is set to on (green), off (red) or timeout (ORANGE).

        Args:
           newstate: this should be one of the predefined constants defined
              in CommonMSG (RFID_ON, RFID_OFF, RFID_TIMEOUT)
        """
        statusled = self.ledlst[WCstatus.RFID_ROW]
        if newstate == RFID_ON:
            # set to green
            statusled.setcolour(html.LEDElement.GREEN)
        elif newstate == RFID_OFF:
            # set to red
            statusled.setcolour(html.LEDElement.RED)
        elif newstate == RFID_TIMEOUT:
            statusled.setcolour(html.LEDElement.YELLOW)
        else:
            print("INVALID RFID LED STATE!")

    def set_busy(self, isbusy: bool) -> None:
        """Set the state of the 'internet is busy' spinner.

        Args:
           isbusy: True makes the spinner spin. False makes it stop.
        """
        self.spinner.set_spin(isbusy)

    def set_rfid_activity(self, on: bool) -> None:
        """Set the RFID spinner on/off

        Args:
           on: True makes the spinner spin. False makes it stop.
        """
        self.actspinner.set_spin(on)

    def set_WS_state(self, is_up: bool) -> None:
        """Set the colour of the LED indicating websocket communication
        to the stocky server.

        Args:
           is_up: True if the server is up (green light displayed).
              False to down (red light displayed).
        """
        print("WC status : {}".format(is_up))
        statusled = self.ledlst[WCstatus.SRV_ROW]
        self._stat_WS_isup = is_up
        if is_up:
            # set to green
            statusled.setcolour(html.LEDElement.GREEN)
        else:
            # set to red
            statusled.setcolour(html.LEDElement.RED)
        # Login only makes sense while the server connection is up.
        self._enable_login_popup(is_up)

    def _enable_login_popup(self, do_enable: bool) -> None:
        """Enable or disable the login popup.

        Args:
           do_enable: True will enable the QAI login popup.

        Note:
           The popup should be disabled if the websocket comms are down,
           as its the stocky server that will ultimately communicate with
           the QAI server.
        """
        login_popup = self.login_popup
        txt = self.uname_text
        if do_enable:
            # the login led is an opener for the login form
            login_popup.attach_opener(self.ledlst[WCstatus.QAI_ROW])
            if self.is_QAI_logged_in():
                # the username text is NOT an opener for the login form
                login_popup.remove_opener(txt)
            else:
                # if we are NOT logged in to QAI already,
                # the txt is opener for the login form
                login_popup.attach_opener(txt)
        else:
            login_popup.remove_opener(self.ledlst[WCstatus.QAI_ROW])
            login_popup.remove_opener(txt)

    def is_WS_up(self) -> bool:
        """Return the status of the websocket communication to the
        stocky server.

        Returns:
           True iff communication to the stocky server is up.
        """
        return self._stat_WS_isup

    def rcvMsg(self,
               whofrom: base.base_obj,
               msgdesc: base.MSGdesc_Type,
               msgdat: typing.Optional[base.MSGdata_Type]) -> None:
        """Dispatch observer messages: button clicks and websocket
        up/down notifications from the stocky or RFID server sockets."""
        lverb = True
        if lverb:
            # print("{}.rcvMsg: {}: {} from {}".format(self._idstr, msgdesc, msgdat, whofrom._idstr))
            print("{}.rcvMsg: {} from {}".format(self._idstr, msgdesc, whofrom._idstr))
        if msgdesc == base.MSGD_BUTTON_CLICK:
            print("wcstatus GOT BUTTON CLICK msgdat={}".format(msgdat))
            if msgdat is None:
                print("msgdat is None")
                return
            cmd = msgdat.get("cmd", None)
            print("wcstatus GOT BUTTON CLICK CMD {}".format(cmd))
            if cmd == CMD_LOGOUT:
                # the logout button was pressed
                self.send_WS_msg(CommonMSG(CommonMSG.MSG_WC_LOGOUT_TRY, 1))
            elif cmd == CMD_TRY_RFID_SERVER:
                self._check_for_RFID_server()
            else:
                print('wcstatus: unrecognised cmd {}'.format(cmd))
                return
        elif msgdesc == base.MSGD_COMMS_ARE_UP:
            # this happens when the stocky server or RFID server websocket first comes online.
            # Use it for setting status and some initial data caching.
            print("COMMS ARE UP: {}".format(whofrom))
            if whofrom == self._server_ws:
                self.set_WS_state(True)
                self.refresh_locmut_dct()
            elif whofrom == self._rfid_ws:
                print("MSG FROM RFID server!!")
        elif msgdesc == base.MSGD_COMMS_ARE_DOWN:
            # this happens when the stocky server crashes, taking
            # the websocket connection with it
            print("COMMS ARE DOWN: {}".format(whofrom))
            if whofrom == self._server_ws:
                self.set_WS_state(False)
            elif whofrom == self._rfid_ws:
                self._rfid_ws = None
                self.set_RFID_state(RFID_OFF)

    def
# -*- coding: utf-8 -*-
# @todo: this file is becoming too large; refactor
import logging
import string

import attr
from pyparsing import (CaselessLiteral, FollowedBy, LineEnd, Literal,
                       OneOrMore, Optional, QuotedString, Suppress, Word,
                       ZeroOrMore)
from six.moves import reduce

from regparser.grammar import atomic, tokens, unified
from regparser.grammar.utils import Marker, QuickSearchable, WordBoundaries
from regparser.tree.paragraph import hash_for_paragraph, p_levels
from regparser.tree.reg_text import subjgrp_label

logger = logging.getLogger(__name__)

# "introductory text" / "subject heading" — both normalize to "text".
intro_text_marker = (
    (Marker("introductory") + WordBoundaries(CaselessLiteral("text"))) |
    (Marker("subject") + Marker("heading")).setParseAction(lambda _: "text")
)

of_connective = (Marker("of") | Marker("for") | Marker("to"))

# Passive-voice lead-ins; a leading "and" is recorded in `and_prefix`.
passive_marker = (
    Marker("is") | Marker("are") | Marker("was") | Marker("were") |
    Marker("and").setResultsName("and_prefix").setParseAction(
        lambda _: True))

and_token = Marker("and").setParseAction(lambda _: tokens.AndToken())


# Verbs
def generate_verb(word_list, verb, active):
    """Build a grammar producing a tokens.Verb from a list of trigger words.

    :param word_list: strings that trigger this verb (matched
        case-insensitively).
    :param verb: the tokens.Verb constant to emit on a match.
    :param active: if False, each trigger must be preceded by a passive
        lead-in ("is", "are", "was", "were", or "and").
    """
    literals = [CaselessLiteral(word) for word in word_list]
    if not active:
        literals = [passive_marker + lit for lit in literals]
    # Fold the alternatives together with MatchFirst ('|').
    alternatives = literals[0]
    for lit in literals[1:]:
        alternatives = alternatives | lit
    bounded = WordBoundaries(alternatives)
    # and_prefix is only set when the passive "and" lead-in matched.
    return bounded.setParseAction(
        lambda m: tokens.Verb(verb, active, bool(m.and_prefix)))


put_active = generate_verb(
    ['revising', 'revise', 'correcting', 'correct'],
    tokens.Verb.PUT, active=True)

put_passive = generate_verb(
    ['revised', 'corrected'], tokens.Verb.PUT, active=False)

post_active = generate_verb(['adding', 'add'], tokens.Verb.POST, active=True)

post_passive = generate_verb(['added'], tokens.Verb.POST, active=False)

delete_active = generate_verb(
    ['removing', 'remove'], tokens.Verb.DELETE, active=True)

delete_passive = generate_verb(['removed'], tokens.Verb.DELETE, active=False)
move_active = generate_verb(
    ['redesignating', 'redesignate'], tokens.Verb.MOVE, active=True)

move_passive = generate_verb(['redesignated'], tokens.Verb.MOVE, active=False)

designate_active = generate_verb(
    ['designate'], tokens.Verb.DESIGNATE, active=True)

reserve_active = generate_verb(['reserve', 'reserving'], tokens.Verb.RESERVE,
                               active=True)

insert_in_order = Literal("[insert-in-order]").setParseAction(
    lambda m: tokens.Verb(tokens.Verb.INSERT, active=True))


# Context
# Optional certainty lead-in ("in", "to", "of", "under [subheading]");
# its presence is recorded in the `certain` results name.
context_certainty = Optional(
    Marker("in") | Marker("to") | Marker("of") | (
        Marker("under") + Optional(
            Marker("subheading")))).setResultsName("certain")

interp = (
    context_certainty + atomic.comment_marker + unified.marker_part
).setParseAction(
    lambda m: tokens.Context([m.part, 'Interpretations'], bool(m.certain)))

# This may be a regtext paragraph or it may be an interpretation
paragraph_context = (
    atomic.section + unified.depth1_p + ~ FollowedBy("-")
).setParseAction(
    lambda m: tokens.Context([None, None, m.section, m.p1, m.p2, m.p3, m.p4,
                              m.plaintext_p5, m.plaintext_p6]))


def _paren_join(elements):
    # Wrap each non-empty element in parens: ['a', 'b'] -> '(a)(b)'.
    return '(' + ')('.join(el for el in elements if el) + ')'


def _paren_from_match(match):
    # Build the paren-joined paragraph string from a pyparsing match.
    values = [match.p1, match.p2, match.p3, match.p4, match.plaintext_p5,
              match.plaintext_p6]
    return _paren_join(values)


marker_subpart = (
    context_certainty + unified.marker_subpart
).setParseAction(
    lambda m: tokens.Context([None, 'Subpart:' + m.subpart], bool(m.certain)))

comment_context_with_section = (
    context_certainty +
    # Confusingly, these are sometimes "comments", sometimes "paragraphs"
    (Marker("comment") | Marker("paragraph")) +
    atomic.section + unified.depth1_p + ~ FollowedBy("-")
).setParseAction(lambda m: tokens.Context(
    [None, 'Interpretations', m.section, _paren_from_match(m)],
    bool(m.certain)))

# Mild modification of the above; catches "under 2(b)"
comment_context_under_with_section = (
    Marker("under") + atomic.section + unified.depth1_p
).setParseAction(lambda m: tokens.Context(
    [None, 'Interpretations', m.section, _paren_from_match(m)], True))

comment_context_without_section = (
    context_certainty + atomic.paragraph_marker + unified.depth2_p
).setParseAction(
    lambda m: tokens.Context(
        [None, 'Interpretations', None,
         _paren_join([m.p2, m.p3, m.p4, m.plaintext_p5, m.plaintext_p6])],
        bool(m.certain)))

appendix = (
    context_certainty + unified.marker_appendix +
    Optional(Marker("to") + unified.marker_part)
).setParseAction(
    lambda m: tokens.Context([m.part, 'Appendix:' + m.appendix],
                             bool(m.certain)))

section = (
    context_certainty + atomic.section_marker + unified.part_section
).setParseAction(
    lambda m: tokens.Context([m.part, None, m.section], bool(m.certain)))


# Paragraph components (used when not replacing the whole paragraph)
section_heading = Marker("heading").setParseAction(
    lambda _: tokens.Paragraph([], field=tokens.Paragraph.HEADING_FIELD))

intro_text = intro_text_marker.copy().setParseAction(
    lambda _: tokens.Paragraph([], field=tokens.Paragraph.TEXT_FIELD))


# Paragraphs
# Interpretation-comment paragraph: digits, then optional roman numeral,
# then optional upper-case level (e.g. "3.iv.C").
comment_p = (
    Word(string.digits).setResultsName("level2") +
    Optional(
        Suppress(".") + Word("ivxlcdm").setResultsName('level3') +
        Optional(
            Suppress(".") +
            Word(string.ascii_uppercase).setResultsName("level4"))))

section_heading_of = (
    Marker("heading") + of_connective + unified.marker_part_section
).setParseAction(
    lambda m: tokens.Paragraph.make(part=m.part, section=m.section,
                                    field=tokens.Paragraph.HEADING_FIELD))

section_paragraph_heading_of = (
    Marker("heading") + of_connective +
    (atomic.paragraph_marker | Marker("comment")) +
    atomic.section + unified.depth1_p
).setParseAction(
    lambda m: tokens.Paragraph.make(
        is_interp=True, section=m.section,
        paragraphs=[_paren_join([m.p1, m.p2, m.p3, m.p4, m.p5])],
        field=tokens.Paragraph.HEADING_FIELD))

appendix_subheading = (
    Marker("subheading") + unified.marker_appendix
).setParseAction(
    # Use '()' to pad the label out to what's expected of interpretations
    lambda m: tokens.Paragraph.make(
        is_interp=True, section=m.appendix,
        paragraphs=['()'],
        field=tokens.Paragraph.HEADING_FIELD))

paragraph_heading_of = (
    Marker("heading") + of_connective + unified.marker_paragraph.copy()
).setParseAction(
    lambda m: tokens.Paragraph.make(
        paragraphs=[m.p1, m.p2, m.p3, m.p4, m.plaintext_p5, m.plaintext_p6],
        field=tokens.Paragraph.KEYTERM_FIELD))

comment_heading = (
    Marker("heading") + Optional(of_connective) + atomic.section +
    unified.depth1_p
).setParseAction(
    lambda m: tokens.Paragraph.make(
        is_interp=True, section=m.section,
        paragraphs=[_paren_join([m.p1, m.p2, m.p3, m.p4, m.p5])],
        field=tokens.Paragraph.HEADING_FIELD))

# e.g. "introductory text of paragraph (a)(5)(ii)"
intro_text_of = (
    intro_text_marker + of_connective + atomic.paragraph_marker +
    unified.depth1_p
).setParseAction(
    lambda m: tokens.Paragraph.make(
        paragraphs=[m.p1, m.p2, m.p3, m.p4, m.plaintext_p5, m.plaintext_p6],
        field=tokens.Paragraph.TEXT_FIELD))

intro_text_of_interp = (
    intro_text_marker + of_connective + atomic.paragraph_marker + comment_p
).setParseAction(
    lambda m: tokens.Paragraph.make(
        is_interp=True,
        paragraphs=[None, m.level2, m.level3, m.level4],
        field=tokens.Paragraph.TEXT_FIELD))

single_par = (
    unified.marker_paragraph + Optional(intro_text_marker)
).setParseAction(
    lambda m: tokens.Paragraph.make(
        paragraphs=[m.p1, m.p2, m.p3, m.p4, m.plaintext_p5, m.plaintext_p6],
        # A trailing "text" marker means only the intro text is targeted.
        field=(tokens.Paragraph.TEXT_FIELD if m[-1] == 'text' else None)))

section_single_par = (
    unified.marker_part_section + unified.depth1_p +
    Optional(intro_text_marker)
).setParseAction(
    lambda m: tokens.Paragraph.make(
        part=m.part, section=m.section,
        paragraphs=[m.p1, m.p2, m.p3, m.p4, m.plaintext_p5, m.plaintext_p6],
        field=(tokens.Paragraph.TEXT_FIELD if m[-1] == 'text' else None)))

# Matches "paragraph (a)(1)(i) of § 12.44"
single_par_section = (
    Optional(atomic.paragraph_marker) + unified.depth1_p +
    of_connective + unified.marker_part_section
).setParseAction(
    lambda m: tokens.Paragraph.make(
        part=m.part, section=m.section,
        paragraphs=[m.p1, m.p2, m.p3,
                    m.p4, m.plaintext_p5, m.plaintext_p6]))

single_comment_with_section = (
    (Marker("comment") | Marker("paragraph")) +
    atomic.section + unified.depth1_p +
    "-" + Optional("(") + comment_p + Optional(")")
).setParseAction(
    lambda m: tokens.Paragraph.make(
        is_interp=True, section=m.section,
        paragraphs=[_paren_from_match(m), m.level2, m.level3, m.level4]))

single_comment_par = (
    atomic.paragraph_marker + comment_p
).setParseAction(
    lambda m: tokens.Paragraph.make(
        is_interp=True,
        paragraphs=[None, m.level2, m.level3, m.level4]))


# Token Lists
def make_multiple(to_repeat):
    """Shorthand for handling repeated tokens ('and', ',', 'through')"""
    # First match lands in `head`; each conjunction-joined repeat lands
    # in the `tail` list.
    return (
        (to_repeat + Optional(intro_text_marker)).setResultsName("head") +
        OneOrMore((
            atomic.conj_phrases + to_repeat + Optional(intro_text_marker)
        ).setResultsName("tail", listAllMatches=True))
    )


def _through_paren(prev_lab, next_lab):
    """Expand "through" for labels with embedded paragraphs (e.g. 12(c))"""
    lhs, rhs = prev_lab[-1], next_lab[-1]
    lhs_idx, rhs_idx = lhs.rindex('('), rhs.rindex('(')
    # Check if the previous and next labels are "through"-able. For example,
    # we can't compute A-14(a)(2) through B-14(a)(4) nor can we compute
    # A-14(a)(1) through A-14(b)(3)
    if lhs[:lhs_idx] != rhs[:rhs_idx] or prev_lab[:-1] != next_lab[:-1]:
        logger.warning("Bad use of 'through': %s %s", prev_lab, next_lab)
        return []
    else:
        prefix = lhs[:lhs_idx + 1]
        lhs, rhs = lhs[lhs_idx + 1:-1], rhs[rhs_idx + 1:-1]
        # Find the paragraph level both endpoints share, then emit the
        # labels strictly between them.
        for level in p_levels:
            if lhs in level and rhs in level:
                lidx, ridx = level.index(lhs), level.index(rhs)
                if lidx < ridx:
                    return [tokens.Paragraph.make(prev_lab[:-1] +
                                                  [prefix + level[i] + ')'])
                            for i in range(lidx + 1, ridx)]
        logger.warning("Error with 'through': %s %s", prev_lab, next_lab)
        return []


def _through_sect(prev_lab, next_lab):
    """Expand "through" for labels ending in a section number."""
    return [tokens.Paragraph.make(prev_lab[:2] + [str(i)])
            for i in range(int(prev_lab[-1]) + 1, int(next_lab[-1]))]


def _through_paragraph(prev_lab, next_lab):
    """Expand "through" for labels ending in a paragraph."""
    depth = len(prev_lab)
    # depth - 4 selects the p_levels entry for this nesting depth.
    start = p_levels[depth - 4].index(prev_lab[-1]) + 1
    end = p_levels[depth - 4].index(next_lab[-1])
    return [tokens.Paragraph.make(prev_lab[:depth - 1] +
                                  [p_levels[depth - 4][i]])
            for i in range(start, end)]


def make_par_list(listify, force_text_field=False):
    """Shorthand for turning a pyparsing match into a tokens.Paragraph"""
    def curried(match=None):
        pars = []
        matches = [match.head] + list(match.tail)
        for match in matches:
            match_as_list = listify(match)
            next_par = tokens.Paragraph.make(match_as_list)
            next_lab = next_par.label
            if match[-1] == 'text' or force_text_field:
                next_par = attr.assoc(next_par,
                                      field=tokens.Paragraph.TEXT_FIELD)
            if match.through:
                # Iterate through, creating paragraph tokens
                prev_lab = pars[-1].label
                if '(' in prev_lab[-1] and '(' in next_lab[-1]:
                    pars.extend(_through_paren(prev_lab, next_lab))
                elif len(prev_lab) == 3:
                    pars.extend(_through_sect(prev_lab, next_lab))
                elif len(prev_lab) > 3:
                    pars.extend(_through_paragraph(prev_lab, next_lab))
            pars.append(next_par)
        return tokens.TokenList(pars)
    return curried


multiple_sections = (
    atomic.sections_marker + make_multiple(unified.part_section)
).setParseAction(make_par_list(lambda m: [m.part, None, m.section]))

multiple_paragraph_sections = (
    atomic.section_marker + make_multiple(Optional(unified.part_section) +
                                          unified.any_depth_p)
).setParseAction(make_par_list(lambda m: [
    m.part, None, m.section, m.p1, m.p2, m.p3, m.p4, m.plaintext_p5,
    m.plaintext_p6]))

appendix_section = unified.appendix_with_section.copy().setParseAction(
    lambda m: tokens.Paragraph.make(appendix=m.appendix,
                                    section=m.appendix_section))

appendix_section_heading_of = (
    Marker("heading") + of_connective + unified.appendix_with_section
).copy().setParseAction(
    lambda m: tokens.Paragraph.make(
        appendix=m.appendix, section=m.appendix_section,
        field=tokens.Paragraph.HEADING_FIELD))

multiple_appendices = make_multiple(
    unified.appendix_with_section
).setParseAction(make_par_list(
    lambda m: [None, 'Appendix:' + m.appendix, m.appendix_section]))

multiple_comment_pars = (
    atomic.paragraphs_marker + make_multiple(comment_p)
).setParseAction(make_par_list(lambda m: [
    None, 'Interpretations', None, None, m.level2, m.level3, m.level4]))

# Not a context as one wouldn't list these for contextual purposes
multiple_comments = (
    Marker("comments") + make_multiple(atomic.section + unified.depth1_p)
).setParseAction(make_par_list(lambda m: [
    None, 'Interpretations', m.section, _paren_from_match(m)]))

multiple_interp_entries = (
    Marker("entries") + Marker("for") +
    (atomic.section + unified.depth1_p).setResultsName("head") +
    OneOrMore((
        atomic.conj_phrases + unified.any_depth_p
    ).setResultsName("tail", listAllMatches=True))
).setParseAction(make_par_list(
    lambda m: [None, None, m.section, m.p1, m.p2, m.p3, m.p4,
               m.plaintext_p5, m.plaintext_p6]))

multiple_paragraphs = (
    (atomic.paragraphs_marker | atomic.paragraph_marker) +
    make_multiple(unified.any_depth_p)
).setParseAction(make_par_list(lambda m: [
    m.part, None, m.section, m.p1, m.p2,
    m.p3, m.p4, m.plaintext_p5, m.plaintext_p6]))

# e.g. "introductory text of paragraphs (a)(5)(ii) and (d)(5)(ii)"
multiple_intro_text_of = (
    intro_text_marker + of_connective + atomic.paragraphs_marker +
    make_multiple(unified.any_depth_p)
).setParseAction(make_par_list(
    lambda m: [None, None, None, m.p1, m.p2, m.p3, m.p4, m.plaintext_p5,
               m.plaintext_p6],
    force_text_field=True))


def tokenize_override_ps(match):
    """ Create token.Paragraphs for the given override match """
    # Part, Section or Appendix, p1, p2, p3, p4, p5, p6
    match_list = list(match)
    par_list = [match.part, None, None, None, None, None, None, None]

    if match.section:
        par_list[1] = match.section
    elif match.appendix:
        par_list[1] = "Appendix:" + match.appendix

    # Set paragraph depths
    for p in match_list[2:]:
        par_list[match_list.index(p)] = p

    par = tokens.Paragraph.make(par_list)
    return [par]


# Keyterm label component, e.g. keyterm(Some Phrase) -> 'p<hash>'.
_keyterm_label_part = (
    Suppress(Marker("keyterm")) +
    QuotedString(quoteChar='(', endQuoteChar=')')
).setParseAction(lambda m: "p{0}".format(hash_for_paragraph(m[0])))

_simple_label_part = Word(string.ascii_lowercase + string.ascii_uppercase +
                          string.digits)

_label_part = _keyterm_label_part | _simple_label_part

# Explicit override markers like [label:478-32-a-p1234]
override_label = (
    Suppress("[") + Marker("label") + Suppress(":") +
    atomic.part + Suppress("-") +
    (atomic.section | atomic.appendix) +
    ZeroOrMore(Suppress("-") + _label_part) +
    Suppress("]")
).setParseAction(tokenize_override_ps)

# Looks like: [subject-group(Some text Goes Here)]
subject_group = (
    context_certainty +
    Suppress("[subject-group") +
    QuotedString(quoteChar='(',
                 endQuoteChar=')').setResultsName("subgroup") +
    Suppress("]")
).setParseAction(lambda m: tokens.Context(
    [None, 'Subjgrp:' + subjgrp_label(m.subgroup, [])],
    bool(m.certain)))

# Phrases like '“Nonimmigrant visa”' become 'p12345678'
_double_quote_label = QuotedString(
    quoteChar=u'“', endQuoteChar=u'”'
).setParseAction(lambda m: "p{0}".format(hash_for_paragraph(m[0])))

# Phrases like
518918400), (3839, 122, 17, 983, 207567360), (3840, 123, 17, 1073, 389188800), (3841, 124, 17, 127, 13343616), (3842, 125, 17, 12911, 544864320), (3843, 126, 17, -5693, 518918400), (3844, 127, 17, -37381, 4670265600), (3845, 72, 18, 419, 2113413120), (3846, 73, 18, 6133, 6706022400), (3847, 74, 18, 1087, 958003200), (3848, 75, 18, -205, 332107776), (3849, 76, 18, -26027, 10674892800), (3850, 77, 18, -4649, 2490808320), (3851, 78, 18, -461, 1162377216), (3852, 79, 18, 37391, 348713164800), (3853, 80, 18, 6703, 1779148800), (3854, 81, 18, 5807, 518918400), (3855, 82, 18, 12449, 1660538880), (3856, 83, 18, -118087, 37362124800), (3857, 84, 18, -28051, 12454041600), (3858, 85, 18, 2269, 1135134000), (3859, 86, 18, 14383, 1037836800), (3860, 87, 18, 1069, 59304960), (3861, 88, 18, -3137, 29059430400), (3862, 89, 18, 6817, 12454041600), (3863, 90, 18, 41, 53222400), (3864, 91, 18, -626359, 43589145600), (3865, 92, 18, 102953, 20756736000), (3866, 93, 18, 4471, 415134720), (3867, 94, 18, 26863, 5811886080), (3868, 95, 18, -787, 24908083200), (3869, 96, 18, 551, 197683200), (3870, 97, 18, 1057, 74131200), (3871, 98, 18, -2803, 968647680), (3872, 99, 18, -5333, 372556800), (3873, 100, 18, -68597, 5811886080), (3874, 101, 18, 4951, 943488000), (3875, 102, 18, 149, 79073280), (3876, 103, 18, -318893, 21794572800), (3877, 104, 18, -59, 8515584), (3878, 105, 18, 76777, 8717829120), (3879, 106, 18, 30497, 2075673600), (3880, 107, 18, -213949, 29059430400), (3881, 108, 18, 444607, 29059430400), (3882, 109, 18, -293609, 29059430400), (3883, 110, 18, 37127, 12454041600), (3884, 111, 18, 257, 88957440), (3885, 112, 18, -3277, 5811886080), (3886, 113, 18, 4451, 2874009600), (3887, 114, 18, -6569, 1037836800), (3888, 115, 18, -6079, 290594304), (3889, 116, 18, 5153, 830269440), (3890, 117, 18, 239, 118609920), (3891, 118, 18, -41, 53222400), (3892, 119, 18, 127, 10063872), (3893, 120, 18, -11183, 1383782400), (3894, 121, 18, -9643, 461260800), (3895, 122, 18, -101, 251596800), (3896, 
123, 18, -118453, 9340531200), (3897, 124, 18, -6707, 968647680), (3898, 125, 18, 1973, 129153024), (3899, 126, 18, -7453, 1779148800), (3900, 127, 18, 3149, 209563200), (3901, 72, 19, 1627, 24908083200), (3902, 73, 19, 107, 620928000), (3903, 74, 19, -529, 1482624000), (3904, 75, 19, -13861, 7472424960), (3905, 76, 19, -2269, 830269440), (3906, 77, 19, -58021, 31135104000), (3907, 78, 19, -124067, 217945728000), (3908, 79, 19, -83, 2152550400), (3909, 80, 19, 43, 34594560), (3910, 81, 19, 839, 432432000), (3911, 82, 19, -19003, 4981616640), (3912, 83, 19, -269, 29189160), (3913, 84, 19, -697, 129729600), (3914, 85, 19, -257, 726485760), (3915, 86, 19, 3463, 691891200), (3916, 87, 19, 19559, 5448643200), (3917, 88, 19, -17011, 4151347200), (3918, 89, 19, -23, 56609280), (3919, 90, 19, 100117, 43589145600), (3920, 91, 19, -7781, 7264857600), (3921, 92, 19, 2269, 1297296000), (3922, 93, 19, 1901, 807206400), (3923, 94, 19, -22679, 12454041600), (3924, 95, 19, -191, 79833600), (3925, 96, 19, 953, 1297296000), (3926, 97, 19, 25447, 3632428800), (3927, 98, 19, 2749, 908107200), (3928, 99, 19, 647, 415134720), (3929, 100, 19, -2719, 3632428800), (3930, 101, 19, -527, 10378368000), (3931, 102, 19, -15493, 1585059840), (3932, 103, 19, -10783, 461260800), (3933, 104, 19, -12121, 778377600), (3934, 105, 19, -43943, 72648576000), (3935, 106, 19, -6439, 3962649600), (3936, 107, 19, -54211, 2905943040), (3937, 108, 19, 83, 159667200), (3938, 109, 19, 19967, 4843238400), (3939, 110, 19, 101, 79833600), (3940, 111, 19, 5363, 3632428800), (3941, 112, 19, 1927, 1779148800), (3942, 113, 19, 20693, 9340531200), (3943, 114, 19, 67, 134534400), (3944, 115, 19, -613, 1089728640), (3945, 116, 19, 2621, 4151347200), (3946, 117, 19, -1333, 249080832), (3947, 118, 19, -3127, 553512960), (3948, 119, 19, 1891, 444787200), (3949, 120, 19, -779, 4151347200), (3950, 121, 19, 60661, 14529715200), (3951, 122, 19, -4421, 1067489280), (3952, 123, 19, -807, 44844800), (3953, 124, 19, -22969, 
1383782400), (3954, 125, 19, 4033, 13076743680), (3955, 126, 19, -81797, 10897286400), (3956, 127, 19, 661, 269068800), (3957, 72, 20, 703, 2724321600), (3958, 73, 20, 37391, 21794572800), (3959, 74, 20, 9161, 2075673600), (3960, 75, 20, 34631, 6227020800), (3961, 76, 20, 107, 29652480), (3962, 77, 20, 3937, 3113510400), (3963, 78, 20, 103721, 261534873600), (3964, 79, 20, 47549, 261534873600), (3965, 80, 20, 331, 69189120), (3966, 81, 20, 1933, 94348800), (3967, 82, 20, 1361, 46126080), (3968, 83, 20, 121, 7862400), (3969, 84, 20, 893, 1037836800), (3970, 85, 20, -503, 792529920), (3971, 86, 20, 199, 12972960), (3972, 87, 20, 83, 3706560), (3973, 88, 20, -53, 3459456), (3974, 89, 20, -3043, 103783680), (3975, 90, 20, -43, 5189184), (3976, 91, 20, -67, 1425600), (3977, 92, 20, 881, 148262400), (3978, 93, 20, 7099, 415134720), (3979, 94, 20, 1583, 148262400), (3980, 95, 20, -1567, 259459200), (3981, 96, 20, -5329, 807206400), (3982, 97, 20, 233, 25945920), (3983, 98, 20, -3277, 86486400), (3984, 99, 20, -7277, 86486400), (3985, 100, 20, -619, 18532800), (3986, 101, 20, 211, 15966720), (3987, 102, 20, 49, 1347840), (3988, 103, 20, 521, 19219200), (3989, 104, 20, 167, 47174400), (3990, 105, 20, 4201, 3353011200), (3991, 106, 20, 31, 1153152), (3992, 107, 20, -7493, 172972800), (3993, 108, 20, -9109, 86486400), (3994, 109, 20, -8101, 74131200), (3995, 110, 20, 601, 207567360), (3996, 111, 20, 17, 311351040), (3997, 112, 20, -359, 26611200), (3998, 113, 20, -7127, 518918400), (3999, 114, 20, -893, 51891840), (4000, 115, 20, -15641, 259459200), (4001, 116, 20, 4987, 415134720), (4002, 117, 20, 41, 3706560), (4003, 118, 20, -1381, 69189120), (4004, 119, 20, -47, 2217600), (4005, 120, 20, -7, 183040), (4006, 121, 20, -24977, 172972800), (4007, 122, 20, 15823, 1245404160), (4008, 123, 20, 5519, 311351040), (4009, 124, 20, 41, 15724800), (4010, 125, 20, 43181, 16345929600), (4011, 126, 20, -361, 19219200), (4012, 127, 20, -10289, 129729600), (4013, 72, 21, 4993, 5811886080), 
(4014, 73, 21, 78467, 14529715200), (4015, 74, 21, 20173, 1556755200), (4016, 75, 21, 6089, 415134720), (4017, 76, 21, 523, 69189120), (4018, 77, 21, 44189, 37362124800), (4019, 78, 21, 17, 419126400), (4020, 79, 21, 19711, 72648576000), (4021, 80, 21, 35183, 2075673600), (4022, 81, 21, 7373, 103783680), (4023, 82, 21, 213937, 2075673600), (4024, 83, 21, 12503, 207567360), (4025, 84, 21, 1102501, 87178291200), (4026, 85, 21, 108611, 43589145600), (4027, 86, 21, 3439, 51891840), (4028, 87, 21, 397, 2948400), (4029, 88, 21, 1537, 31449600), (4030, 89, 21, -23249, 990662400), (4031, 90, 21, 83, 5765760), (4032, 91, 21, -323, 5241600), (4033, 92, 21, 239, 10644480), (4034, 93, 21, 3233, 47174400), (4035, 94, 21, 7097, 115315200), (4036, 95, 21, 23, 1956864), (4037, 96, 21, -295523, 87178291200), (4038, 97, 21, 18971, 259459200), (4039, 98, 21, 1571, 86486400), (4040, 99, 21, -137251, 1210809600), (4041, 100, 21, -10151, 172972800), (4042, 101, 21, 22619, 518918400), (4043, 102, 21, 6277, 53222400), (4044, 103, 21, 6829, 74131200), (4045, 104, 21, 4561, 198132480), (4046, 105, 21, 35333, 2905943040), (4047, 106, 21, 16493, 103783680), (4048, 107, 21, 1315, 13837824), (4049, 108, 21, -11845, 145297152), (4050, 109, 21, -13009, 89689600), (4051, 110, 21, 5711, 415134720), (4052, 111, 21, 29053, 1556755200), (4053, 112, 21, -11159, 1037836800), (4054, 113, 21, -26683, 1452971520), (4055, 114, 21, -11867, 518918400), (4056, 115, 21, -30223, 259459200), (4057, 116, 21, 9941, 207567360), (4058, 117, 21, 5209, 69189120), (4059, 118, 21, 211, 26611200), (4060, 119, 21, -7493, 622702080), (4061, 120, 21, -433, 31449600), (4062, 121, 21, -2107, 9884160), (4063, 122, 21, 16633, 415134720), (4064, 123, 21, 4267, 74131200), (4065, 124, 21, 10463, 566092800), (4066, 125, 21, 7733, 396264960), (4067, 126, 21, 23, 985600), (4068, 127, 21, -744239, 10897286400), (4069, 72, 22, 23861, 29059430400), (4070, 73, 22, 1747, 372556800), (4071, 74, 22, 60019, 6227020800), (4072, 75, 22, 2879, 
377395200), (4073, 76, 22, -12781, 24908083200), (4074, 77, 22, -11207, 2668723200), (4075, 78, 22, -16829, 8072064000), (4076, 79, 22, -12049, 58118860800), (4077, 80, 22, 6959, 415134720), (4078, 81, 22, 24047, 370656000), (4079, 82, 22, 69103, 830269440), (4080, 83, 22, 1585607, 43589145600), (4081, 84, 22, -23, 111196800), (4082, 85, 22, -30937, 54486432000), (4083, 86, 22, 18869, 259459200), (4084, 87, 22, 158101, 1037836800), (4085, 88, 22, 385001, 4843238400), (4086, 89, 22, 194297, 43589145600), (4087, 90, 22, 193, 4612608), (4088, 91, 22, -2281, 1037836800), (4089, 92, 22, 11449, 494208000), (4090, 93, 22, 5903, 86486400), (4091, 94, 22, 178897, 2905943040), (4092, 95, 22, 166609, 9686476800), (4093, 96, 22, 1559, 622702080), (4094, 97, 22, 3671,
display width of string values to at least 10 (it's annoying that SPSS displays e.g. a one-character variable in very narrow columns). This also sets all measurement levels to "unknown" and all variable alignments to "left". This function is only called if column widths, measurement levels and variable alignments are None.""" columnWidths = {} for varName, varType in self.varTypes.iteritems(): # zero = appropriate width determined by spss columnWidths[varName] = 10 if 0 < varType < 10 else 0 self.columnWidths = columnWidths self.measureLevels = dict([(v, "unknown") for v in self.varNames]) self.alignments = dict([(v, "left") for v in self.varNames]) @property @decode def alignments(self): """Get/Set VARIABLE ALIGNMENT. Returns/Takes a dictionary of the form {varName: alignment} Valid alignment values are: left, right, center. If used, variable alignment, measurement level and column width all need to be set. """ func = self.spssio.spssGetVarAlignment alignments = {0: "left", 1: "right", 2: "center"} alignment_ = c_int() varAlignments = {} for varName in self.varNames: vName = self.vNames[varName] retcode = func(c_int(self.fh), c_char_p(vName), byref(alignment_)) alignment = alignments[alignment_.value] varAlignments[varName] = alignment if retcode > 0: msg = ("Error getting variable alignment: %r" % varName) raise SPSSIOError(msg, retcode) return varAlignments @alignments.setter def alignments(self, varAlignments): if not varAlignments: return func = self.spssio.spssSetVarAlignment alignments = {"left": 0, "right": 1, "center": 2} for varName, varAlignment in varAlignments.iteritems(): if varAlignment.lower() not in alignments: msg = "Valid alignments are %" raise ValueError(msg % ", ".join(alignments.keys())) alignment = alignments.get(varAlignment.lower()) retcode = func(c_int(self.fh), c_char_p(varName), c_int(alignment)) if retcode > 0: msg = "Error setting variable alignment for variable %r" raise SPSSIOError(msg % varName, retcode) @property @decode def 
varSets(self): """Get/Set VARIABLE SET information. Returns/Takes a dictionary with SETNAME as keys and a list of SPSS variables as values. For example: {'SALARY': ['salbegin', 'salary'], 'DEMOGR': ['gender', 'minority', 'educ']}""" varSets = c_char_p() func = self.spssio.spssGetVariableSets retcode = func(c_int(self.fh), byref(varSets)) if retcode > 0: msg = "Problem getting variable set information" raise SPSSIOError(msg, retcode) if not varSets.value: return {} varSets_ = {} for varSet in varSets.value.split("\n")[:-1]: k, v = varSet.split("= ") varSets_[k] = v.split() # clean up self.freeMemory("spssFreeVariableSets", varSets) return varSets_ @varSets.setter def varSets(self, varSets): if not varSets: return varSets_ = [] for varName, varSet in varSets.iteritems(): varSets_.append("%s= %s" % (varName, " ".join(varSet))) varSets_ = c_char_p("\n".join(varSets_)) retcode = self.spssio.spssSetVariableSets(c_int(self.fh), varSets_) if retcode > 0: msg = "Problem setting variable set information" raise SPSSIOError(msg, retcode) @property @decode def varRoles(self): """Get/Set VARIABLE ROLES. 
Returns/Takes a dictionary of the form {varName: varRole}, where varRoles may be any of the following: 'both', 'frequency', 'input', 'none', 'partition', 'record ID', 'split', 'target'""" func = self.spssio.spssGetVarRole roles = {0: "input", 1: "target", 2: "both", 3: "none", 4: "partition", 5: "split", 6: "frequency", 7: "record ID"} varRoles = {} varRole_ = c_int() for varName in self.varNames: vName = self.vNames[varName] retcode = func(c_int(self.fh), c_char_p(vName), byref(varRole_)) varRole = roles.get(varRole_.value) varRoles[varName] = varRole if retcode > 0: msg = "Problem getting variable role for variable %r" raise SPSSIOError(msg % varName, retcode) return varRoles @varRoles.setter def varRoles(self, varRoles): if not varRoles: return roles = {"input": 0, "target": 1, "both": 2, "none": 3, "partition": 4, "split": 5, "frequency": 6, "record ID": 7} func = self.spssio.spssSetVarRole for varName, varRole in varRoles.iteritems(): varRole = roles.get(varRole) retcode = func(c_int(self.fh), c_char_p(varName), c_int(varRole)) if retcode > 0: msg = "Problem setting variable role %r for variable %r" raise SPSSIOError(msg % (varRole, varName), retcode) @property @decode def varAttributes(self): """Get/Set VARIABLE ATTRIBUTES. 
Returns/Takes dictionary of the form: {'var1': {'attr name x': 'attr value x','attr name y': 'attr value y'}, 'var2': {'attr name a': 'attr value a','attr name b': 'attr value b'}} """ # abbreviation for readability and speed func = self.spssio.spssGetVarAttributes # initialize arrays MAX_ARRAY_SIZE = 1000 attrNamesArr = (POINTER(c_char_p * MAX_ARRAY_SIZE))() attrValuesArr = (POINTER(c_char_p * MAX_ARRAY_SIZE))() attributes = {} for varName in self.varNames: vName = self.vNames[varName] # step 1: get array size nAttr = c_int() retcode = func(c_int(self.fh), c_char_p(vName), byref(attrNamesArr), byref(attrValuesArr), byref(nAttr)) if retcode > 0: msg = "@Problem getting attributes of variable %r (step 1)" raise SPSSIOError(msg % varName, retcode) # step 2: get attributes with arrays of proper size nAttr = c_int(nAttr.value) attrNamesArr = (POINTER(c_char_p * nAttr.value))() attrValuesArr = (POINTER(c_char_p * nAttr.value))() retcode = func(c_int(self.fh), c_char_p(vName), byref(attrNamesArr), byref(attrValuesArr), byref(nAttr)) if retcode > 0: msg = "Problem getting attributes of variable %r (step 2)" raise SPSSIOError(msg % varName, retcode) # get array contents if not nAttr.value: continue k, v, n = attrNamesArr[0], attrValuesArr[0], nAttr.value attribute = dict([(k[i], v[i]) for i in xrange(n)]) attributes[varName] = attribute # clean up args = (attrNamesArr, attrValuesArr, nAttr) self.freeMemory("spssFreeAttributes", *args) return attributes @varAttributes.setter def varAttributes(self, varAttributes): if not varAttributes: return func = self.spssio.spssSetVarAttributes for varName in self.varNames: attributes = varAttributes.get(varName) if not attributes: continue nAttr = len(attributes) attrNames = (c_char_p * nAttr)(*attributes.keys()) attrValues = (c_char_p * nAttr)(*attributes.values()) retcode = func(c_int(self.fh), c_char_p(varName), pointer(attrNames), pointer(attrValues), c_int(nAttr)) if retcode > 0: msg = "Problem setting variable attributes for 
variable %r" raise SPSSIOError(msg % varName, retcode) @property @decode def fileAttributes(self): """Get/Set DATAFILE ATTRIBUTES. Returns/Takes a dictionary of the form: {'attrName[1]': 'attrValue1', 'revision[1]': '2010-10-09', 'revision[2]': '2010-10-22', 'revision[3]': '2010-11-19'} """ # abbreviation for readability func = self.spssio.spssGetFileAttributes # step 1: get array size MAX_ARRAY_SIZE = 100 # assume never more than 100 file attributes attrNamesArr = (POINTER(c_char_p * MAX_ARRAY_SIZE))() attrValuesArr = (POINTER(c_char_p * MAX_ARRAY_SIZE))() nAttr = c_int() retcode = func(c_int(self.fh), byref(attrNamesArr), byref(attrValuesArr), byref(nAttr)) # step 2: get attributes with arrays of proper size nAttr = c_int(nAttr.value) attrNamesArr = (POINTER(c_char_p * nAttr.value))() attrValuesArr = (POINTER(c_char_p * nAttr.value))() retcode = func(c_int(self.fh), byref(attrNamesArr), byref(attrValuesArr), byref(nAttr)) if retcode > 0: raise SPSSIOError("Problem getting file attributes", retcode) # get array contents if not nAttr.value: return {} k, v = attrNamesArr[0], attrValuesArr[0] attributes = dict([(k[i], v[i]) for i in xrange(nAttr.value)]) # clean up args = (attrNamesArr, attrValuesArr, nAttr) self.freeMemory("spssFreeAttributes", *args) return attributes @fileAttributes.setter def fileAttributes(self, fileAttributes): if not fileAttributes: return attributes, valueLens = {}, [] for name, values in fileAttributes.iteritems(): valueLens.append(len(values)) for value in values: attributes[name] = value #nAttr = len(fileAttributes) nAttr = max(valueLens) # n elements per vector. But this may vary?? 
attrNames = (c_char_p * nAttr)(*attributes.keys()) attrValues = (c_char_p * nAttr)(*attributes.values()) func = self.spssio.spssSetFileAttributes retcode = func(c_int(self.fh), pointer(attrNames), pointer(attrValues), c_int(nAttr)) if retcode > 0: raise SPSSIOError("Problem setting file attributes", retcode) def _getMultRespDef(self, mrDef): """Get 'normal' multiple response defintions. This is a helper function for the multRespDefs getter function. A multiple response definition <mrDef> in the string format returned by the IO module is converted into a multiple response definition of the form multRespSet = {<setName>: {"setType": <setType>, "label": <lbl>, "varNames": <list_of_varNames>}}. SetType may be either 'D' (multiple dichotomy sets) or 'C' (multiple category sets). If setType is 'D', the multiple response definition also includes '"countedValue": countedValue'""" regex = "\$(?P<setName>\w+)=(?P<setType>[CD])\n?" m = re.search(regex + ".*", mrDef, re.I | re.U) if not m: return {} setType = m.group("setType") if setType == "C": # multiple category sets regex += " (?P<lblLen>\d+) (?P<lblVarNames>.+) ?\n?" matches = re.findall(regex, mrDef, re.I) setName, setType, lblLen, lblVarNames = matches[0] else: # multiple dichotomy sets regex += ("(?P<valueLen>\d+) (?P<countedValue>\w+)" + " (?P<lblLen>\d+) (?P<lblVarNames>.+) ?\n?") matches = re.findall(regex, mrDef, re.I | re.U) setName, setType, valueLen = matches[0][:3] countedValue, lblLen, lblVarNames = matches[0][3:] lbl = lblVarNames[:int(lblLen)] varNames = lblVarNames[int(lblLen):].split() multRespSet = {setName: {"setType": setType, "label": lbl, "varNames": varNames}} if setType == "D": multRespSet[setName]["countedValue"] = countedValue return multRespSet def _setMultRespDefs(self, multRespDefs): """Set 'normal' multiple response defintions. This is a helper function for the multRespDefs setter function. 
It translates the multiple response definition, specified as a dictionary, into a string that the IO module can use""" mrespDefs = [] for setName, rest in multRespDefs.iteritems(): print setName, rest, rest["setType"] if rest["setType"] not in ("C", "D"): continue rest["setName"] = setName mrespDef = "$%(setName)s=%(setType)s" % rest lblLen = len(rest["label"]) rest["lblLen"] = lblLen rest["varNames"] = " ".join(rest["varNames"]) tail = "%(varNames)s" if lblLen == 0 else "%(label)s %(varNames)s" if rest["setType"] == "C": # multiple category sets template = " %%(lblLen)s %s " % tail else: #
list()
        # NOTE(review): re-indented from a whitespace-mangled source; the
        # statement nesting was inferred from control flow — confirm against
        # the upstream LLDB gdb-remote log parser before relying on it.
        # (fragment) tail of a RegisterInfo hex-value getter: collect hex
        # byte pairs, honour the global byte order, and render "0x...".
        uval = packet.get_hex_uint8()
        while uval is not None:
            bytes.append(uval)
            uval = packet.get_hex_uint8()
        value_str = '0x'
        if g_byte_order == 'little':
            bytes.reverse()
        for byte in bytes:
            value_str += '%2.2x' % byte
        return '%s' % (value_str)

    def __str__(self):
        '''Dump the register info key/value pairs'''
        s = ''
        for key in self.info.keys():
            if s:
                s += ', '
            s += "%s=%s " % (key, self.info[key])
        return s


class Packet:
    """Class that represents a packet that contains string data.

    The packet is consumed destructively: every get_*/skip_* call strips
    the parsed characters from the front of self.str.
    """

    def __init__(self, packet_str):
        self.str = packet_str

    def peek_char(self):
        # Return the first character without consuming it (0 if empty).
        ch = 0
        if self.str:
            ch = self.str[0]
        return ch

    def get_char(self):
        # Return and consume the first character (0 if empty).
        ch = 0
        if self.str:
            ch = self.str[0]
            self.str = self.str[1:]
        return ch

    def skip_exact_string(self, s):
        # Consume the literal prefix s; True on match, False otherwise.
        if self.str and self.str.startswith(s):
            self.str = self.str[len(s):]
            return True
        else:
            return False

    def get_thread_id(self, fail_value=-1):
        match = g_number_regex.match(self.str)
        if match:
            number_str = match.group(1)
            self.str = self.str[len(number_str):]
            return int(number_str, 0)
        else:
            return fail_value

    def get_hex_uint8(self):
        # Consume exactly two hex digits; None if not available.
        if self.str and len(self.str) >= 2 and self.str[
                0] in string.hexdigits and self.str[1] in string.hexdigits:
            uval = int(self.str[0:2], 16)
            self.str = self.str[2:]
            return uval
        return None

    def get_hex_uint16(self, byte_order):
        uval = 0
        if byte_order == 'big':
            uval |= self.get_hex_uint8() << 8
            uval |= self.get_hex_uint8()
        else:
            uval |= self.get_hex_uint8()
            uval |= self.get_hex_uint8() << 8
        return uval

    def get_hex_uint32(self, byte_order):
        uval = 0
        if byte_order == 'big':
            uval |= self.get_hex_uint8() << 24
            uval |= self.get_hex_uint8() << 16
            uval |= self.get_hex_uint8() << 8
            uval |= self.get_hex_uint8()
        else:
            uval |= self.get_hex_uint8()
            uval |= self.get_hex_uint8() << 8
            uval |= self.get_hex_uint8() << 16
            uval |= self.get_hex_uint8() << 24
        return uval

    def get_hex_uint64(self, byte_order):
        uval = 0
        if byte_order == 'big':
            uval |= self.get_hex_uint8() << 56
            uval |= self.get_hex_uint8() << 48
            uval |= self.get_hex_uint8() << 40
            uval |= self.get_hex_uint8() << 32
            uval |= self.get_hex_uint8() << 24
            uval |= self.get_hex_uint8() << 16
            uval |= self.get_hex_uint8() << 8
            uval |= self.get_hex_uint8()
        else:
            uval |= self.get_hex_uint8()
            uval |= self.get_hex_uint8() << 8
            uval |= self.get_hex_uint8() << 16
            uval |= self.get_hex_uint8() << 24
            uval |= self.get_hex_uint8() << 32
            uval |= self.get_hex_uint8() << 40
            uval |= self.get_hex_uint8() << 48
            uval |= self.get_hex_uint8() << 56
        return uval

    def get_number(self, fail_value=-1):
        '''Get a number from the packet. The number must be in big endian
        format and should be parsed according to its prefix (starts with "0x"
        means hex, starts with "0" means octal, starts with [1-9] means
        decimal, etc)'''
        match = g_number_regex.match(self.str)
        if match:
            number_str = match.group(1)
            self.str = self.str[len(number_str):]
            return int(number_str, 0)
        else:
            return fail_value

    def get_hex_ascii_str(self, n=0):
        hex_chars = self.get_hex_chars(n)
        if hex_chars:
            return binascii.unhexlify(hex_chars)
        else:
            return None

    def get_hex_chars(self, n=0):
        # Consume n hex digits (or, for n == 0, as many as available).
        str_len = len(self.str)
        if n == 0:
            # n was zero, so we need to determine all hex chars and
            # stop when we hit the end of the string or a non-hex character
            while n < str_len and self.str[n] in string.hexdigits:
                n = n + 1
        else:
            if n > str_len:
                return None  # Not enough chars
            # Verify all chars are hex if a length was specified
            for i in range(n):
                if self.str[i] not in string.hexdigits:
                    return None  # Not all hex digits
        if n == 0:
            return None
        hex_str = self.str[0:n]
        self.str = self.str[n:]
        return hex_str

    def get_hex_uint(self, byte_order, n=0):
        # Parse a variable-length hex integer in the given byte order.
        if byte_order == 'big':
            hex_str = self.get_hex_chars(n)
            if hex_str is None:
                return None
            return int(hex_str, 16)
        else:
            uval = self.get_hex_uint8()
            if uval is None:
                return None
            uval_result = 0
            shift = 0
            while uval is not None:
                uval_result |= (uval << shift)
                shift += 8
                uval = self.get_hex_uint8()
            return uval_result

    def get_key_value_pairs(self):
        # Split "key:value;key:value;..." into a list of [key, value] lists.
        kvp = list()
        if ';' in self.str:
            key_value_pairs = string.split(self.str, ';')
            for key_value_pair in key_value_pairs:
                if len(key_value_pair):
                    kvp.append(string.split(key_value_pair, ':'))
        return kvp

    def split(self, ch):
        return string.split(self.str, ch)

    def split_hex(self, ch, byte_order):
        hex_values = list()
        strings = string.split(self.str, ch)
        for str in strings:
            hex_values.append(Packet(str).get_hex_uint(byte_order))
        return hex_values

    def __str__(self):
        return self.str

    def __len__(self):
        return len(self.str)


g_thread_suffix_regex = re.compile(';thread:([0-9a-fA-F]+);')


def get_thread_from_thread_suffix(str):
    # Extract the thread id from a ";thread:NNNN;" suffix, or None.
    if str:
        match = g_thread_suffix_regex.match(str)
        if match:
            return int(match.group(1), 16)
    return None


def cmd_qThreadStopInfo(options, cmd, args):
    packet = Packet(args)
    tid = packet.get_hex_uint('big')
    print "get_thread_stop_info (tid = 0x%x)" % (tid)


def cmd_stop_reply(options, cmd, args):
    print "get_last_stop_info()"
    return False


def rsp_stop_reply(options, cmd, cmd_args, rsp):
    # Decode a stop-reply packet: T/S (signal), W (exit) or O (stdout).
    global g_byte_order
    packet = Packet(rsp)
    stop_type = packet.get_char()
    if stop_type == 'T' or stop_type == 'S':
        signo = packet.get_hex_uint8()
        key_value_pairs = packet.get_key_value_pairs()
        for key_value_pair in key_value_pairs:
            key = key_value_pair[0]
            if is_hex_byte(key):
                # A hex-byte key is a register number; pretty-print it.
                reg_num = Packet(key).get_hex_uint8()
                if reg_num < len(g_register_infos):
                    reg_info = g_register_infos[reg_num]
                    key_value_pair[0] = reg_info.name()
                    key_value_pair[1] = reg_info.get_value_from_hex_string(
                        key_value_pair[1])
            elif key == 'jthreads' or key == 'jstopinfo':
                key_value_pair[1] = binascii.unhexlify(key_value_pair[1])
        key_value_pairs.insert(0, ['signal', signo])
        print 'stop_reply():'
        dump_key_value_pairs(key_value_pairs)
    elif stop_type == 'W':
        exit_status = packet.get_hex_uint8()
        print 'stop_reply(): exit (status=%i)' % exit_status
    elif stop_type == 'O':
        print 'stop_reply(): stdout = "%s"' % packet.str


def cmd_unknown_packet(options, cmd, args):
    # NOTE(review): these print statements pass a format string and values
    # as a tuple instead of using '%' — output looks odd; confirm intent.
    if args:
        print "cmd: %s, args: %s", cmd, args
    else:
        print "cmd: %s", cmd
    return False


def cmd_qSymbol(options, cmd, args):
    if args == ':':
        print 'ready to serve symbols'
    else:
        packet = Packet(args)
        symbol_addr = packet.get_hex_uint('big')
        if symbol_addr is None:
            if packet.skip_exact_string(':'):
                symbol_name = packet.get_hex_ascii_str()
                print 'lookup_symbol("%s") -> symbol not available yet' % (symbol_name)
            else:
                print 'error: bad command format'
        else:
            if packet.skip_exact_string(':'):
                symbol_name = packet.get_hex_ascii_str()
                print 'lookup_symbol("%s") -> 0x%x' % (symbol_name, symbol_addr)
            else:
                print 'error: bad command format'


def rsp_qSymbol(options, cmd, cmd_args, rsp):
    if len(rsp) == 0:
        print "Unsupported"
    else:
        if rsp == "OK":
            print "No more symbols to lookup"
        else:
            packet = Packet(rsp)
            if packet.skip_exact_string("qSymbol:"):
                symbol_name = packet.get_hex_ascii_str()
                print 'lookup_symbol("%s")' % (symbol_name)
            else:
                print 'error: response string should start with "qSymbol:": respnse is "%s"' % (rsp)


def cmd_qXfer(options, cmd, args):
    # $qXfer:features:read:target.xml:0,1ffff#14
    print "read target special data %s" % (args)
    return True


def rsp_qXfer(options, cmd, cmd_args, rsp):
    # Parse a qXfer features/read reply carrying hex-encoded target XML and
    # harvest register descriptions into g_register_infos.
    data = string.split(cmd_args, ':')
    if data[0] == 'features':
        if data[1] == 'read':
            filename, extension = os.path.splitext(data[2])
            if extension == '.xml':
                response = Packet(rsp)
                xml_string = response.get_hex_ascii_str()
                ch = xml_string[0]
                if ch == 'l':
                    # leading 'l' marks the last chunk of the transfer
                    xml_string = xml_string[1:]
                xml_root = ET.fromstring(xml_string)
                for reg_element in xml_root.findall("./feature/reg"):
                    if not 'value_regnums' in reg_element.attrib:
                        reg_info = RegisterInfo([])
                        if 'name' in reg_element.attrib:
                            reg_info.info[
                                'name'] = reg_element.attrib['name']
                        else:
                            reg_info.info['name'] = 'unspecified'
                        if 'encoding' in reg_element.attrib:
                            reg_info.info['encoding'] = reg_element.attrib[
                                'encoding']
                        else:
                            reg_info.info['encoding'] = 'uint'
                        if 'offset' in reg_element.attrib:
                            reg_info.info[
                                'offset'] = reg_element.attrib['offset']
                        if 'bitsize' in reg_element.attrib:
                            reg_info.info[
                                'bitsize'] = reg_element.attrib['bitsize']
                        g_register_infos.append(reg_info)
                print 'XML for "%s":' % (data[2])
                ET.dump(xml_root)


def cmd_A(options, cmd, args):
    # Decode the A (launch) packet: "arglen,argidx,hex-arg" triples.
    print 'launch process:'
    packet = Packet(args)
    while True:
        arg_len = packet.get_number()
        if arg_len == -1:
            break
        if not packet.skip_exact_string(','):
            break
        arg_idx = packet.get_number()
        if arg_idx == -1:
            break
        if not packet.skip_exact_string(','):
            break
        arg_value = packet.get_hex_ascii_str(arg_len)
        print 'argv[%u] = "%s"' % (arg_idx, arg_value)


def cmd_qC(options, cmd, args):
    print "query_current_thread_id()"


def rsp_qC(options, cmd, cmd_args, rsp):
    packet = Packet(rsp)
    if packet.skip_exact_string("QC"):
        tid = packet.get_thread_id()
        print "current_thread_id = %#x" % (tid)
    else:
        print "current_thread_id = old thread ID"


def cmd_query_packet(options, cmd, args):
    if args:
        print "%s%s" % (cmd, args)
    else:
        print "%s" % (cmd)
    return False


def rsp_ok_error(rsp):
    print "rsp: ", rsp


def rsp_ok_means_supported(options, cmd, cmd_args, rsp):
    if rsp == 'OK':
        print "%s%s is supported" % (cmd, cmd_args)
    elif rsp == '':
        print "%s%s is not supported" % (cmd, cmd_args)
    else:
        print "%s%s -> %s" % (cmd, cmd_args, rsp)


def rsp_ok_means_success(options, cmd, cmd_args, rsp):
    if rsp == 'OK':
        print "success"
    elif rsp == '':
        print "%s%s is not supported" % (cmd, cmd_args)
    else:
        print "%s%s -> %s" % (cmd, cmd_args, rsp)


def dump_key_value_pairs(key_value_pairs):
    # Right-align keys to the longest key for readable output.
    max_key_len = 0
    for key_value_pair in key_value_pairs:
        key_len = len(key_value_pair[0])
        if max_key_len < key_len:
            max_key_len = key_len
    for key_value_pair in key_value_pairs:
        key = key_value_pair[0]
        value = key_value_pair[1]
        print "%*s = %s" % (max_key_len, key, value)


def rsp_dump_key_value_pairs(options, cmd, cmd_args, rsp):
    if rsp:
        print '%s response:' % (cmd)
        packet = Packet(rsp)
        key_value_pairs = packet.get_key_value_pairs()
        dump_key_value_pairs(key_value_pairs)
    else:
        print "not supported"


def cmd_c(options, cmd, args):
    print "continue()"
    return False


def cmd_s(options, cmd, args):
    print "step()"
    return False


def cmd_vCont(options, cmd, args):
    if args == '?':
        print "%s: get supported extended continue modes" % (cmd)
    else:
        got_other_threads = 0
        s = ''
        for thread_action in string.split(args[1:], ';'):
            (short_action, thread) = string.split(thread_action, ':')
            tid = int(thread, 16)
            if short_action == 'c':
                action = 'continue'
            elif short_action ==
cmds.manipScaleContext('Scale', q=True, cah=True)
                # NOTE(review): re-indented from a whitespace-mangled source;
                # nesting of several statements was inferred — confirm
                # against the original sisidebar source.
                # (fragment) tail of a method that syncs the axis buttons
                # with the currently active manipulator handle.
                active_list = scl_move_active_list
            if mode == 1:
                if maya_ver >= 2015:
                    handle_id = cmds.manipRotateContext('Rotate', q=True, cah=True)
                active_list = rot_active_list
            if mode == 2:
                if maya_ver >= 2015:
                    handle_id = cmds.manipMoveContext('Move', q=True, cah=True)
                active_list = scl_move_active_list
            #print 'handle id :', handle_id
            for i, but in enumerate(self.all_axis_but_list[mode][0:3]):
                if ommit_manip_link:
                    continue
                #print i, mode, handle_id
                #print 'check xyz but active :', active_list[handle_id][i]
                try:
                    but.setChecked(active_list[handle_id][i])
                except:
                    return
            if keep:
                self.keep_srt_select(mode=mode)
        except Exception as e:
            #print e.message
            pass

    # Initialize the manipulator contexts
    pre_type = None

    def set_up_manip(self):
        #print 'set_up_manip'
        try:
            if cmds.selectMode(q=True, o=True):
                sel = cmds.ls(sl=True, l=True)
                if sel:
                    type = cmds.nodeType(sel[-1])
                else:
                    type = None
                if maya_ver >= 2015:
                    cmds.manipScaleContext('Scale', e=True,
                                           prd=(lambda : set_child_comp(mode=True), type),  # executed before drag
                                           pod=(self.editing_manip, type),  # executed after drag
                                           prc=(self.select_from_current_context))  # executed when the tool is started
                    cmds.manipRotateContext('Rotate', e=True,
                                            prd=(lambda : set_child_comp(mode=True), type),  # executed before drag
                                            pod=(self.editing_manip, type),  # executed after drag
                                            prc=(self.select_from_current_context))  # executed when the tool is started
                    cmds.manipMoveContext('Move', e=True,
                                          prd=(lambda : set_child_comp(mode=True), type),  # executed before drag
                                          pod=(self.editing_manip, type),  # executed after drag
                                          prc=(self.select_from_current_context))  # executed when the tool is started
                else:
                    cmds.manipScaleContext('Scale', e=True,
                                           prd=(lambda : set_child_comp(mode=True), type),  # executed before drag
                                           pod=(self.editing_manip, type))  # executed after drag
                    cmds.manipRotateContext('Rotate', e=True,
                                            prd=(lambda : set_child_comp(mode=True), type),  # executed before drag
                                            pod=(self.editing_manip, type))  # executed after drag
                    cmds.manipMoveContext('Move', e=True,
                                          prd=(lambda : set_child_comp(mode=True), type),  # executed before drag
                                          pod=(self.editing_manip, type))  # executed after drag
            if cmds.selectMode(q=True, co=True):
                sel = cmds.ls(sl=True, l=True)
                if sel:
                    # Supports multiple component types; pod seems to require
                    # the last type in the list
                    type = cmds.nodeType(sel[-1])
                    #print 'check sel type :', type, self.pre_type
                else:
                    type = self.pre_type
                if maya_ver >= 2015:
                    cmds.manipScaleContext('Scale', e=True,
                                           pod=(self.editing_manip, type),
                                           prc=(self.select_from_current_context))
                    cmds.manipRotateContext('Rotate', e=True,
                                            pod=(self.editing_manip, type),
                                            prc=(self.select_from_current_context))
                    cmds.manipMoveContext('Move', e=True,
                                          pod=(self.editing_manip, type),
                                          prc=(self.select_from_current_context))
                else:
                    cmds.manipScaleContext('Scale', e=True, pod=(self.editing_manip, type))
                    cmds.manipRotateContext('Rotate', e=True, pod=(self.editing_manip, type))
                    cmds.manipMoveContext('Move', e=True, pod=(self.editing_manip, type))
        except Exception as e:
            #print 'set up manip error :', e.message
            return
        target_tool_list = ['scaleSuperContext', 'RotateSuperContext',
                            'moveSuperContext', 'selectSuperContext']
        if self.pre_type != type:
            #print 'change set tool', type, self.pre_type
            current_tool = cmds.currentCtx()
            if current_tool in target_tool_list:
                #cmds.setToolTo('selectSuperContext')
                cmds.setToolTo(current_tool)
                #cmds.select(sel, r=True)
            self.pre_type = type

    def blank(self):
        # Intentional no-op used to detach manip callbacks.
        pass

    # Restore the manip settings to their defaults
    def reset_manip(self):
        #print 'reset_manip'
        try:
            sel = cmds.ls(sl=True, l=True)
            if sel:
                type = cmds.nodeType(sel[-1])
            else:
                type = None
            if maya_ver >= 2015:
                cmds.manipScaleContext('Scale', e=True,
                                       prd=(self.blank, type),  # executed before drag
                                       pod=(self.blank, type),  # executed after drag
                                       prc=(self.blank))  # executed when the tool is started
                cmds.manipRotateContext('Rotate', e=True,
                                        prd=(self.blank, type),  # executed before drag
                                        pod=(self.blank, type),  # executed after drag
                                        prc=(self.blank))  # executed when the tool is started
                cmds.manipMoveContext('Move', e=True,
                                      prd=(self.blank, type),  # executed before drag
                                      pod=(self.blank, type),  # executed after drag
                                      prc=(self.blank))  # executed when the tool is started
            else:
                cmds.manipScaleContext('Scale', e=True,
                                       prd=(self.blank, type),  # executed before drag
                                       pod=(self.blank, type))  # executed after drag
                cmds.manipRotateContext('Rotate', e=True,
                                        prd=(self.blank, type),  # executed before drag
                                        pod=(self.blank, type))  # executed after drag
                cmds.manipMoveContext('Move', e=True,
                                      prd=(self.blank, type),  # executed before drag
                                      pod=(self.blank, type))  # executed after drag
        except Exception as e:
            #print 'set up manip error :', e.message
            #self.error_avoidance()
            return
        target_tool_list = ['scaleSuperContext', 'RotateSuperContext',
                            'moveSuperContext', 'selectSuperContext']
        current_tool = cmds.currentCtx()
        if current_tool in target_tool_list:
            cmds.setToolTo(current_tool)

    # Running the update function directly from pod crashes Maya, so it is
    # triggered through a signal instead
    def reload_srt(self):
        sisidebar_sub.get_matrix()

    # Signal used to push values back to the UI after mesh edits
    reload = Signal()

    # Slot; connected to postDragCommand (pod)
    def editing_manip(self):
        #print 'editing manip'
        try:
            if uni_vol_dict[view_but.text()] != -1 and select_scale.isChecked():
                #print 'volmode'
                mode = uni_vol_dict[view_but.text()]
                #print mode
                sisidebar_sub.set_vol_mode(mode)
                self.pre_vol_id = uni_vol_dict[view_but.text()]
                #sisidebar_sub.volume_scaling(mode)
                cmds.evalDeferred(sisidebar_sub.volume_scaling)
        except Exception as e:  # workaround for the window-close bug in 2018 Update 2 and later
            print 'editing_manip error :', e.message
            cmds.evalDeferred(
                self.error_avoidance)
            return
        if maya_ver >= 2015:
            self.select_xyz_from_manip()
        else:
            # For 2014 and earlier, forcibly recover the axis from the undo info
            cmds.evalDeferred(sisidebar_sub.current_handle_getter)
        self.reload.emit()
        if ommit_manip_link:
            current_tool = cmds.currentCtx()
            tools_list = ['scaleSuperContext', 'RotateSuperContext',
                          'moveSuperContext']
            try:
                #print 'Froce select handle'
                mode = tools_list.index(current_tool)
                self.select_manip_handle(mode=mode)
            except Exception as e:
                print e.message
                pass

    # Center matching was moved to run during culc's get_matrix instead
    def init_save(self):
        temp = __name__.split('.')
        self.dir_path = os.path.join(
            os.getenv('MAYA_APP_dir'), 'Scripting_Files')
        self.w_file = self.dir_path+'/'+temp[-1]+'_window_'+str(maya_ver)+'.json'

    def load(self, init_pos=False):
        #print 'load data'
        save_data = read_save_file(init_pos=init_pos)
        if maya_ver >= 2015:
            offset_w = -8
            offset_h = -31
        else:
            offset_w = 0
            offset_h = 0
        self.pw = save_data['pw'] + offset_w  # error compensation
        self.ph = save_data['ph'] + offset_h  # error compensation
        self.sw = 
save_data['sw'] self.sh = save_data['sh'] self.move(self.pw, self.ph) self.resize(self.sw, self.sh) self.dockable=True, self.area=save_data['area'], self.floating=save_data['floating'], self.width=save_data['sw'], self.height=save_data['sh'] try: self.ui_col = save_data['ui_col'] except: self.ui_col = 0 try: self.uni_obj_mode = save_data['vol_obj'] self.uni_cmp_mode = save_data['vol_cmp'] except: self.uni_obj_mode = -1 self.uni_cmp_mode = -1 global destroy_flag try: destroy_flag = save_data['destroy'] except: destroy_flag = False global evolution_flag try: evolution_flag = save_data['evolution'] if evolution_flag: global destroy_name destroy_name = 'Evolution' except: evolution_flag = False #print destroy_flag return save_data def save(self, display=True): #print 'save' if not os.path.exists(self.dir_path): os.makedirs(self.dir_path) save_data = {} save_data['display'] = display if maya_ver >= 2015: save_data['dockable'] = self.isDockable() else: save_data['dockable'] = False global maya_ver if maya_ver >= 2017: save_data['floating'] = cmds.workspaceControl(u'SiSideBarWorkspaceControl',q=True, fl=True) elif maya_ver >= 2015: save_data['floating'] = self.isFloating() else: save_data['floating'] = True if maya_ver >= 2015: save_data['area'] = self.dockArea() else: save_data['area'] = None #print 'dock area', self.dockArea() if save_data['dockable'] is True: dock_dtrl = self.parent() pos = dock_dtrl.mapToGlobal(QPoint(0, 0)) else: pos = self.pos() size = self.size() save_data['pw'] = pos.x() save_data['ph'] = pos.y() save_data['sw'] = size.width() save_data['sh'] = size.height() #print 'save ui col :', self.ui_col save_data['ui_col'] = self.ui_col save_data['vol_obj'] = self.uni_obj_mode save_data['vol_cmp'] = self.uni_cmp_mode #print 'save data :', save_data global destroy_flag save_data['destroy'] = destroy_flag global evolution_flag save_data['evolution'] = evolution_flag #print destroy_flag if not os.path.exists(self.dir_path): os.makedirs(self.dir_path) with 
open(self.w_file, 'w') as f: json.dump(save_data, f) return save_data #ウィンドウ閉じた時にジョブ削除ドックイベントはトリガーが通常と異なるので注意 global center_mode center_mode = None def dockCloseEventTriggered(self): print 'SI Side Bar : Close Event : Dock Window Closed' self.remove_job() self.display = False#ウィンドウスタートアップフラグを下げる self.save(display=False) #センターモードに入っていたら解除する if center_mode: toggle_center_mode(mode=False) #COGモードなら解除する if self.cog_but.isChecked(): print 'reset cog as close :' self.cog_but.setChecked(False) #self.setup_object_center() self.reset_cog_mode() if destroy_flag: #print 'timer stop' try: self.timer.stop() except: pass try: self.collapse_timer.stop() except: pass #サブウィンドウ開いてたら閉じる self.close_sub_windows() def close_sub_windows(self): option_window_list = ['prop_option', 'filter_window', 'sym_window', 'trs_setting_window', 'transform_manu_window', 'select_manu_window', 'extrude_edge_uv', 'append_polygon_ui', 'edit_manu_window'] for op_window in option_window_list: try: exec(op_window+'.close()') exec('del '+op_window) except: pass #タブ隠すだけで無効になるので使用中止 #def hideEvent(self, e): #if maya_ver >= 2017: #self.dockCloseEventTriggered() #Maya2014用 def closeEvent(self, e): if maya_ver <= 2014: self.dockCloseEventTriggered() attr_job_list = list() fcurve_job_list = list() trs_attr_list = ['.scaleX', '.scaleY', '.scaleZ', '.rotateX', '.rotateY', '.rotateZ', '.translateX', '.translateY', '.translateZ'] fcurve_job_ctrl_count = 0 #<EMAIL>.profileFunction() def create_fcurve_job(self): self.check_key_anim_from_fcurve#大量のオブジェクト処理でジョブ作成が間に合わない場合の保険 self.kill_attr_job() self.kill_fcurve_job() self.fcurve_job_list = list() if cmds.selectMode(q=True, co=True): return selection = cmds.ls(sl=True, l=True, tr=True) if len(selection) > 500: return #print selection, self.trs_attr_list for node, attr in itertools.product(selection, self.trs_attr_list): #print node+attr job = cmds.scriptJob(connectionChange=[node+attr, self.re_check_fcurve]) self.attr_job_list.append(job) fcurve = 
cmds.listConnections(node+attr, s=True, d=False) if not fcurve: #print 'not fcurve return :' continue #print 'set sub fcurve job :' anim_curve_list = ['animCurveTU', 'animCurveTA', 'animCurveTL'] if not cmds.nodeType(fcurve) in anim_curve_list: continue job = cmds.scriptJob(attributeChange=[fcurve[0]+'.outStippleRange', self.check_key_anim_from_fcurve]) self.fcurve_job_list.append(job) job = cmds.scriptJob(attributeChange=[fcurve[0]+'.apply', self.check_key_anim_from_fcurve]) self.fcurve_job_list.append(job) self.fcurve_job_ctrl_count += 1 def check_key_anim_from_fcurve(self): #print 'check key anim form fcurve' check_key_anim(from_fcurve=True) pre_fcurve_job_ctrl_count = -1 def re_check_fcurve(self): self.check_key_anim_from_fcurve() if self.pre_fcurve_job_ctrl_count == self.fcurve_job_ctrl_count: #print 'same fcurve layer : return' return self.kill_fcurve_job() #print 'recheck sub fcurve job :' self.pre_fcurve_job_ctrl_count = self.fcurve_job_ctrl_count selection = cmds.ls(sl=True, l=True, tr=True) for node, attr in itertools.product(selection, self.trs_attr_list): fcurve = cmds.listConnections(node+attr, s=True, d=False) if not fcurve: continue #print 'create_sub_fcurve_job :', node+attr, fcurve anim_curve_list = ['animCurveTU', 'animCurveTA', 'animCurveTL'] if not cmds.nodeType(fcurve) in anim_curve_list: continue job = cmds.scriptJob(attributeChange=[fcurve[0]+'.outStippleRange', self.check_key_anim_from_fcurve]) self.fcurve_job_list.append(job) job = cmds.scriptJob(attributeChange=[fcurve[0]+'.apply', self.check_key_anim_from_fcurve]) self.fcurve_job_list.append(job) #print self.fcurve_job_list def kill_attr_job(self): for job in self.attr_job_list: cmds.scriptJob(k=job, f=True) self.attr_job_list = list() def kill_fcurve_job(self): for job in self.fcurve_job_list: cmds.scriptJob(k=job, f=True) self.fcurve_job_list = list() #スクリプトジョブ作成 def create_job(self): global script_job_flag global script_job global context_job global timeline_job global undo_job global 
redo_job global workspace_job global fcurve_job if 'script_job_flag' in globals(): if script_job: return script_job_flag = True script_job = cmds.scriptJob(cu=True, e=("SelectionChanged", sisidebar_sub.change_selection)) timeline_job = cmds.scriptJob(cu=True, e=("timeChanged", sisidebar_sub.change_selection)) undo_job = cmds.scriptJob(cu=True, e=("Undo", sisidebar_sub.change_selection)) redo_job = cmds.scriptJob(cu=True, e=("Redo", sisidebar_sub.change_selection)) context_job = cmds.scriptJob(cu=True, e=("ToolChanged", sisidebar_sub.change_context)) workspace_job = cmds.scriptJob(e=("SceneOpened", setup.check_open), kws=False) fcurve_job = cmds.scriptJob(cu=True, e=("SelectionChanged", self.create_fcurve_job)) #ジョブ番号確認用 # print 'script job :', script_job # print 'timeline_job :', timeline_job # print 'undo_job :', undo_job # print 'redo_job :', redo_job # print 'context_job :', context_job # print 'workspace_job :', workspace_job # print 'fcurve_job :', fcurve_job global job_list#ウィンドウ破壊後も使えるようにグローバルで持つ job_list = [script_job, context_job, timeline_job, undo_job, redo_job, workspace_job, fcurve_job] #print 'check job for create :', script_job #スクリプトジョブ削除 def remove_job(self): #print 'remove job', inspect.stack() #self.reset_manip() global script_job_flag global script_job global context_job global timeline_job global undo_job global redo_job global workspace_job global fcurve_job global job_list #print 'remove job list :', job_list for i, job in enumerate(job_list): if job: try: cmds.scriptJob(k=job, f=True) except Exception as e: #print 'remove job error :', job, e.message continue job_list[i] = None script_job = None try: self.kill_attr_job() except Exception as e: #print 'remove attr job error :', e.message pass try: self.kill_fcurve_job() except Exception as e: #print 'remove fcurve job error :', e.message pass cmds.undoInfo(swf=True) cmds.evalDeferred(self.reset_manip) #2018up2以降はクローズイベントが発生しないのにウィジェットなくなったことになるから別処理 def error_avoidance(self): 
self.remove_job() #サブウィンドウ閉じる self.close_sub_windows() #センターモードから抜ける if center_mode: toggle_center_mode(mode=False) #センター位置を戻す self.reset_cog_mode() pre_vol_id = -1 pre_obj_vol = -1 pre_cmp_vol = -1 #以前の設定からUni/Volボタン状態を復旧する def rebuild_uni_vol(self, mode): #print 'rebuild_uni_vol', mode if mode == 2: view_but.setText('Uni') view_but.setChecked(True) elif mode == 5: #print 'Vol' view_but.setText('Vol')
from crispy_forms.bootstrap import InlineCheckboxes from crispy_forms.helper import FormHelper from crispy_forms.layout import HTML, ButtonHolder, Div, Fieldset, Layout, Submit from datetimewidget.widgets import DateWidget from django import forms from .models import ( CHF, CKD, IBD, PVD, Alcohol, AllopurinolHypersensitivity, Angina, Anticoagulation, Bleed, ColchicineInteractions, Cyclosporine, Diabetes, Diuretics, Erosions, FebuxostatHypersensitivity, Fructose, Gout, HeartAttack, Hypertension, Hyperuricemia, OrganTransplant, Osteoporosis, Shellfish, Stroke, Tophi, UrateKidneyStones, XOIInteractions, ) ### Medical History ModelForms ### class AnginaForm(forms.ModelForm): prefix = "Angina" class Meta: model = Angina fields = ("value",) def __init__(self, *args, **kwargs): super(AnginaForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset( "", "value", id="angina_for_profile", ), ) class AnticoagulationForm(forms.ModelForm): prefix = "Anticoagulation" class Meta: model = Anticoagulation fields = ( "value", "apixaban", "clopidogrel", "dabigatran", "enoxaparin", "rivaroxaban", "warfarin", ) def __init__(self, *args, **kwargs): super(AnticoagulationForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset( "Anticoagulation", "value", "apixaban", "clopidogrel", "dabigatran", "enoxaparin", "rivaroxaban", "warfarin", id="anticoagulation_for_profile", ), ) class AnticoagulationSimpleForm(AnticoagulationForm): class Meta: model = Anticoagulation fields = ("value",) def __init__(self, *args, **kwargs): super(AnticoagulationSimpleForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset( "", 
"value", id="anticoagulation_for_profile", ), ) class CKDSimpleForm(forms.ModelForm): prefix = "CKD" class Meta: model = CKD fields = ( "value", "dialysis", ) def __init__(self, *args, **kwargs): super(CKDSimpleForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.fields["dialysis"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False # You can dynamically adjust your layout self.helper.layout = Layout( Fieldset( "", "value", "dialysis", id="CKD_for_profile", ), ) class CKDForm(CKDSimpleForm): class Meta: model = CKD fields = ( "value", "dialysis", "stage", ) def __init__(self, *args, **kwargs): super(CKDForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.fields["dialysis"].widget = forms.CheckboxInput() self.fields["stage"].empty_label = None self.fields["stage"].required = False self.fields["stage"].widget = forms.RadioSelect() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset( "", "value", InlineCheckboxes("stage"), "dialysis", id="CKD_for_profile", ), ) class ColchicineInteractionsForm(forms.ModelForm): prefix = "Colchicine Interactions" class Meta: model = ColchicineInteractions fields = ( "value", "clarithromycin", "simvastatin", ) def __init__(self, *args, **kwargs): super(ColchicineInteractionsForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False # You can dynamically adjust your layout self.helper.layout = Layout( Fieldset( "Colchicine Interactions", "value", "clarithromycin", "simvastatin", id="colchicine_interactions_for_profile", ), ) class ColchicineInteractionsSimpleForm(ColchicineInteractionsForm): class Meta: model = ColchicineInteractions fields = ("value",) def __init__(self, *args, **kwargs): super(ColchicineInteractionsSimpleForm, self).__init__(*args, **kwargs) 
self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset( "", "value", id="colchicine_interactions_for_profile", ), ) class HypertensionForm(forms.ModelForm): prefix = "hypertension" class Meta: model = Hypertension fields = ( "value", "medication", ) def __init__(self, *args, **kwargs): super(HypertensionForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.fields["medication"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False # You can dynamically adjust your layout self.helper.layout = Layout( Fieldset("", "value", "medication", id="hypertension_for_profile"), ) class HypertensionSimpleForm(HypertensionForm): class Meta: model = Hypertension fields = ("value",) def __init__(self, *args, **kwargs): super(HypertensionForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False # You can dynamically adjust your layout self.helper.layout = Layout( Fieldset("", "value", id="hypertension_for_profile"), ) class PVDForm(forms.ModelForm): prefix = "PVD" class Meta: model = PVD fields = ("value",) def __init__(self, *args, **kwargs): super(PVDForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False # You can dynamically adjust your layout self.helper.layout = Layout( Fieldset("", "value", id="PVD_for_profile"), ) class HyperuricemiaForm(forms.ModelForm): prefix = "hyperuricemia" class Meta: model = Hyperuricemia fields = ("value",) def __init__(self, *args, **kwargs): super(HyperuricemiaForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset("", "value", id="hyperuricemia_for_profile"), ) 
class IBDForm(forms.ModelForm): prefix = "IBD" class Meta: model = IBD fields = ("value",) def __init__(self, *args, **kwargs): super(IBDForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset("Inflammatory Bowel Disease", "value", id="IBD_for_profile"), ) class IBDSimpleForm(IBDForm): def __init__(self, *args, **kwargs): super(IBDSimpleForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset("", "value", id="IBD_for_profile"), ) class OsteoporosisForm(forms.ModelForm): prefix = "Osteoporosis" class Meta: model = Osteoporosis fields = ("value",) def __init__(self, *args, **kwargs): super(OsteoporosisForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset("Osteoporosis", "value", id="osteoporosis_for_profile"), ) class OsteoporosisSimpleForm(OsteoporosisForm): def __init__(self, *args, **kwargs): super(OsteoporosisSimpleForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset("", "value", id="osteoporosis_for_profile"), ) class CHFForm(forms.ModelForm): prefix = "CHF" class Meta: model = CHF fields = ( "value", "systolic", ) def __init__(self, *args, **kwargs): super(CHFForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.fields["systolic"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False # You can dynamically adjust your layout self.helper.layout = Layout( Fieldset("", "value", "systolic", id="CHF_for_profile"), ) class CHFSimpleForm(CHFForm): class Meta: model = CHF 
fields = ("value",) def __init__(self, *args, **kwargs): super(CHFForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False # You can dynamically adjust your layout self.helper.layout = Layout( Fieldset("", "value", id="CHF_for_profile"), ) class DiabetesForm(forms.ModelForm): prefix = "diabetes" class Meta: model = Diabetes fields = ( "value", "type", "insulin", ) def __init__(self, *args, **kwargs): super(DiabetesForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset("Diabetes", "value", "type", "insulin", id="diabetes_for_profile"), ) class DiabetesSimpleForm(DiabetesForm): class Meta: model = Diabetes fields = ("value",) def __init__(self, *args, **kwargs): super(DiabetesSimpleForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset("", "value", id="diabetes_for_profile"), ) class ErosionsForm(forms.ModelForm): prefix = "erosions" class Meta: model = Erosions fields = ("value",) def __init__(self, *args, **kwargs): super(ErosionsForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset("", "value", id="erosions_for_profile"), ) class OrganTransplantForm(forms.ModelForm): prefix = "organ_transplant" class Meta: model = OrganTransplant fields = ( "value", "organ", ) def __init__(self, *args, **kwargs): super(OrganTransplantForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.fields["organ"].required = False self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset("", "value", InlineCheckboxes("organ"), 
id="organ_transplant_for_profile"), ) class TophiForm(forms.ModelForm): prefix = "tophi" class Meta: model = Tophi fields = ("value",) def __init__(self, *args, **kwargs): super(TophiForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset("", "value", id="tophi_for_profile"), ) class UrateKidneyStonesForm(forms.ModelForm): prefix = "urate_kidney_stones" class Meta: model = UrateKidneyStones fields = ("value",) def __init__(self, *args, **kwargs): super(UrateKidneyStonesForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset("", "value", id="urate_kidney_stones_for_profile"), ) class StrokeForm(forms.ModelForm): prefix = "stroke" class Meta: model = Stroke fields = ( "value", "number", "date", ) widgets = { "date": DateWidget(attrs={"id": "stroke_date.pk"}, usel10n=True, bootstrap_version=3), } def __init__(self, *args, **kwargs): super(StrokeForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset("Stroke", "value", "number", "date", id="stroke_for_contraindications"), ) class StrokeSimpleForm(StrokeForm): class Meta: model = Stroke fields = ("value",) def __init__(self, *args, **kwargs): super(StrokeSimpleForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset("", "value", id="stroke_for_contraindications"), ) class HeartAttackForm(forms.ModelForm): prefix = "heartattack" class Meta: model = HeartAttack fields = ( "value", "number", "date", "stent", "stent_date", "cabg", "cabg_date", ) dateTimeOptions = { "autoclose": True, "pickerPosition": 
"bottom-left", } widgets = { "date": DateWidget(attrs={"id": "heartattack_date.pk"}, usel10n=True, bootstrap_version=3), "stent_date": DateWidget(attrs={"id": "stent_date.pk"}, usel10n=True, bootstrap_version=3), "cabg_date": DateWidget(attrs={"id": "cabg_date.pk"}, usel10n=True, bootstrap_version=3), } def __init__(self, *args, **kwargs): super(HeartAttackForm, self).__init__(*args, **kwargs) self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset( "Heart Attack", "value", "number", "date", "stent", "stent_date", "cabg", "cabg_date", id="heart_attack_for_contraindications", ), ) class HeartAttackSimpleForm(HeartAttackForm): class Meta: model = HeartAttack fields = ("value",) def __init__(self, *args, **kwargs): super(HeartAttackSimpleForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset( "", "value", id="heart_attack_for_contraindications", ), ) class BleedForm(forms.ModelForm): prefix = "bleed" class Meta: model = Bleed fields = ( "value", "number", "date", "GIB", "GIB_date", "CNS", "CNS_date", "transfusion", ) dateTimeOptions = { "autoclose": True, "pickerPosition": "bottom-left", } widgets = { "date": DateWidget( options=dateTimeOptions, attrs={"id": "bleed_date.pk"}, usel10n=True, bootstrap_version=3 ), "GIB_date": DateWidget( options=dateTimeOptions, attrs={"id": "GIB_date.pk"}, usel10n=True, bootstrap_version=3 ), "CNS_date": DateWidget( options=dateTimeOptions, attrs={"id": "CNS_date.pk"}, usel10n=True, bootstrap_version=3 ), } def __init__(self, *args, **kwargs): super(BleedForm, self).__init__(*args, **kwargs) self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset( "Bleed (major)", "value", "number", "date", "GIB", "GIB_date", "CNS", "CNS_date", "transfusion", id="bleed_for_profile", ), ) class BleedSimpleForm(BleedForm): class Meta: model 
= Bleed fields = ("value",) def __init__(self, *args, **kwargs): super(BleedSimpleForm, self).__init__(*args, **kwargs) self.fields["value"].widget = forms.CheckboxInput() self.helper = FormHelper(self) self.helper.form_tag = False self.helper.layout = Layout( Fieldset( "", "value", id="bleed_for_contraindications", ), ) class AllopurinolHypersensitivityForm(forms.ModelForm): prefix = "AllopruinolHypersensitivity" class Meta: model =
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for AutoCastVariable.""" import os import threading import numpy as np import tensorflow.compat.v2 as tf from absl.testing import parameterized from keras.mixed_precision import autocast_variable from keras.optimizers.optimizer_v2 import adadelta from keras.optimizers.optimizer_v2 import adagrad from keras.optimizers.optimizer_v2 import adam from keras.optimizers.optimizer_v2 import adamax from keras.optimizers.optimizer_v2 import ftrl from keras.optimizers.optimizer_v2 import ( gradient_descent as gradient_descent_v2, ) from keras.optimizers.optimizer_v2 import nadam from keras.optimizers.optimizer_v2 import rmsprop maybe_distribute = tf.__internal__.test.combinations.combine( distribution=[ tf.__internal__.distribute.combinations.default_strategy, tf.__internal__.distribute.combinations.mirrored_strategy_with_cpu_1_and_2, # noqa: E501 ] ) def get_var(val, dtype, name=None): return tf.Variable(val, dtype=dtype, name=name) def set_cpu_logical_devices_to_at_least(num): """Create cpu logical devices of at least a given number.""" physical_devices = tf.config.list_physical_devices("CPU") if not physical_devices: raise RuntimeError("No CPU found") if len(physical_devices) >= num: return # By default each physical device corresponds to one logical device. 
We # create multiple logical devices for the last physical device so that we # have `num` logical devices. num = num - len(physical_devices) + 1 logical_devices = [] for _ in range(num): logical_devices.append(tf.config.LogicalDeviceConfiguration()) # Create logical devices from the last device since sometimes the first GPU # is the primary graphic card and may have less memory available. tf.config.set_logical_device_configuration( physical_devices[-1], logical_devices ) @tf.__internal__.distribute.combinations.generate( tf.__internal__.test.combinations.combine(mode=["graph", "eager"]) ) class AutoCastVariableTest(tf.test.TestCase, parameterized.TestCase): def setUp(self): set_cpu_logical_devices_to_at_least(3) super().setUp() @tf.__internal__.distribute.combinations.generate(maybe_distribute) def test_read(self, distribution): with distribution.scope(): x = get_var(1.0, tf.float32) x = autocast_variable.create_autocast_variable(x) self.evaluate(x.initializer) # outside of auto cast scope. 
self.assertEqual(x.dtype, tf.float32)
        self.assertEqual(x.value().dtype, tf.float32)
        self.assertEqual(x.read_value().dtype, tf.float32)
        self.assertEqual(tf.identity(x).dtype, tf.float32)

        # within auto cast scope of different dtype
        with autocast_variable.enable_auto_cast_variables(tf.float16):
            # dtype still reports the true (storage) dtype; only reads are cast
            self.assertEqual(x.dtype, tf.float32)
            self.assertEqual(x.value().dtype, tf.float16)
            self.assertEqual(x.read_value().dtype, tf.float16)
            self.assertEqual(tf.identity(x).dtype, tf.float16)

        # within auto cast scope of same dtype
        with autocast_variable.enable_auto_cast_variables(tf.float32):
            self.assertEqual(x.dtype, tf.float32)
            self.assertEqual(x.value().dtype, tf.float32)
            self.assertEqual(x.read_value().dtype, tf.float32)
            self.assertEqual(tf.identity(x).dtype, tf.float32)

    def test_sparse_reads(self):
        """Sparse reads (sparse_read/gather_nd) are cast inside an autocast scope."""
        x = get_var([1.0, 2], tf.float32)
        # DistributedVariables do not support sparse_read or gather_nd, so we
        # pass distribute=False
        x = autocast_variable.create_autocast_variable(x)
        self.evaluate(x.initializer)

        # Outside any scope: reads keep the true dtype.
        self.assertEqual(x.sparse_read([0]).dtype, tf.float32)
        self.assertEqual(x.gather_nd([0]).dtype, tf.float32)

        with autocast_variable.enable_auto_cast_variables(tf.float16):
            self.assertEqual(x.sparse_read([0]).dtype, tf.float16)
            self.assertEqual(x.gather_nd([0]).dtype, tf.float16)

    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_read_nested_scopes(self, distribution):
        """Nested autocast scopes: the innermost scope's dtype wins, then pops."""
        with distribution.scope():
            x = get_var(1.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)
            self.evaluate(x.initializer)

            with autocast_variable.enable_auto_cast_variables(tf.float16):
                self.assertEqual(x.read_value().dtype, tf.float16)

                with autocast_variable.enable_auto_cast_variables(tf.float32):
                    self.assertEqual(x.read_value().dtype, tf.float32)

                # Back in the outer (float16) scope after the inner one exits.
                self.assertEqual(x.read_value().dtype, tf.float16)

    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_dtype_is_not_string(self, distribution):
        """dtype/true_dtype are tf.DType objects (never strings), in or out of scope."""
        with distribution.scope():
            x = get_var(1.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)
            self.assertEqual(x.dtype, tf.float32)
            self.assertIsInstance(x.dtype, tf.DType)
            self.assertEqual(x.true_dtype, tf.float32)
            self.assertIsInstance(x.true_dtype, tf.DType)

            dtype = tf.float16
            with autocast_variable.enable_auto_cast_variables(dtype):
                self.assertEqual(x.dtype, tf.float32)
                self.assertIsInstance(x.dtype, tf.DType)
                self.assertEqual(x.true_dtype, tf.float32)
                self.assertIsInstance(x.true_dtype, tf.DType)

    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_method_delegations(self, distribution):
        # Test AutoCastVariable correctly delegates Variable methods to the
        # underlying variable.
        with self.test_session(), distribution.scope():
            for read_dtype in (tf.float32, tf.float16):
                if tf.distribute.has_strategy() and not tf.executing_eagerly():
                    # MirroredVariable.assign will (incorrectly) return a
                    # Mirrored value instead of a MirroredVariable in graph
                    # mode. So we cannot properly wrap it in an
                    # AutoCastVariable.
                    evaluate = self.evaluate
                else:

                    def evaluate(var):
                        # Assign results must themselves be AutoCastVariables
                        # that read in the scope's dtype.
                        self.assertIsInstance(
                            var, autocast_variable.AutoCastVariable
                        )
                        self.assertEqual(tf.identity(var).dtype, read_dtype)
                        return self.evaluate(var)

                x = get_var(7.0, tf.float32)
                x = autocast_variable.create_autocast_variable(x)
                with autocast_variable.enable_auto_cast_variables(read_dtype):
                    self.evaluate(x.initializer)
                    self.assertEqual(self.evaluate(x.value()), 7)
                    self.assertEqual(self.evaluate(x.read_value()), 7)
                    self.assertTrue(x.trainable)
                    self.assertEqual(
                        x.synchronization, x._variable.synchronization
                    )
                    self.assertEqual(x.aggregation, x._variable.aggregation)
                    self.assertEqual(self.evaluate(x.initialized_value()), 7)
                    if not tf.executing_eagerly():
                        if not tf.distribute.has_strategy():
                            # These functions are not supported for
                            # DistributedVariables
                            x.load(9)
                            self.assertEqual(x.eval(), 9)
                        self.assertEqual(self.evaluate(x.initial_value), 7)
                        self.assertEqual(x.op, x._variable.op)
                        self.assertEqual(x.graph, x._variable.graph)
                    if not tf.distribute.has_strategy():
                        # These attributes are not supported for
                        # DistributedVariables
                        self.assertIsNone(x.constraint)
                        self.assertEqual(x.initializer, x._variable.initializer)
                    self.assertEqual(evaluate(x.assign(8)), 8)
                    self.assertEqual(evaluate(x.assign_add(2)), 10)
                    self.assertEqual(evaluate(x.assign_sub(3)), 7)
                    self.assertEqual(x.name, x._variable.name)
                    self.assertEqual(x.device, x._variable.device)
                    self.assertEqual(x.shape, ())
                    self.assertEqual(x.get_shape(), ())

                if not tf.distribute.has_strategy():
                    # Test scatter_* methods. These are not supported for
                    # DistributedVariables
                    x = get_var([7, 8], tf.float32)
                    x = autocast_variable.create_autocast_variable(x)
                    with autocast_variable.enable_auto_cast_variables(
                        read_dtype
                    ):
                        self.evaluate(x.initializer)
                        self.assertAllEqual(self.evaluate(x.value()), [7, 8])

                        def slices(val, index):
                            # Helper: a rank-1 IndexedSlices updating one entry.
                            return tf.IndexedSlices(
                                values=tf.constant(val, dtype=tf.float32),
                                indices=tf.constant(index, dtype=tf.int32),
                                dense_shape=tf.constant([2], dtype=tf.int32),
                            )

                        self.assertAllEqual(
                            evaluate(x.scatter_sub(slices(1.0, 0))), [6, 8]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_add(slices(1.0, 0))), [7, 8]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_max(slices(9.0, 1))), [7, 9]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_min(slices(8.0, 1))), [7, 8]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_mul(slices(2.0, 1))), [7, 16]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_div(slices(2.0, 1))), [7, 8]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_update(slices(4.0, 1))), [7, 4]
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_nd_sub([[0], [1]], [1.0, 2.0])),
                            [6, 2],
                        )
                        self.assertAllEqual(
                            evaluate(x.scatter_nd_add([[0], [1]], [1.0, 2.0])),
                            [7, 4],
                        )
                        self.assertAllEqual(
                            evaluate(
                                x.scatter_nd_update([[0], [1]], [1.0, 2.0])
                            ),
                            [1, 2],
                        )

    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_operator_overloads(self, distribution):
        """Arithmetic/comparison operators delegate and honor the autocast dtype."""
        with distribution.scope():
            for read_dtype in (tf.float32, tf.float16):
                x = get_var(7.0, tf.float32)
                x = autocast_variable.create_autocast_variable(x)
                with autocast_variable.enable_auto_cast_variables(read_dtype):
                    self.evaluate(x.initializer)
                    self.assertAlmostEqual(8, self.evaluate(x + 1))
                    self.assertAlmostEqual(10, self.evaluate(3 + x))
                    self.assertAlmostEqual(14, self.evaluate(x + x))
                    self.assertAlmostEqual(5, self.evaluate(x - 2))
                    self.assertAlmostEqual(6, self.evaluate(13 - x))
                    self.assertAlmostEqual(0, self.evaluate(x - x))
                    self.assertAlmostEqual(14, self.evaluate(x * 2))
                    self.assertAlmostEqual(21, self.evaluate(3 * x))
                    self.assertAlmostEqual(49, self.evaluate(x * x))
                    self.assertAlmostEqual(3.5, self.evaluate(x / 2))
                    self.assertAlmostEqual(1.5, self.evaluate(10.5 / x))
                    self.assertAlmostEqual(3, self.evaluate(x // 2))
                    self.assertAlmostEqual(2, self.evaluate(15 // x))
                    if read_dtype == tf.float32:
                        # The "mod" operator does not support float16
                        self.assertAlmostEqual(1, self.evaluate(x % 2))
                        self.assertAlmostEqual(2, self.evaluate(16 % x))
                    self.assertTrue(self.evaluate(x < 12))
                    self.assertTrue(self.evaluate(x <= 12))
                    self.assertFalse(self.evaluate(x > 12))
                    self.assertFalse(self.evaluate(x >= 12))
                    self.assertFalse(self.evaluate(12 < x))
                    self.assertFalse(self.evaluate(12 <= x))
                    self.assertTrue(self.evaluate(12 > x))
                    self.assertTrue(self.evaluate(12 >= x))
                    self.assertAlmostEqual(
                        343, self.evaluate(pow(x, 3)), places=4
                    )
                    self.assertAlmostEqual(
                        128, self.evaluate(pow(2, x)), places=4
                    )
                    self.assertAlmostEqual(-7, self.evaluate(-x))
                    self.assertAlmostEqual(7, self.evaluate(abs(x)))

                x = get_var([7, 8, 9], tf.float32)
                x = autocast_variable.create_autocast_variable(x)
                self.evaluate(x.initializer)
                self.assertEqual(self.evaluate(x[1]), 8)
                if tf.__internal__.tf2.enabled() and tf.executing_eagerly():
                    # Elementwise ==/!= only behave this way in TF2 eager mode.
                    self.assertAllEqual(
                        x == [7.0, 8.0, 10.0], [True, True, False]
                    )
                    self.assertAllEqual(
                        x != [7.0, 8.0, 10.0], [False, False, True]
                    )

    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_assign(self, distribution):
        """assign/assign_add/assign_sub always expect the true (float32) dtype."""
        with distribution.scope():
            x = get_var(0.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)
            self.evaluate(x.initializer)

            # outside of auto cast scope.
            v1 = tf.constant(3.0, dtype=tf.float32)
            v2 = tf.constant(3.0, dtype=tf.float16)

            def run_and_check():
                # Assign float32 values
                self.assertAllClose(3.0, self.evaluate(x.assign(v1)))
                self.assertAllClose(3.0 * 2, self.evaluate(x.assign_add(v1)))
                self.assertAllClose(3.0, self.evaluate(x.assign_sub(v1)))

                # Attempt to assign float16 values
                with self.assertRaisesRegex(
                    ValueError,
                    "conversion requested dtype float32 for Tensor with dtype "
                    "float16",
                ):
                    self.evaluate(x.assign(v2))
                with self.assertRaisesRegex(
                    ValueError,
                    "conversion requested dtype float32 for Tensor with dtype "
                    "float16",
                ):
                    self.evaluate(x.assign_add(v2))
                with self.assertRaisesRegex(
                    ValueError,
                    "conversion requested dtype float32 for Tensor with dtype "
                    "float16",
                ):
                    self.evaluate(x.assign_sub(v2))

                # Assign Python floats
                self.assertAllClose(0.0, self.evaluate(x.assign(0.0)))
                self.assertAllClose(3.0, self.evaluate(x.assign(3.0)))
                self.assertAllClose(3.0 * 2, self.evaluate(x.assign_add(3.0)))
                self.assertAllClose(3.0, self.evaluate(x.assign_sub(3.0)))

                # Assign multiple times
                # This currently doesn't work in graph mode if a strategy is
                # used
                if not tf.distribute.has_strategy() or tf.executing_eagerly():
                    assign = x.assign(1.0)
                    self.assertAllClose(1.0, self.evaluate(assign))
                    self.assertAllClose(0.0, self.evaluate(assign.assign(0.0)))
                    assign_add = x.assign_add(3.0)
                    self.assertAllClose(3.0, self.evaluate(assign_add))
                    self.assertAllClose(
                        3.0 * 3,
                        self.evaluate(x.assign_add(3.0).assign_add(3.0)),
                    )
                    self.assertAllClose(3.0 * 3, x)
                    assign_sub = x.assign_sub(3.0)
                    self.assertAllClose(3.0 * 2, self.evaluate(assign_sub))
                    self.assertAllClose(
                        0.0, self.evaluate(x.assign_sub(3.0).assign_sub(3.0))
                    )

                # Assign with read_value=False
                self.assertIsNone(
                    self.evaluate(x.assign(1.0, read_value=False))
                )
                self.assertAllClose(1.0, self.evaluate(x))
                self.assertIsNone(
                    self.evaluate(x.assign_add(2.0, read_value=False))
                )
                self.assertAllClose(3.0, self.evaluate(x))
                self.assertIsNone(
                    self.evaluate(x.assign_sub(3.0, read_value=False))
                )
                self.assertAllClose(0.0, self.evaluate(x))

                # Use the tf.assign functions instead of the var.assign methods.
                self.assertAllClose(
                    0.0, self.evaluate(tf.compat.v1.assign(x, 0.0))
                )
                self.assertAllClose(
                    3.0, self.evaluate(tf.compat.v1.assign(x, 3.0))
                )
                self.assertAllClose(
                    3.0 * 2, self.evaluate(tf.compat.v1.assign_add(x, 3.0))
                )
                self.assertAllClose(
                    3.0, self.evaluate(tf.compat.v1.assign_sub(x, 3.0))
                )

            run_and_check()
            # reset x
            self.evaluate(x.assign(0.0))
            # within auto cast scope.
            with autocast_variable.enable_auto_cast_variables(tf.float16):
                # assign still expect float32 value even if in float16 scope
                run_and_check()

    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_assign_tf_function(self, distribution):
        """Chained assigns inside a tf.function accumulate correctly."""
        if not tf.executing_eagerly():
            self.skipTest("Test is not compatible with graph mode")

        with distribution.scope():
            x = get_var(0.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)

            @tf.function
            def run_assign():
                return (
                    x.assign(1.0)
                    .assign_add(3.0)
                    .assign_add(3.0)
                    .assign_sub(2.0)
                )

            with autocast_variable.enable_auto_cast_variables(tf.float16):
                # 1 + 3 + 3 - 2 == 5
                self.assertAllClose(5.0, self.evaluate(run_assign()))

    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_op_attribute(self, distribution):
        """`.op` on the variable and on assign results matches tf.Variable semantics."""
        with distribution.scope():
            x = get_var(0.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)

            # Variable.op raises an AttributeError in Eager mode and is an op in
            # graph mode. Variable.assign(...).op is None in Eager mode and an
            # op in Graph mode or a tf.function. We test this is also true of
            # AutoCastVariable.
            if tf.executing_eagerly():
                with self.assertRaises(AttributeError):
                    x.op
                self.assertIsNone(x.assign(1.0).op)
                self.assertIsNone(x.assign_add(1.0).op)
                self.assertIsNone(x.assign_sub(1.0).op)
            else:
                self.assertIsNotNone(x.op)
                self.assertIsNotNone(x.assign(1.0).op)
                self.assertIsNotNone(x.assign_add(1.0).op)
                self.assertIsNotNone(x.assign_sub(1.0).op)

            @tf.function
            def func():
                self.assertIsNotNone(x.assign(1.0).op)
                self.assertIsNotNone(x.assign_add(1.0).op)
                self.assertIsNotNone(x.assign_sub(1.0).op)

            func()

    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_tf_function_control_dependencies(self, distribution):
        """Assign results work as control dependencies inside a tf.function."""
        if not tf.executing_eagerly():
            self.skipTest("Test is not compatible with graph mode")

        with distribution.scope():
            x = get_var(0.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)

            @tf.function
            def func():
                update = x.assign_add(1.0)
                with tf.control_dependencies([update]):
                    x.assign_add(1.0)

            func()
            # Both increments must have run, in order.
            self.assertAllClose(2.0, self.evaluate(x))

    @tf.__internal__.distribute.combinations.generate(maybe_distribute)
    def test_assign_stays_in_true_dtype(self, distribution):
        with distribution.scope():
            x = get_var(1.0, tf.float32)
            x = autocast_variable.create_autocast_variable(x)
            self.evaluate(x.initializer)
            # small_val is a value such that 1.0 + small_val == 1.0 in fp16, but
            # not in fp32
            small_val = np.finfo("float16").eps / 2
            small_tensor = tf.constant(small_val, dtype=tf.float32)
            with autocast_variable.enable_auto_cast_variables(tf.float16):
                # Variable should be increased, despite it appearing to be the
                # same
case, to replace # nom_val_str: could this be avoided while avoiding to # duplicate the formula for nom_val_str for the common # case (robust_format(...))? nom_val_str = r'%s\infty' % ('-' if nom_val_main < 0 else '') value_str = nom_val_str + value_end # Global width, if any: if fmt_parts['width']: # An individual alignment is needed: # Default alignment, for numbers: to the right (if no # alignment is specified, a string is aligned to the # left): value_str = robust_align( value_str, fmt_parts['fill'], fmt_parts['align'] or '>', fmt_parts['width']) else: # +/- notation: # The common exponent is factored or not, depending on the # width. This gives nice columns for the nominal values and # the errors (no shift due to a varying exponent), when a need # is given: any_exp_factored = not fmt_parts['width'] # True when the error part has any exponent directly attached # (case of an individual exponent for both the nominal value # and the error, when the error is a non-0, real number). # The goal is to avoid the strange notation nane-10, and to # avoid the 0e10 notation for an exactly zero uncertainty, # because .0e can give this for a non-zero error (the goal is # to have a zero uncertainty be very explicit): error_has_exp = not any_exp_factored and not special_error # Like error_has_exp, but only for real number handling # (there is no special meaning to a zero nominal value): nom_has_exp = not any_exp_factored and not isinfinite(nom_val_main) # Prefix for the parts: if fmt_parts['width']: # Individual widths # If zeros are needed, then the width is taken into # account now (before the exponent is added): if fmt_parts['zero']: width = int(fmt_parts['width']) # Remaining (minimum) width after including the # exponent: remaining_width = max(width-len(exp_str), 0) fmt_prefix_n = '%s%s%d%s' % ( fmt_parts['sign'], fmt_parts['zero'], remaining_width if nom_has_exp else width, fmt_parts['comma']) fmt_prefix_e = '%s%d%s' % ( fmt_parts['zero'], remaining_width if error_has_exp 
else width, fmt_parts['comma']) else: fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma'] fmt_prefix_e = fmt_parts['comma'] else: # Global width fmt_prefix_n = fmt_parts['sign']+fmt_parts['comma'] fmt_prefix_e = fmt_parts['comma'] ## print "ANY_EXP_FACTORED", any_exp_factored ## print "ERROR_HAS_EXP", error_has_exp ## print "NOM_HAS_EXP", nom_has_exp #################### # Nominal value formatting: # !! The following fails with Python < 2.6 when the format is # not accepted by the % operator. This can happen when # special_error is true, as the format used for the nominal # value is essentially the format provided by the user, which # may be empty: # print "FMT_PREFIX_N", fmt_prefix_n # print "FMT_SUFFIX_N", fmt_suffix_n nom_val_str = robust_format(nom_val_main, fmt_prefix_n+fmt_suffix_n) # print "NOM_VAL_STR", nom_val_str #################### # Error formatting: # !! Note: .0f applied to a float has no decimal point, but # this does not appear to be documented # (http://docs.python.org/2/library/string.html#format-specification-mini-language). This # feature is used anyway, because it allows a possible comma # format parameter to be handled more conveniently than if the # 'd' format was used. 
# # The following uses a special integer representation of a # zero uncertainty: if upper_main: # The handling of NaN/inf in the nominal value identical to # the handling of NaN/inf in the standard deviation: if (isinfinite(nom_val_main) # Only some formats have a nicer representation: and fmt_parts['type'] in ('', 'g', 'G')): # The error can be formatted independently: fmt_suffix_e = (fmt_parts['prec'] or '')+fmt_parts['type'] else: fmt_suffix_e = '.%d%s' % (prec, main_pres_type) else: fmt_suffix_e = '.0%s' % main_pres_type error_str_upper = robust_format(upper_main, fmt_prefix_e+fmt_suffix_e) if lower_main: # The handling of NaN/inf in the nominal value identical to # the handling of NaN/inf in the standard deviation: if (isinfinite(nom_val_main) # Only some formats have a nicer representation: and fmt_parts['type'] in ('', 'g', 'G')): # The error can be formatted independently: fmt_suffix_e = (fmt_parts['prec'] or '')+fmt_parts['type'] else: fmt_suffix_e = '.%d%s' % (prec, main_pres_type) else: fmt_suffix_e = '.0%s' % main_pres_type error_str_lower = robust_format(lower_main, fmt_prefix_e+fmt_suffix_e) ########## # Overriding of nom_val_str and error_str for LaTeX: if 'L' in options: if isnan(nom_val_main): nom_val_str = r'\mathrm{%s}' % nom_val_str elif isinf(nom_val_main): nom_val_str = r'%s\infty' % ('-' if nom_val_main < 0 else '') if isnan(upper_main): error_str_upper = r'\mathrm{%s}' % error_str_upper elif isinf(upper_main): error_str_upper = r'\infty' if isnan(lower_main): error_str_lower = r'\mathrm{%s}' % error_str_lower elif isinf(lower_main): error_str_lower = r'\infty' if nom_has_exp: nom_val_str += exp_str if error_has_exp: error_str_upper += exp_str error_str_lower += exp_str #################### # Final alignment of each field, if needed: if fmt_parts['width']: # An individual alignment is needed: # Default alignment, for numbers: to the right (if no # alignment is specified, a string is aligned to the # left): effective_align = fmt_parts['align'] 
or '>' # robust_format() is used because it may handle alignment # options, where the % operator does not: nom_val_str = robust_align( nom_val_str, fmt_parts['fill'], effective_align, fmt_parts['width']) error_str_upper = robust_align( error_str_upper, fmt_parts['fill'], effective_align, fmt_parts['width']) error_str_lower = robust_align( error_str_lower, fmt_parts['fill'], effective_align, fmt_parts['width']) #################### if 'P' in options: # Unicode has priority over LaTeX, so that users with a # Unicode-compatible LaTeX source can use ±: (LEFT_BRACKET, RIGHT_BRACKET) = '', '' pm_symbol = '±' p_symbol = '+' m_symbol = '-' elif 'L' in options: (LEFT_BRACKET, RIGHT_BRACKET) = '{', '}' pm_symbol = r' \pm ' p_symbol = r' ^' + LEFT_BRACKET + '+' m_symbol = r' _' + LEFT_BRACKET + '-' else: (LEFT_BRACKET, RIGHT_BRACKET) = '', '' pm_symbol = '+/-' p_symbol = '+' m_symbol = '-' #################### # Construction of the final value, value_str, possibly with # grouping (typically inside parentheses): (LEFT_GROUPING, RIGHT_GROUPING) = GROUP_SYMBOLS[print_type] # The nominal value and the error might have to be explicitly # grouped together with parentheses, so as to prevent an # ambiguous notation. This is done in parallel with the # percent sign handling because this sign may too need # parentheses. if any_exp_factored and common_exp is not None: value_str = ''.join(( LEFT_GROUPING, nom_val_str, p_symbol, error_str_upper, RIGHT_BRACKET, m_symbol, error_str_lower, RIGHT_BRACKET, RIGHT_GROUPING, exp_str, percent_str)) else: value_str = ''.join([ nom_val_str, p_symbol, error_str_upper, RIGHT_BRACKET, m_symbol, error_str_lower, RIGHT_BRACKET]) if percent_str: value_str = ''.join(( LEFT_GROUPING, value_str, RIGHT_GROUPING, percent_str)) return value_str def uformat(nominal_value, std_dev, format_spec=''): """Formats a number with uncertainty. 
The format specification are the same as for format() for floats, as defined for Python 2.6+ (restricted to what the % operator accepts, if using an earlier version of Python), except that the n presentation type is not supported. In particular, the usual precision, alignment, sign flag, etc. can be used. The behavior of the various presentation types (e, f, g, none, etc.) is similar. Moreover, the format is extended: the number of digits of the uncertainty can be controlled, as is the way the uncertainty is indicated (with +/- or with the short-hand notation 3.14(1), in LaTeX or with a simple text string,...). Beyond the use of options at the end of the format specification, the main difference with floats is that a "u" just before the presentation type (f, e, g, none, etc.) activates the "uncertainty control" mode (e.g.: ".6u"). This mode is also activated when not using any explicit precision (e.g.: "g", "10f", "+010,e" format specifications). If the uncertainty does not have a meaningful number of significant digits (0 and NaN uncertainties), this mode is automatically deactivated. The nominal value and the uncertainty always use the same precision. This implies trailing zeros, in general, even with the g format type (contrary to the float case). However, when the number of significant digits of the uncertainty is not defined (zero or NaN uncertainty), it has no precision, so there is no matching. In this case, the original format specification is used for the nominal value (any "u" is ignored). Any precision (".p", where p is a number) is interpreted (if meaningful), in the uncertainty control mode, as indicating the number p of significant digits of the displayed uncertainty. Example: .1uf will return a string with one significant digit in the uncertainty (and no exponent). If no precision is given, the rounding rules from the Particle Data Group are used, if possible (http://pdg.lbl.gov/2010/reviews/rpp2010-rev-rpp-intro.pdf). For
"""Plotting methods for model evaluation. This module can be used to evaluate any kind of weather model (machine learning, NWP, heuristics, human forecasting, etc.). This module is completely agnostic of where the forecasts come from. --- REFERENCES --- <NAME>., and <NAME>, 1986: "The attributes diagram: A geometrical framework for assessing the quality of probability forecasts". International Journal of Forecasting, 2 (3), 285-293. """ import numpy from descartes import PolygonPatch from matplotlib import pyplot import matplotlib.colors from gewittergefahr.gg_utils import model_evaluation as model_eval from gewittergefahr.gg_utils import polygons from gewittergefahr.gg_utils import number_rounding as rounder from gewittergefahr.gg_utils import error_checking from gewittergefahr.plotting import plotting_utils # TODO(thunderhoser): Variable and method names are way too verbose. ROC_CURVE_COLOUR = numpy.array([228, 26, 28], dtype=float) / 255 ROC_CURVE_WIDTH = 3. RANDOM_ROC_COLOUR = numpy.full(3, 152. / 255) RANDOM_ROC_WIDTH = 2. PERF_DIAGRAM_COLOUR = numpy.array([228, 26, 28], dtype=float) / 255 PERF_DIAGRAM_WIDTH = 3. FREQ_BIAS_COLOUR = numpy.full(3, 152. / 255) FREQ_BIAS_WIDTH = 2. FREQ_BIAS_STRING_FORMAT = '%.2f' FREQ_BIAS_PADDING = 10 FREQ_BIAS_LEVELS = numpy.array([0.25, 0.5, 0.75, 1, 1.5, 2, 3, 5]) CSI_LEVELS = numpy.linspace(0, 1, num=11, dtype=float) PEIRCE_SCORE_LEVELS = numpy.linspace(0, 1, num=11, dtype=float) RELIABILITY_COLOUR = numpy.array([228, 26, 28], dtype=float) / 255 RELIABILITY_WIDTH = 3. PERFECT_RELIA_COLOUR = numpy.full(3, 152. / 255) PERFECT_RELIA_WIDTH = 2. ZERO_BSS_COLOUR = numpy.array([31, 120, 180], dtype=float) / 255 ZERO_BSS_LINE_WIDTH = 2. CLIMO_COLOUR = numpy.full(3, 152. / 255) CLIMO_LINE_WIDTH = 2. BAR_FACE_COLOUR = numpy.array([228, 26, 28], dtype=float) / 255 BAR_EDGE_COLOUR = numpy.full(3, 0.) BAR_EDGE_WIDTH = 2. 
HISTOGRAM_LEFT_EDGE = 0.2 HISTOGRAM_BOTTOM_EDGE = 0.575 HISTOGRAM_AXES_WIDTH = 0.25 HISTOGRAM_AXES_HEIGHT = 0.25 HISTOGRAM_X_VALUES = numpy.linspace(0., 1., num=6) HISTOGRAM_Y_SPACING = 0.1 POLYGON_OPACITY = 0.5 POSITIVE_BSS_OPACITY = 0.2 FONT_SIZE = 30 pyplot.rc('font', size=FONT_SIZE) pyplot.rc('axes', titlesize=FONT_SIZE) pyplot.rc('axes', labelsize=FONT_SIZE) pyplot.rc('xtick', labelsize=FONT_SIZE) pyplot.rc('ytick', labelsize=FONT_SIZE) pyplot.rc('legend', fontsize=FONT_SIZE) pyplot.rc('figure', titlesize=FONT_SIZE) def _get_csi_colour_scheme(): """Returns colour scheme for CSI (critical success index). :return: colour_map_object: Colour scheme (instance of `matplotlib.colors.ListedColormap`). :return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`, defining the scale of the colour map. """ this_colour_map_object = pyplot.get_cmap('Blues') this_colour_norm_object = matplotlib.colors.BoundaryNorm( CSI_LEVELS, this_colour_map_object.N) rgba_matrix = this_colour_map_object(this_colour_norm_object(CSI_LEVELS)) colour_list = [ rgba_matrix[i, ..., :-1] for i in range(rgba_matrix.shape[0]) ] colour_map_object = matplotlib.colors.ListedColormap(colour_list) colour_map_object.set_under(numpy.full(3, 1.)) colour_norm_object = matplotlib.colors.BoundaryNorm( CSI_LEVELS, colour_map_object.N) return colour_map_object, colour_norm_object def _get_peirce_colour_scheme(): """Returns colour scheme for Peirce score. :return: colour_map_object: Colour scheme (instance of `matplotlib.colors.ListedColormap`). :return: colour_norm_object: Instance of `matplotlib.colors.BoundaryNorm`, defining the scale of the colour map. 
""" this_colour_map_object = pyplot.get_cmap('Blues') this_colour_norm_object = matplotlib.colors.BoundaryNorm( PEIRCE_SCORE_LEVELS, this_colour_map_object.N) rgba_matrix = this_colour_map_object( this_colour_norm_object(PEIRCE_SCORE_LEVELS) ) colour_list = [ rgba_matrix[i, ..., :-1] for i in range(rgba_matrix.shape[0]) ] colour_map_object = matplotlib.colors.ListedColormap(colour_list) colour_map_object.set_under(numpy.full(3, 1.)) colour_norm_object = matplotlib.colors.BoundaryNorm( PEIRCE_SCORE_LEVELS, colour_map_object.N) return colour_map_object, colour_norm_object def _confidence_interval_to_polygon( x_coords_bottom, y_coords_bottom, x_coords_top, y_coords_top, for_performance_diagram=False): """Generates polygon for confidence interval. P = number of points in bottom curve = number of points in top curve :param x_coords_bottom: length-P numpy with x-coordinates of bottom curve (lower end of confidence interval). :param y_coords_bottom: Same but for y-coordinates. :param x_coords_top: length-P numpy with x-coordinates of top curve (upper end of confidence interval). :param y_coords_top: Same but for y-coordinates. :param for_performance_diagram: Boolean flag. If True, confidence interval is for a performance diagram, which means that coordinates will be sorted in a slightly different way. :return: polygon_object: Instance of `shapely.geometry.Polygon`. 
""" nan_flags_top = numpy.logical_or( numpy.isnan(x_coords_top), numpy.isnan(y_coords_top) ) if numpy.all(nan_flags_top): return None nan_flags_bottom = numpy.logical_or( numpy.isnan(x_coords_bottom), numpy.isnan(y_coords_bottom) ) if numpy.all(nan_flags_bottom): return None real_indices_top = numpy.where(numpy.invert(nan_flags_top))[0] real_indices_bottom = numpy.where(numpy.invert(nan_flags_bottom))[0] if for_performance_diagram: y_coords_top = y_coords_top[real_indices_top] sort_indices_top = numpy.argsort(y_coords_top) y_coords_top = y_coords_top[sort_indices_top] x_coords_top = x_coords_top[real_indices_top][sort_indices_top] y_coords_bottom = y_coords_bottom[real_indices_bottom] sort_indices_bottom = numpy.argsort(-y_coords_bottom) y_coords_bottom = y_coords_bottom[sort_indices_bottom] x_coords_bottom = x_coords_bottom[real_indices_bottom][ sort_indices_bottom ] else: x_coords_top = x_coords_top[real_indices_top] sort_indices_top = numpy.argsort(-x_coords_top) x_coords_top = x_coords_top[sort_indices_top] y_coords_top = y_coords_top[real_indices_top][sort_indices_top] x_coords_bottom = x_coords_bottom[real_indices_bottom] sort_indices_bottom = numpy.argsort(x_coords_bottom) x_coords_bottom = x_coords_bottom[sort_indices_bottom] y_coords_bottom = y_coords_bottom[real_indices_bottom][ sort_indices_bottom ] polygon_x_coords = numpy.concatenate(( x_coords_top, x_coords_bottom, numpy.array([x_coords_top[0]]) )) polygon_y_coords = numpy.concatenate(( y_coords_top, y_coords_bottom, numpy.array([y_coords_top[0]]) )) return polygons.vertex_arrays_to_polygon_object( polygon_x_coords, polygon_y_coords) def _plot_background_of_attributes_diagram(axes_object, climatology): """Plots background (references lines and polygons) of attributes diagram. For more on the attributes diagram, see Hsu and Murphy (1986). BSS = Brier skill score. For more on the BSS, see `model_evaluation.get_brier_skill_score`. :param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`. 
:param climatology: Event frequency for the entire dataset. """ error_checking.assert_is_geq(climatology, 0.) error_checking.assert_is_leq(climatology, 1.) (x_coords_left_skill_area, y_coords_left_skill_area, x_coords_right_skill_area, y_coords_right_skill_area ) = model_eval.get_skill_areas_in_reliability_curve(climatology) skill_area_colour = matplotlib.colors.to_rgba( plotting_utils.colour_from_numpy_to_tuple(ZERO_BSS_COLOUR), POSITIVE_BSS_OPACITY ) left_polygon_object = polygons.vertex_arrays_to_polygon_object( x_coords_left_skill_area, y_coords_left_skill_area ) left_polygon_patch = PolygonPatch( left_polygon_object, lw=0, ec=skill_area_colour, fc=skill_area_colour ) axes_object.add_patch(left_polygon_patch) right_polygon_object = polygons.vertex_arrays_to_polygon_object( x_coords_right_skill_area, y_coords_right_skill_area ) right_polygon_patch = PolygonPatch( right_polygon_object, lw=0, ec=skill_area_colour, fc=skill_area_colour ) axes_object.add_patch(right_polygon_patch) no_skill_x_coords, no_skill_y_coords = ( model_eval.get_no_skill_reliability_curve(climatology) ) axes_object.plot( no_skill_x_coords, no_skill_y_coords, color=plotting_utils.colour_from_numpy_to_tuple(ZERO_BSS_COLOUR), linestyle='solid', linewidth=ZERO_BSS_LINE_WIDTH ) climo_x_coords, climo_y_coords = ( model_eval.get_climatology_line_for_reliability_curve(climatology) ) axes_object.plot( climo_x_coords, climo_y_coords, color=plotting_utils.colour_from_numpy_to_tuple(CLIMO_COLOUR), linestyle='dashed', linewidth=CLIMO_LINE_WIDTH ) no_resolution_x_coords, no_resolution_y_coords = ( model_eval.get_no_resolution_line_for_reliability_curve(climatology) ) axes_object.plot( no_resolution_x_coords, no_resolution_y_coords, color=plotting_utils.colour_from_numpy_to_tuple(CLIMO_COLOUR), linestyle='dashed', linewidth=CLIMO_LINE_WIDTH ) def _plot_inset_histogram_for_attributes_diagram( figure_object, num_examples_by_bin): """Plots forecast histogram inset in attributes diagram. 
For more on the attributes diagram, see Hsu and Murphy (1986). B = number of forecast bins :param figure_object: Instance of `matplotlib.figure.Figure`. :param num_examples_by_bin: length-B numpy array with number of examples in each forecast bin. """ error_checking.assert_is_integer_numpy_array(num_examples_by_bin) error_checking.assert_is_numpy_array(num_examples_by_bin, num_dimensions=1) error_checking.assert_is_geq_numpy_array(num_examples_by_bin, 0) num_forecast_bins = len(num_examples_by_bin) error_checking.assert_is_geq(num_forecast_bins, 2) example_frequency_by_bin = ( num_examples_by_bin.astype(float) / numpy.sum(num_examples_by_bin) ) forecast_bin_edges = numpy.linspace(0., 1., num=num_forecast_bins + 1) forecast_bin_width = forecast_bin_edges[1] - forecast_bin_edges[0] forecast_bin_centers = forecast_bin_edges[:-1] + forecast_bin_width / 2 inset_axes_object = figure_object.add_axes([ HISTOGRAM_LEFT_EDGE, HISTOGRAM_BOTTOM_EDGE, HISTOGRAM_AXES_WIDTH, HISTOGRAM_AXES_HEIGHT ]) inset_axes_object.bar( forecast_bin_centers, example_frequency_by_bin, forecast_bin_width, color=plotting_utils.colour_from_numpy_to_tuple(BAR_FACE_COLOUR), edgecolor=plotting_utils.colour_from_numpy_to_tuple(BAR_EDGE_COLOUR), linewidth=BAR_EDGE_WIDTH ) max_y_tick_value = rounder.floor_to_nearest( 1.05 * numpy.max(example_frequency_by_bin), HISTOGRAM_Y_SPACING ) num_y_ticks = 1 + int(numpy.round( max_y_tick_value / HISTOGRAM_Y_SPACING )) y_tick_values = numpy.linspace(0., max_y_tick_value, num=num_y_ticks) pyplot.xticks(HISTOGRAM_X_VALUES, axes=inset_axes_object) pyplot.yticks(y_tick_values, axes=inset_axes_object) inset_axes_object.set_xlim(0., 1.) inset_axes_object.set_ylim(0., 1.05 * numpy.max(example_frequency_by_bin)) inset_axes_object.set_title('Forecast histogram', fontsize=20) def plot_roc_curve(axes_object, pod_by_threshold, pofd_by_threshold, line_colour=ROC_CURVE_COLOUR, plot_background=True): """Plots ROC (receiver operating characteristic) curve. 
T = number of binarization thresholds For the definition of a "binarization threshold" and the role they play in ROC curves, see `model_evaluation.get_points_in_roc_curve`. :param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`. :param pod_by_threshold: length-T numpy array of POD (probability of detection) values. :param pofd_by_threshold: length-T numpy array of POFD (probability of false detection) values. :param line_colour: Line colour. :param plot_background: Boolean flag. If True, will plot background (reference line and Peirce-score contours). :return: line_handle: Line handle for ROC curve. """ error_checking.assert_is_numpy_array(pod_by_threshold, num_dimensions=1) error_checking.assert_is_geq_numpy_array( pod_by_threshold, 0., allow_nan=True) error_checking.assert_is_leq_numpy_array( pod_by_threshold, 1., allow_nan=True) num_thresholds = len(pod_by_threshold) expected_dim = numpy.array([num_thresholds], dtype=int) error_checking.assert_is_numpy_array( pofd_by_threshold, exact_dimensions=expected_dim) error_checking.assert_is_geq_numpy_array( pofd_by_threshold, 0., allow_nan=True) error_checking.assert_is_leq_numpy_array( pofd_by_threshold, 1., allow_nan=True) error_checking.assert_is_boolean(plot_background) if plot_background: pofd_matrix, pod_matrix = model_eval.get_pofd_pod_grid() peirce_score_matrix = pod_matrix - pofd_matrix this_colour_map_object, this_colour_norm_object = ( _get_peirce_colour_scheme() ) pyplot.contourf( pofd_matrix, pod_matrix, peirce_score_matrix, CSI_LEVELS, cmap=this_colour_map_object, norm=this_colour_norm_object, vmin=0., vmax=1., axes=axes_object) colour_bar_object = plotting_utils.plot_colour_bar( axes_object_or_matrix=axes_object, data_matrix=peirce_score_matrix, colour_map_object=this_colour_map_object, colour_norm_object=this_colour_norm_object, orientation_string='vertical', extend_min=False, extend_max=False, fraction_of_axis_length=0.8) colour_bar_object.set_label('Peirce score (POD minus POFD)') 
        random_x_coords, random_y_coords = model_eval.get_random_roc_curve()

        # Dashed diagonal: the no-skill (random-forecast) ROC reference line.
        axes_object.plot(
            random_x_coords, random_y_coords,
            color=plotting_utils.colour_from_numpy_to_tuple(RANDOM_ROC_COLOUR),
            linestyle='dashed', linewidth=RANDOM_ROC_WIDTH
        )

    # Thresholds where either coordinate is NaN cannot be plotted; find them.
    nan_flags = numpy.logical_or(
        numpy.isnan(pofd_by_threshold), numpy.isnan(pod_by_threshold)
    )

    if numpy.all(nan_flags):
        # Every point is NaN -- nothing to draw, so no line handle.
        line_handle = None
    else:
        real_indices = numpy.where(numpy.invert(nan_flags))[0]
        line_handle = axes_object.plot(
            pofd_by_threshold[real_indices], pod_by_threshold[real_indices],
            color=plotting_utils.colour_from_numpy_to_tuple(line_colour),
            linestyle='solid', linewidth=ROC_CURVE_WIDTH
        )[0]

    axes_object.set_xlabel('POFD (probability of false detection)')
    axes_object.set_ylabel('POD (probability of detection)')
    axes_object.set_xlim(0., 1.)
    axes_object.set_ylim(0., 1.)

    return line_handle


def plot_bootstrapped_roc_curve(
        axes_object, ci_bottom_dict, ci_mean_dict, ci_top_dict,
        line_colour=ROC_CURVE_COLOUR, plot_background=True):
    """Bootstrapped version of plot_roc_curve.

    Plots the mean ROC curve plus a shaded polygon spanning the
    bottom-to-top confidence interval.

    T = number of probability thresholds in curve

    :param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
    :param ci_bottom_dict: Dictionary with the following keys, representing
        the bottom of the confidence interval.
    ci_bottom_dict['pod_by_threshold']: length-T numpy array of POD values
        (probability of detection).
    ci_bottom_dict['pofd_by_threshold']: length-T numpy array of POFD values
        (probability of false detection).
    :param ci_mean_dict: Same but for mean of confidence interval.
    :param ci_top_dict: Same but for top of confidence interval.
    :param line_colour: See doc for `plot_roc_curve`.
    :param plot_background: Same.
    :return: line_handle: Same.
    """

    # Plot the mean curve exactly like a non-bootstrapped ROC curve.
    line_handle = plot_roc_curve(
        axes_object=axes_object,
        pod_by_threshold=ci_mean_dict[model_eval.POD_BY_THRESHOLD_KEY],
        pofd_by_threshold=ci_mean_dict[model_eval.POFD_BY_THRESHOLD_KEY],
        line_colour=line_colour, plot_background=plot_background
    )

    # Build a polygon enclosing the area between bottom and top CI curves.
    polygon_object = _confidence_interval_to_polygon(
        x_coords_bottom=ci_bottom_dict[model_eval.POFD_BY_THRESHOLD_KEY],
        y_coords_bottom=ci_bottom_dict[model_eval.POD_BY_THRESHOLD_KEY],
        x_coords_top=ci_top_dict[model_eval.POFD_BY_THRESHOLD_KEY],
        y_coords_top=ci_top_dict[model_eval.POD_BY_THRESHOLD_KEY]
    )

    if polygon_object is None:
        # Not enough real (non-NaN) points for a polygon; mean curve only.
        return line_handle

    # Shade the confidence envelope in a translucent version of line_colour.
    polygon_colour = matplotlib.colors.to_rgba(
        plotting_utils.colour_from_numpy_to_tuple(line_colour),
        POLYGON_OPACITY
    )
    polygon_patch = PolygonPatch(
        polygon_object, lw=0, ec=polygon_colour, fc=polygon_colour)
    axes_object.add_patch(polygon_patch)

    return line_handle


def plot_performance_diagram(
        axes_object, pod_by_threshold, success_ratio_by_threshold,
        line_colour=PERF_DIAGRAM_COLOUR, plot_background=True):
    """Plots performance diagram.

    T = number of binarization thresholds

    For the definition of a "binarization threshold" and the role they play in
    performance diagrams, see
    `model_evaluation.get_points_in_performance_diagram`.

    :param axes_object: Instance of `matplotlib.axes._subplots.AxesSubplot`.
    :param pod_by_threshold: length-T numpy array of POD (probability of
        detection) values.
    :param success_ratio_by_threshold: length-T numpy array of success ratios.
    :param line_colour: Line colour.
    :param plot_background: Boolean flag.  If True, will plot background
        (frequency-bias and CSI contours).
    :return: line_handle: Line handle for ROC curve.
""" error_checking.assert_is_numpy_array(pod_by_threshold, num_dimensions=1) error_checking.assert_is_geq_numpy_array( pod_by_threshold, 0., allow_nan=True) error_checking.assert_is_leq_numpy_array( pod_by_threshold, 1., allow_nan=True) num_thresholds = len(pod_by_threshold) expected_dim = numpy.array([num_thresholds], dtype=int) error_checking.assert_is_numpy_array( success_ratio_by_threshold, exact_dimensions=expected_dim) error_checking.assert_is_geq_numpy_array( success_ratio_by_threshold, 0., allow_nan=True) error_checking.assert_is_leq_numpy_array( success_ratio_by_threshold, 1., allow_nan=True) error_checking.assert_is_boolean(plot_background) if plot_background: success_ratio_matrix, pod_matrix = model_eval.get_sr_pod_grid() csi_matrix = model_eval.csi_from_sr_and_pod( success_ratio_array=success_ratio_matrix, pod_array=pod_matrix ) frequency_bias_matrix = model_eval.frequency_bias_from_sr_and_pod( success_ratio_array=success_ratio_matrix, pod_array=pod_matrix ) this_colour_map_object, this_colour_norm_object =
85 }, ] }, { 'id': 'mount_2', 'title': 'Mount_2', 'val': 40 }, ] } if InstData.inst_info[id_now]['type'] == 'LST': inst_health[id_now]['mirror'] = { 'id': 'mirror', 'title': 'Mirror', 'val': 10, 'children': [ { 'id': 'mirror_0', 'title': 'Mirror_0', 'val': 3 }, { 'id': 'mirror_1', 'title': 'Mirror_1', 'val': 78, 'children': [ { 'id': 'mirror_1_1', 'title': 'Mirror_0', 'val': 3 }, { 'id': 'mirror_1_0', 'title': 'Mirror_1_0', 'val': 28, # 'children': [ # { # 'id': 'mirror_1_0_0', # 'title': 'Mirror_1_0_0', # 'val': 90 # }, # { # 'id': 'mirror_1_0_1', # 'title': 'Mirror_1_0_1', # 'val': 90 # }, # ] }, ] }, ] } # else: # inst_health[id_now]['mirror'] = { # 'id': 'mirror', 'title': 'Mirror', 'val': 10, # 'children': [ # {'id': 'mirror_0', 'title': 'Mirror_0', 'val': 3}, # {'id': 'mirror_1', 'title': 'Mirror_1', 'val': 78}, # ] # } inst_health[id_now]['daq'] = { 'id': 'daq', 'title': 'DAQ', 'val': 87, 'children': [ { 'id': 'daq_3', 'title': 'DAQ_3', 'val': 50 }, { 'id': 'daq_7', 'title': 'DAQ_7', 'val': 50, 'children': [ { 'id': 'daq_7_0', 'title': 'DAQ_7_0', 'val': 10 }, { 'id': 'daq_7_1', 'title': 'DAQ_7_1', 'val': 20 }, { 'id': 'daq_7_2', 'title': 'DAQ_7_2', 'val': 85 }, { 'id': 'daq_7_3', 'title': 'DAQ_7_3', 'val': 85 }, { 'id': 'daq_7_4', 'title': 'DAQ_7_4', 'val': 85 }, { 'id': 'daq_7_5', 'title': 'DAQ_7_5', 'val': 85 }, { 'id': 'daq_7_6', 'title': 'DAQ_7_6', 'val': 85 }, ] }, { 'id': 'daq_8', 'title': 'DAQ_8', 'val': 50, 'children': [ { 'id': 'daq_8_0', 'title': 'DAQ_8_0', 'val': 10 }, { 'id': 'daq_8_1', 'title': 'DAQ_8_1', 'val': 90 }, { 'id': 'daq_8_2', 'title': 'DAQ_8_2', 'val': 85 }, { 'id': 'daq_8_30000', 'title': 'DAQ_8_30000', 'val': 85 }, ] }, ] } if InstData.inst_info[id_now]['type'] == 'LST': inst_health[id_now]['aux'] = { 'id': 'aux', 'title': 'Aux', 'val': 70, 'children': [ { 'id': 'aux_0', 'title': 'Aux_0', 'val': 90 }, { 'id': 'aux_1', 'title': 'Aux_1', 'val': 78, 'children': [ { 'id': 'aux_1_0', 'title': 'Aux_1_0', 'val': 10 }, { 'id': 'aux_1_4', 
'title': 'Aux_1_4', 'val': 85 }, ] }, { 'id': 'aux_3', 'title': 'Aux_3', 'val': 78, 'children': [ { 'id': 'aux_3_0', 'title': 'Aux_3_0', 'val': 90 }, { 'id': 'aux_3_1', 'title': 'Aux_3_1', 'val': 15 }, { 'id': 'aux_3_2', 'title': 'Aux_3_2', 'val': 5 }, ] }, ] } # ------------------------------------------------------------------ for id_now in aux_ids: inst_health[id_now] = dict() inst_health[id_now]['inst_0'] = { 'id': 'inst_0', 'title': 'Inst_0', 'val': 20, 'children': [ { 'id': 'inst_00', 'title': 'Inst_00', 'val': 100 }, { 'id': 'inst_01', 'title': 'Inst_01', 'val': 10, 'children': [ { 'id': 'inst_01_0', 'title': 'Inst_01_0', 'val': 3 }, { 'id': 'inst_01_1', 'title': 'Inst_01_1', 'val': 78 }, ] }, { 'id': 'inst_06', 'title': 'Inst_06', 'val': 80 }, { 'id': 'inst_08', 'title': 'Inst_08', 'val': 80 }, ] } inst_health[id_now]['inst_1'] = { 'id': 'inst_1', 'title': 'Inst_1', 'val': 20, 'children': [ { 'id': 'inst_10', 'title': 'Inst_10', 'val': 100 }, { 'id': 'inst_11', 'title': 'Inst_11', 'val': 10, 'children': [ { 'id': 'inst_11_0', 'title': 'Inst_11_0', 'val': 3 }, { 'id': 'inst_11_1', 'title': 'Inst_11_1', 'val': 78 }, { 'id': 'inst_11_2', 'title': 'Inst_11_2', 'val': 78 }, ] }, { 'id': 'inst_18', 'title': 'Inst_18', 'val': 80 }, ] } # ------------------------------------------------------------------ for id_now in proc_ids: inst_health[id_now] = dict() inst_health[id_now]['prc_0'] = { 'id': 'prc_0', 'title': 'Prc_0', 'val': 20, 'children': [ { 'id': 'prc_00', 'title': 'Prc_00', 'val': 100 }, { 'id': 'prc_01', 'title': 'Prc_01', 'val': 10, 'children': [ { 'id': 'prc_01_0', 'title': 'Prc_01_0', 'val': 3 }, { 'id': 'prc_01_1', 'title': 'Prc_01_1', 'val': 78 }, { 'id': 'prc_01_2', 'title': 'Prc_01_1', 'val': 78 }, ] }, { 'id': 'prc_02', 'title': 'Prc_02', 'val': 10, 'children': [ { 'id': 'prc_02_0', 'title': 'Prc_02_0', 'val': 3 }, { 'id': 'prc_02_1', 'title': 'Prc_02_1', 'val': 78 }, ] }, { 'id': 'prc_03', 'title': 'Prc_03', 'val': 80 }, { 'id': 'prc_04', 'title': 
'Prc_04', 'val': 80 }, { 'id': 'prc_05', 'title': 'Prc_05', 'val': 80 }, ] } inst_health[id_now]['prc_1'] = { 'id': 'prc_1', 'title': 'Prc_1', 'val': 20, 'children': [ { 'id': 'prc_11', 'title': 'Prc_11', 'val': 10, 'children': [ { 'id': 'prc_11_1', 'title': 'Prc_11_1', 'val': 78 }, { 'id': 'prc_11_2', 'title': 'Prc_11_2', 'val': 78 }, ] }, { 'id': 'prc_15', 'title': 'Prc_15', 'val': 80 }, { 'id': 'prc_16', 'title': 'Prc_16', 'val': 10, 'children': [ { 'id': 'prc_16_1', 'title': 'Prc_16_1', 'val': 78 }, { 'id': 'prc_16_2', 'title': 'Prc_16_2', 'val': 78 }, { 'id': 'prc_16_4', 'title': 'Prc_16_4', 'val': 78 }, ] }, ] } # ------------------------------------------------------------------ InstData.inst_health = inst_health # ------------------------------------------------------------------ # add entries for the 3rd level of the hierarchy, given range_full_props ~100, # we get ~200k properties overall for the South range_full_props = [80, 120] inst_health_deep = dict() for (inst_id, inst) in inst_health.items(): inst_health_deep[inst_id] = dict() for (field_id, data) in inst.items(): if 'children' in data: for child_0 in data['children']: if 'children' in child_0: for child_1 in child_0['children']: child_id = child_1['id'] child_title = child_1['title'] # child_val = child_1['val'] inst_health_deep[inst_id][child_id] = [] for n_prop in range(rnd_gen.randint(*range_full_props)): if rnd_gen.random() < 0.2: val = rnd_gen.randint(11, 40) else: val = rnd_gen.randint(60, 99) inst_health_deep[inst_id][child_id] += [{ 'id': (child_id + '_' + str(n_prop)), 'title': (child_title + '_' + str(n_prop)), 'val': val }] InstData.inst_health_deep = inst_health_deep return # ------------------------------------------------------------------ def get_inst_info(self): while not InstData.has_init: sleep(0.01) return InstData.inst_info # ------------------------------------------------------------------ def get_tel_type(self, tel_id): try: tel_type = InstData.inst_info[tel_id]['type'] 
        except Exception:
            self.log.critical([
                ['wr', ' - cant do get_tel_type(', str(tel_id), ')...'],
                ['wr', ' --> Will terminate!'],
            ])
            raise Exception()

        return tel_type

    # ------------------------------------------------------------------
    def is_tel_type(self, tel_id, comp_type):
        """Return True if telescope `tel_id` is of type `comp_type`."""
        is_type = (self.get_tel_type(tel_id) == comp_type)
        return is_type

    # ------------------------------------------------------------------
    def set_inst_id_to_types(self):
        """Cache the telescope-id -> type mapping for all instrument ids."""
        InstData.tel_id_to_types = dict()
        for tel_id in InstData.inst_ids:
            InstData.tel_id_to_types[tel_id] = self.get_tel_type(tel_id)
        return

    # ------------------------------------------------------------------
    def get_allowed_sub_arrays(self, sa_id, include_self=False):
        """Return ids of sub-arrays sharing no telescope with `sa_id`.

        A sub-array is "allowed" iff its telescope set is disjoint from
        sa_id's set (the union size equals the sum of both set sizes).
        """
        # NOTE(review): busy-wait with no timeout -- blocks forever if
        # initialisation never completes; confirm this is acceptable.
        while not InstData.has_init:
            sleep(0.01)

        sub_array_tels = InstData.sub_array_tels

        allowed_sub_arrays = []
        tel_ids = sub_array_tels[sa_id]
        n_tels = len(tel_ids)
        for (check_sa_id, check_tel_ids) in sub_array_tels.items():
            if sa_id == check_sa_id and not include_self:
                continue
            check_n_tels = len(check_tel_ids)
            # Disjointness test: union size == sum of sizes.
            common_ids = set(tel_ids + check_tel_ids)
            if len(common_ids) == n_tels + check_n_tels:
                allowed_sub_arrays += [check_sa_id]

        return allowed_sub_arrays

    # ------------------------------------------------------------------
    def get_inst_id_to_types(self, is_copy=True):
        """Return the cached id -> type mapping (deep copy by default)."""
        while not InstData.has_init:
            sleep(0.01)
        out = InstData.tel_id_to_types
        if is_copy:
            out = copy.deepcopy(out)
        return out

    # ------------------------------------------------------------------
    def get_categorical_types(self, is_copy=True):
        """Return the categorical instrument types (deep copy by default)."""
        while not InstData.has_init:
            sleep(0.01)
        out = InstData.categorical_types
        if is_copy:
            out = copy.deepcopy(out)
        return out

    # ------------------------------------------------------------------
    def get_inst_ids(self, inst_types=None, is_copy=True):
        """Return instrument ids, optionally filtered by type(s).

        `inst_types` may be a single type string or a list of them;
        None returns all ids.
        """
        n_tries, max_n_tries = 0, 1e3
        try:
            while not InstData.has_init:
                sleep(0.01)
                n_tries += 1
                if n_tries > max_n_tries:
                    # Bare raise with no active exception -> RuntimeError,
                    # caught below; acts as an init-timeout escape hatch.
                    raise
        except Exception:
            self.log.critical([
                ['wr', ' - cant do get_inst_ids(', inst_types, ')...'],
                ['wr', ' --> Will terminate!'],
            ])
            raise Exception()

        if inst_types is None:
            inst_ids = InstData.inst_ids
            if is_copy:
                inst_ids = copy.deepcopy(inst_ids)
        else:
            # Accept a single type string or a list of type strings.
            if isinstance(inst_types, str):
                inst_types = [inst_types]
            inst_ids = [
                i for i in InstData.inst_ids
                if any(self.is_tel_type(i, inst_type) for inst_type in inst_types)
            ]

        return inst_ids

    # ------------------------------------------------------------------
    def get_proc_ids(self, inst_types=None, is_copy=True):
        """Return processing-node ids (deep copy by default)."""
        # NOTE(review): unlike get_inst_ids, `inst_types` is accepted but
        # never used for filtering here -- confirm whether that is intended.
        n_tries, max_n_tries = 0, 1e3
        try:
            while not InstData.has_init:
                sleep(0.01)
                n_tries += 1
                if n_tries > max_n_tries:
                    raise
        except Exception:
            self.log.critical([
                ['wr', ' - cant do get_proc_ids(', inst_types, ')...'],
                ['wr', ' --> Will terminate!'],
            ])
            raise Exception()

        out = InstData.proc_ids
        if is_copy:
            out = copy.deepcopy(out)
        return out

    # ------------------------------------------------------------------
    def is_south_site(self):
        """Return True if the configured site type is the southern one."""
        return (InstData.site_type == 'S')

    # ------------------------------------------------------------------
    def get_inst_healths(self, is_copy=False):
        """Return the instrument-health tree (shared object unless is_copy)."""
        while not InstData.has_init:
            sleep(0.01)
        out = InstData.inst_health
        if is_copy:
            out = copy.deepcopy(out)
        return out

    # ------------------------------------------------------------------
    def get_inst_health_fulls(self, is_copy=False):
        """Return the deep (3rd-level) instrument-health data."""
        while not InstData.has_init:
            sleep(0.01)
        out = InstData.inst_health_deep
        if is_copy:
            out = copy.deepcopy(out)
        return out

    # ------------------------------------------------------------------
    def get_sub_array_insts(self, is_copy=False):
        """Return the sub-array -> telescope-ids mapping."""
        while not InstData.has_init:
            sleep(0.01)
        out = InstData.sub_array_tels
        if is_copy:
            out = copy.deepcopy(out)
        return out

    # ------------------------------------------------------------------
    def get_inst_id_to_sub_array(self, is_copy=False):
        """Return the telescope-id -> sub-array mapping."""
        while not InstData.has_init:
            sleep(0.01)
        out = InstData.tel_id_to_sub_array
        if is_copy:
            out = copy.deepcopy(out)
        return out

    # ------------------------------------------------------------------
    def get_inst_health_state(self, health):
        """mapping between 
numerical health values and a state used eg to determine the colour code for a given metric Parameters ---------- name : health a health metric, with expected values within [0, 100] for connected instruments. negative values indicate disconnected instruments. Returns ------- str the name of the state """ # make sure we are ordered in the threshold value states = sorted(InstData._inst_states, key=lambda x: x['thresholds'][1]) out = None for state in states: if health <= state['thresholds'][1]: out = state['name'] break if out is None: if health > states[-1]['thresholds'][1]: out = states[-1]['name'] else: out =
= [String] csp_rtable_check.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp_rtable.h: 118 if hasattr(_libs['csp'], 'csp_rtable_clear'): csp_rtable_clear = _libs['csp'].csp_rtable_clear csp_rtable_clear.argtypes = [] csp_rtable_clear.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp_iflist.h: 28 if hasattr(_libs['csp'], 'csp_iflist_add'): csp_iflist_add = _libs['csp'].csp_iflist_add csp_iflist_add.argtypes = [POINTER(csp_iface_t)] csp_iflist_add.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp_iflist.h: 35 if hasattr(_libs['csp'], 'csp_iflist_get_by_name'): csp_iflist_get_by_name = _libs['csp'].csp_iflist_get_by_name csp_iflist_get_by_name.argtypes = [String] csp_iflist_get_by_name.restype = POINTER(csp_iface_t) # /home/johan/git/pygnd/lib/libcsp/include/csp/csp_iflist.h: 40 if hasattr(_libs['csp'], 'csp_iflist_print'): csp_iflist_print = _libs['csp'].csp_iflist_print csp_iflist_print.argtypes = [] csp_iflist_print.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 43 try: my_address = (c_uint8).in_dll(_libs['csp'], 'my_address') except: pass # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 49 if hasattr(_libs['csp'], 'csp_init'): csp_init = _libs['csp'].csp_init csp_init.argtypes = [c_uint8] csp_init.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 56 if hasattr(_libs['csp'], 'csp_set_hostname'): csp_set_hostname = _libs['csp'].csp_set_hostname csp_set_hostname.argtypes = [String] csp_set_hostname.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 62 if hasattr(_libs['csp'], 'csp_get_hostname'): csp_get_hostname = _libs['csp'].csp_get_hostname csp_get_hostname.argtypes = [] if sizeof(c_int) == sizeof(c_void_p): csp_get_hostname.restype = ReturnString else: csp_get_hostname.restype = String csp_get_hostname.errcheck = ReturnString # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 69 if hasattr(_libs['csp'], 'csp_set_model'): csp_set_model = 
_libs['csp'].csp_set_model csp_set_model.argtypes = [String] csp_set_model.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 75 if hasattr(_libs['csp'], 'csp_get_model'): csp_get_model = _libs['csp'].csp_get_model csp_get_model.argtypes = [] if sizeof(c_int) == sizeof(c_void_p): csp_get_model.restype = ReturnString else: csp_get_model.restype = String csp_get_model.errcheck = ReturnString # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 82 if hasattr(_libs['csp'], 'csp_socket'): csp_socket = _libs['csp'].csp_socket csp_socket.argtypes = [c_uint32] csp_socket.restype = POINTER(csp_socket_t) # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 90 if hasattr(_libs['csp'], 'csp_accept'): csp_accept = _libs['csp'].csp_accept csp_accept.argtypes = [POINTER(csp_socket_t), c_uint32] csp_accept.restype = POINTER(csp_conn_t) # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 102 if hasattr(_libs['csp'], 'csp_read'): csp_read = _libs['csp'].csp_read csp_read.argtypes = [POINTER(csp_conn_t), c_uint32] csp_read.restype = POINTER(csp_packet_t) # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 111 if hasattr(_libs['csp'], 'csp_send'): csp_send = _libs['csp'].csp_send csp_send.argtypes = [POINTER(csp_conn_t), POINTER(csp_packet_t), c_uint32] csp_send.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 125 if hasattr(_libs['csp'], 'csp_send_prio'): csp_send_prio = _libs['csp'].csp_send_prio csp_send_prio.argtypes = [c_uint8, POINTER(csp_conn_t), POINTER(csp_packet_t), c_uint32] csp_send_prio.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 141 if hasattr(_libs['csp'], 'csp_transaction'): csp_transaction = _libs['csp'].csp_transaction csp_transaction.argtypes = [c_uint8, c_uint8, c_uint8, c_uint32, POINTER(None), c_int, POINTER(None), c_int] csp_transaction.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 154 if hasattr(_libs['csp'], 'csp_transaction_persistent'): csp_transaction_persistent = 
_libs['csp'].csp_transaction_persistent csp_transaction_persistent.argtypes = [POINTER(csp_conn_t), c_uint32, POINTER(None), c_int, POINTER(None), c_int] csp_transaction_persistent.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 163 if hasattr(_libs['csp'], 'csp_recvfrom'): csp_recvfrom = _libs['csp'].csp_recvfrom csp_recvfrom.argtypes = [POINTER(csp_socket_t), c_uint32] csp_recvfrom.restype = POINTER(csp_packet_t) # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 176 if hasattr(_libs['csp'], 'csp_sendto'): csp_sendto = _libs['csp'].csp_sendto csp_sendto.argtypes = [c_uint8, c_uint8, c_uint8, c_uint8, c_uint32, POINTER(csp_packet_t), c_uint32] csp_sendto.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 187 if hasattr(_libs['csp'], 'csp_sendto_reply'): csp_sendto_reply = _libs['csp'].csp_sendto_reply csp_sendto_reply.argtypes = [POINTER(csp_packet_t), POINTER(csp_packet_t), c_uint32, c_uint32] csp_sendto_reply.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 201 if hasattr(_libs['csp'], 'csp_connect'): csp_connect = _libs['csp'].csp_connect csp_connect.argtypes = [c_uint8, c_uint8, c_uint8, c_uint32, c_uint32] csp_connect.restype = POINTER(csp_conn_t) # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 208 if hasattr(_libs['csp'], 'csp_close'): csp_close = _libs['csp'].csp_close csp_close.argtypes = [POINTER(csp_conn_t)] csp_close.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 214 if hasattr(_libs['csp'], 'csp_conn_dport'): csp_conn_dport = _libs['csp'].csp_conn_dport csp_conn_dport.argtypes = [POINTER(csp_conn_t)] csp_conn_dport.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 220 if hasattr(_libs['csp'], 'csp_conn_sport'): csp_conn_sport = _libs['csp'].csp_conn_sport csp_conn_sport.argtypes = [POINTER(csp_conn_t)] csp_conn_sport.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 226 if hasattr(_libs['csp'], 'csp_conn_dst'): 
csp_conn_dst = _libs['csp'].csp_conn_dst csp_conn_dst.argtypes = [POINTER(csp_conn_t)] csp_conn_dst.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 232 if hasattr(_libs['csp'], 'csp_conn_src'): csp_conn_src = _libs['csp'].csp_conn_src csp_conn_src.argtypes = [POINTER(csp_conn_t)] csp_conn_src.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 238 if hasattr(_libs['csp'], 'csp_conn_flags'): csp_conn_flags = _libs['csp'].csp_conn_flags csp_conn_flags.argtypes = [POINTER(csp_conn_t)] csp_conn_flags.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 246 if hasattr(_libs['csp'], 'csp_listen'): csp_listen = _libs['csp'].csp_listen csp_listen.argtypes = [POINTER(csp_socket_t), c_size_t] csp_listen.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 254 if hasattr(_libs['csp'], 'csp_bind'): csp_bind = _libs['csp'].csp_bind csp_bind.argtypes = [POINTER(csp_socket_t), c_uint8] csp_bind.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 261 if hasattr(_libs['csp'], 'csp_route_start_task'): csp_route_start_task = _libs['csp'].csp_route_start_task csp_route_start_task.argtypes = [c_uint, c_uint] csp_route_start_task.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 271 if hasattr(_libs['csp'], 'csp_bridge_start'): csp_bridge_start = _libs['csp'].csp_bridge_start csp_bridge_start.argtypes = [c_uint, c_uint, POINTER(csp_iface_t), POINTER(csp_iface_t)] csp_bridge_start.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 282 if hasattr(_libs['csp'], 'csp_promisc_enable'): csp_promisc_enable = _libs['csp'].csp_promisc_enable csp_promisc_enable.argtypes = [c_uint] csp_promisc_enable.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 289 if hasattr(_libs['csp'], 'csp_promisc_disable'): csp_promisc_disable = _libs['csp'].csp_promisc_disable csp_promisc_disable.argtypes = [] csp_promisc_disable.restype = None # 
/home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 299 if hasattr(_libs['csp'], 'csp_promisc_read'): csp_promisc_read = _libs['csp'].csp_promisc_read csp_promisc_read.argtypes = [c_uint32] csp_promisc_read.restype = POINTER(csp_packet_t) # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 312 if hasattr(_libs['csp'], 'csp_sfp_send'): csp_sfp_send = _libs['csp'].csp_sfp_send csp_sfp_send.argtypes = [POINTER(csp_conn_t), POINTER(None), c_int, c_int, c_uint32] csp_sfp_send.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 322 if hasattr(_libs['csp'], 'csp_sfp_recv'): csp_sfp_recv = _libs['csp'].csp_sfp_recv csp_sfp_recv.argtypes = [POINTER(csp_conn_t), POINTER(POINTER(None)), POINTER(c_int), c_uint32] csp_sfp_recv.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 335 if hasattr(_libs['csp'], 'csp_service_handler'): csp_service_handler = _libs['csp'].csp_service_handler csp_service_handler.argtypes = [POINTER(csp_conn_t), POINTER(csp_packet_t)] csp_service_handler.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 345 if hasattr(_libs['csp'], 'csp_ping'): csp_ping = _libs['csp'].csp_ping csp_ping.argtypes = [c_uint8, c_uint32, c_uint, c_uint8] csp_ping.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 351 if hasattr(_libs['csp'], 'csp_ping_noreply'): csp_ping_noreply = _libs['csp'].csp_ping_noreply csp_ping_noreply.argtypes = [c_uint8] csp_ping_noreply.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 359 if hasattr(_libs['csp'], 'csp_ps'): csp_ps = _libs['csp'].csp_ps csp_ps.argtypes = [c_uint8, c_uint32] csp_ps.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 366 if hasattr(_libs['csp'], 'csp_memfree'): csp_memfree = _libs['csp'].csp_memfree csp_memfree.argtypes = [c_uint8, c_uint32] csp_memfree.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 373 if hasattr(_libs['csp'], 'csp_buf_free'): csp_buf_free = _libs['csp'].csp_buf_free 
csp_buf_free.argtypes = [c_uint8, c_uint32] csp_buf_free.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 379 if hasattr(_libs['csp'], 'csp_reboot'): csp_reboot = _libs['csp'].csp_reboot csp_reboot.argtypes = [c_uint8] csp_reboot.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 386 if hasattr(_libs['csp'], 'csp_uptime'): csp_uptime = _libs['csp'].csp_uptime csp_uptime.argtypes = [c_uint8, c_uint32] csp_uptime.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 397 if hasattr(_libs['csp'], 'csp_rdp_set_opt'): csp_rdp_set_opt = _libs['csp'].csp_rdp_set_opt csp_rdp_set_opt.argtypes = [c_uint, c_uint, c_uint, c_uint, c_uint, c_uint] csp_rdp_set_opt.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 410 if hasattr(_libs['csp'], 'csp_rdp_get_opt'): csp_rdp_get_opt = _libs['csp'].csp_rdp_get_opt csp_rdp_get_opt.argtypes = [POINTER(c_uint), POINTER(c_uint), POINTER(c_uint), POINTER(c_uint), POINTER(c_uint), POINTER(c_uint)] csp_rdp_get_opt.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 420 if hasattr(_libs['csp'], 'csp_xtea_set_key'): csp_xtea_set_key = _libs['csp'].csp_xtea_set_key csp_xtea_set_key.argtypes = [String, c_uint32] csp_xtea_set_key.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 428 if hasattr(_libs['csp'], 'csp_hmac_set_key'): csp_hmac_set_key = _libs['csp'].csp_hmac_set_key csp_hmac_set_key.argtypes = [String, c_uint32] csp_hmac_set_key.restype = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 433 if hasattr(_libs['csp'], 'csp_conn_print_table'): csp_conn_print_table = _libs['csp'].csp_conn_print_table csp_conn_print_table.argtypes = [] csp_conn_print_table.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 438 for _lib in _libs.itervalues(): if not hasattr(_lib, 'csp_buffer_print_table'): continue csp_buffer_print_table = _lib.csp_buffer_print_table csp_buffer_print_table.argtypes = [] 
csp_buffer_print_table.restype = None break csp_debug_hook_func_t = CFUNCTYPE(UNCHECKED(None), csp_debug_level_t, String) # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 444 # /home/johan/git/pygnd/lib/libcsp/include/csp/csp.h: 445 if hasattr(_libs['csp'], 'csp_debug_hook_set'): csp_debug_hook_set = _libs['csp'].csp_debug_hook_set csp_debug_hook_set.argtypes = [csp_debug_hook_func_t] csp_debug_hook_set.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/i2c.h: 59 class struct_i2c_frame_s(Structure): pass struct_i2c_frame_s.__slots__ = [ 'padding', 'retries', 'reserved', 'dest', 'len_rx', 'len', 'data', ] struct_i2c_frame_s._fields_ = [ ('padding', c_uint8), ('retries', c_uint8), ('reserved', c_uint32), ('dest', c_uint8), ('len_rx', c_uint8), ('len', c_uint16), ('data', c_uint8 * 256), ] i2c_frame_t = struct_i2c_frame_s # /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/i2c.h: 59 i2c_callback_t = CFUNCTYPE(UNCHECKED(None), POINTER(i2c_frame_t), POINTER(None)) # /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/i2c.h: 73 # /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/i2c.h: 74 for _lib in _libs.itervalues(): if not hasattr(_lib, 'i2c_init'): continue i2c_init = _lib.i2c_init i2c_init.argtypes = [c_int, c_int, c_uint8, c_uint16, c_int, c_int, i2c_callback_t] i2c_init.restype = c_int break # /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/i2c.h: 84 for _lib in _libs.itervalues(): if not hasattr(_lib, 'i2c_send'): continue i2c_send = _lib.i2c_send i2c_send.argtypes = [c_int, POINTER(i2c_frame_t), c_uint16] i2c_send.restype = c_int break # /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/usart.h: 36 class struct_usart_conf(Structure): pass struct_usart_conf.__slots__ = [ 'device', 'baudrate', 'databits', 'stopbits', 'paritysetting', 'checkparity', ] struct_usart_conf._fields_ = [ ('device', String), ('baudrate', c_uint32), ('databits', c_uint8), ('stopbits', c_uint8), ('paritysetting', c_uint8), ('checkparity', c_uint8), ] 
# /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/usart.h: 49 if hasattr(_libs['csp'], 'usart_init'): usart_init = _libs['csp'].usart_init usart_init.argtypes = [POINTER(struct_usart_conf)] usart_init.restype = None usart_callback_t = CFUNCTYPE(UNCHECKED(None), POINTER(c_uint8), c_int, POINTER(None)) # /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/usart.h: 57 # /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/usart.h: 58 if hasattr(_libs['csp'], 'usart_set_callback'): usart_set_callback = _libs['csp'].usart_set_callback usart_set_callback.argtypes = [usart_callback_t] usart_set_callback.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/usart.h: 65 if hasattr(_libs['csp'], 'usart_insert'): usart_insert = _libs['csp'].usart_insert usart_insert.argtypes = [c_char, POINTER(None)] usart_insert.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/usart.h: 73 if hasattr(_libs['csp'], 'usart_putc'): usart_putc = _libs['csp'].usart_putc usart_putc.argtypes = [c_char] usart_putc.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/usart.h: 82 if hasattr(_libs['csp'], 'usart_putstr'): usart_putstr = _libs['csp'].usart_putstr usart_putstr.argtypes = [String, c_int] usart_putstr.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/usart.h: 90 if hasattr(_libs['csp'], 'usart_getc'): usart_getc = _libs['csp'].usart_getc usart_getc.argtypes = [] usart_getc.restype = c_char # /home/johan/git/pygnd/lib/libcsp/include/csp/drivers/usart.h: 92 if hasattr(_libs['csp'], 'usart_messages_waiting'): usart_messages_waiting = _libs['csp'].usart_messages_waiting usart_messages_waiting.argtypes = [c_int] usart_messages_waiting.restype = c_int # ../../include/csp/csp_interface.h: 48 if hasattr(_libs['csp'], 'csp_qfifo_write'): csp_qfifo_write = _libs['csp'].csp_qfifo_write csp_qfifo_write.argtypes = [POINTER(csp_packet_t), POINTER(csp_iface_t), POINTER(c_int)] csp_qfifo_write.restype = None # 
../../include/csp/csp_interface.h: 60
# NOTE(review): auto-generated ctypes bindings (ctypesgen-style).  This
# section mixes `_libs.itervalues()` (Python 2 only) with `_libs.values()`
# (see the csp_if_i2c loop below) -- confirm the target Python version;
# under Python 3 the itervalues() loops raise AttributeError.
for _lib in _libs.itervalues():
    if not hasattr(_lib, 'csp_route_get_mac'):
        continue
    csp_route_get_mac = _lib.csp_route_get_mac
    csp_route_get_mac.argtypes = [c_uint8]
    csp_route_get_mac.restype = c_uint8
    break

# ../../include/csp/csp_interface.h: 66
if hasattr(_libs['csp'], 'csp_iflist_add'):
    csp_iflist_add = _libs['csp'].csp_iflist_add
    csp_iflist_add.argtypes = [POINTER(csp_iface_t)]
    csp_iflist_add.restype = None

# /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_can.h: 37
try:
    # Exported global symbol lookup; silently skipped if absent.
    csp_if_can = (csp_iface_t).in_dll(_libs['csp'], 'csp_if_can')
except:
    pass

# /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_can.h: 40
class struct_csp_can_config(Structure):
    pass

struct_csp_can_config.__slots__ = [
    'bitrate',
    'clock_speed',
    'ifc',
]
struct_csp_can_config._fields_ = [
    ('bitrate', c_uint32),
    ('clock_speed', c_uint32),
    ('ifc', String),
]

# /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_can.h: 52
if hasattr(_libs['csp'], 'csp_can_init'):
    csp_can_init = _libs['csp'].csp_can_init
    csp_can_init.argtypes = [c_uint8, POINTER(struct_csp_can_config)]
    csp_can_init.restype = c_int

# /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_i2c.h: 33
for _lib in _libs.values():
    try:
        csp_if_i2c = (csp_iface_t).in_dll(_lib, 'csp_if_i2c')
        break
    except:
        pass

# /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_i2c.h: 42
for _lib in _libs.itervalues():
    if not hasattr(_lib, 'csp_i2c_init'):
        continue
    csp_i2c_init = _lib.csp_i2c_init
    csp_i2c_init.argtypes = [c_uint8, c_int, c_int]
    csp_i2c_init.restype = c_int
    break

# /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_kiss.h: 50
if hasattr(_libs['csp'], 'csp_kiss_rx'):
    csp_kiss_rx = _libs['csp'].csp_kiss_rx
    csp_kiss_rx.argtypes = [POINTER(csp_iface_t), POINTER(c_uint8), c_int, POINTER(None)]
    csp_kiss_rx.restype = None

csp_kiss_putc_f = CFUNCTYPE(UNCHECKED(None), c_char) # 
/home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_kiss.h: 59 csp_kiss_discard_f = CFUNCTYPE(UNCHECKED(None), c_char, POINTER(None)) # /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_kiss.h: 73 enum_anon_8 = c_int # /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_kiss.h: 80 KISS_MODE_NOT_STARTED = 0 # /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_kiss.h: 80 KISS_MODE_STARTED = (KISS_MODE_NOT_STARTED + 1) # /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_kiss.h: 80 KISS_MODE_ESCAPED = (KISS_MODE_STARTED + 1) # /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_kiss.h: 80 KISS_MODE_SKIP_FRAME = (KISS_MODE_ESCAPED + 1) # /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_kiss.h: 80 kiss_mode_e = enum_anon_8 # /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_kiss.h: 80 # /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_kiss.h: 95 class struct_csp_kiss_handle_s(Structure): pass struct_csp_kiss_handle_s.__slots__ = [ 'kiss_putc', 'kiss_discard', 'rx_length', 'rx_mode', 'rx_first', 'rx_cbuf', 'rx_packet', ] struct_csp_kiss_handle_s._fields_ = [ ('kiss_putc', csp_kiss_putc_f), ('kiss_discard', csp_kiss_discard_f), ('rx_length', c_uint), ('rx_mode', kiss_mode_e), ('rx_first', c_uint), ('rx_cbuf', POINTER(c_ubyte)), ('rx_packet', POINTER(csp_packet_t)), ] csp_kiss_handle_t = struct_csp_kiss_handle_s # /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_kiss.h: 95 # /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_kiss.h: 97 if hasattr(_libs['csp'], 'csp_kiss_init'): csp_kiss_init = _libs['csp'].csp_kiss_init csp_kiss_init.argtypes = [POINTER(csp_iface_t), POINTER(csp_kiss_handle_t), csp_kiss_putc_f, csp_kiss_discard_f, String] csp_kiss_init.restype = None # /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_lo.h: 32 try: csp_if_lo = (csp_iface_t).in_dll(_libs['csp'], 'csp_if_lo') except: pass # 
# NOTE(review): ctypesgen-generated bindings (zmqhub interface) followed by
# compiler-probe macros dumped from a temporary header; reflowed only.
# /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_zmqhub.h: 6
try:
    csp_if_zmqhub = (csp_iface_t).in_dll(_libs['csp'], 'csp_if_zmqhub')
except:
    pass

# /home/johan/git/pygnd/lib/libcsp/include/csp/interfaces/csp_if_zmqhub.h: 14
if hasattr(_libs['csp'], 'csp_zmqhub_init'):
    csp_zmqhub_init = _libs['csp'].csp_zmqhub_init
    csp_zmqhub_init.argtypes = [c_char, String]
    csp_zmqhub_init.restype = c_int

# /tmp/tmpwAji6q.h: 1
try:
    __STDC__ = 1
except:
    pass

# /tmp/tmpwAji6q.h: 1
try:
    __STDC_HOSTED__ = 1
except:
    pass

# /tmp/tmpwAji6q.h: 1
try:
    __GNUC__ = 4
except:
    pass

# /tmp/tmpwAji6q.h: 1
try:
    __GNUC_MINOR__ = 8
except:
    pass

# /tmp/tmpwAji6q.h: 1
try:
    __GNUC_PATCHLEVEL__ = 2
except:
    pass

# /tmp/tmpwAji6q.h: 1
# NOTE(review): the source is truncated mid-statement here.
try: __VERSION__
#!/usr/bin/env python3 # # Modified by <NAME> # # Copyright (c) Facebook, Inc. and its affiliates. """ Panoptic-DeepLab Training Script. This script is a simplified version of the training script in detectron2/tools. """ # tensorboard --logdir="d:/Segmentacija/panoptic-deeplab-master/tools_d2/output/" # python train_panoptic_deeplab.py --config-file ./configs/COCO-PanopticSegmentation/panoptic_deeplab_H_48_os16_mg124_poly_200k_bs64_crop_640_640_coco_dsconv.yaml --num-gpus 1 # python train_panoptic_deeplab.py --config-file ./configs/COCO-PanopticSegmentation/panoptic_deeplab_H_48_os16_mg124_poly_200k_bs64_crop_640_640_coco_dsconv.yaml --eval-only MODEL.WEIGHTS ./output/model_final.pth import json import math import random import numpy as np from decimal import Decimal, Rounded from PIL import Image, ImageDraw import cv2 from detectron2.model_zoo import model_zoo from detectron2.structures import BoxMode from detectron2.utils.visualizer import Visualizer, GenericMask from detectron2.utils.visualizer_sharp import Visualizer as Visualizer_sharp import sys from networkx.drawing.tests.test_pylab import mpl from tensorboard import program import os import torch from datasets import register_MaSTr1325 import _init_paths import os import torch import detectron2.data.transforms as T import detectron2.utils.comm as comm from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg, CfgNode from detectron2.data import MetadataCatalog, build_detection_train_loader, DatasetCatalog from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch, DefaultPredictor from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, ) from detectron2.projects.deeplab import build_lr_scheduler from detectron2.projects.panoptic_deeplab import ( PanopticDeeplabDatasetMapper, add_panoptic_deeplab_config, ) from detectron2.solver import 
get_default_optimizer_params from detectron2.solver.build import maybe_add_gradient_clipping import d2 def build_sem_seg_train_aug(cfg): augs = [ T.ResizeShortestEdge( cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MAX_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING ) ] if cfg.INPUT.CROP.ENABLED: augs.append(T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE)) augs.append(T.RandomFlip()) return augs class Trainer(DefaultTrainer): """ We use the "DefaultTrainer" which contains a number pre-defined logic for standard training workflow. They may not work for you, especially if you are working on a new research project. In that case you can use the cleaner "SimpleTrainer", or write your own training loop. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if cfg.MODEL.PANOPTIC_DEEPLAB.BENCHMARK_NETWORK_SPEED: return None if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type if evaluator_type in ["cityscapes_panoptic_seg", "coco_panoptic_seg"]: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) if evaluator_type == "cityscapes_panoptic_seg": assert ( torch.cuda.device_count() >= comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) if evaluator_type == "coco_panoptic_seg": # `thing_classes` in COCO panoptic metadata includes both thing and # stuff classes for visualization. 
COCOEvaluator requires metadata # which only contains thing classes, thus we map the name of # panoptic datasets to their corresponding instance datasets. dataset_name_mapper = { "coco_2017_val_panoptic": "coco_2017_val", "coco_2017_val_100_panoptic": "coco_2017_val_100", "coco_val_panoptic_mastr1325": "coco_val_panoptic_mastr1325", "coco_val_panoptic_mastr1325_evaluate": "coco_val_panoptic_mastr1325_evaluate" } evaluator_list.append( COCOEvaluator(dataset_name_mapper[dataset_name], output_dir=output_folder) ) ''' if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) ''' dataset_name_mapper = { "coco_2017_val_panoptic": "coco_2017_val", "coco_2017_val_100_panoptic": "coco_2017_val_100", "coco_val_panoptic_mastr1325": "coco_val_panoptic_mastr1325", "coco_val_panoptic_mastr1325_evaluate": "coco_val_panoptic_mastr1325_evaluate" } return COCOPanopticEvaluator(dataset_name_mapper[dataset_name], output_dir=output_folder) # elif len(evaluator_list) == 1: # return evaluator_list[0] # return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): mapper = PanopticDeeplabDatasetMapper(cfg, augmentations=build_sem_seg_train_aug(cfg)) return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. """ return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): """ Build an optimizer from config. 
""" params = get_default_optimizer_params( model, base_lr=cfg.SOLVER.BASE_LR, weight_decay=cfg.SOLVER.WEIGHT_DECAY, weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM, bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR, weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS, ) optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": return maybe_add_gradient_clipping(cfg, torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM, nesterov=cfg.SOLVER.NESTEROV, ) elif optimizer_type == "ADAM": return maybe_add_gradient_clipping(cfg, torch.optim.Adam)(params, cfg.SOLVER.BASE_LR) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() add_panoptic_deeplab_config(cfg) cfg.merge_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() default_setup(cfg, args) return cfg def main(args): cfg = setup(args) # cfg.defrost() # cfg.INPUT.CROP.SIZE = (384,512) # cfg.INPUT.MIN_SIZE_TRAIN = (384,) # cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice" # cfg.freeze() if args.eval_only: model = Trainer.build_model(cfg) DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load( cfg.MODEL.WEIGHTS, resume=args.resume ) res = Trainer.test(cfg, model) return res trainer = Trainer(cfg) trainer.resume_or_load(resume=args.resume) # cfg.defrost() # cfg.SOLVER.MAX_ITER = 130000 # cfg.freeze() return trainer.train() #---------------------------------------------------------------------------------------- # ▼ def createDetection(bbox, type, id_index, area): detections = { "type": type, "bbox": bbox, "id": id_index, "area": area } return detections def convertCategory(cat: int): if cat in [3, 5, 7, 8]: return "other" else: return "ship" def createObstacle(args, model, output, large=False, show=False): cfg = setup(args) cfg.defrost() cfg.set_new_allowed(True) thresh = 0.7 cfg.MODEL.RETINANET.SCORE_THRESH_TEST = thresh cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = thresh 
cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = thresh cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, model) cfg.DATASETS.TEST = ("coco_train_panoptic_mastr1325") cfg.INPUT.CROP.ENABLED = False cfg.freeze() x_mul = (Decimal(1278) / Decimal(512)) y_mul = (Decimal(958) / Decimal(384)) scale_factor = Decimal(1278 * 512) / Decimal(958 * 384) predictor = DefaultPredictor(cfg) jdata = json.load(open( "d:/Segmentacija/MODS/modb_evaluation-bbox_obstacle_detection/Detectors/Mask_RCNN/mrcnn_unedited_res.json")) parentdir = "d:/Segmentacija/MODS/" mapping_kope = open(parentdir + "mods" + "/raw/" + "sequence_mapping_new.txt").readlines() kope_arr = [] seqstring_arr = [] for mapping_line in mapping_kope: kope, seqstring = mapping_line.split() kope_arr.append(kope) seqstring_arr.append(seqstring) if not large: mapping = os.listdir(parentdir + "mods/preprocessed/") else: mapping = os.listdir(parentdir + "mods/raw/") for sequence in jdata["dataset"]["sequences"]: for frame in sequence["frames"]: frame["detections"] = [] if "obstacles" in frame: del frame["obstacles"] for folder in mapping: if not large: if os.path.isdir(parentdir + "mods/preprocessed/" + folder): files = os.listdir(parentdir + "mods/" + "preprocessed/" + folder + "/frames/") else: continue else: if os.path.isdir(parentdir + "mods/raw/" + folder): files = os.listdir(parentdir + "mods/" + "raw/" + folder + "/frames/") else: continue print(folder) if show and not large and (folder == "seq01" or folder == "seq02" or folder == "seq03"): continue if not large: mapping_list = open(parentdir + "mods/" + "preprocessed/" + folder + "/mapping.txt", "r").readlines() else: mapping_list = open(parentdir + "mods/" + "raw/" + folder + "/mapping.txt", "r").readlines() for file in files: if not large: im = cv2.imread(parentdir + "mods/" + "preprocessed/" + folder + "/frames/" + file) else: if file[-5:-4] == "L": im = cv2.imread(parentdir + "mods/" + "raw/" + folder + "/frames/" + file) else: continue if "instances" 
in predictor(im): instances = predictor(im)["instances"] # if True: # im = cv2.imread(parentdir + "mods/" + "preprocessed/" + "seq39" + "/frames/" + "0850.jpg") # # panoptic_seg, segments_info = predictor(im)["panoptic_seg"] # v = Visualizer(im[:, :, ::-1], MetadataCatalog.get("coco_train_panoptic_mastr1325"), scale=2.0) # # v = v.draw_instance_predictions(instances.to("cpu")) # v = v.draw_panoptic_seg_predictions(panoptic_seg.to("cpu"), segments_info) # cv2.imshow(file, v.get_image()[:, :, ::-1]) # cv2.waitKey(0) # cv2.destroyAllWindows() # exit() boxes = instances.pred_boxes if instances.has("pred_boxes") else None classes = instances.pred_classes if instances.has("pred_classes") else None classes = classes.tolist() masks = np.asarray(instances.pred_masks.to("cpu")) masks = [GenericMask(x, im.shape[0], im.shape[1]) for x in masks] detections = [] boxes = boxes.to("cpu") if show: orig_image = Image.fromarray(im) ori_image = ImageDraw.Draw(orig_image) # upscaled = cv2.resize(im, (1278, 958)) # scaled_image = Image.fromarray(upscaled) # im_scaled = ImageDraw.Draw(scaled_image) for box, index in zip(boxes, range(len(boxes))): type = convertCategory(classes[index]) x0, y0, x1, y1 = box.numpy().tolist() if large: mul_x0 = round(x0) mul_x1 = round(x1) mul_y0 = round(y0) mul_y1 = round(y1) else: mul_x0 = round(Decimal(x_mul) * Decimal(x0)) mul_x1 = round(Decimal(x_mul) * Decimal(x1)) mul_y0 = round(Decimal(y_mul) * Decimal(y0)) mul_y1 = round(Decimal(y_mul) * Decimal(y1)) width = mul_x1 - mul_x0 height = mul_y1 - mul_y0 if show: ori_image.rectangle((mul_x0, mul_y0, mul_x1, mul_y1)) bbox = [mul_x0, mul_y0, width, height] area = round(Decimal(int(masks[index].area())) * Decimal(scale_factor)) detections.append(createDetection(bbox, type, index, area)) if not large: for mapping_line in mapping_list: processed_file, raw_file = mapping_line.split(" ") if processed_file == file: mapper = raw_file[:-1] break for i, seqstring in enumerate(seqstring_arr): if seqstring == folder: 
kope_name = kope_arr[i] break else: if folder not in kope_arr: continue kope_name = folder mapper = file if show: print(detections) orig_image.show() exit(1) for sequence in jdata["dataset"]["sequences"]: if sequence["path"].split("/")[1] == kope_name: for frame in sequence["frames"]: if frame["image_file_name"] == mapper: frame["detections"] = detections break with open("d:/Segmentacija/MODS/modb_evaluation-bbox_obstacle_detection/Detectors/Mask_RCNN/" + output, "w") as f: json.dump(jdata, f) print("saved: d:/Segmentacija/MODS/modb_evaluation-bbox_obstacle_detection/Detectors/Mask_RCNN/" + output) def predictMODS(args, model, path, large): cfg = setup(args) cfg.defrost() cfg.MODEL.PANOPTIC_DEEPLAB.STUFF_AREA = 16 cfg.set_new_allowed(True) thresh = 0.7 cfg.MODEL.RETINANET.SCORE_THRESH_TEST = thresh cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = thresh cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = thresh cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, model) cfg.INPUT.CROP.ENABLED = False cfg.freeze() # model_0109999.pth predictor = DefaultPredictor(cfg) parentdir = "d:/Segmentacija/MODS/" if large: with open(parentdir + "mods" + "/raw/" + "sequence_mapping_new.txt") as m: mapping_kope = m.readlines() kope_arr = [] seqstring_arr = [] for mapping_line in mapping_kope: kope, seqstring = mapping_line.split() kope_arr.append(kope) seqstring_arr.append(seqstring) target_dir = "d:/Segmentacija/MODS/" + path + "/" if not os.path.exists(target_dir): os.makedirs(target_dir) if not large: seqs = os.listdir(parentdir + "mods" + "/preprocessed/") else: seqs = os.listdir(parentdir + "mods" + "/raw/") for seq_folder in seqs: if not large: if not os.path.isdir(parentdir + "mods" + "/preprocessed/" + seq_folder): continue files = os.listdir(parentdir + "mods/" + "preprocessed/" + seq_folder + "/frames/") target_dir_seq = target_dir + seq_folder + "/" if not os.path.exists(target_dir_seq): os.makedirs(target_dir_seq) print(seq_folder) else: if seq_folder not in kope_arr: 
continue if not os.path.isdir(parentdir + "mods" + "/raw/" + seq_folder): continue files = os.listdir(parentdir +
# <reponame>juliomateoslangerak/microscope-metrics
# Import sample infrastructure
from itertools import product
from microscopemetrics.samples import *
from typing import Union, Tuple, List

# Import analysis tools
import numpy as np
from pandas import DataFrame
from skimage.transform import hough_line  # hough_line_peaks, probabilistic_hough_line
from scipy.signal import find_peaks
from scipy.optimize import curve_fit
from scipy.interpolate import griddata
from microscopemetrics.analysis.tools import segment_image, compute_distances_matrix, compute_spots_properties
from ..utilities.utilities import multi_airy_fun, airy_fun


@register_image_analysis
class ArgolightBAnalysis(Analysis):
    """Analysis of the Argolight sample pattern B (the 'SPOTS' matrix).

    Detects the spots in each channel, then derives per-channel intensity
    statistics (homogeneity) and inter-channel distances (chromatic shifts).
    """
    def __init__(self):
        super().__init__(output_description="Analysis output of the 'SPOTS' matrix (pattern B) from the argolight sample. "
                                            "It contains chromatic shifts and homogeneity."
                         )
        self.add_requirement(name='spots_distance',
                             description='Distance between argolight spots',
                             data_type=float,
                             units='MICRON',
                             optional=False,
                             )
        self.add_requirement(name='pixel_size',
                             description='Physical size of the voxel in z, y and x',
                             data_type=Tuple[float, float, float],
                             units='MICRON',
                             optional=False,
                             )
        self.add_requirement(name='sigma',
                             description='Smoothing factor for objects detection',
                             data_type=Tuple[float, float, float],
                             optional=True,
                             default=(1, 3, 3))
        self.add_requirement(name='lower_threshold_correction_factors',
                             description='Correction factor for the lower thresholds. Must be a tuple with len = nr '
                                         'of channels or a float if all equal',
                             data_type=Union[List[float], Tuple[float], float],
                             optional=True,
                             default=None)
        self.add_requirement(name='upper_threshold_correction_factors',
                             description='Correction factor for the upper thresholds. Must be a tuple with len = nr '
                                         'of channels or a float if all equal',
                             data_type=Union[List[float], Tuple[float], float],
                             optional=True,
                             default=None)
        self.add_requirement(name='remove_center_cross',
                             description='Remove the center cross found in some Argolight patterns',
                             data_type=bool,
                             optional=True,
                             default=False)

    def run(self):
        """Segment the spots image and populate self.output with labels,
        per-channel properties, centroid ROIs, key-value metrics and tables.
        Returns True on success, False when metadata requirements fail."""
        logger.info("Validating requirements...")
        if not self.validate_requirements():
            logger.error("Metadata requirements ara not valid")
            return False

        logger.info("Analyzing spots image...")

        # Calculating the distance between spots in pixels with a security margin
        min_distance = round(
            (self.get_metadata_values('spots_distance') * 0.3)
            / max(self.get_metadata_values("pixel_size")[-2:])
        )

        # Calculating the maximum tolerated distance in microns for the same spot in a different channels
        max_distance = self.get_metadata_values("spots_distance") * 0.4

        labels = segment_image(
            image=self.input.data['argolight_b'],
            min_distance=min_distance,
            sigma=self.get_metadata_values('sigma'),
            method="local_max",
            low_corr_factors=self.get_metadata_values("lower_threshold_correction_factors"),
            high_corr_factors=self.get_metadata_values("upper_threshold_correction_factors"),
        )

        self.output.append(model.Image(name=list(self.input.data.keys())[0],
                                       description="Labels image with detected spots. "
                                                   "Image intensities correspond to roi labels.",
                                       data=labels)
                           )

        spots_properties, spots_positions = compute_spots_properties(
            image=self.input.data['argolight_b'],
            labels=labels,
            remove_center_cross=self.get_metadata_values('remove_center_cross'),
        )

        distances_df = compute_distances_matrix(
            positions=spots_positions,
            max_distance=max_distance,
            pixel_size=self.get_metadata_values('pixel_size'),
        )

        properties_kv = {}
        properties_df = DataFrame()

        for ch, ch_spot_props in enumerate(spots_properties):
            ch_df = DataFrame()
            ch_df['channel'] = [ch for _ in ch_spot_props]
            ch_df["mask_labels"] = [p["label"] for p in ch_spot_props]
            ch_df["volume"] = [p["area"] for p in ch_spot_props]
            ch_df["roi_volume_units"] = "VOXEL"
            ch_df["max_intensity"] = [p["max_intensity"] for p in ch_spot_props]
            ch_df["min_intensity"] = [p["min_intensity"] for p in ch_spot_props]
            ch_df["mean_intensity"] = [p["mean_intensity"] for p in ch_spot_props]
            ch_df["integrated_intensity"] = [p["integrated_intensity"] for p in ch_spot_props]
            ch_df["z_weighted_centroid"] = [p["weighted_centroid"][0] for p in ch_spot_props]
            ch_df["y_weighted_centroid"] = [p["weighted_centroid"][1] for p in ch_spot_props]
            ch_df["x_weighted_centroid"] = [p["weighted_centroid"][2] for p in ch_spot_props]
            ch_df["roi_weighted_centroid_units"] = "PIXEL"

            # Key metrics for spots intensities
            properties_kv[f"nr_of_spots_ch{ch:02d}"] = len(ch_df)
            properties_kv[f"max_intensity_ch{ch:02d}"] = ch_df["integrated_intensity"].max().item()
            properties_kv[f"max_intensity_roi_ch{ch:02d}"] = ch_df["integrated_intensity"].argmax().item()
            properties_kv[f"min_intensity_ch{ch:02d}"] = ch_df["integrated_intensity"].min().item()
            properties_kv[f"min_intensity_roi_ch{ch:02d}"] = ch_df["integrated_intensity"].argmin().item()
            properties_kv[f"mean_intensity_ch{ch:02d}"] = ch_df["integrated_intensity"].mean().item()
            properties_kv[f"median_intensity_ch{ch:02d}"] = ch_df["integrated_intensity"].median().item()
            properties_kv[f"std_mean_intensity_ch{ch:02d}"] = ch_df["integrated_intensity"].std().item()
            # NOTE(review): Series.mad() was removed in pandas 2.0 — confirm this
            # project pins pandas < 2.
            properties_kv[f"mad_mean_intensity_ch{ch:02d}"] = ch_df["integrated_intensity"].mad().item()
            properties_kv[f"min-max_intensity_ratio_ch{ch:02d}"] = (properties_kv[f"min_intensity_ch{ch:02d}"]
                                                                    / properties_kv[f"max_intensity_ch{ch:02d}"])

            # NOTE(review): DataFrame.append() is deprecated (removed in pandas 2.0;
            # use pandas.concat) — confirm pinned pandas version.
            properties_df = properties_df.append(ch_df)

            channel_shapes = [model.Point(x=p["weighted_centroid"][2].item(),
                                          y=p["weighted_centroid"][1].item(),
                                          z=p["weighted_centroid"][0].item(),
                                          c=ch,
                                          label=f'{p["label"]}')
                              for p in ch_spot_props
                              ]
            self.output.append(model.Roi(name=f'Centroids_ch{ch:03d}',
                                         description=f"weighted centroids channel {ch}",
                                         shapes=channel_shapes)
                               )

        distances_kv = {"distance_units": self.get_metadata_units('pixel_size')}

        # Pairwise channel distance statistics (chromatic shift).
        for a, b in product(distances_df.channel_a.unique(), distances_df.channel_b.unique()):
            temp_df = distances_df[(distances_df.channel_a == a) & (distances_df.channel_b == b)]
            a = int(a)
            b = int(b)
            distances_kv[f'mean_3d_dist_ch{a:02d}_ch{b:02d}'] = temp_df.dist_3d.mean().item()
            distances_kv[f'median_3d_dist_ch{a:02d}_ch{b:02d}'] = temp_df.dist_3d.median().item()
            distances_kv[f'std_3d_dist_ch{a:02d}_ch{b:02d}'] = temp_df.dist_3d.std().item()
            distances_kv[f'mad_3d_dist_ch{a:02d}_ch{b:02d}'] = temp_df.dist_3d.mad().item()
            distances_kv[f'mean_z_dist_ch{a:02d}_ch{b:02d}'] = temp_df.z_dist.mean().item()
            distances_kv[f'median_z_dist_ch{a:02d}_ch{b:02d}'] = temp_df.z_dist.median().item()
            distances_kv[f'std_z_dist_ch{a:02d}_ch{b:02d}'] = temp_df.z_dist.std().item()
            distances_kv[f'mad_z_dist_ch{a:02d}_ch{b:02d}'] = temp_df.z_dist.mad().item()

        self.output.append(model.KeyValues(name='Intensity Key Annotations',
                                           description='Key Intensity Measurements on Argolight D spots',
                                           key_values=properties_kv)
                           )
        self.output.append(model.KeyValues(name='Distances Key Annotations',
                                           description='Key Distance Measurements on Argolight D spots',
                                           key_values=distances_kv)
                           )
        self.output.append(model.Table(name='Properties',
                                       description="Analysis_argolight_D_properties",
                                       table=properties_df)
                           )
        self.output.append(model.Table(name='Distances',
                                       description="Analysis_argolight_D_distances",
                                       table=distances_df)
                           )
        return True


@register_image_analysis
class ArgolightEAnalysis(Analysis):
    """Analysis of the Argolight sample pattern E (lines along the X or Y axis),
    measuring resolution along the requested axis."""
    def __init__(self):
        super().__init__(
            output_description="Analysis output of the lines (pattern E) from the argolight sample. "
                               "It contains resolution data on the axis indicated:"
                               "- axis 1 = Y resolution = lines along X axis"
                               "- axis 2 = X resolution = lines along Y axis"
        )
        self.add_requirement(name='pixel_size',
                             description='Physical size of the voxel in z, y and x',
                             data_type=Tuple[float, float, float],
                             units='MICRON',
                             optional=False
                             )
        self.add_requirement(name='axis',
                             description='axis along which resolution is being measured. 1=Y, 2=X',
                             data_type=int,
                             optional=False
                             )
        self.add_requirement(name='measured_band',
                             description='Fraction of the image across which intensity profiles are measured',
                             data_type=float,
                             optional=True,
                             default=.4
                             )

    def run(self):
        """A intermediate function to specify the axis to be analyzed"""
        logger.info("Validating requirements...")
        if not self.validate_requirements():
            logger.error("Metadata requirements ara not valid")
            return False

        logger.info("Analyzing resolution...")
        return self._analyze_resolution(image=self.input.data['argolight_e'],
                                        axis=self.get_metadata_values('axis'),
                                        measured_band=self.get_metadata_values("measured_band"),
                                        pixel_size=self.get_metadata_values('pixel_size'),
                                        pixel_size_units=self.get_metadata_units('pixel_size'))

    def _analyze_resolution(self, image, axis, measured_band, pixel_size, pixel_size_units):
        # Compute intensity profiles and peak-based resolution along `axis`;
        # populate self.output with ROIs, key-values and a profiles table.
        (
            profiles,
            z_planes,
            peak_positions,
            peak_heights,
            resolution_values,
            resolution_indexes,
            resolution_method,
        ) = _compute_resolution(
            image=image,
            axis=axis,
            measured_band=measured_band,
            prominence=0.264,
            do_angle_refinement=False,
        )
        # resolution in native units
        resolution_values = [x * pixel_size[axis] for x in resolution_values]

        key_values = {
            f"ch{ch:02d}_{resolution_method}_resolution": res.item()
            for ch, res in enumerate(resolution_values)
        }
        key_values["resolution_units"] = pixel_size_units
        key_values["resolution_axis"] = axis
        key_values["measured_band"] = measured_band

        for ch, indexes in enumerate(resolution_indexes):
            key_values[f"peak_positions_ch{ch:02d}"] = [
                (peak_positions[ch][ind].item(), peak_positions[ch][ind + 1].item())
                for ind in indexes
            ]
            key_values[f"peak_heights_ch{ch:02d}"] = [
                (peak_heights[ch][ind].item(), peak_heights[ch][ind + 1].item())
                for ind in indexes
            ]
            key_values[f"focus_ch{ch:02d}"] = z_planes[ch].item()

        out_tables = {}

        # Populate tables and rois
        for ch, profile in enumerate(profiles):
            out_tables.update(_profile_to_table(profile, ch))
            shapes = []
            for pos in key_values[f"peak_positions_ch{ch:02d}"]:
                for peak in pos:
                    # Measurements are taken at center of pixel so we add .5 pixel to peak positions
                    if axis == 1:  # Y resolution -> horizontal rois
                        axis_len = image.shape[-2]
                        x1_pos = int(
                            (axis_len / 2) - (axis_len * measured_band / 2)
                        )
                        y1_pos = peak + 0.5
                        x2_pos = int(
                            (axis_len / 2) + (axis_len * measured_band / 2)
                        )
                        y2_pos = peak + 0.5
                    elif axis == 2:  # X resolution -> vertical rois
                        axis_len = image.shape[-1]
                        y1_pos = int(
                            (axis_len / 2) - (axis_len * measured_band / 2)
                        )
                        x1_pos = peak + 0.5
                        y2_pos = int(
                            (axis_len / 2) + (axis_len * measured_band / 2)
                        )
                        x2_pos = peak + 0.5
                    shapes.append(model.Line(x1=x1_pos,
                                             y1=y1_pos,
                                             x2=x2_pos,
                                             y2=y2_pos,
                                             z=z_planes[ch],
                                             c=ch)
                                  )
            self.output.append(model.Roi(name=f"Peaks_ch{ch:03d}",
                                         description=f"Lines where highest Rayleigh resolution was found in channel {ch}",
                                         shapes=shapes)
                               )

        self.output.append(model.KeyValues(name='Key-Value Annotations',
                                           description=f'Measurements on Argolight E pattern along axis={axis}',
                                           key_values=key_values)
                           )
        self.output.append(model.Table(name='Profiles',
                                       description='Raw and fitted profiles across the center of the image along the '
                                                   'defined axis',
                                       table=DataFrame.from_dict(out_tables))
                           )
        return True


class ArgolightReporter(Reporter):
    """Reporter subclass to produce Argolight sample figures"""

    def __init__(self):
        image_report_to_func = {
            "spots": self.full_report_spots,
            "vertical_resolution": self.full_report_vertical_resolution,
            "horizontal_resolution": self.full_report_horizontal_resolution,
        }
        super().__init__(image_report_to_func=image_report_to_func)

    def produce_image_report(self, image):
        pass

    def full_report_spots(self, image):
        pass

    def full_report_vertical_resolution(self, image):
        pass

    def full_report_horizontal_resolution(self, image):
        pass

    def plot_homogeneity_map(self, image):
        # Interpolate per-spot intensities over the image plane and show three
        # rows of maps per channel: MIP, integrated intensity, max intensity.
        nr_channels = image.getSizeC()
        x_dim = image.getSizeX()
        y_dim = image.getSizeY()

        tables = self.get_tables(
            image, namespace_start="metrics", name_filter="properties"
        )
        if len(tables) != 1:
            raise Exception(
                "There are none or more than one properties tables. Verify data integrity."
            )
        table = tables[0]

        row_count = table.getNumberOfRows()
        col_names = [c.name for c in table.getHeaders()]
        wanted_columns = [
            "channel",
            "max_intensity",
            "mean_intensity",
            "integrated_intensity",
            "x_weighted_centroid",
            "y_weighted_centroid",
        ]

        fig, axes = plt.subplots(
            ncols=nr_channels, nrows=3, squeeze=False, figsize=(3 * nr_channels, 9)
        )

        for ch in range(nr_channels):
            data = table.slice(
                [col_names.index(w_col) for w_col in wanted_columns],
                table.getWhereList(
                    condition=f"channel=={ch}",
                    variables={},
                    start=0,
                    stop=row_count,
                    step=0,
                ),
            )
            max_intensity = np.array(
                [
                    val
                    for col in data.columns
                    for val in col.values
                    if col.name == "max_intensity"
                ]
            )
            integrated_intensity = np.array(
                [
                    val
                    for col in data.columns
                    for val in col.values
                    if col.name == "integrated_intensity"
                ]
            )
            x_positions = np.array(
                [
                    val
                    for col in data.columns
                    for val in col.values
                    if col.name == "x_weighted_centroid"
                ]
            )
            y_positions = np.array(
                [
                    val
                    for col in data.columns
                    for val in col.values
                    if col.name == "y_weighted_centroid"
                ]
            )
            grid_x, grid_y = np.mgrid[0:x_dim, 0:y_dim]
            image_intensities = get_intensities(image, c_range=ch, t_range=0).max(0)

            try:
                interpolated_max_int = griddata(
                    np.stack((x_positions, y_positions), axis=1),
                    max_intensity,
                    (grid_x, grid_y),
                    method="linear",
                )
                interpolated_intgr_int = griddata(
                    np.stack((x_positions, y_positions), axis=1),
                    integrated_intensity,
                    (grid_x, grid_y),
                    method="linear",
                )
            except Exception as e:
                # TODO: Log a warning
                interpolated_max_int = np.zeros((256, 256))

            ax = axes.ravel()
            ax[ch] = plt.subplot(3, 4, ch + 1)

            ax[ch].imshow(np.squeeze(image_intensities), cmap="gray")
            ax[ch].set_title("MIP_" + str(ch))

            ax[ch + nr_channels].imshow(
                np.flipud(interpolated_intgr_int),
                extent=(0, x_dim, y_dim, 0),
                origin="lower",
                cmap=cm.hot,
                vmin=np.amin(integrated_intensity),
                vmax=np.amax(integrated_intensity),
            )
            ax[ch + nr_channels].plot(x_positions, y_positions, "k.", ms=2)
            ax[ch + nr_channels].set_title("Integrated_int_" + str(ch))

            ax[ch + 2 * nr_channels].imshow(
                np.flipud(interpolated_max_int),
                extent=(0, x_dim, y_dim, 0),
                origin="lower",
                cmap=cm.hot,
                vmin=np.amin(image_intensities),
                vmax=np.amax(image_intensities),
            )
            ax[ch + 2 * nr_channels].plot(x_positions, y_positions, "k.", ms=2)
            ax[ch + 2 * nr_channels].set_title("Max_int_" + str(ch))

        plt.show()

    def plot_distances_map(self, image):
        nr_channels = image.getSizeC()
        # NOTE(review): source truncated mid-statement here.
        x_dim =
# NOTE(review): Python 2 code (``except Exc, e`` syntax) — CouchDB client wrapper
# methods; the enclosing class definition is outside this fragment. The leading
# tokens below continue a ``def __getitem__(self, name, false_if_not_found = ...``
# signature split at the previous line.
False, rev = False, open_revs = False) :
        # Fetch a document; optionally a specific revision or all open revisions.
        try :
            if rev :
                return self.db.get(name, rev = rev)
            elif open_revs :
                return self.db.get(name, open_revs = open_revs)
            else :
                return self.db[name]
        except couch_ResourceNotFound, e :
            # This happens during DB timeouts only for the _users database
            if name.count("org.couchdb.user") and not second_time :
                raise PossibleResourceNotFound(name)
            if false_if_not_found :
                return False
            else :
                raise ResourceNotFound("Cannot lookup key: " + name, e)

    @reauth
    def delete_doc(self, doc) :
        self.db.delete(doc)

    @reauth
    def __delitem__(self, name, second_time = False) :
        # Delete every undeleted revision of a document, looping until all gone.
        revs = []
        try :
            all_deleted = False
            count = -1
            while not all_deleted :
                count += 1
                all_deleted = True
                docs = self.__getitem__(name, open_revs = "all")
                for doc in docs :
                    if "_deleted" in doc["ok"] :
                        continue
                    all_deleted = False
                    mverbose(str(count) + ") DELETE Found undeleted revision: " + name + ": " + doc["ok"]["_rev"])
                    olddoc = self.__getitem__(name, rev = doc["ok"]["_rev"])
                    if olddoc is not None :
                        mverbose(str(count) + ") DELETE Deleting...")
                        try :
                            self.delete_doc(olddoc)
                        except (CommunicationError, couch_ResourceNotFound), e :
                            mwarn("OK if not found. Will try again: " + str(e))
            '''
            doc = self.db[name]
            if "_conflicts" in doc :
                mdebug("FOUND conflict revisions.")
                revs += doc["_conflicts"]
            if "_deleted_conflicts" in doc :
                mdebug("FOUND deleted conflict revisions.")
                revs += doc["_deleted_conflicts"]
            for rev in revs :
                olddoc = self.db.get(name, rev=rev)
                self.db.delete(olddoc)
            #del self.db[name]
            '''
        except ResourceNotFound, e :
            # This happens during DB timeouts only for the _users database
            if name.count("org.couchdb.user") and not second_time :
                raise PossibleResourceNotFound(name)
            raise e

    @reauth
    def delete_attachment(self, doc, filename) :
        self.db.delete_attachment(doc, filename)

    @reauth
    def purge(self, doc_list) :
        self.db.purge(doc_list)

    @reauth
    def put_attachment(self, name, filename, contents, new_doc = False) :
        # Store an attachment, synthesizing/replacing the holder doc as needed.
        if not new_doc :
            mdebug("No existing doc. Will make a new one")
            trydelete = True
            if self.doc_exist(name) is True :
                mdebug("Deleting original @ " + name)
                doc = self.__getitem__(name)
                self.__delitem__(name)
                self.purge([doc])
                trydelete = False
            mdebug("Synthesize foo doc")
            doc = { "foo" : "bar"}
            # This 'translated_at' is because of bug: https://issues.apache.org/jira/browse/COUCHDB-1415
            # Supposedly fixed in CouchDB 2.0
            doc["translated_at"] = time()
            if trydelete :
                try :
                    doc["_rev"] = self.__getitem__(name)["_rev"]
                    mdebug("Old revision found.")
                except ResourceNotFound, e :
                    mdebug("No old revision found.")
                    pass
            mdebug("Going to write: " + str(doc) + " to doc id " + name + " under filename " + filename)
            self.__setitem__(name, doc)
            doc = self.__getitem__(name)
        else :
            doc = new_doc
        if type(contents) != file :
            mdebug("Putting attachment of length: " + str(len(contents)))
        return self.db.put_attachment(doc, contents, filename)

    @reauth
    def get_attachment(self, name, filename) :
        obj = self.db.get_attachment(name, filename)
        if obj is not None :
            return obj.read()
        else :
            raise CommunicationError("No such attachment: " + name + " => " + filename)

    @reauth
    def get_attachment_to_path(self, name, filename, path) :
        # Stream an attachment to a local file in 4 KiB chunks; returns byte count.
        sourcebytes = 0
        obj = self.db.get_attachment(name, filename)
        if obj is not None :
            fh = open(path, 'wb')
            while True :
                byte = obj.read(4096)
                if byte :
                    sourcebytes += len(byte)
                    fh.write(byte)
                else :
                    break
            fh.close()
        else :
            raise CommunicationError("No such attachment: " + name + " => " + filename)
        return sourcebytes

    def listen(self, username, password, port) :
        return port

    def get_attachment_meta(self, name, filename) :
        return self.__getitem__(name)["_attachments"][filename]

    @reauth
    def doc_exist(self, name, second_time = False) :
        # Existence probe via direct lookup; EAFP on ResourceNotFound.
        try :
            self.db[name]
        except couch_ResourceNotFound, e :
            # This happens during DB timeouts only for the _users database
            if name.count("org.couchdb.user") and not second_time :
                raise PossibleResourceNotFound(name)
            ((error, reason),) = e.args
            mverbose("Doc exist returns not found: " + reason)
            return False
        return True

    def iocheck(self, e) :
        # Re-authorize on known-transient errnos; re-raise anything else.
        if e.errno in bad_errnos :
            try :
                self.reauthorize(e = e)
            except CommunicationError, e :
                mdebug("iocheck Re-authorization failed, but we'll keep trying.")
        else :
            mwarn("Actual error number: " + str(e.errno))
            raise e

    def do_check_for_unauthorized(self, e) :
        try :
            check_for_unauthorized(e)
            raise CommunicationError("Failed to perform view: " + str(e))
        except Unauthorized, e :
            try :
                self.reauthorize(e = e)
            except CommunicationError, e :
                mdebug("error check Re-authorization failed, but we'll keep trying.")

    def error_check(self, errors) :
        # Decrement the retry budget; give up with CommunicationError at zero.
        if errors["errors_left"] > 0 :
            mwarn("Server errors left: " + str(errors["errors_left"]))
            errors["errors_left"] -= 1
            sleep(1)
        else :
            merr("No errors_left remaining.")
            raise CommunicationError("Failed to perform view. Ran out of tries.")

    def couchdb_pager(self, view_name='_all_docs',
                      startkey=None, startkey_docid=None,
                      endkey=None, endkey_docid=None, bulk=5000,
                      stale = False):
        # Page through a view `bulk` rows at a time, resuming at the last row.
        # NOTE(review): nesting of the startkey/endkey option guards was lost in
        # the source line-mangling; reconstructed flat per the standard
        # couchdb_pager recipe — confirm against upstream history.
        # Request one extra row to resume the listing there later.
        options = {'limit': bulk + 1}
        if stale :
            options["stale"] = stale
        if startkey:
            options['startkey'] = startkey
        if startkey_docid:
            options['startkey_docid'] = startkey_docid
        if endkey:
            options['endkey'] = endkey
        if endkey_docid:
            options['endkey_docid'] = endkey_docid
        yielded_rows = {}
        errors = {"errors_left" : limit}
        while errors["errors_left"] > 0 :
            try:
                view = self.db.view(view_name, **options)
                rows = []
                done = False
                # If we got a short result (< limit + 1), we know we are done.
                if len(view) <= bulk:
                    rows = view.rows
                    done = True
                else:
                    # Otherwise, continue at the new start position.
                    rows = view.rows[:-1]
                    last = view.rows[-1]
                    options['startkey'] = last.key
                    options['startkey_docid'] = last.id
                for row in self.do_rows(rows, yielded_rows) :
                    yield row
                if not done :
                    continue
                break
            except retriable_errors, e :
                try :
                    self.reauthorize(e = e)
                except CommunicationError, e :
                    mdebug("view 1) check Re-authorization failed, but we'll keep trying.")
            except IOError, e:
                self.iocheck(e)
            except couch_ServerError, e :
                self.do_check_for_unauthorized(e)
            self.error_check(errors)

    def do_rows(self, rows, yielded_rows) :
        # Yield only rows not already seen (dedup across paging retries).
        for row in rows :
            if row["key"] is None or (("id" in row and row["id"] not in yielded_rows) or row["value"]["_id"] not in yielded_rows) :
                if row["key"] is not None :
                    _id = row["id"] if "id" in row else row["value"]["_id"]
                    yielded_rows[_id] = True
                yield row

    def view(self, *args, **kwargs) :
        # Query a view; with explicit "keys" query directly (retrying), otherwise
        # delegate to couchdb_pager for bulk paging.
        view_name = args[0]
        mverbose("Query view: " + view_name)
        if "keys" in kwargs :
            keylist = []
            username = kwargs["username"]
            for key in kwargs["keys"] :
                keylist.append([username, key])
            kwargs["keys"] = keylist
        if "username" in kwargs :
            del kwargs["username"]
        if "keys" in kwargs :
            yielded_rows = {}
            errors = {"errors_left" : limit}
            while errors["errors_left"] > 0 :
                try :
                    for row in self.do_rows(self.db.view(*args, **kwargs), yielded_rows) :
                        yield row
                    break
                except retriable_errors, e :
                    try :
                        self.reauthorize(e = e)
                    except CommunicationError, e :
                        mdebug("view 2) check Re-authorization failed, but we'll keep trying.")
                except IOError, e:
                    self.iocheck(e)
                except couch_ServerError, e :
                    self.do_check_for_unauthorized(e)
                self.error_check(errors)
        else :
            kwargs["view_name"] = view_name
            kwargs["bulk"] = 50
            for row in self.couchdb_pager(**kwargs) :
                yield row

    @reauth
    def compact(self, *args, **kwargs) :
        self.db.compact(*args, **kwargs)

    @reauth
    def cleanup(self, *args, **kwargs) :
        self.db.cleanup(*args, **kwargs)

    def close(self) :
        pass

    def runloop(self) :
        mdebug("Server runloop - nothing to do.")

    def pull_percent(self) :
        return "100.0"

    def push_percent(self) :
        return "100.0"

    # NOTE(review): source truncated mid-definition here.
    def 
detach_thread(self) : pass # FIXME: need try's here so we return our "NotFound" # instead of our not found class MicaServerCouchDB(AuthBase) : def __init__(self, url = False, username = False, password = <PASSWORD>, cookie = False, refresh = False) : self.url = url self.cookie = cookie self.refresh = refresh if refresh : self.username = username self.password = password self.couch_server = Server(url) if refresh : assert(self.url) assert(self.username) assert(self.password) self.first_auth(username, password) @reauth def first_auth(self, username, password) : self.auth(username, password) def get_cookie(self, url, username, password) : username_unquoted = myquote(username) password_unquoted = myquote(password) full_url = url.replace("//", "//" + username_unquoted + ":" + password_unquoted + "@") tmp_server = Server(full_url) mverbose("Requesting cookie.") try : code, message, obj = tmp_server.resource.post('_session',headers={'Content-Type' : 'application/x-www-form-urlencoded'}, body="name=" + username_unquoted + "&password=" + password_unquoted) except UnicodeDecodeError : # CouchDB folks messed up badly. 
This is ridiculous that I have # to do this mwarn("Retrying another way....") username_unquoted = username_unquoted.encode("latin1").decode("latin1") password_unquoted = password_unquoted.encode("latin1").decode("latin1") code, message, obj = tmp_server.resource.post('_session',headers={'Content-Type' : 'application/x-www-form-urlencoded'}, body="name=" + username_unquoted + "&password=" + password_unquoted) except UnicodeEncodeError : mwarn("Retrying a third way....") save = couchdb.http.basic_auth def basic_auth_override(credentials) : if credentials: token = base64_b64encode('%s:%s' % credentials) return ('Basic %s' % token.strip()).encode('ascii') couchdb.http.basic_auth = basic_auth_override try : code, message, obj = tmp_server.resource.post('_session',headers={'Content-Type' : 'application/x-www-form-urlencoded'}, body="name=" + username_unquoted + "&password=" + password_unquoted) except Exception, e : merr(str(e)) couchdb.http.basic_auth = save raise e couchdb.http.basic_auth = save if (code != 200) : raise CommunicationError("MLL Unauthorized: " + username) cookie = message["Set-Cookie"].split(";", 1)[0].strip() mverbose("Received cookie: " + cookie) return cookie def auth(self, username = False, password = False) : mverbose("Reauth start")
-name] { # foreach file [glob -nocomplain -directory /tmp -types f ${router} ${router}_*] { # catch {file delete -force $file} # } # } # xscale_connect_routers # enaDestructor -id on_resolve_fail [list xscale_forget_enxr_topology] # } else { # enaLogVerify -skip "No subset applicable to EnXR" -fail false # } # } # } mappings_start_time = time.perf_counter() # {{{ dynobj_mappings = {} # [group_name][object_name] = set([xos_obj, ...]) dynobj_mappings_link_parts = {} # [group_name][link_name][tuple(xos_link_devices)] = set([xos_link_parts, ...]) dynobjs_by_type = { 'device': self.device_names & all_used_objects, 'interface': self.interface_names & all_used_objects, 'link': self.link_names & all_used_objects, } # Determine useable constraint_groups {{{ weighted_constraint_groups_list = [] mandatory_constraint_groups_list = [] for group_name, constraint_group in self.constraint_groups.items(): constraint_group_objects = set(constraint_group.objects.keys()) if constraint_group_objects and constraint_group_objects <= all_used_objects: if constraint_group.weight == 'mandatory': mandatory_constraint_groups_list.append(group_name) else: weighted_constraint_groups_list.append(group_name) # }}} for group_name in [None] + mandatory_constraint_groups_list + weighted_constraint_groups_list: dynobj_mappings[group_name] = {} dynobj_mappings_link_parts[group_name] = {} # enaVerifyGroup {{{ if group_name is None: constraint_group = self group_weight = 'mandatory' # TODO set ipfxgroup "" # TODO set kpfxgroup "" else: # TODO lappend _enaVerify_defaults -prefix "($group_name group) " constraint_group = self.constraint_groups[group_name] # TODO set ipfxgroup $group_name, # TODO set kpfxgroup constraint_groups.$group_name. 
group_weight = constraint_group.weight for type in ('device', 'interface', 'link'): for object_name in dynobjs_by_type[type]: object_constraints = constraint_group.query_constraints(object_name) # enaVerifyGroup {{{ # TODO lappend _enaVerify_defaults -append-prefix "$object_name" if group_name is not None: pass # TODO # if { ![keylexists kltopo ${kpfxgroup}objects.$object_name.constraints] } { # if { $type eq "device" } { # set dynobj_mappings($ipfxgroup$object_name) $dynobj_mappings($object_name) # continue # } else { # keylset kltopo ${kpfxgroup}objects.$object_name.constraints [set [namespace current]::kl_default_constraints_$type] # } # } if type == 'device': # {{{ if group_name is None: # TODO # if { [info exists ::test_params(rtrLabelList)] } { # set lbls [lsearch -glob -all -inline [keylkeys ::test_params(rtrLabelList)] [keylget kltopo ${kpfxgroup}objects.$object_name.constraints.labels]] # if { [llength $lbls] } { # keylset kltopo objects.$object_name.constraints.name [struct::list map $lbls {keylget ::test_params(rtrLabelList)}] # } # } # XXXJST TODO support constraint-group environment overrides for constraint, envsfx in ( ('type', 'TYPE'), ('match_name', 'NAME'), ('platform', 'PLATFORM'), ('tgen_platform', 'TGEN_PLATFORM'), ('os', 'OS'), ('multinode_requested', 'MULTINODE'), ): for role in [object_name] + list(object_constraints.label) + list(object_constraints.role): env = os.environ.get('DYNTOPO_%s_%s' % (role, envsfx), None) if env is not None: if constraint == 'multinode_requested': env = bool(env) setattr(object_constraints, constraint, env.split()) break find_kwargs = {} if object_constraints.type is not None: find_kwargs['type'] = object_constraints.type.__contains__ name_constraint = None if object_constraints.match_name is not None: v = set(object_constraints.match_name) if name_constraint is None: name_constraint = v else: name_constraint &= v if group_name is not None: v = set(o.name for o in dynobj_mappings[object_name]) if name_constraint is 
None: name_constraint = v else: name_constraint &= v if name_constraint is not None: find_kwargs['name'] = name_constraint.__contains__ if object_constraints.platform is not None: find_kwargs['platform'] = object_constraints.platform.__contains__ if object_constraints.tgen_platform is not None: find_kwargs['tgen_platform'] = object_constraints.tgen_platform.__contains__ if object_constraints.os is not None: find_kwargs['os'] = object_constraints.os.__contains__ if object_constraints.multinode_requested is not None: find_kwargs['multinode'] = object_constraints.multinode_requested if _trace.find_cmd: logger.debug('group_name=%r, object_name=%r, find_kwargs=%r', group_name, object_name, find_kwargs) xos_devices = Genie.testbed.find_devices(**find_kwargs) if _trace.find_cmd: logger.debug('found xos_devices=%r', xos_devices) for predicate in (object_constraints.predicates or []): xos_devices = filter(predicate, xos_devices) xos_devices = OrderedSet(sorted(xos_devices)) dynobj_mappings[group_name][object_name] = xos_devices if group_name is not None and group_weight == 'mandatory': dynobj_mappings[None][object_name] = dynobj_mappings[group_name][object_name] # }}} elif type == 'interface': # {{{ m = re.search(r'^(?P<device_name>R\d+|TGEN)I(?P<link_intf_num>\d+)(?:\.(?P<intf_sub>\d+))?$', object_name) if not m: raise KeyError('Invalid interface object name %r' % (object_name,)) device_name = m.group('device_name') # link_intf_num = int(m.group('link_intf_num')) # intf_sub = m.group('intf_sub') if group_name is None: for constraint, envsfx in ( #('device', 'DEVICE'), ('match_name', 'NAME'), ('type', 'TYPE'), #('engine', 'ENGINE'), ('product_id', 'PRODUCT_ID'), #('diff_slot', 'DIFF_SLOT'), #('same_slot', 'SAME_SLOT'), ): for role in [object_name] + list(object_constraints.label): env = os.environ.get('DYNTOPO_%s_%s' % (role, envsfx), None) if env is not None: setattr(object_constraints, constraint, env.split()) break find_kwargs = {} find_kwargs['device'] = 
dynobj_mappings[group_name][device_name].__contains__ # if object_constraints.device_name is not None: # find_kwargs['name', object_constraints.device_name.__contains__] if object_constraints.match_name is not None: find_kwargs['name'] = object_constraints.match_name.__contains__ if object_constraints.type is not None: find_kwargs['type'] = object_constraints.type.__contains__ # if object_constraints.engine is not None: # find_kwargs['engine'] = object_constraints.engine.__contains__ if object_constraints.product_id is not None: find_kwargs['product_id'] = object_constraints.product_id.__contains__ # if object_constraints.diff_slot is not None: # find_kwargs['diff_slot'] = object_constraints.diff_slot.__contains__ # if object_constraints.same_slot is not None: # find_kwargs['same_slot'] = object_constraints.same_slot.__contains__ if group_name is not None: find_kwargs['iterable'] = dynobj_mappings[None][object_name] if _trace.find_cmd: logger.debug('group_name=%r, object_name=%r, device_name=%r, find_kwargs=%r', group_name, object_name, device_name, find_kwargs) # diff_slot & same_slot handled later xos_interfaces = Genie.testbed.find_interfaces(**find_kwargs) if _trace.find_cmd: logger.debug('found xos_interfaces=%r', xos_interfaces) for predicate in (object_constraints.predicates or []): xos_interfaces = filter(predicate, xos_interfaces) xos_interfaces = OrderedSet(sorted(xos_interfaces)) dynobj_mappings[group_name][object_name] = xos_interfaces if group_name is not None and group_weight == 'mandatory': dynobj_mappings[None][object_name] = dynobj_mappings[group_name][object_name] # }}} elif type == 'link': # {{{ if group_name is None: for constraint, envsfx in ( ('match_name', 'NAME'), ('type', 'TYPE'), ('interface', 'INTERFACE'), ): for role in [object_name] + list(object_constraints.label): env = os.environ.get('DYNTOPO_%s_%s' % (role, envsfx), None) if env is not None: setattr(object_constraints, constraint, env.split()) break # TODO from and to constraints, 
including exact names link_device_names = self.link_device_names(object_name) link_interface_names = self.link_interface_names(object_name) # XXXJST TODO Support Mesh objects device_name1 = link_device_names[0] device_name2 = link_device_names[1] find_kwargs = {} if object_constraints.match_name is not None: find_kwargs['name'] = object_constraints.match_name.__contains__ if object_constraints.type is not None: find_kwargs['type'] = object_constraints.type.__contains__ if group_name is not None: find_kwargs['iterable'] = dynobj_mappings[None][object_name] if _trace.find_cmd: logger.debug('group_name=%r, object_name=%r, find_kwargs=%r', group_name, object_name, find_kwargs) xos_links = [] if _trace.find_cmd: logger.debug('found xos_links=%r', xos_links) xos_links = Genie.testbed.find_links(**find_kwargs) # Could add this as find_kwargs['interfaces'] but it is # very slow compared to other constraint checks,do # last. def test_link_interface_mappings(interfaces): for iintf1, intf1 in enumerate(interfaces): if intf1 not in dynobj_mappings[group_name][link_interface_names[0]]: continue if intf1.device not in dynobj_mappings[group_name][device_name1]: continue if object_constraints.interface is not None: if intf1.name not in object_constraints.interface: continue # intf1 matches for iintf2, intf2 in enumerate(interfaces): if iintf1 == iintf2: continue if intf2 not in dynobj_mappings[group_name][link_interface_names[1]]: continue if intf2.device not in dynobj_mappings[group_name][device_name2]: continue if object_constraints.interface is not None: if intf2.name not in object_constraints.interface: continue break # intf2 matches else: continue # no intf2 matches return True # intf1 and intf2 match else: return False # no intf1 and intf2 match xos_links = filter( (lambda xos_link: test_link_interface_mappings(xos_link.interfaces)), xos_links) # generator # Filter predicates last so users can proceed based on # the assumption that all other constraints are # asserted first. 
for predicate in (object_constraints.predicates or []): xos_links = filter(predicate, xos_links) # generator xos_links = sorted(xos_links) new_xos_links = set() new_xos_link_parts = set() # [xos_link, xos_intf1, xos_intf2, ...] arr_new_xos_devices = {device_name: set() for device_name in link_device_names} arr_new_link_parts = {} for xos_link in xos_links: xos_link_intfs = xos_link.interfaces # WeakList if len(xos_link_intfs) < len(link_device_names): # XXXJST TODO != 2 devices! continue xos_link_intfs = sorted(xos_link_intfs) # WeakList -> sorted list link_predicates = object_constraints.predicates or [] for xos_link_intfs in itertools.permutations(xos_link_intfs, len(link_device_names)): # xos_link_intfs is a tuple... keep it as such because lists are not hashable bAccept = True for i, xos_intf in enumerate(xos_link_intfs): if xos_intf not in dynobj_mappings[group_name][link_interface_names[i]]: bAccept = False break if i >= 2: # find_links above only matches I1 & I2... # complete constraint matching for I3+ if object_constraints.interface is not None: if xos_intf.name not in object_constraints.interface: # bAccept = False continue if not bAccept: continue if not all(predicate(xos_link) for predicate in link_predicates): # bAccept = False break # NOT continue link_predicates = [] # Only needs to be tested once per link xos_link_devices = tuple(xos_intf.device for xos_intf in xos_link_intfs) for xos_device, device_name in zip(xos_link_devices, link_device_names): arr_new_xos_devices[device_name].add(xos_device) xos_link_parts = (xos_link,) + xos_link_intfs new_xos_links.add(xos_link) new_xos_link_parts.add(xos_link_parts) arr_new_link_parts.setdefault(xos_link_devices, set()) arr_new_link_parts[xos_link_devices].add(xos_link_parts) dynobj_mappings[group_name][object_name] = OrderedSet(sorted(new_xos_links)) dynobj_mappings_link_parts[group_name][object_name] = collections.defaultdict(set) dynobj_mappings_link_parts[group_name][object_name][None] = 
new_xos_link_parts for xos_link_devices, xos_link_parts_set in arr_new_link_parts.items(): dynobj_mappings_link_parts[group_name][object_name][xos_link_devices] = xos_link_parts_set if group_name is not None and group_weight == 'mandatory': dynobj_mappings[None][object_name] = dynobj_mappings[group_name][object_name] dynobj_mappings_link_parts[None][object_name] = dynobj_mappings_link_parts[group_name][object_name] arr_old_link_parts = copy(dynobj_mappings_link_parts[None][object_name]) dynobj_mappings_link_parts[None][object_name] = collections.defaultdict(set) dynobj_mappings_link_parts[None][object_name][None] = arr_old_link_parts.pop(None) for xos_link_devices in arr_new_link_parts.keys(): dynobj_mappings_link_parts[None][object_name][xos_link_devices] = \ arr_old_link_parts.getdefault(xos_link_devices, set()) & dynobj_mappings_link_parts[group_name][object_name][xos_link_devices] if object_name in absolutely_required_objects: for device_name, new_xos_devices in arr_new_xos_devices.items(): dynobj_mappings[group_name][device_name] &= new_xos_devices if group_name is not None and group_weight == 'mandatory': for device_name in link_device_names: dynobj_mappings[None][device_name] = dynobj_mappings[group_name][device_name] # XXXJST TODO -- optimize based on list of accepted xos_link_parts {{{ # if { $optimize && [lcontain $absolutely_required_objects $object_name] } { # set dynobj_mappings($ipfxgroup$device_name1) [intersect $dynobj_mappings($ipfxgroup$device_name1) [enaObjGetParam -list true $lvIntfs1 -router -self]] # set dynobj_mappings($ipfxgroup$device_name2) [intersect
# last update: 11/19 - cleaned up the last several tests to not try to delete the
#                      addresses the host would use for comunication, but rather
#                      an address they would use to communicate between each other.
# past updates:
# 11/17/18 - changed to use subnets, since Mac and Linux apparently really need them
# 11/10/18 - fixed the expected result of GET view
import os
import sys
import requests
import time
import unittest
import json

import docker_control

dockerBuildTag = "cs128-hw3"  # put the tag for your docker build here

hostIp = "localhost"  # this can be localhost again

needSudo = False  # obviously if you need sudo, set this to True
# contact me imediately if setting this to True breaks things
# (I don't have a machine which needs sudo, so it has not been tested,
# although in theory it should be fine)

port_prefix = "808"

networkName = "mynet"  # the name of the network you created

# should be everything up to the last period of the subnet you specified when you
networkIpPrefix = "10.0.0."  # created your network

# sets number of seconds we sleep after certain actions to let data propagate
# through your system
propogationTime = 3
# you may lower this to speed up your testing if you know that your system is
# fast enough to propigate information faster than this
# I do not recomend increasing this

dc = docker_control.docker_controller(networkName, needSudo)


def getViewString(view):
    """Build the comma-separated ``ip:port,ip:port,...`` string from a list of
    container records (each holding a ``networkIpPortAddress``)."""
    return ",".join(instance["networkIpPortAddress"] for instance in view)


def viewMatch(collectedView, expectedView):
    """Return True iff two comma-separated view strings contain the same
    addresses, compared as a multiset (order-insensitive, duplicates count)."""
    collected = collectedView.split(",")
    expected = expectedView.split(",")
    if len(collected) != len(expected):
        return False
    for ipPort in expected:
        if ipPort not in collected:
            return False
        collected.remove(ipPort)
    # Same length and every expected entry matched, so nothing can remain.
    return not collected


# Basic Functionality
# These are the endpoints we should be able to hit
# KVS Functions
def storeKeyValue(ipPort, key, value, payload):
    """PUT a key/value pair (with causal payload) onto the node at ipPort."""
    print('PUT: http://%s/keyValue-store/%s' % (str(ipPort), key))
    return requests.put('http://%s/keyValue-store/%s' % (str(ipPort), key),
                        data={'val': value, 'payload': json.dumps(payload)},
                        timeout=5)


def checkKey(ipPort, key, payload):
    """GET the existence-check endpoint for key on the node at ipPort."""
    print('GET: http://%s/keyValue-store/search/%s' % (str(ipPort), key))
    return requests.get('http://%s/keyValue-store/search/%s' % (str(ipPort), key),
                        data={'payload': json.dumps(payload)})


def getKeyValue(ipPort, key, payload):
    """GET the value stored under key on the node at ipPort."""
    print('GET: http://%s/keyValue-store/%s' % (str(ipPort), key))
    return requests.get('http://%s/keyValue-store/%s' % (str(ipPort), key),
                        data={'payload': json.dumps(payload)})


def deleteKey(ipPort, key, payload):
    """DELETE key on the node at ipPort."""
    print('DELETE: http://%s/keyValue-store/%s' % (str(ipPort), key))
    return requests.delete('http://%s/keyValue-store/%s' % (str(ipPort), key),
                           data={'payload': json.dumps(payload)})


# Replication Functions
def addNode(ipPort, newAddress):
    """Tell the node at ipPort to add newAddress to the replica view."""
    print('PUT: http://%s/view' % str(ipPort))
    return requests.put('http://%s/view' % str(ipPort),
                        data={'ip_port': newAddress})


def removeNode(ipPort, oldAddress):
    """Tell the node at ipPort to remove oldAddress from the replica view."""
    print('DELETE: http://%s/view' % str(ipPort))
    return requests.delete('http://%s/view' % str(ipPort),
                           data={'ip_port': oldAddress})


def viewNetwork(ipPort):
    """GET the current replica view from the node at ipPort."""
    print('GET: http://%s/view' % str(ipPort))
    return requests.get('http://%s/view' % str(ipPort))


###########################################################################################

class TestHW3(unittest.TestCase):
    """End-to-end tests for the replicated KVS: basic CRUD, view management,
    and replication behaviour across Docker containers."""

    view = {}

    def setUp(self):
        # Spin up a fresh two-node cluster for every test.
        self.view = dc.spinUpManyContainers(
            dockerBuildTag, hostIp, networkIpPrefix, port_prefix, 2)
        for container in self.view:
            if " " in container["containerID"]:
                # A containerID with whitespace means docker errored out.
                self.fail("There is likely a problem in the settings of "
                          "your ip addresses or network.")

    def tearDown(self):
        dc.cleanUpDockerContainer()

    def getPayload(self, ipPort, key):
        """Fetch a current causal payload by issuing a harmless search."""
        response = checkKey(ipPort, key, {})
        print(response)
        data = response.json()
        return data["payload"]

    # The confirm* helpers issue one request, assert on status code and JSON
    # body, and hand back the (possibly updated) causal payload.
    # NOTE: payload defaults use the None-sentinel idiom rather than a shared
    # mutable {} default.

    def confirmAddKey(self, ipPort, key, value, expectedStatus, expectedMsg,
                      expectedReplaced, payload=None):
        payload = {} if payload is None else payload
        response = storeKeyValue(ipPort, key, value, payload)
        self.assertEqual(response.status_code, expectedStatus)
        data = response.json()
        self.assertEqual(data['msg'], expectedMsg)
        self.assertEqual(data['replaced'], expectedReplaced)
        return data["payload"]

    def confirmCheckKey(self, ipPort, key, expectedStatus, expectedResult,
                        expectedIsExists, payload=None):
        payload = {} if payload is None else payload
        response = checkKey(ipPort, key, payload)
        print(response)
        self.assertEqual(response.status_code, expectedStatus)
        data = response.json()
        self.assertEqual(data['result'], expectedResult)
        self.assertEqual(data['isExists'], expectedIsExists)
        return data["payload"]

    def confirmGetKey(self, ipPort, key, expectedStatus, expectedResult,
                      expectedValue=None, expectedMsg=None, payload=None):
        payload = {} if payload is None else payload
        response = getKeyValue(ipPort, key, payload)
        print(response)
        self.assertEqual(response.status_code, expectedStatus)
        data = response.json()
        self.assertEqual(data['result'], expectedResult)
        # Value/msg are only asserted when the caller supplied an expectation
        # AND the response carried the field.
        if expectedValue is not None and 'value' in data:
            self.assertEqual(data['value'], expectedValue)
        if expectedMsg is not None and 'msg' in data:
            self.assertEqual(data['msg'], expectedMsg)
        return data["payload"]

    def confirmDeleteKey(self, ipPort, key, expectedStatus, expectedResult,
                         expectedMsg, payload=None):
        payload = {} if payload is None else payload
        response = deleteKey(ipPort, key, payload)
        print(response)
        self.assertEqual(response.status_code, expectedStatus)
        data = response.json()
        self.assertEqual(data['result'], expectedResult)
        self.assertEqual(data['msg'], expectedMsg)
        return data["payload"]

    def confirmViewNetwork(self, ipPort, expectedStatus, expectedView):
        response = viewNetwork(ipPort)
        print(response)
        self.assertEqual(response.status_code, expectedStatus)
        data = response.json()
        self.assertTrue(viewMatch(data['view'], expectedView),
                        "%s != %s" % (data['view'], expectedView))

    def confirmAddNode(self, ipPort, newAddress, expectedStatus,
                       expectedResult, expectedMsg):
        response = addNode(ipPort, newAddress)
        print(response)
        self.assertEqual(response.status_code, expectedStatus)
        data = response.json()
        self.assertEqual(data['result'], expectedResult)
        self.assertEqual(data['msg'], expectedMsg)

    def confirmDeleteNode(self, ipPort, removedAddress, expectedStatus,
                          expectedResult, expectedMsg):
        response = removeNode(ipPort, removedAddress)
        print(response)
        self.assertEqual(response.status_code, expectedStatus)
        data = response.json()
        self.assertEqual(data['result'], expectedResult)
        self.assertEqual(data['msg'], expectedMsg)

    ##########################################################################################################
    # Confirm Basic functionality:

    def test_a_add_key_value_one_node(self):
        ipPort = self.view[0]["testScriptAddress"]
        key = "addNewKey"
        payload = self.confirmAddKey(ipPort=ipPort, key=key,
                                     value="a simple value",
                                     expectedStatus=200,
                                     expectedMsg="Added successfully",
                                     expectedReplaced=False)
        value = "aNewValue"
        payload = self.confirmAddKey(ipPort=ipPort, key=key, value=value,
                                     expectedStatus=201,
                                     expectedMsg="Updated successfully",
                                     expectedReplaced=True, payload=payload)
        payload = self.confirmCheckKey(ipPort=ipPort, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=True, payload=payload)
        payload = self.confirmGetKey(ipPort=ipPort, key=key,
                                     expectedStatus=200,
                                     expectedResult="Success",
                                     expectedValue=value, payload=payload)

    def test_b_add_key_value_two_nodes(self):
        ipPortOne = self.view[0]["testScriptAddress"]
        ipPortTwo = self.view[1]["testScriptAddress"]
        key = "keyOnBothNodes"
        value = "aValue"
        payload = self.getPayload(ipPortOne, key)
        payload = self.confirmAddKey(ipPort=ipPortOne, key=key, value=value,
                                     expectedStatus=200,
                                     expectedMsg="Added successfully",
                                     expectedReplaced=False, payload=payload)
        payload = self.confirmCheckKey(ipPort=ipPortOne, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=True, payload=payload)
        time.sleep(propogationTime)
        payload = self.confirmCheckKey(ipPort=ipPortTwo, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=True, payload=payload)
        payload = self.confirmGetKey(ipPort=ipPortTwo, key=key,
                                     expectedStatus=200,
                                     expectedResult="Success",
                                     expectedValue=value, payload=payload)

    def test_c_delete_value_one_node(self):
        ipPort = self.view[0]["testScriptAddress"]
        key = "keyToBeDeletedFromOneNode"
        value = "aValue"
        payload = self.getPayload(ipPort, key)
        payload = self.confirmAddKey(ipPort=ipPort, key=key, value=value,
                                     expectedStatus=200,
                                     expectedMsg="Added successfully",
                                     expectedReplaced=False, payload=payload)
        payload = self.confirmCheckKey(ipPort=ipPort, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=True, payload=payload)
        payload = self.confirmDeleteKey(ipPort=ipPort, key=key,
                                        expectedStatus=200,
                                        expectedResult="Success",
                                        expectedMsg="Key deleted",
                                        payload=payload)
        payload = self.confirmCheckKey(ipPort=ipPort, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=False, payload=payload)

    def test_d_delete_value_two_nodes(self):
        ipPortOne = self.view[0]["testScriptAddress"]
        ipPortTwo = self.view[1]["testScriptAddress"]
        key = "keyToBeDeletedFromTwoNodes"
        value = "aValue"
        payload = self.getPayload(ipPortOne, key)
        # add the key
        payload = self.confirmAddKey(ipPort=ipPortTwo, key=key, value=value,
                                     expectedStatus=200,
                                     expectedMsg="Added successfully",
                                     expectedReplaced=False, payload=payload)
        payload = self.confirmCheckKey(ipPort=ipPortTwo, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=True, payload=payload)
        time.sleep(propogationTime)
        payload = self.confirmCheckKey(ipPort=ipPortOne, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=True, payload=payload)
        # delete the key
        payload = self.confirmDeleteKey(ipPort=ipPortOne, key=key,
                                        expectedStatus=200,
                                        expectedResult="Success",
                                        expectedMsg="Key deleted",
                                        payload=payload)
        payload = self.confirmCheckKey(ipPort=ipPortOne, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=False, payload=payload)
        time.sleep(propogationTime)
        payload = self.confirmCheckKey(ipPort=ipPortTwo, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=False, payload=payload)

    def test_e_check_nonexistantKey(self):
        self.confirmCheckKey(ipPort=self.view[0]["testScriptAddress"],
                             key="SomethingWhichDoesNotExist",
                             expectedStatus=200, expectedResult="Success",
                             expectedIsExists=False)

    def test_f_get_nonexistantKey(self):
        self.confirmGetKey(ipPort=self.view[0]["testScriptAddress"],
                           key="SomethingWhichDoesNotExist",
                           expectedStatus=404, expectedResult="Error",
                           expectedMsg="Key does not exist")

    def test_g_delete_nonexistantKey(self):
        self.confirmDeleteKey(ipPort=self.view[0]["testScriptAddress"],
                              key="SomethingWhichDoesNotExist",
                              expectedStatus=404, expectedResult="Error",
                              expectedMsg="Key does not exist")

    # Everything up to this point could be done via message forwarding, as in
    # assignment 2. However, if that is all you are doing, the following tests
    # should fail

    def test_h_get_view(self):
        viewString = getViewString(self.view)
        self.confirmViewNetwork(ipPort=self.view[0]["testScriptAddress"],
                                expectedStatus=200, expectedView=viewString)

    def test_i_add_node_to_network(self):
        ipPort = self.view[0]["testScriptAddress"]
        newPort = "%s4" % port_prefix
        newView = "%s4:8080" % (networkIpPrefix)
        viewString = getViewString(self.view)
        viewString += ",%s" % newView
        newNode = dc.spinUpDockerContainer(
            dockerBuildTag, hostIp, networkIpPrefix + "4", newPort, viewString)
        self.view.append(newNode)
        self.confirmAddNode(ipPort=ipPort, newAddress=newView,
                            expectedStatus=200, expectedResult="Success",
                            expectedMsg="Successfully added %s to view" % newView)
        time.sleep(propogationTime)
        for node in self.view:
            self.confirmViewNetwork(ipPort=node["testScriptAddress"],
                                    expectedStatus=200,
                                    expectedView=viewString)

    def test_j_remove_node_from_network(self):
        ipPort = self.view[0]["testScriptAddress"]
        removedNode = self.view.pop()
        self.confirmDeleteNode(
            ipPort=ipPort,
            removedAddress=removedNode["networkIpPortAddress"],
            expectedStatus=200, expectedResult="Success",
            expectedMsg="Successfully removed %s from view"
                        % removedNode["networkIpPortAddress"])
        for node in self.view:
            self.confirmViewNetwork(ipPort=node["testScriptAddress"],
                                    expectedStatus=200,
                                    expectedView=getViewString(self.view))

    def test_k_replication_add_node_get_up_to_speed(self):
        key = "OhLookAKey"
        value = "AndHeyAValue"
        ipPort = self.view[0]["testScriptAddress"]
        payload = self.getPayload(ipPort, key)
        payload = self.confirmAddKey(ipPort=ipPort, key=key, value=value,
                                     expectedStatus=200,
                                     expectedMsg="Added successfully",
                                     expectedReplaced=False, payload=payload)
        self.test_i_add_node_to_network()
        time.sleep(propogationTime)
        newIpPort = self.view[2]["testScriptAddress"]
        payload = self.confirmCheckKey(ipPort=newIpPort, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=True, payload=payload)
        payload = self.confirmGetKey(ipPort=newIpPort, key=key,
                                     expectedStatus=200,
                                     expectedResult="Success",
                                     expectedValue=value, payload=payload)

    def test_l_replication_add_node_keep_up_to_speed(self):
        key = "HeyIGotANewKey"
        value = "YouShouldKnowAboutItToo"
        ipPort = self.view[0]["testScriptAddress"]
        self.test_i_add_node_to_network()
        payload = self.getPayload(ipPort, key)
        payload = self.confirmAddKey(ipPort=self.view[0]["testScriptAddress"],
                                     key=key, value=value,
                                     expectedStatus=200,
                                     expectedMsg="Added successfully",
                                     expectedReplaced=False, payload=payload)
        time.sleep(propogationTime)
        newIpPort = self.view[2]["testScriptAddress"]
        payload = self.confirmCheckKey(ipPort=newIpPort, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=True, payload=payload)
        payload = self.confirmGetKey(ipPort=newIpPort, key=key,
                                     expectedStatus=200,
                                     expectedResult="Success",
                                     expectedValue=value, payload=payload)

    def test_m_replication_add_node_make_sure_it_tells_everyone_else_about_new_things(self):
        key = "HeyIGotANewKey"
        value = "YouShouldKnowAboutItToo"
        self.test_i_add_node_to_network()
        ipPort = self.view[1]["testScriptAddress"]
        newIpPort = self.view[2]["testScriptAddress"]
        payload = self.getPayload(ipPort, key)
        payload = self.confirmAddKey(ipPort=newIpPort, key=key, value=value,
                                     expectedStatus=200,
                                     expectedMsg="Added successfully",
                                     expectedReplaced=False, payload=payload)
        time.sleep(propogationTime)
        payload = self.confirmCheckKey(ipPort=ipPort, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=True, payload=payload)
        payload = self.confirmGetKey(ipPort=ipPort, key=key,
                                     expectedStatus=200,
                                     expectedResult="Success",
                                     expectedValue=value, payload=payload)

    def test_n_replication_remove_node(self):
        key = "HeyWhereDidYouGo"
        value = "IllHoldYourStuffWhileYoureGone"
        stationaryNode = self.view[0]["testScriptAddress"]
        removedNode = self.view.pop()
        payload = self.getPayload(removedNode["testScriptAddress"], key)
        payload = self.confirmAddKey(ipPort=removedNode["testScriptAddress"],
                                     key=key, value=value,
                                     expectedStatus=200,
                                     expectedMsg="Added successfully",
                                     expectedReplaced=False, payload=payload)
        self.confirmDeleteNode(
            ipPort=stationaryNode,
            removedAddress=removedNode["networkIpPortAddress"],
            expectedStatus=200, expectedResult="Success",
            expectedMsg="Successfully removed %s from view"
                        % removedNode["networkIpPortAddress"])
        time.sleep(propogationTime)
        payload = self.confirmCheckKey(ipPort=stationaryNode, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=True, payload=payload)
        payload = self.confirmGetKey(ipPort=stationaryNode, key=key,
                                     expectedStatus=200,
                                     expectedResult="Success",
                                     expectedValue=value, payload=payload)

    def test_o_replication_remove_node_dont_talk_to_the_dead(self):
        key = "TheDeadCannotHear"
        value = "SoWeCanSayTheySmellAndTheydNeverKnow"
        stationaryNode = self.view[0]["testScriptAddress"]
        removedNode = self.view.pop()
        self.confirmDeleteNode(
            ipPort=stationaryNode,
            removedAddress=removedNode["networkIpPortAddress"],
            expectedStatus=200, expectedResult="Success",
            expectedMsg="Successfully removed %s from view"
                        % removedNode["networkIpPortAddress"])
        payload = self.getPayload(stationaryNode, key)
        payload = self.confirmAddKey(ipPort=stationaryNode, key=key,
                                     value=value, expectedStatus=200,
                                     expectedMsg="Added successfully",
                                     expectedReplaced=False, payload=payload)
        time.sleep(propogationTime)
        payload = self.confirmCheckKey(ipPort=removedNode["testScriptAddress"],
                                       key=key, expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=False, payload=payload)

    def test_p_replication_sudden_failure(self):
        key = "ThisLand"
        value = "CurseYourSuddenButInevitableBetrayal"
        failedNode = self.view[0]
        liveNode = self.view[1]["testScriptAddress"]
        payload = self.getPayload(liveNode, key)
        payload = self.confirmAddKey(ipPort=failedNode["testScriptAddress"],
                                     key=key, value=value,
                                     expectedStatus=200,
                                     expectedMsg="Added successfully",
                                     expectedReplaced=False, payload=payload)
        time.sleep(propogationTime)
        dc.cleanUpDockerContainer(failedNode["containerID"])
        payload = self.confirmCheckKey(ipPort=liveNode, key=key,
                                       expectedStatus=200,
                                       expectedResult="Success",
                                       expectedIsExists=True, payload=payload)
        payload = self.confirmGetKey(ipPort=liveNode, key=key,
                                     expectedStatus=200,
                                     expectedResult="Success",
                                     expectedValue=value, payload=payload)


if __name__ == '__main__':
    # NOTE(review): the original chunk was cut off after the guard; the
    # standard unittest entry point is the only sensible completion.
    unittest.main()
# NOTE(review): start of a new embedded file — pytest tests for the
# `sqlitefts` tokenizer bindings, run against APSW/SQLite FTS3/FTS4 (plus
# FTS5 tests marked xfail under the APSW amalgamation build). Two simple
# regex tokenizers yield (token, byte_start, byte_end) over \w+ matches;
# tests cover table creation, insert, MATCH queries (prefix, phrase, NEAR,
# column filters), fts3tokenize output, and the fts5 API handle.
# Formatting is collapsed onto long physical lines; code kept byte-identical.
# The last test (`test_aux_and_tokenize`) is cut off mid-SQL-string at the
# chunk boundary and cannot be completed from here.
# coding: utf-8 from __future__ import print_function, unicode_literals import re import pytest import sqlitefts as fts from sqlitefts import fts5, fts5_aux apsw = pytest.importorskip("apsw") class SimpleTokenizer(fts.Tokenizer): _p = re.compile(r"\w+", re.UNICODE) def tokenize(self, text): for m in self._p.finditer(text): s, e = m.span() t = text[s:e] l = len(t.encode("utf-8")) p = len(text[:s].encode("utf-8")) yield t, p, p + l class SimpleFTS5Tokenizer(fts5.FTS5Tokenizer): _p = re.compile(r"\w+", re.UNICODE) def tokenize(self, text, flags): for m in self._p.finditer(text): s, e = m.span() t = text[s:e] l = len(t.encode("utf-8")) p = len(text[:s].encode("utf-8")) yield t, p, p + l def test_createtable(): c = apsw.Connection(":memory:") name = "simple" sql = "CREATE VIRTUAL TABLE fts USING FTS4(tokenize={})".format(name) fts.register_tokenizer(c, name, fts.make_tokenizer_module(SimpleTokenizer())) c.cursor().execute(sql) r = ( c.cursor() .execute( "SELECT type, name, tbl_name, sql FROM sqlite_master WHERE type='table' AND name='fts'" ) .fetchone() ) assert r == ("table", "fts", "fts", sql) c.close() def test_insert(): c = apsw.Connection(":memory:") name = "simple" content = "これは日本語で書かれています" fts.register_tokenizer(c, name, fts.make_tokenizer_module(SimpleTokenizer())) c.cursor().execute("CREATE VIRTUAL TABLE fts USING FTS4(tokenize={})".format(name)) r = c.cursor().execute("INSERT INTO fts VALUES(?)", (content,)) assert c.changes() == 1 r = c.cursor().execute("SELECT content FROM fts").fetchone() assert r[0] == content c.close() def test_match(): c = apsw.Connection(":memory:") name = "simple" contents = [("abc def",), ("abc xyz",), ("あいうえお かきくけこ",), ("あいうえお らりるれろ",)] fts.register_tokenizer(c, name, fts.make_tokenizer_module(SimpleTokenizer())) c.cursor().execute("CREATE VIRTUAL TABLE fts USING FTS4(tokenize={})".format(name)) r = c.cursor().executemany("INSERT INTO fts VALUES(?)", contents) r = c.cursor().execute("SELECT * FROM fts").fetchall() assert len(r) == 4 
r = c.cursor().execute("SELECT * FROM fts WHERE fts MATCH 'abc'").fetchall() assert len(r) == 2 r = c.cursor().execute("SELECT content FROM fts WHERE fts MATCH 'def'").fetchall() assert len(r) == 1 and r[0][0] == contents[0][0] r = c.cursor().execute("SELECT content FROM fts WHERE fts MATCH 'xyz'").fetchall() assert len(r) == 1 and r[0][0] == contents[1][0] r = c.cursor().execute("SELECT * FROM fts WHERE fts MATCH 'zzz'").fetchall() assert len(r) == 0 r = c.cursor().execute("SELECT * FROM fts WHERE fts MATCH 'あいうえお'").fetchall() assert len(r) == 2 r = c.cursor().execute("SELECT content FROM fts WHERE fts MATCH 'かきくけこ'").fetchall() assert len(r) == 1 and r[0][0] == contents[2][0] r = c.cursor().execute("SELECT content FROM fts WHERE fts MATCH 'らりるれろ'").fetchall() assert len(r) == 1 and r[0][0] == contents[3][0] r = c.cursor().execute("SELECT * FROM fts WHERE fts MATCH 'まみむめも'").fetchall() assert len(r) == 0 c.close() def test_full_text_index_queries(): name = "simple" docs = [ ( "README", "sqlitefts-python provides binding for tokenizer of SQLite Full-Text search(FTS3/4). 
It allows you to write tokenizers in Python.", ), ( "LICENSE", """Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:""", ), ("日本語", "あいうえお かきくけこ さしすせそ たちつてと なにぬねの"), ] with apsw.Connection(":memory:") as c: fts.register_tokenizer(c, name, fts.make_tokenizer_module(SimpleTokenizer())) c.cursor().execute( "CREATE VIRTUAL TABLE docs USING FTS4(title, body, tokenize={})".format( name ) ) c.cursor().executemany("INSERT INTO docs(title, body) VALUES(?, ?)", docs) r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'Python'") .fetchall() ) assert len(r) == 1 r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'bind'").fetchall() assert len(r) == 0 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'binding'") .fetchall() ) assert len(r) == 1 r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'to'").fetchall() assert len(r) == 2 r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお'").fetchall() assert len(r) == 1 r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'らりるれろ'").fetchall() assert len(r) == 0 assert ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'binding'") .fetchall()[0] == c.cursor() .execute("SELECT * FROM docs WHERE body MATCH 'binding'") .fetchall()[0] ) assert ( c.cursor() .execute("SELECT * FROM docs WHERE body MATCH 'binding'") .fetchall()[0] == c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'body:binding'") .fetchall()[0] ) assert ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお'") .fetchall()[0] == c.cursor() .execute("SELECT * FROM docs WHERE body MATCH 'あいうえお'") .fetchall()[0] 
) assert ( c.cursor() .execute("SELECT * FROM docs WHERE body MATCH 'かきくけこ'") .fetchall()[0] == c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'body:かきくけこ'") .fetchall()[0] ) r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'title:bind'") .fetchall() ) assert len(r) == 0 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'title:README'") .fetchall() ) assert len(r) == 1 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'title:日本語'") .fetchall() ) assert len(r) == 1 r = c.cursor().execute("SELECT * FROM docs WHERE title MATCH 'bind'").fetchall() assert len(r) == 0 r = ( c.cursor() .execute("SELECT * FROM docs WHERE title MATCH 'README'") .fetchall() ) assert len(r) == 1 r = c.cursor().execute("SELECT * FROM docs WHERE title MATCH '日本語'").fetchall() assert len(r) == 1 r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'to in'").fetchall() assert len(r) == 2 r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'Py*'").fetchall() assert len(r) == 1 r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'Z*'").fetchall() assert len(r) == 0 r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'あ*'").fetchall() assert len(r) == 1 r = c.cursor().execute("SELECT * FROM docs WHERE docs MATCH 'ん*'").fetchall() assert len(r) == 0 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'tokenizer SQLite'") .fetchall() ) assert len(r) == 1 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH '\"tokenizer SQLite\"'") .fetchall() ) assert len(r) == 0 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお たちつてと'") .fetchall() ) assert len(r) == 1 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH '\"あいうえお たちつてと\"'") .fetchall() ) assert len(r) == 0 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH '\"tok* SQL*\"'") .fetchall() ) assert len(r) == 0 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH '\"tok* of SQL*\"'") .fetchall() ) assert 
len(r) == 1 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH '\"あ* さ*\"'") .fetchall() ) assert len(r) == 0 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH '\"あ* かきくけこ さ*\"'") .fetchall() ) assert len(r) == 1 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'tokenizer NEAR SQLite'") .fetchall() ) assert len(r) == 1 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'binding NEAR/2 SQLite'") .fetchall() ) assert len(r) == 0 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'binding NEAR/3 SQLite'") .fetchall() ) assert len(r) == 1 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお NEAR たちつてと'") .fetchall() ) assert len(r) == 1 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお NEAR/2 たちつてと'") .fetchall() ) assert len(r) == 1 r = ( c.cursor() .execute("SELECT * FROM docs WHERE docs MATCH 'あいうえお NEAR/3 たちつてと'") .fetchall() ) assert len(r) == 1 def test_tokenizer_output(): name = "simple" with apsw.Connection(":memory:") as c: fts.register_tokenizer(c, name, fts.make_tokenizer_module(SimpleTokenizer())) c.cursor().execute( "CREATE VIRTUAL TABLE tok1 USING fts3tokenize({})".format(name) ) expect = [ ("This", 0, 4, 0), ("is", 5, 7, 1), ("a", 8, 9, 2), ("test", 10, 14, 3), ("sentence", 15, 23, 4), ] for a, e in zip( c.cursor().execute( "SELECT token, start, end, position " "FROM tok1 WHERE input='This is a test sentence.'" ), expect, ): assert e == a s = "これ は テスト の 文 です" expect = [(None, 0, -1, 0)] for i, t in enumerate(s.split()): expect.append( (t, expect[-1][2] + 1, expect[-1][2] + 1 + len(t.encode("utf-8")), i) ) expect = expect[1:] for a, e in zip( c.cursor().execute( "SELECT token, start, end, position " "FROM tok1 WHERE input=?", [s] ), expect, ): assert e == a @pytest.mark.xfail( apsw.using_amalgamation, reason="FTS5 with APSW+Amalgamation not supported" ) def test_fts5_api_from_db(): with apsw.Connection(":memory:") as c: fts5api = fts5.fts5_api_from_db(c) 
assert fts5api.iVersion == 2 assert fts5api.xCreateTokenizer @pytest.mark.xfail( apsw.using_amalgamation, reason="FTS5 with APSW+Amalgamation not supported", raises=fts.Error, ) def test_aux_and_tokenize(): c = apsw.Connection(":memory:") try: fts5_aux.register_aux_function(c, "tokenize", fts5_aux.aux_tokenize) cur = c.cursor() cur.execute("CREATE VIRTUAL TABLE fts USING FTS5(content)") cur.executemany("INSERT INTO fts VALUES(?)", (["hello world"], ["こんにちは 世界"])) cur.execute("SELECT COUNT(*) FROM
# NOTE(review): start of a new embedded file — unittest suite for the
# Fusion 360 Gym server's export endpoints (mesh STL/OBJ, BRep STEP/SMT/F3D,
# sketch PNG/DXF, screenshot PNG, and face/extrude graph formats), driven
# through Fusion360GymClient over HTTP. Formatting is collapsed onto long
# physical lines; code kept byte-identical. The leading `<reponame>` token is
# an extraction artifact, and the chunk ends mid-`def`, so neither can be
# repaired from here. Observations grounded in the visible code:
#  - `test_mesh_invalid_format` is defined twice; in a class body the second
#    definition silently replaces the first, so the first version never runs.
#  - `test_brep_smt` leaves its output file behind (its `unlink()` call is
#    commented out), unlike the other export tests.
<reponame>dmsteck/Fusion360GalleryDataset """ Test export functionality of the Fusion 360 Server """ import unittest import requests from pathlib import Path import sys import os import numpy from stl import mesh import importlib import json import shutil import common_test # Add the client folder to sys.path CLIENT_DIR = os.path.join(os.path.dirname(__file__), "..", "client") if CLIENT_DIR not in sys.path: sys.path.append(CLIENT_DIR) from fusion360gym_client import Fusion360GymClient HOST_NAME = "127.0.0.1" PORT_NUMBER = 8080 class TestFusion360ServerExport(unittest.TestCase): @classmethod def setUpClass(cls): cls.client = Fusion360GymClient(f"http://{HOST_NAME}:{PORT_NUMBER}") # Clear all documents so we start with a clean slate cls.client.clear() # ------------------------------------------ # TEST FILES cls.data_dir = Path(__file__).parent.parent.parent / "testdata" cls.output_dir = cls.data_dir / "output" box_design = "SingleSketchExtrude" hex_design = "Hexagon" couch_design = "Couch" # Box json reconstruction file cls.box_design_json_file = cls.data_dir / f"{box_design}.json" # Hex shape json reconstruction file cls.hex_design_json_file = cls.data_dir / f"{hex_design}.json" # Couch design cls.couch_design_json_file = cls.data_dir / f"{couch_design}.json" # # OUTPUT FILES # Mesh stl file cls.test_mesh_stl_file = cls.output_dir / f"{box_design}.stl" # Mesh obj file cls.test_mesh_obj_file = cls.output_dir / f"{box_design}.obj" # BRep step file cls.test_brep_step_file = cls.output_dir / f"{box_design}.step" # BRep smt file cls.test_brep_smt_file = cls.output_dir / f"{box_design}.smt" # BRep f3d file cls.test_brep_f3d_file = cls.output_dir / f"{box_design}.f3d" # Screenshot png file cls.test_screenshot_png_file = cls.output_dir / f"{box_design}.png" # Test output temp folder cls.test_output_dir = cls.output_dir / "test_output" # Make sure it is empty first if cls.output_dir.exists(): shutil.rmtree(cls.output_dir) if not cls.output_dir.exists(): 
cls.output_dir.mkdir() if cls.test_output_dir.exists(): shutil.rmtree(cls.test_output_dir) # Clean up after ourselves cls.clean_output = True # ------------------------------------------ def test_mesh_invalid_format(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) # Save out the mesh test_invalid_mesh_file = self.data_dir / "file.obj" r = self.client.mesh(test_invalid_mesh_file) self.assertIsNone(r, msg="mesh response is None") r = self.client.clear() def test_mesh_stl(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) # Save out the mesh r = self.client.mesh(self.test_mesh_stl_file) self.assertIsNotNone(r, msg="mesh response is not None") self.assertEqual(r.status_code, 200, msg="mesh status code") self.assertTrue(self.test_mesh_stl_file.exists()) self.__test_box_mesh(self.test_mesh_stl_file) # Clear r = self.client.clear() if self.clean_output: self.test_mesh_stl_file.unlink() def test_mesh_obj(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) # Save out the mesh r = self.client.mesh(self.test_mesh_obj_file) self.assertIsNotNone(r, msg="mesh response is not None") self.assertEqual(r.status_code, 200, msg="mesh status code") self.assertTrue(self.test_mesh_obj_file.exists()) # Clear r = self.client.clear() if self.clean_output: self.test_mesh_obj_file.unlink() def test_mesh_invalid_format(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) # Save out the mesh test_invalid_file = self.data_dir / "file.off" r = self.client.mesh(test_invalid_file) self.assertIsNone(r, msg="mesh response is None") # Clear r = self.client.clear() def test_brep_step(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) # Save out the brep r = self.client.brep(self.test_brep_step_file) self.assertIsNotNone(r, msg="brep response is not None") self.assertEqual(r.status_code, 200, msg="brep status code") 
self.assertTrue(self.test_brep_step_file.exists()) self.assertGreater(self.test_brep_step_file.stat().st_size, 0, msg="brep file size greater than 0") # Clear r = self.client.clear() if self.clean_output: self.test_brep_step_file.unlink() def test_brep_smt(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) # Save out the brep r = self.client.brep(self.test_brep_smt_file) self.assertIsNotNone(r, msg="brep response is not None") self.assertEqual(r.status_code, 200, msg="brep status code") self.assertTrue(self.test_brep_smt_file.exists()) self.assertGreater(self.test_brep_smt_file.stat().st_size, 0, msg="brep file size greater than 0") # Clear r = self.client.clear() # self.test_brep_smt_file.unlink() def test_brep_f3d(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) # Save out the brep r = self.client.brep(self.test_brep_f3d_file) self.assertIsNotNone(r, msg="brep response is not None") self.assertEqual(r.status_code, 200, msg="brep status code") self.assertTrue(self.test_brep_f3d_file.exists()) self.assertGreater(self.test_brep_f3d_file.stat().st_size, 0, msg="brep file size greater than 0") # Clear r = self.client.clear() if self.clean_output: self.test_brep_f3d_file.unlink() def test_brep_invalid_format(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) # Save out the mesh test_invalid_file = self.data_dir / "file.sat" r = self.client.brep(test_invalid_file) self.assertIsNone(r, msg="brep response is None") # Clear r = self.client.clear() def test_sketches_png(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) # Make the folder if not self.test_output_dir.exists(): self.test_output_dir.mkdir() # Save out the sketches r = self.client.sketches(self.test_output_dir) self.assertIsNotNone(r, msg="sketches response is not None") self.assertEqual(r.status_code, 200, msg="sketch status code") for i in range(1): sketch_file = self.test_output_dir / 
f"Sketch{i+1}.png" self.assertTrue(sketch_file.exists()) self.assertGreater(sketch_file.stat().st_size, 0, msg="sketch image file size greater than 0") # Clear r = self.client.clear() if self.clean_output: shutil.rmtree(self.test_output_dir) def test_sketches_png_multiple(self): # Reconstruct first r = self.client.reconstruct(self.hex_design_json_file) # Make the folder if not self.test_output_dir.exists(): self.test_output_dir.mkdir() # Save out the sketches r = self.client.sketches(self.test_output_dir) self.assertIsNotNone(r, msg="sketches response is not None") self.assertEqual(r.status_code, 200, msg="sketch status code") for i in range(3): sketch_file = self.test_output_dir / f"Sketch{i+1}.png" self.assertTrue(sketch_file.exists()) self.assertGreater(sketch_file.stat().st_size, 0, msg="sketch image file size greater than 0") # Clear r = self.client.clear() if self.clean_output: shutil.rmtree(self.test_output_dir) def test_sketches_dxf(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) # Make the folder if not self.test_output_dir.exists(): self.test_output_dir.mkdir() # Save out the sketches r = self.client.sketches(self.test_output_dir, ".dxf") self.assertIsNotNone(r, msg="sketches response is not None") self.assertEqual(r.status_code, 200, msg="sketch status code") for i in range(1): sketch_file = self.test_output_dir / f"Sketch{i+1}.dxf" self.assertTrue(sketch_file.exists()) self.assertGreater(sketch_file.stat().st_size, 0, msg="sketch dxf file size greater than 0") # Clear r = self.client.clear() if self.clean_output: shutil.rmtree(self.test_output_dir) def test_sketches_dxf_multiple(self): # Reconstruct first r = self.client.reconstruct(self.hex_design_json_file) # Make the folder if not self.test_output_dir.exists(): self.test_output_dir.mkdir() # Save out the sketches r = self.client.sketches(self.test_output_dir, ".dxf") self.assertIsNotNone(r, msg="sketches response is not None") self.assertEqual(r.status_code, 200, 
msg="sketch status code") for i in range(3): sketch_file = self.test_output_dir / f"Sketch{i+1}.dxf" self.assertTrue(sketch_file.exists()) self.assertGreater(sketch_file.stat().st_size, 0, msg="sketch dxf file size greater than 0") # Clear r = self.client.clear() if self.clean_output: shutil.rmtree(self.test_output_dir) def test_sketches_invalid_format(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) # Save out the mesh test_invalid_dir = self.data_dir / "yo" r = self.client.sketches(test_invalid_dir) self.assertIsNone(r, msg="sketch response is None") # Clear r = self.client.clear() def test_screenshot(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) # Save out the brep r = self.client.screenshot(self.test_screenshot_png_file) self.assertIsNotNone(r, msg="screenshot response is not None") self.assertEqual(r.status_code, 200, msg="screenshot status code") self.assertTrue(self.test_screenshot_png_file.exists(), msg="screenshot exists") self.assertGreater(self.test_screenshot_png_file.stat().st_size, 0, msg="screenshot file size greater than 0") # Clear r = self.client.clear() if self.clean_output: self.test_screenshot_png_file.unlink() def test_screenshot_with_args(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) # Save out the brep r = self.client.screenshot(self.test_screenshot_png_file, 100, 100, False) self.assertIsNotNone(r, msg="screenshot response is not None") self.assertEqual(r.status_code, 200, msg="screenshot status code") self.assertTrue(self.test_screenshot_png_file.exists(), msg="screenshot exists") self.assertGreater(self.test_screenshot_png_file.stat().st_size, 0, msg="screenshot file size greater than 0") # Clear r = self.client.clear() if self.clean_output: self.test_screenshot_png_file.unlink() def test_screenshot_invalid_format(self): # Reconstruct first r = self.client.reconstruct(self.box_design_json_file) test_invalid_file = self.data_dir / 
"file.gif" r = self.client.screenshot(test_invalid_file) self.assertIsNone(r, msg="screenshot response is None") # Clear r = self.client.clear() def test_graph_per_face(self): # Reconstruct first r = self.client.reconstruct(self.couch_design_json_file) # Get the graph r = self.client.graph( format="PerFace", sequence=False ) self.assertIsNotNone(r, msg="graph response is not None") self.assertEqual(r.status_code, 200, msg="graph status code") response_json = r.json() common_test.check_graph_format(self, response_json["data"], mode="PerFace") common_test.check_bounding_box(self, response_json["data"]) r = self.client.clear() def test_graph_per_face_labels(self): # Reconstruct first r = self.client.reconstruct(self.couch_design_json_file) r = self.client.graph( format="PerFace", sequence=False, labels=True ) self.assertIsNotNone(r, msg="graph response is not None") self.assertEqual(r.status_code, 200, msg="graph status code") response_json = r.json() common_test.check_graph_format( self, response_json["data"],mode="PerFace", labels=True) common_test.check_bounding_box(self, response_json["data"]) r = self.client.clear() def test_graph_per_extrude(self): # Reconstruct first r = self.client.reconstruct(self.couch_design_json_file) # Get the graph r = self.client.graph( format="PerExtrude", sequence=False ) self.assertIsNotNone(r, msg="graph response is not None") self.assertEqual(r.status_code, 200, msg="graph status code") response_json = r.json() common_test.check_graph_format(self, response_json["data"], mode="PerExtrude") common_test.check_bounding_box(self, response_json["data"]) r = self.client.clear() def test_graph_per_extrude_labels(self): # Reconstruct first r = self.client.reconstruct(self.couch_design_json_file) r = self.client.graph( format="PerExtrude", sequence=False, labels=True ) self.assertIsNotNone(r, msg="graph response is not None") self.assertEqual(r.status_code, 200, msg="graph status code") response_json = r.json() 
common_test.check_graph_format( self, response_json["data"], mode="PerExtrude", labels=True) common_test.check_bounding_box(self, response_json["data"]) r = self.client.clear() def test_graph_sequence_per_face(self): # Reconstruct first r = self.client.reconstruct(self.couch_design_json_file) # Make the folder if not self.test_output_dir.exists(): self.test_output_dir.mkdir() # Save out the graphs r = self.client.graph( self.couch_design_json_file, self.test_output_dir, format="PerFace", sequence=True ) self.assertIsNotNone(r, msg="graph response is not None") self.assertEqual(r.status_code, 200, msg="graph status code") graph_file = self.test_output_dir / f"{self.couch_design_json_file.stem}_0000.json" self.assertTrue(graph_file.exists(), msg="graph file exists") self.assertGreater(graph_file.stat().st_size, 0, msg="graph file size greater than 0") common_test.check_graph_format(self, graph_file, mode="PerFace") graph_file = self.test_output_dir / f"{self.couch_design_json_file.stem}_0001.json" self.assertTrue(graph_file.exists(), msg="graph file exists") self.assertGreater(graph_file.stat().st_size, 0, msg="graph file size greater than 0") common_test.check_graph_format(self, graph_file, mode="PerFace") seq_file = self.test_output_dir / f"{self.couch_design_json_file.stem}_sequence.json" self.assertTrue(seq_file.exists(), msg="sequence file exists") self.assertGreater(seq_file.stat().st_size, 0, msg="sequence file size greater than 0") # Clear r = self.client.clear() if self.clean_output: shutil.rmtree(self.test_output_dir) def test_graph_sequence_per_face_labels(self): # Reconstruct first r = self.client.reconstruct(self.couch_design_json_file) # Make the folder if not self.test_output_dir.exists(): self.test_output_dir.mkdir() # Save out the graphs r = self.client.graph( self.couch_design_json_file, self.test_output_dir, format="PerFace", sequence=True, labels=True ) self.assertIsNotNone(r, msg="graph response is not None") self.assertEqual(r.status_code, 200, 
msg="graph status code") graph_file = self.test_output_dir / f"{self.couch_design_json_file.stem}_0000.json" self.assertTrue(graph_file.exists(), msg="graph file exists") self.assertGreater(graph_file.stat().st_size, 0, msg="graph file size greater than 0") common_test.check_graph_format(self, graph_file, mode="PerFace", labels=True) graph_file = self.test_output_dir / f"{self.couch_design_json_file.stem}_0001.json" self.assertTrue(graph_file.exists(), msg="graph file exists") self.assertGreater(graph_file.stat().st_size, 0, msg="graph file size greater than 0") common_test.check_graph_format(self, graph_file, mode="PerFace", labels=True) seq_file = self.test_output_dir / f"{self.couch_design_json_file.stem}_sequence.json" self.assertTrue(seq_file.exists(), msg="sequence file exists") self.assertGreater(seq_file.stat().st_size, 0, msg="sequence file size greater than 0") # Clear r = self.client.clear() if self.clean_output: shutil.rmtree(self.test_output_dir) def
""" gtp_connection.py Module for playing games of Go using GoTextProtocol Parts of this code were originally based on the gtp module in the Deep-Go project by <NAME> and <NAME> at the University of Edinburgh. """ import signal, os import traceback from sys import stdin, stdout, stderr from board_util import GoBoardUtil, BLACK, WHITE, EMPTY, BORDER, PASS, \ MAXSIZE, coord_to_point import numpy as np import re import time import random class GtpConnection(): def __init__(self, go_engine, board, debug_mode = False): """ Manage a GTP connection for a Go-playing engine Parameters ---------- go_engine: a program that can reply to a set of GTP commandsbelow board: Represents the current board state. """ self.totalTime = 0 self.count = 0 self.nodeExp = 0 self.timeLimit = 1 self.to_play = BLACK #H table is a dictionary that stores (state,value) pairs #value = Black win -> 1, White win -1 self.H_table = {} self._winner = '' self._optimal_move = '' self._debug_mode = debug_mode self.go_engine = go_engine self.board = board self.commands = { "protocol_version": self.protocol_version_cmd, "quit": self.quit_cmd, "name": self.name_cmd, "boardsize": self.boardsize_cmd, "showboard": self.showboard_cmd, "clear_board": self.clear_board_cmd, "komi": self.komi_cmd, "version": self.version_cmd, "known_command": self.known_command_cmd, "genmove": self.genmove_cmd, "list_commands": self.list_commands_cmd, "play": self.play_cmd, "legal_moves": self.legal_moves_cmd, "gogui-rules_game_id": self.gogui_rules_game_id_cmd, "gogui-rules_board_size": self.gogui_rules_board_size_cmd, "gogui-rules_legal_moves": self.gogui_rules_legal_moves_cmd, "gogui-rules_side_to_move": self.gogui_rules_side_to_move_cmd, "gogui-rules_board": self.gogui_rules_board_cmd, "gogui-rules_final_result": self.gogui_rules_final_result_cmd, "gogui-analyze_commands": self.gogui_analyze_cmd, "timelimit": self.timelimit_cmd, "solve":self.solve_cmd } # used for argument checking # values: (required number of arguments, # error 
message on argnum failure) self.argmap = { "boardsize": (1, 'Usage: boardsize INT'), "komi": (1, 'Usage: komi FLOAT'), "known_command": (1, 'Usage: known_command CMD_NAME'), "genmove": (1, 'Usage: genmove {w,b}'), "play": (2, 'Usage: play {b,w} MOVE'), "legal_moves": (1, 'Usage: legal_moves {w,b}'), "timelimit": (1, 'Usage: timelimit INT, 1 <= INT <= 100'), } def write(self, data): stdout.write(data) def flush(self): stdout.flush() def start_connection(self): """ Start a GTP connection. This function continuously monitors standard input for commands. """ line = stdin.readline() while line: self.get_cmd(line) line = stdin.readline() def get_cmd(self, command): """ Parse command string and execute it """ if len(command.strip(' \r\t')) == 0: return if command[0] == '#': return # Strip leading numbers from regression tests if command[0].isdigit(): command = re.sub("^\d+", "", command).lstrip() elements = command.split() if not elements: return command_name = elements[0]; args = elements[1:] if self.has_arg_error(command_name, len(args)): return if command_name in self.commands: try: self.commands[command_name](args) except Exception as e: self.debug_msg("Error executing command {}\n".format(str(e))) self.debug_msg("Stack Trace:\n{}\n". format(traceback.format_exc())) raise e else: self.debug_msg("Unknown command: {}\n".format(command_name)) self.error('Unknown command') stdout.flush() def has_arg_error(self, cmd, argnum): """ Verify the number of arguments of cmd. argnum is the number of parsed arguments """ if cmd in self.argmap and self.argmap[cmd][0] != argnum: self.error(self.argmap[cmd][1]) return True return False def debug_msg(self, msg): """ Write msg to the debug stream """ if self._debug_mode: stderr.write(msg) stderr.flush() def error(self, error_msg): """ Send error msg to stdout """ stdout.write('? 
{}\n\n'.format(error_msg)) stdout.flush() def respond(self, response=''): """ Send response to stdout """ stdout.write('= {}\n\n'.format(response)) stdout.flush() def reset(self, size): """ Reset the board to empty board of given size """ self.board.reset(size) def board2d(self): return str(GoBoardUtil.get_twoD_board(self.board)) def protocol_version_cmd(self, args): """ Return the GTP protocol version being used (always 2) """ self.respond('2') def quit_cmd(self, args): """ Quit game and exit the GTP interface """ self.respond() exit() def name_cmd(self, args): """ Return the name of the Go engine """ self.respond(self.go_engine.name) def version_cmd(self, args): """ Return the version of the Go engine """ self.respond(self.go_engine.version) def clear_board_cmd(self, args): """ clear the board """ self.reset(self.board.size) self.respond() def boardsize_cmd(self, args): """ Reset the game with new boardsize args[0] """ self.reset(int(args[0])) self.respond() #newly added def timelimit_cmd(self, args): """ Reset the game with new timelimit args[0] """ self.timeLimit = int(args[0]) self.respond() def showboard_cmd(self, args): self.respond('\n' + self.board2d()) def komi_cmd(self, args): """ Set the engine's komi to args[0] """ self.go_engine.komi = float(args[0]) self.respond() def known_command_cmd(self, args): """ Check if command args[0] is known to the GTP interface """ if args[0] in self.commands: self.respond("true") else: self.respond("false") def list_commands_cmd(self, args): """ list all supported GTP commands """ self.respond(' '.join(list(self.commands.keys()))) def legal_moves_cmd(self, args): """ List legal moves for color args[0] in {'b','w'} """ board_color = args[0].lower() color = color_to_int(board_color) moves = GoBoardUtil.generate_legal_moves(self.board, color) gtp_moves = [] for move in moves: coords = point_to_coord(move, self.board.size) gtp_moves.append(format_point(coords)) sorted_moves = ' '.join(sorted(gtp_moves)) 
self.respond(sorted_moves) def play_cmd(self, args): """ play a move args[1] for given color args[0] in {'b','w'} """ try: board_color = args[0].lower() board_move = args[1] if board_color != "b" and board_color !="w": self.respond("illegal move: \"{}\" wrong color".format(board_color)) return color = color_to_int(board_color) #change turn to the other player self.to_play = GoBoardUtil.opponent(color) if args[1].lower() == 'pass': self.respond("illegal move: \"{} {}\" wrong coordinate".format(args[0], args[1])) return coord = move_to_coord(args[1], self.board.size) if coord: move = coord_to_point(coord[0],coord[1], self.board.size) else: self.error("Error executing move {} converted from {}" .format(move, args[1])) return if not self.board.play_move(move, color): self.respond("illegal move: \"{} {}\" ".format(args[0], board_move)) return else: self.debug_msg("Move: {}\nBoard:\n{}\n". format(board_move, self.board2d())) self.respond() except Exception as e: self.respond('illegal move: \"{} {}\" {}'.format(args[0], args[1], str(e))) def solve_helper(self): winner = 'unknown' #the copy of board can be viewed as a state cp_board = self.board.copy() start = time.time() signal.signal(signal.SIGALRM, handler) signal.alarm(self.timeLimit) try: value,move = self.advanced_search(cp_board,81,-1,1) except Exception as e: value,move = 0,None #print("nodeExp",self.nodeExp) #print("count",self.count) signal.alarm(0) end = time.time() print("time: ",end - start) #print("partial time: ",self.totalTime) if value == 1: winner = 'b' elif value == -1: winner = 'w' if (winner == 'b' and self.to_play !=BLACK) or (winner == 'w' and self.to_play !=WHITE): move = None return winner,move #newly added def solve_cmd(self,args): moveStr = '' winner,move = self.solve_helper() if move: moveStr = ' '+ coord_to_move(move,self.board.size) self.respond(winner+moveStr) #alpha beta pruning, referencing from wikipedia: https://en.wikipedia.org/wiki/Alpha%E2%80%93beta_pruning #color is the player. 
black is max player, white is min player def ab_search(self, color, copy_of_board, depth, alpha, beta): _alpha = alpha _beta = beta bestMove = None #base case, no more legal move #print(GoBoardUtil.generate_legal_moves(copy_of_board, color)) if depth == 0 or (GoBoardUtil.generate_legal_moves(copy_of_board, color) == []): #depth should always be >0 #since NOGO cannot capture nor suiside, if last move is by WHITE/BLACK, it must be a BLACK/WHITE win. if color == WHITE: return 1,None #color == BLACK else: return -1,None #color is black; max player if color == BLACK: value = -1000000 #make a copy of current state allmoves = GoBoardUtil.generate_legal_moves(copy_of_board, color) #print("allmoves:") #print(allmoves) for move in allmoves: child = copy_of_board.copy() child.play_move(move, color) childValue,_ = self.ab_search(WHITE,child,depth-1,_alpha,_beta) value = max(value,childValue) _alpha = max(_alpha,value) bestMove = move #beta cut-off if _alpha >= _beta: break return value,bestMove #color is white; min player else: value = 1000000 allmoves = GoBoardUtil.generate_legal_moves(copy_of_board, color) #print("allmoves:") #print(allmoves) for move in allmoves: child = copy_of_board.copy() child.play_move(move, color) childValue,_ = self.ab_search(BLACK,child,depth-1,_alpha,_beta) value = min(value,childValue) _beta = min(_beta,value) bestMove = move #alpha cut-off if _alpha >= _beta: break return value,bestMove def advanced_search(self,copy_of_board,depth,alpha,beta): _alpha = alpha _beta = beta bestMove = None self.nodeExp += 1 #base case, depth 0 if depth == 0: return 0,None #Start = time.time() allmoves = GoBoardUtil.generate_legal_moves(copy_of_board, copy_of_board.current_player) #End =time.time() #self.totalTime += End-Start #base case, no more legal move if allmoves == []: #since NOGO cannot capture nor suiside, if last move is by WHITE/BLACK, it must be a BLACK/WHITE win. 
if copy_of_board.current_player == WHITE: self.H_table[self.tuple_to_str(self.matrix_to_tuple(GoBoardUtil.get_twoD_board(copy_of_board),copy_of_board.size))] = 1 return 1,None #color == BLACK else: self.H_table[self.tuple_to_str(self.matrix_to_tuple(GoBoardUtil.get_twoD_board(copy_of_board),copy_of_board.size))] = -1 return -1,None searchedMoves = [] unsearchedMoves = [] unsearched = {} searchedValue = {} isoSet = set() singleMoveIsoSet = set() for move in allmoves: singleMoveIsoSet.clear() child = copy_of_board.copy() child.play_move(move, copy_of_board.current_player) #get all isomorphics of the board, in order to prunning as many as redundent states possible isomorphics = self.get_all_isomorphic(GoBoardUtil.get_twoD_board(child),child.size) found = False for iso in isomorphics: if self.tuple_to_str(iso) in self.H_table: found = True searchedMoves.append(move) searchedValue[move] = self.H_table[self.tuple_to_str(iso)] break if iso in isoSet: found = True break else: isoSet.add(iso) singleMoveIsoSet.add(iso) if not found: ''' the following is the heuristic I created for ordering the moves: (1) eye-filling is the last thing we want to do; (2) the few the number of player's stones
port, unix_socket = sys_settings.database.socket, use_utf = use_utf, ) def _create_protein_deletion_stored_procedure(self): '''This stored procedure returns 1 on error, -1 when there was no associated Protein record, and 0 on success.''' self.execute('DROP PROCEDURE IF EXISTS _DELETE_PROTEIN') if '_DELETE_PROTEIN' not in self.list_stored_procedures(): self.execute(""" CREATE PROCEDURE _DELETE_PROTEIN(IN protein_ID VARCHAR(18), OUT error_code TINYINT) BEGIN DECLARE number_of_deleted_rows INT; DECLARE EXIT HANDLER FOR SQLEXCEPTION ROLLBACK; DECLARE EXIT HANDLER FOR SQLWARNING ROLLBACK; START TRANSACTION; SET error_code = 1; DELETE FROM ProteinResidue WHERE ProteinID=protein_ID; DELETE FROM ProteinName WHERE ProteinID=protein_ID; DELETE FROM ProteinOrganism WHERE ProteinID=protein_ID; DELETE FROM ProteinSegment WHERE ProteinID=protein_ID; DELETE FROM Protein WHERE ID=protein_ID; SET number_of_deleted_rows = ROW_COUNT(); IF number_of_deleted_rows > 0 THEN SET error_code = 0; ELSE SET error_code = -1; END IF; COMMIT; END """, allow_unsafe_query = True) def _delete_protocol(self, ProtocolID): # ProtocolCleaner -> ProtocolStep -> (Protocol, Toolx2, Command) # ProtocolFilter-> ProtocolGraphEdge -> ProtocolStep # ProtocolParameter-> ProtocolStep # ProtocolScorer (unused so far) # Delete in order ProtocolCleaner, ProtocolFilter, ProtocolParameter, ProtocolGraphEdge, ProtocolStep, Protocol #protocol_steps = self.execute("SELECT FROM ProtocolStep WHERE ProtocolID=%s", parameters = (ProtocolID,)) print('here') if self.execute('''SELECT ID FROM Prediction WHERE ProtocolID=%s''', parameters = (ProtocolID,)): return False print('there') command_IDs = [r['CommandID'] for r in self.execute('''SELECT CommandID FROM ProtocolStep WHERE ProtocolID=%s''', parameters = (ProtocolID,))] tool_IDs = [r['ToolID'] for r in self.execute('''SELECT ToolID FROM ProtocolStep WHERE ProtocolID=%s''', parameters = (ProtocolID,)) ] + [r['DatabaseToolID'] for r in self.execute('''SELECT DatabaseToolID 
FROM ProtocolStep WHERE ProtocolID=%s''', parameters = (ProtocolID,))] self.execute('''DELETE FROM ProtocolCleaner WHERE ProtocolID=%s''', parameters = (ProtocolID,)) self.execute('''DELETE FROM ProtocolFilter WHERE ProtocolID=%s''', parameters = (ProtocolID,)) self.execute('''DELETE FROM ProtocolParameter WHERE ProtocolID=%s''', parameters = (ProtocolID,)) self.execute('''DELETE FROM ProtocolGraphEdge WHERE ProtocolID=%s''', parameters = (ProtocolID,)) self.execute('''DELETE FROM ProtocolStep WHERE ProtocolID=%s''', parameters = (ProtocolID,)) self.execute('''DELETE FROM Protocol WHERE ID=%s''', parameters = (ProtocolID,)) try: for command_ID in command_IDs: self.execute('''DELETE FROM Command WHERE ID=%s''', parameters = (command_ID,)) except: pass return True def _delete_protein(self, ProteinID): '''Returns True if deletion was successful and False if no records were deleted. An exception is raised if deletion was not possible.''' if '_DELETE_PROTEIN' not in self.list_stored_procedures(): self._create_protein_deletion_stored_procedure() results = self.callproc('_DELETE_PROTEIN', parameters=(ProteinID, '@return_value')) assert(len(results) == 1) if int(results[0]['return_value']) > 0: raise DatabaseCannotDeleteRecordException(ProteinID, 'Protein') return int(results[0]['return_value']) == 0 def _add_protein_residues(self, ProteinID, update = False): results = self.execute_select("SELECT * FROM Protein WHERE ID=%s", parameters=(ProteinID,)) if len(results) != 1: raise DatabaseMissingKeyException(ProteinID, 'Protein') sequence = results[0]['Sequence'] # Sanity check stored data results = self.execute_select("SELECT COUNT(ResidueID) AS ResidueCount FROM ProteinResidue WHERE ProteinID=%s", parameters=(ProteinID,)) assert(len(results) == 1) if results[0]['ResidueCount'] > 0: if results[0]['ResidueCount'] == len(sequence): results = self.execute_select("SELECT ResidueAA FROM ProteinResidue WHERE ProteinID=%s ORDER BY ResidueID", parameters=(ProteinID,)) stored_sequence 
= "".join([r['ResidueAA'] for r in results]) assert(stored_sequence == sequence) if not(update): return else: raise DatabaseBadDataException('ProteinResidue', 'Expected %d results, got %d results' % (len(sequence), results[0]['ResidueCount'])) # Delete if updating if update: results = self.execute("DELETE FROM ProteinResidue WHERE ProteinID=%s", parameters=(ProteinID,)) for c in range(len(sequence)): x = sequence[c] if x not in relaxed_amino_acid_codes: # Allow X for some proteins e.g. UPI000012EE21 / P00346 raise Exception("Unknown amino acid '%s' at position %d of the sequence of protein %s." % (x, c + 1, ProteinID)) assert(x in relaxed_amino_acid_codes) for c in range(len(sequence)): d = { 'ProteinID': ProteinID, 'ResidueID': c + 1, 'ResidueAA': sequence[c], } self.insertDictIfNew('ProteinResidue', d, ['ProteinID', 'ResidueID']) # Sanity check again stored_sequence = "".join([r['ResidueAA'] for r in self.execute_select("SELECT ResidueAA FROM ProteinResidue WHERE ProteinID=%s ORDER BY ResidueID", parameters=(ProteinID,))]) assert(stored_sequence == sequence) def look_for_protein_sequence(self, sequence): digest = CRC64.CRC64digest(sequence) results = self.execute_select("SELECT * FROM Protein WHERE CRC_64_ISO_Digest=%s", parameters=(digest,)) for r in results: if r['Sequence'] == sequence: return r return None def add_raw_protein_sequence(self, sequence, IDScheme, IsASegmentOf = None, AddProteinResidues = True): # Determine the ID prefix ID_prefix = None if IDScheme == 'Kortemme Lab': ID_prefix = 'KOR' elif IDScheme == 'UniParcSegment': ID_prefix = 'SEG' else: raise Exception("The ID scheme %s is not recognized." 
% IDScheme) # Sanity check the sequence, allowing unknown residues 'X' for x in sequence: assert(x in relaxed_amino_acid_codes) # Sanity check IsASegmentOf if IsASegmentOf: results = self.execute_select("SELECT Sequence FROM Protein WHERE ID=%s", parameters=(IsASegmentOf,)) if not results: raise DatabaseMissingKeyException(IsASegmentOf, 'Protein') else: assert(len(results) == 1) if results[0]['Sequence'].find(sequence) == -1: raise Exception("The protein with sequence\n%s\ndoes not seem to be a segment of protein %s with sequence:\n%s" % (sequence, IsASegmentOf, results[0]['Sequence'])) # Sanity check existing records ProteinID = None existing_sequence = self.look_for_protein_sequence(sequence) if existing_sequence: # We already have this sequence stored in the database ProteinID = existing_sequence['ID'] if existing_sequence['IDScheme'] == 'UniParc': raise Exception("You are trying to add a raw protein sequence but an existing UniParc record with the same sequence exists with ID %s." % ProteinID) assert(existing_sequence['UniParcID'] == None) assert(existing_sequence['Sequence'] == sequence) # We do not need to check the CRC64 digest assert(existing_sequence['Mass'] == None) assert(existing_sequence['IsASegmentOf'] == IsASegmentOf) else: # Create a ProteinID if none exist. These are increasing integers just like the UniParc ID scheme. 
if not ProteinID: ProteinID = 1 results = self.execute_select("SELECT ID FROM Protein WHERE ID LIKE '%s%%' ORDER BY ID DESC" % ID_prefix) if results: ProteinID = int(results[0]['ID'][3:], base=16) + 1 ProteinID = "%s%015x" % (ID_prefix, ProteinID) assert(len(ProteinID) == 18) # Protein record d = { 'ID': ProteinID, 'IDScheme': IDScheme, 'UniParcID': None, 'Sequence': sequence, 'CRC_64_ISO_Digest': CRC64.CRC64digest(sequence), 'Mass': None, 'IsASegmentOf': IsASegmentOf, } self.insertDictIfNew('Protein', d, ['ID']) # ProteinResidue records if AddProteinResidues: self._add_protein_residues(ProteinID, False) return ProteinID def add_protein_from_UniParc_ID(self, UniParcID, cache_dir = None): '''Adds a Protein record and related records for the UniParc sequence with ID UniParcID. Returns the ID of the protein.''' from klab.bio import uniprot # import moved here to get around differences in the custom-built simplejson package on the webserver (Python 2.4.3) and those shipped with Python 2.7.3 uniparco = uniprot.UniParcEntry(UniParcID, cache_dir = cache_dir) # Sanity check the sequence, allowing unknown residues 'X' for x in uniparco.sequence: assert(x in relaxed_amino_acid_codes) # Sanity check existing records existing_sequence = self.look_for_protein_sequence(uniparco.sequence) if existing_sequence: # We already have this sequence stored in the database assert(existing_sequence['ID'] == UniParcID) assert(existing_sequence['IDScheme'] == 'UniParc') assert(existing_sequence['UniParcID'] == int(UniParcID[3:], 16)) assert(existing_sequence['Sequence'] == uniparco.sequence) assert(existing_sequence['CRC_64_ISO_Digest'] == uniparco.CRC64Digest) assert(existing_sequence['Mass'] == uniparco.atomic_mass) assert(existing_sequence['IsASegmentOf'] == None) else: # Protein record d = { 'ID': UniParcID, 'IDScheme': 'UniParc', 'UniParcID': int(UniParcID[3:], 16), 'Sequence': uniparco.sequence, 'CRC_64_ISO_Digest': uniparco.CRC64Digest, 'Mass': uniparco.atomic_mass, 'IsASegmentOf': 
None, } self.insertDictIfNew('Protein', d, ['ID']) for protein_segment in uniparco.subsections.sections: d = protein_segment.to_db() SegmentProteinID = None if d['StartResidue'] == 1 and d['EndResidue'] == len(uniparco.sequence): subsequence = uniparco.sequence SegmentProteinID = UniParcID else: subsequence = uniparco.sequence[d['StartResidue'] - 1:d['EndResidue']] SegmentProteinID = self.add_raw_protein_sequence(subsequence, 'UniParcSegment', IsASegmentOf = None, AddProteinResidues = False) d['ProteinID'] = UniParcID d['DefinedBy'] = 'UniProt' d['SegmentProteinID'] = SegmentProteinID self.insertDictIfNew('ProteinSegment', d, ['ProteinID', 'StartResidue', 'EndResidue']) # Fill in the Occurrence fields for each segment db_segments = self.execute("SELECT * FROM ProteinSegment WHERE ProteinID=%s ORDER BY StartResidue", parameters=(UniParcID,)) occurrences = {} for db_segment in db_segments: occurrences[db_segment['SegmentProteinID']] = occurrences.get(db_segment['SegmentProteinID'], 0) + 1 occurrence = occurrences[db_segment['SegmentProteinID']] self.execute("UPDATE ProteinSegment SET Occurrence=%s WHERE ProteinID=%s AND StartResidue=%s AND EndResidue=%s", parameters=(occurrence, UniParcID, db_segment['StartResidue'], db_segment['EndResidue'])) for AC, names in uniparco.organisms.iteritems(): d = names d['UniProt_ACC'] = AC self.insertDictIfNew('ProteinOrganism', d, ['UniProt_ACC', 'scientific']) # ProteinName record assert(uniparco.recommended_name) ECNumber = None if len(uniparco.recommended_name['EC numbers']) == 1: ECNumber = uniparco.recommended_name['EC numbers'][0] ProteinName = uniparco.recommended_name['Name'] d = { 'ProteinID' : UniParcID, 'NameOrder' : 0, 'Name' : ProteinName, 'ECNumber' : ECNumber, 'Validity' : None, } self.insertDictIfNew("_ProteinName", {'ProteinName' : ProteinName}, ['ProteinName']) self.insertDictIfNew("ProteinName", d, ['ProteinID', 'NameOrder']) # ProteinDatabaseIdentifier records for UniProtAC in uniparco.UniProtACs: d = { 
'ProteinID' : UniParcID, 'Scheme' : 'UniProt_ACC', 'SchemeID' : UniProtAC, } self.insertDictIfNew('ProteinDatabaseIdentifier', d, ['ProteinID', 'Scheme', 'SchemeID']) for UniProtID in uniparco.UniProtIDs: d = { 'ProteinID' : UniParcID, 'Scheme' : 'UniProt_ID', 'SchemeID' : UniProtID, } self.insertDictIfNew('ProteinDatabaseIdentifier', d, ['ProteinID', 'Scheme', 'SchemeID']) # ProteinResidue records self._add_protein_residues(UniParcID, False) def fill_in_publication_information(self, PublicationID): raise Exception('This function was folded into the Publication object') def addChainWarning(self, pdbID, associatedRecords, c): chainWarnings = self.chainWarnings chainWarnings[pdbID] = chainWarnings.get(pdbID) or [] chainWarnings[pdbID].append((associatedRecords, c)) def addChainError(self, pdbID, c): chainErrors = self.chainErrors chainErrors[pdbID] = chainErrors.get(pdbID) or [] chainErrors[pdbID].append(c) def addTechniquesFields(self): '''Used to update missing Techniques fields as this field was added after the initial PDB import.''' return results = self.locked_execute("SELECT * FROM Structure") for result in results: pdbID = result[FieldNames_.PDB_ID] contents = result[FieldNames_.Content] lines = contents.split("\n") for line in lines: if line.startswith("EXPDTA"): techniques = line[10:71].split(";") for k in range(len(techniques)): techniques[k] = techniques[k].strip() techniques = join(techniques, ";") break if not result[FieldNames_.Techniques]: SQL
<gh_stars>0 from mesh.generic.nodeComm import NodeComm from switch import switch import random, time, math from math import ceil from mesh.generic.slipMsg import SLIP_END_TDMA from mesh.generic.radio import RadioMode from mesh.generic.cmds import TDMACmds from mesh.generic.tdmaState import TDMAStatus, TDMAMode, TDMABlockTxStatus from mesh.generic.cmdDict import CmdDict from mesh.generic.command import Command from mesh.generic.customExceptions import InvalidTDMASlotNumber from mesh.generic.commProcessor import CommProcessor from mesh.generic.tdmaCmdProcessor import TDMACmdProcessor import struct class TDMAComm(NodeComm): def __init__(self, commProcessor, radio, msgParser, nodeParams): if not commProcessor: commProcessor = CommProcessor([TDMACmdProcessor], nodeParams) NodeComm.__init__(self, commProcessor, radio, msgParser, nodeParams) # TDMA config self.tdmaMode = TDMAMode.sleep self.frameStartTime = [] self.nodeParams.commStartTime = [] # time that TDMA comm was started - initialized manually by first node or parsed from messages received for nodes joining existing mesh self.maxNumSlots = nodeParams.config.commConfig['maxNumSlots'] # Maximum number of slots self.enableLength = nodeParams.config.commConfig['enableLength'] self.slotTime = 0.0 self.slotNum = 1 self.slotStartTime = 0.0 # TDMA Frame variables self.frameTime = 0.0 self.frameLength = nodeParams.config.commConfig['frameLength'] self.cycleLength = nodeParams.config.commConfig['cycleLength'] # Mesh initialization variables self.inited = False self.initTimeToWait = nodeParams.config.commConfig['initTimeToWait'] # Time to wait before assuming no existing mesh network self.initStartTime = [] self.tdmaCmds = dict() # Transmit period variables self.transmitSlot = nodeParams.config.commConfig['transmitSlot'] # Slot in cycle that this node is schedule to transmit self.beginTxTime = self.enableLength + nodeParams.config.commConfig['preTxGuardLength'] self.endTxTime = self.beginTxTime + 
nodeParams.config.commConfig['txLength'] self.transmitComplete = False # Receive period variables self.beginRxTime = self.enableLength self.endRxTime = self.beginRxTime + nodeParams.config.commConfig['rxLength'] self.slotLength = nodeParams.config.commConfig['slotLength'] # total length of slot self.rxLength = nodeParams.config.commConfig['rxLength'] self.rxReadTime = self.beginTxTime + nodeParams.config.commConfig['rxDelay'] # time to begin reading serial self.receiveComplete = False # Command buffer self.cmdBuffer = dict() # Current read position in radio rx buffer self.rxBufferReadPos = 0 # Block TX init self.resetBlockTxStatus() self.clearDataBlock() # Comm enable flag self.enabled = True def execute(self): """Execute communication functions.""" currentTime = time.time() # Initialize mesh network if self.inited == False: self.init(currentTime) return else: # perform TDMA execution logic self.executeTDMAComm(currentTime) def updateFrameTime(self, currentTime): self.frameTime = currentTime - self.frameStartTime if self.frameTime >= self.frameLength: # Start new frame #print(str(currentTime) + ": Node " + str(self.nodeParams.config.nodeId) + " - New frame started") self.syncTDMAFrame(currentTime) if self.frameTime < self.cycleLength: frameStatus = 0 else: # sleep period frameStatus = 1 return frameStatus def executeTDMAComm(self, currentTime): """Execute TDMA communication scheme.""" # Check for block transfers self.monitorBlockTx() # Update frame time frameStatus = self.updateFrameTime(currentTime) if (frameStatus == 1): self.sleep() return # Check for mode updates self.updateMode(self.frameTime) # Perform mode specific behavior for case in switch(self.tdmaMode): if case(TDMAMode.sleep): # Set radio to sleep mode self.radio.setMode(RadioMode.sleep) break if case(TDMAMode.init): # Prepare radio to receive or transmit if self.slotNum == self.transmitSlot: # Set radio to transmit mode self.radio.setMode(RadioMode.transmit) else: # Set radio to receive mode 
self.radio.setMode(RadioMode.receive) pass break if case(TDMAMode.receive): # Read data if TDMA message end not yet found if self.receiveComplete == False and self.slotTime >= self.rxReadTime: self.radio.setMode(RadioMode.receive) # set radio mode # Delay so we aren't hammering the radio #remainingRxTime = self.rxLength - (self.slotTime - self.enableLength) #time.sleep(remainingRxTime*0.2) self.receiveComplete = self.readMsg() if self.receiveComplete == True: # Set radio to sleep self.radio.setMode(RadioMode.sleep) break if case(TDMAMode.transmit): # Send data if self.transmitComplete == False: self.radio.setMode(RadioMode.transmit) # set radio mode self.sendMsg() else: # Set radio to sleep self.radio.setMode(RadioMode.sleep) break if case(TDMAMode.failsafe): # Read only failsafe mode # Enable radio in receive mode and read data self.radio.setMode(RadioMode.receive) self.readMsg() break if case(TDMAMode.blockRx): # Block receive mode self.radio.setMode(RadioMode.receive) self.readMsg() break if case(TDMAMode.blockTx): # Block transmit mode self.radio.setMode(RadioMode.transmit) self.sendBlock() break def init(self, currentTime): if self.nodeParams.commStartTime == []: # Mesh not initialized self.initComm(currentTime) return else: # Join existing mesh self.initMesh() def initMesh(self, currentTime=time.time()): """Initialize node mesh networks.""" # Create tdma comm messages flooredStartTime = math.floor(self.nodeParams.commStartTime) self.tdmaCmds[TDMACmds['MeshStatus']] = Command(TDMACmds['MeshStatus'], {'commStartTimeSec': int(flooredStartTime), 'status': self.nodeParams.tdmaStatus}, [TDMACmds['MeshStatus'], self.nodeParams.config.nodeId], self.nodeParams.config.commConfig['statusTxInterval']) self.tdmaCmds[TDMACmds['LinkStatus']] = Command(TDMACmds['LinkStatus'], {'linkStatus': self.nodeParams.linkStatus, 'nodeId': self.nodeParams.config.nodeId}, [TDMACmds['LinkStatus'], self.nodeParams.config.nodeId], self.nodeParams.config.commConfig['linksTxInterval']) if 
self.nodeParams.config.nodeId != 0: # stop ground node from broadcasting time offset self.tdmaCmds[TDMACmds['TimeOffset']] = Command(TDMACmds['TimeOffset'], {'nodeStatus': self.nodeParams.nodeStatus[self.nodeParams.config.nodeId-1]}, [TDMACmds['TimeOffset'], self.nodeParams.config.nodeId], self.nodeParams.config.commConfig['offsetTxInterval']) # Determine where in frame mesh network currently is self.syncTDMAFrame(currentTime) self.inited = True print("Node " + str(self.nodeParams.config.nodeId) + " - Initializing comm") def initComm(self, currentTime): if self.initStartTime == []: # Start mesh initialization timer self.initStartTime = currentTime print("Node " + str(self.nodeParams.config.nodeId) + " - Starting initialization timer") return elif (currentTime - self.initStartTime) >= self.initTimeToWait: # Assume no existing mesh and initialize network self.nodeParams.commStartTime = math.ceil(currentTime) print("Initializing new mesh network") self.initMesh() else: # Wait for initialization timer to lapse # Turn on radios and check for comm messages self.checkForInit() def checkForInit(self): # Look for tdma status message self.radio.setMode(RadioMode.receive) self.readBytes(True) if self.radio.bytesInRxBuffer > 0: self.parseMsgs() while self.msgParser.parsedMsgs: msg = self.msgParser.parsedMsgs.pop(0) cmdId = struct.unpack('=B',msg[0:1])[0] if cmdId == TDMACmds['MeshStatus']: print("Mesh status received") self.processMsg(msg, {'nodeStatus': self.nodeParams.nodeStatus, 'comm': self, 'clock': self.nodeParams.clock}) def syncTDMAFrame(self, currentTime=time.time()): """Determine where in frame mesh network currently is to ensure time sync.""" self.frameTime = (currentTime - self.nodeParams.commStartTime)%self.frameLength self.frameStartTime = currentTime - self.frameTime print(str(self.frameStartTime),"- Frame start") # Update periodic mesh messages if (TDMACmds['MeshStatus'] in self.tdmaCmds): self.tdmaCmds[TDMACmds['MeshStatus']].cmdData['status'] = 
self.nodeParams.tdmaStatus # Reset buffer read position self.rxBufferReadPos = 0 # Check for tdma failsafe self.nodeParams.checkTimeOffset() def sleep(self): """Sleep until end of frame.""" # Sleep until next frame to save CPU usage try: # wrap in try in case of negative sleep time # Sleep remaining frame length minus some delta to ensure waking in time remainingFrameTime = (self.frameLength - (time.time() - self.frameStartTime)) if remainingFrameTime > 0.010: time.sleep(remainingFrameTime - 0.010) except ValueError: print("WARNING: Frame length exceeded") def updateMode(self, frameTime): # Update slot self.resetTDMASlot(frameTime) # Check for TDMA failsafe if self.nodeParams.tdmaFailsafe == True: self.setTDMAMode(TDMAMode.failsafe) return if frameTime >= self.cycleLength: # Cycle complete self.setTDMAMode(TDMAMode.sleep) #print str(frameTime) + " - Cycle complete, sleeping" return # Check for block transmit if self.blockTxStatus['status'] == TDMABlockTxStatus.active: if self.blockTxStatus['txNode'] == self.nodeParams.config.nodeId: # this node is transmitting self.setTDMAMode(TDMAMode.blockTx) else: # this node is receiving self.setTDMAMode(TDMAMode.blockRx) return # Normal cycle sequence if self.slotTime < self.enableLength: # Initialize comm at start of slot self.setTDMAMode(TDMAMode.init) else: # Transmit slot if self.slotNum == self.transmitSlot: if self.slotTime >= self.beginTxTime: if self.slotTime < self.endTxTime: # Begin transmitting self.setTDMAMode(TDMAMode.transmit) else: # Sleep self.setTDMAMode(TDMAMode.sleep) # Receive slot else: if self.slotTime >= self.beginRxTime: if self.slotTime < self.endRxTime: # Begin receiving self.setTDMAMode(TDMAMode.receive) else: # Sleep self.setTDMAMode(TDMAMode.sleep) #def resetTDMASlot(self, frameTime, currentTime=time.time(), slotNum=[]): def resetTDMASlot(self, frameTime, slotNum=[]): # Reset slot number if slotNum: if isinstance(slotNum, int) and slotNum <= self.maxNumSlots: self.slotNum = slotNum else: # invalid 
number raise InvalidTDMASlotNumber("Provided TDMA slot number is not valid") else: #if self.slotStartTime: # if (currentTime - self.slotStartTime) >= self.slotLength and self.slotNum < self.maxNumSlots: # self.slotNum = int(frameTime/self.slotLength) + 1 #else: if frameTime < self.cycleLength: # during cycle self.slotNum = int(frameTime/self.slotLength) + 1 else: # during sleep period self.slotNum = self.maxNumSlots #self.slotStartTime = currentTime - (frameTime - (self.slotNum-1)*self.slotLength) #self.slotTime = frameTime - self.slotStartTime self.slotStartTime = (self.slotNum-1)*self.slotLength self.slotTime = frameTime - self.slotStartTime #print("Updating slot number: " + str(self.slotNum)) def setTDMAMode(self, mode): if self.tdmaMode != mode: #print("Setting mode:", mode) self.tdmaMode = mode #print str(self.slotTime) + " - TDMA mode change: " + str(self.tdmaMode) if mode == TDMAMode.receive: self.receiveComplete = False elif mode == TDMAMode.transmit: self.transmitComplete = False #print str(time.time()) + ": " + str(frameTime) + " - Node " + str(self.nodeId) + " - Transmitting" def sendMsg(self): if (self.enabled == False): # Don't send anything if disabled return # Send periodic TDMA commands self.sendTDMACmds() if self.tdmaMode == TDMAMode.transmit: if self.cmdBuffer: # command buffer noRepeatCmds = [] for key in self.cmdBuffer: self.bufferTxMsg(self.cmdBuffer[key]['bytes']) if self.cmdBuffer[key]['txInterval'] == 0: # no repeat noRepeatCmds.append(key) for key in noRepeatCmds: # remove non-repeat commands self.cmdBuffer.pop(key) if self.cmdRelayBuffer: # Add commands to tx buffer and clear relay buffer #for cmd in cmdRelayBuffer: # self.bufferTxMsg(cmd) self.radio.bufferTxMsg(self.cmdRelayBuffer) self.cmdRelayBuffer = bytearray() self.radio.bufferTxMsg(SLIP_END_TDMA) self.radio.sendBuffer(self.nodeParams.config.commConfig['maxTransferSize']) # End transmit period self.transmitComplete = True else: pass #print "Slot " + str(self.slotNum) + " - Node " + 
str(self.nodeId) + " - Can't send. Wrong mode: " + str(self.tdmaMode) def sendBlock(self): if self.dataBlock: if len(self.dataBlock[self.dataBlockPos:]) >
"""Grade weekly football-pool spreadsheets against a master answer sheet.

Reads the master workbook (sheet 'Schedule'), in which the pool organizer
marks the winning side of each game, grades every participant workbook
against it, and merges per-participant scores into 'Weekly Results'.
"""
import os
import pathlib
import subprocess
import sys
import time
import tkinter as tk
from tkinter import filedialog

import numpy as np
import pandas as pd

# Hidden Tk root so file-dialog pickers (used elsewhere in the script) work.
root = tk.Tk()
width = int(1.0 * root.winfo_screenwidth())
height = int(0.8 * root.winfo_screenheight())
root.geometry(f'{width}x{height}')
root.withdraw()

# Heuristic: the author's own machine doubles as the testing environment.
TESTING = 'spencer' in os.getcwd()

# Columns carried from each participant score row into 'Weekly Results'.
RESULT_COLUMNS = ['Sorting Name', 'Name on Sheet', 'Correct']


def potential_sleep(sleep_seconds):
    """Sleep half the requested time; skip sleeping entirely when TESTING."""
    time.sleep(0 if TESTING is True else sleep_seconds * 0.5)


def empty_string_to_null(input_object):
    """Normalize empty-ish cell values ('', 'nan', 'nat', whitespace, None) to np.nan."""
    if pd.isna(input_object):
        return np.nan
    elif str(input_object).lower() in ('', 'nan', 'nat', 'none'):
        return np.nan
    elif isinstance(input_object, str) and any([input_object.isspace(), not input_object]):
        return np.nan
    elif input_object is None:
        return np.nan
    return input_object


def get_master_from_xlsx(path_to_master_file):
    """Parse the master 'Schedule' sheet and derive per-row game/winner flags.

    Returns:
        (master_dataframe, week_number, total_points_correct)

    Exits via sys.exit() when the operator rejects the parsed results.
    """
    master_all_sheets = pd.ExcelFile(path_to_master_file)
    master_dataframe = master_all_sheets.parse('Schedule', header=None, usecols='B:G')
    if len(list(master_dataframe)) == 5:
        # Some sheets omit the trailing points column; add it so the column
        # assignment below always matches six names.
        master_dataframe['points'] = ''
    master_dataframe.columns = ['visitors_choice', 'visitors', 'name', 'home_choice', 'home', 'points']
    master_dataframe = master_dataframe.applymap(empty_string_to_null)

    # A row is a real game when both team cells are filled and the home cell
    # is not a 'TEAM' header row.
    master_dataframe['visitor_potential_game'] = pd.notna(master_dataframe['visitors'])
    master_dataframe['home_potential_game'] = pd.notna(master_dataframe['home'])
    master_dataframe['not_visitor_home'] = np.logical_not(
        master_dataframe['home'].str.upper().str.contains('TEAM'))
    master_dataframe['is_a_game'] = master_dataframe['visitor_potential_game'] & master_dataframe[
        'home_potential_game'] & master_dataframe['not_visitor_home']

    # The week-number row is the 'WEEK ...' row directly below a 'FOOTBALL' row.
    master_dataframe['says_football'] = master_dataframe['visitors_choice'].str.upper().str.contains('FOOTBALL')
    master_dataframe['above_says_football'] = master_dataframe['says_football'].shift(1)
    master_dataframe['says_week'] = master_dataframe['visitors_choice'].str.upper().str.contains('WEEK')
    master_dataframe['is_week_number'] = master_dataframe['above_says_football'] & master_dataframe['says_week']

    # The organizer marks a winner by writing in the choice cell of that side.
    master_dataframe['dad_marked_visitor'] = pd.notna(master_dataframe['visitors_choice'])
    master_dataframe['dad_marked_home'] = pd.notna(master_dataframe['home_choice'])
    master_dataframe['dad_marked_something'] = master_dataframe['dad_marked_visitor'] | master_dataframe[
        'dad_marked_home']
    master_dataframe['dad_marked_nothing'] = np.logical_not(master_dataframe['dad_marked_something'])
    master_dataframe['visitor_won'] = master_dataframe['is_a_game'] & master_dataframe['dad_marked_visitor']
    master_dataframe['home_won'] = master_dataframe['is_a_game'] & master_dataframe['dad_marked_home']
    master_dataframe['complete_game'] = master_dataframe['is_a_game'] & master_dataframe['dad_marked_something']
    master_dataframe['incomplete_game'] = master_dataframe['is_a_game'] & master_dataframe['dad_marked_nothing']

    print('\nOkay, I think the winners are:\n')
    potential_sleep(0.5)
    games_not_completed = len(master_dataframe[master_dataframe['incomplete_game'] == True])
    if games_not_completed > 0:
        print('It looks like I have', games_not_completed, 'unfinished games.')
        if input('Does that seem right? (y/n) ') != 'y':
            print('Please exit the program and correct the file.\n')
            sys.exit()

    total_points_correct = 0
    # FIX: week_number was previously unbound (NameError at the return below)
    # when no week-number row exists; -1 is a recognizable sentinel.
    week_number = -1
    master_dataframe['is_tie_breaker'] = np.where(
        master_dataframe['visitors_choice'].str.contains('Total Combined Points') &
        master_dataframe['visitors_choice'].notna(), True, False)

    for index, row in master_dataframe.iterrows():
        visitor_team = row.visitors
        home_team = row.home
        visitor_won = row.visitor_won
        home_won = row.home_won
        if visitor_won & home_won:
            print('Tie between the ' + visitor_team + ' and the ' + home_team)
        elif visitor_won & np.logical_not(home_won):
            print(visitor_team)
        elif np.logical_not(visitor_won) & home_won:
            print(home_team)

        if row.is_week_number:
            week = str(master_dataframe.at[index, 'visitors_choice']).strip()
            week_number = int(''.join(filter(str.isdigit, week)))
            print(f'WEEK {week_number}\n')

        if row.is_tie_breaker:
            try:
                # Prefer the dedicated points cell; fall back to the label cell.
                for column_to_try in ('points', 'visitors_choice'):
                    guess_cell = empty_string_to_null(
                        str(master_dataframe.at[index, column_to_try]).strip().split('.0')[0])
                    if pd.notna(guess_cell):
                        break
                total_points_correct = int(''.join(filter(str.isdigit, guess_cell)))
                print('\nTotal Points Combined:', total_points_correct, '\n')
                potential_sleep(0.5)
                # I now have which games are correct, as well as the correct point total.
                if input('Is this what you have? Enter to continue, type anything if not. ') != '':
                    print('\nPlease correct the file and restart the program.')
                    potential_sleep(0.5)
                    sys.exit()
            except ValueError:
                if input('\nHmmm.. I don\'t see any points for Monday. Does that sound right? (y/n) ') != 'y':
                    print('\nPlease correct the file and restart the program.\n')
                    potential_sleep(0.5)
                    sys.exit()
                else:
                    # I should have some correct games, and now correct points is zero
                    if input('\nAlright, so continuing like normal. Is the above what you have? '
                             'Enter to continue, type anything if not. ') != '':
                        print('\nPlease correct the file and restart the program.')
                        sys.exit()

    return master_dataframe, week_number, total_points_correct


def potentially_inspect(dataframe, sheet, filename_with_xlsx, look_at=None):
    """Export a graded sheet to 'Inspection of <name>.xlsx' and open it.

    Only acts when `look_at` names the participant file being graded.
    NOTE(review): the retry loop has no user interaction, so a persistently
    failing export (e.g. file locked forever) would spin; behavior preserved.
    """
    if look_at:
        if not look_at.endswith('.xlsx'):
            look_at += '.xlsx'
        if look_at == filename_with_xlsx:
            satisfied = False
            while not satisfied:
                try:
                    results_name = f'Inspection of {look_at}'
                    with pd.ExcelWriter(results_name) as writer:
                        dataframe.to_excel(writer, sheet_name=sheet, index=False)
                        format_excel_worksheet(writer.sheets[sheet], dataframe)
                    if sys.platform == "win32":
                        os.startfile(results_name)
                    else:
                        opener = "open" if sys.platform == "darwin" else "xdg-open"
                        subprocess.call([opener, results_name])
                except Exception:
                    pass
                else:
                    satisfied = True


def grade_participant(master_dataframe, filename_with_xlsx, path, total_points_correct, look_at=None):
    """Grade one participant workbook against the master.

    Returns a score-row dict for the first parseable sheet, or None when no
    sheet parses (each failure is printed and skipped).
    """
    filename_w_o_xlsx = filename_with_xlsx.split('.xlsx')[0]
    participant_all_sheets = pd.ExcelFile(path + '/' + filename_with_xlsx)
    for sheet in set(participant_all_sheets.sheet_names).difference(
            {'Weekly Results', 'WeeklyResults', 'Export Summary'}):
        try:
            participant_dataframe = participant_all_sheets.parse(sheet, header=None, usecols='B:G')
            if len(list(participant_dataframe)) == 5:
                participant_dataframe['points'] = ''
            participant_dataframe.columns = ['visitors_choice', 'visitors', 'name', 'home_choice', 'home', 'points']
            participant_dataframe = participant_dataframe.applymap(empty_string_to_null)
            participant_dataframe['is_correct'] = ''
            participant_dataframe['extra_stuff --->'] = ' '
            # Copy the game/winner flags already derived from the master sheet.
            for copied_column in ('is_a_game', 'dad_marked_something', 'visitor_won', 'home_won',
                                  'is_tie_breaker', 'complete_game', 'incomplete_game'):
                participant_dataframe[copied_column] = master_dataframe[copied_column]
            participant_dataframe['p_marked_visitor'] = pd.notna(participant_dataframe['visitors_choice'])
            participant_dataframe['p_marked_home'] = pd.notna(participant_dataframe['home_choice'])
            participant_dataframe['p_visitor_chosen'] = participant_dataframe['is_a_game'] & participant_dataframe[
                'p_marked_visitor']
            participant_dataframe['p_home_chosen'] = participant_dataframe['is_a_game'] & participant_dataframe[
                'p_marked_home']
            participant_name = str(participant_dataframe.at[0, 'name']).strip()

            total_points_guessed = -1000  # sentinel: no tie-breaker guess found
            points_off = -1000
            points_off_sort = -1001
            for index, row in participant_dataframe.iterrows():
                visitor_won = row.visitor_won
                home_won = row.home_won
                if row.complete_game:
                    # Outcome according to the master sheet.
                    if visitor_won and home_won:
                        outcome = 'Tie'
                    elif visitor_won and not home_won:
                        outcome = 'Visitor'
                    elif not visitor_won and home_won:
                        outcome = 'Home'
                    else:
                        outcome = 'No game chosen yet'
                    # Choice according to the participant.
                    picked_visitor = row.p_marked_visitor
                    picked_home = row.p_marked_home
                    if picked_visitor and picked_home:
                        choice = 'Tie'
                    elif picked_visitor and not picked_home:
                        choice = 'Visitor'
                    elif not picked_visitor and picked_home:
                        choice = 'Home'
                    else:
                        choice = 'No choice made'
                    participant_dataframe.at[index, 'is_correct'] = (outcome == choice)

                if row.is_tie_breaker:
                    for column_to_try in ('points', 'visitors_choice'):
                        guess_cell = empty_string_to_null(
                            str(participant_dataframe.at[index, column_to_try]).strip().split('.0')[0])
                        if pd.notna(guess_cell):
                            break
                    try:
                        total_points_guessed = int(''.join(filter(str.isdigit, guess_cell)))
                    except ValueError:
                        total_points_guessed = -1000

            if total_points_guessed != -1000:
                try:
                    points_off = int(abs(total_points_guessed - total_points_correct))
                except ValueError:
                    points_off = -1000
            # Missing guesses sort last instead of accidentally sorting first.
            points_off_sort = np.inf if points_off == -1000 else points_off

            # FIX: the correct-count filter previously read
            # `complete_game & is_correct == True`, which pandas evaluates as
            # `(complete_game & is_correct) == True` because `&` binds tighter
            # than `==`; parenthesize to match the incorrect-count expression.
            p_games_correct = len(participant_dataframe[
                participant_dataframe['complete_game'] & (participant_dataframe['is_correct'] == True)])
            p_games_incorrect = len(participant_dataframe[
                participant_dataframe['complete_game'] & ~(participant_dataframe['is_correct'] == True)])
            participant_score_row = {
                'Sorting Name': filename_w_o_xlsx,
                'Name on Sheet': participant_name,
                'Correct': p_games_correct,
                'Incorrect': p_games_incorrect,
                'Points Guessed': total_points_guessed,
                'Points off Sort': points_off_sort,
                'Points off': points_off,
            }
            potentially_inspect(participant_dataframe, sheet, filename_with_xlsx, look_at)
            return participant_score_row
        except Exception as e:
            print(f'Unable to parse {sheet} within {filename_with_xlsx}. The exception is {e}')


def format_excel_worksheet(worksheet, dataframe):
    """Auto-size each worksheet column to its widest cell or header (xlsxwriter)."""
    for i, col in enumerate(list(dataframe)):
        iterate_length = dataframe[col].astype(str).str.len().max()
        header_length = len(col)
        max_size = max(iterate_length, header_length) + 1
        worksheet.set_column(i, i, max_size)


def conditional_format(worksheet, workbook, column_format_range, winning_number_of_games):
    """Red-highlight zero scores and green-highlight the winning score."""
    if workbook:
        colors_dictionary = {
            '0': {'bg_color': '#FFC7CE', 'font_color': '#9C0006'},
            winning_number_of_games: {'bg_color': '#C6EFCE', 'font_color': '#006100'},
        }
        for if_equals, format_dictionary in colors_dictionary.items():
            excel_format = workbook.add_format(format_dictionary)
            worksheet.conditional_format(column_format_range, {
                'type': 'cell',
                'criteria': '=',
                'value': if_equals,
                'format': excel_format
            })


def remove_inbetween_quotations(name):
    """Remove the first '"..."' span (quotes included) from `name`."""
    try:
        index_for_first_quotation = name.find('"')
        index_for_second_quotation = name.find('"', index_for_first_quotation + 1)
        return name[:index_for_first_quotation] + name[index_for_second_quotation + 1:]
    except Exception:
        return name


def remove_inbetween_open_and_close_paren(name):
    """Remove the first '(...)' span (parens included) from `name`."""
    try:
        index_for_open_paren = name.find('(')
        index_for_close_paren = name.find(')', index_for_open_paren + 1)
        return name[:index_for_open_paren] + name[index_for_close_paren + 1:]
    except Exception:
        return name


def remove_and_following(name, and_phrase):
    """Remove one occurrence of `and_phrase` (up to the next space) from `name`."""
    try:
        index_for_and = name.find(and_phrase)
        index_for_following = name.find(' ', index_for_and + 1)
        return name[:index_for_and] + name[index_for_following + 1:]
    except Exception:
        return name


def quotation_cleaner(name):
    """Strip every quoted nickname from `name`."""
    while '"' in name:
        name = remove_inbetween_quotations(name)
    return name


def paren_cleaner(name):
    """Strip every parenthesized aside from `name`."""
    while '(' in name and ')' in name:
        name = remove_inbetween_open_and_close_paren(name)
    return name


def and_cleaner(name):
    """Strip ' and '/' & ' joiners (e.g. couples' entries) from `name`."""
    for and_phrase in (' and ', ' & '):
        while and_phrase in name:
            name = remove_and_following(name, and_phrase=and_phrase)
    return name


def get_first_and_last_with_chars(name, first_name_stub_size, last_name_stub_size, use_first_letter_of_third_word):
    """Build a fuzzy join key from the stubs of the first words of a cleaned name."""
    name = str(name).strip()
    for cleaner in (quotation_cleaner, paren_cleaner, and_cleaner):
        name = cleaner(name)
    formatted_name = ''
    name_split = list(filter(None, [word.strip() for word in name.split(' ')]))
    for i, word in enumerate(name_split):
        formatted_name += ' ' if 0 < i < len(name_split) else ''
        if i == 0:
            formatted_name += word[:first_name_stub_size]
        if i == 1:
            formatted_name += word[:last_name_stub_size]
        elif i == 2 and use_first_letter_of_third_word is True:
            formatted_name += word[0]
    return formatted_name.strip()


def get_letter_from_column(dataframe, week_string):
    """Return the Excel column letter (A-Z) of the column named `week_string`."""
    for i, col in enumerate(list(dataframe)):
        if col == week_string:
            return 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'[i]


def get_filename_and_sheetname(label):
    """Return (xlsx filename, sheet name) for a report label."""
    if label.endswith('.xlsx'):
        # FIX: split on '.xlsx' (was 'xlsx'), which left a trailing '.' in
        # the sheet name, inconsistent with grade_participant's stripping.
        return label, label.split('.xlsx')[0]
    else:
        return label + '.xlsx', label


def export_excel(dataframe, label):
    """Write `dataframe` to `<label>.xlsx` with auto-sized columns."""
    filename, sheetname = get_filename_and_sheetname(label)
    # FIX: the message previously printed a literal '(unknown)' placeholder.
    print(f'Now exporting: {filename}')
    with pd.ExcelWriter(filename) as report_writer:
        dataframe.to_excel(report_writer, sheet_name=sheetname, index=False)
        format_excel_worksheet(report_writer.sheets[sheetname], dataframe)


def get_name_iterator():
    """Yield (first_stub, last_stub, use_third_initial) combos, strictest first."""
    for use_first_letter_of_third_word in [True, False]:
        for first_name_stub_size in [4, 3]:
            for last_name_stub_size in [4, 3]:
                yield first_name_stub_size, last_name_stub_size, use_first_letter_of_third_word


def get_current_column_name(week_number, column_names):
    """Find the existing 'Week NN' column, or synthesize its name."""
    for column_name in column_names:
        # FIX: compare case-insensitively; the fallback below (and typical
        # headers) use capitalized 'Week', which the old lowercase test missed.
        if f'{week_number:02}' in column_name and 'week' in column_name.lower():
            return column_name
    return f'Week {week_number:02}'


def export_results(path_to_masterfile, label, week_number, winning_number_of_games, results_dataframe):
    """Merge participant scores into the master's 'Weekly Results' sheet.

    Joins on progressively looser fuzzy name keys so scores land on the right
    roster row even when sheet names are spelled inconsistently.
    """
    filename, sheetname = get_filename_and_sheetname(label)
    # FIX: the message previously printed a literal '(unknown)' placeholder.
    print(f'Now exporting: {filename}')
    weekly_results = pd.ExcelFile(path_to_masterfile).parse('Weekly Results')
    for column in RESULT_COLUMNS:
        weekly_results[column] = np.nan
    for i, (first_name_stub_size, last_name_stub_size,
            use_first_letter_of_third_word) in enumerate(get_name_iterator()):
        joining_column_name = (f'first_{first_name_stub_size}_and_last_{last_name_stub_size}'
                               f'_with_{"initial" if use_first_letter_of_third_word else "no_initial"}')
        # Create the fuzzy join key from the roster-name column.
        weekly_results[joining_column_name] = \
            weekly_results[list(weekly_results)[0]].apply(
                get_first_and_last_with_chars,
                first_name_stub_size=first_name_stub_size,
                last_name_stub_size=last_name_stub_size,
                use_first_letter_of_third_word=use_first_letter_of_third_word
            )
        # Left join (outer on the loosest key so unmatched scores still appear);
        # collisions with already-filled result columns get a per-key suffix.
        weekly_results = pd.merge(
            weekly_results,
            results_dataframe[RESULT_COLUMNS],
            how='outer' if joining_column_name == 'first_3_and_last_3_with_no_initial' else 'left',
            left_on=joining_column_name,
            right_on='Sorting Name',
            suffixes=['', f'_{joining_column_name}']
        )
        # We always fill the initial fill_column and we want to fillna with that value.
        for column in RESULT_COLUMNS:
            fill_column = f'{column}_{joining_column_name}'
            weekly_results[column] = weekly_results[column].fillna(weekly_results[fill_column])
        # Remove the suffixed duplicates now that their values are folded in.
        weekly_results.drop(columns=[x for x in list(weekly_results)
                                     if x.endswith(f'_{joining_column_name}')], inplace=True)
    # NOTE(review): the source is truncated at this point in this view; the
    # remainder of export_results (writing/formatting the merged sheet, use of
    # winning_number_of_games) is not visible — confirm against the full file.
to 0" fat, select, raw 23174: [], # Beef, rib eye steak, boneless, lip off, separable lean only, trimmed to 0" fat, all grades, cooked, grilled 23175: [], # Beef, rib eye steak, boneless, lip off, separable lean only, trimmed to 0" fat, all grades, raw 23176: [], # Beef, rib eye steak, boneless, lip off, separable lean only, trimmed to 0" fat, choice, cooked, grilled 23177: [], # Beef, rib eye steak, boneless, lip off, separable lean only, trimmed to 0" fat, choice, raw 23178: [], # Beef, rib eye steak, boneless, lip off, separable lean only, trimmed to 0" fat, select, cooked, grilled 23179: [], # Beef, rib eye steak, boneless, lip off, separable lean only, trimmed to 0" fat, select, raw 23180: [], # Beef, rib, back ribs, bone-in, separable lean only, trimmed to 0" fat, all grades, cooked, braised 23181: [], # Beef, rib, back ribs, bone-in, separable lean only, trimmed to 0" fat, all grades, raw 23182: [], # Beef, rib, back ribs, bone-in, separable lean only, trimmed to 0" fat, choice, cooked, braised 23183: [], # Beef, rib, back ribs, bone-in, separable lean only, trimmed to 0" fat, choice, raw 23184: [], # Beef, rib, back ribs, bone-in, separable lean only, trimmed to 0" fat, select, cooked, braised 23185: [], # Beef, rib, back ribs, bone-in, separable lean only, trimmed to 0" fat, select, raw 23186: [], # Beef, rib eye steak, bone-in, lip-on, separable lean and fat, trimmed to 1/8" fat, choice, cooked, grilled 23187: [], # Beef, rib eye steak, bone-in, lip-on, separable lean and fat, trimmed to 1/8" fat, select, cooked, grilled 23188: [], # Beef, rib eye steak, bone-in, lip-on, separable lean and fat, trimmed to 1/8" fat, all grades, cooked, grilled 23189: [], # Beef, rib eye roast, bone-in, lip-on, separable lean and fat, trimmed to 1/8" fat, choice, cooked, roasted 23190: [], # Beef, rib eye roast, bone-in, lip-on, separable lean and fat, trimmed to 1/8" fat, select, cooked, roasted 23191: [], # Beef, rib eye roast, bone-in, lip-on, separable lean and fat, 
trimmed to 1/8" fat, all grades, cooked, roasted 23192: [], # Beef, rib eye steak/roast, bone-in, lip-on, separable lean and fat, trimmed to 1/8" fat, all grades, raw 23193: [], # Beef, rib eye steak/roast, bone-in, lip-on, separable lean and fat, trimmed to 1/8" fat, choice, raw 23194: [], # Beef, rib eye steak/roast, bone-in, lip-on, separable lean and fat, trimmed to 1/8" fat, select, raw 23195: [], # Beef, rib eye steak, boneless, lip-on, separable lean and fat, trimmed to 1/8" fat, choice, cooked, grilled 23196: [], # Beef, rib eye steak, boneless, lip-on, separable lean and fat, trimmed to 1/8" fat, select, cooked, grilled 23197: [], # Beef, rib eye steak, boneless, lip-on, separable lean and fat, trimmed to 1/8" fat, all grades, cooked, grilled 23198: [], # Beef, rib eye roast, boneless, lip-on, separable lean and fat, trimmed to 1/8" fat, all grades, cooked, roasted 23199: [], # Beef, rib eye roast, boneless, lip-on, separable lean and fat, trimmed to 1/8" fat, choice, cooked, roasted 23200: [], # Beef, rib eye roast, boneless, lip-on, separable lean and fat, trimmed to 1/8" fat, select, cooked, roasted 23201: [], # Beef, rib eye steak/roast, boneless, lip-on, separable lean and fat, trimmed to 1/8" fat, all grades, raw 23202: [], # Beef, rib eye steak/roast, boneless, lip-on, separable lean and fat, trimmed to 1/8" fat, choice, raw 23213: [], # Beef, rib eye steak/roast, boneless, lip-on, separable lean and fat, trimmed to 1/8" fat, select, raw 23214: [], # Beef, plate steak, boneless, inside skirt, separable lean and fat, trimmed to 0" fat, all grades, cooked, grilled 23215: [], # Beef, plate steak, boneless, inside skirt, separable lean and fat, trimmed to 0" fat, choice, cooked, grilled 23216: [], # Beef, plate steak, boneless, inside skirt, separable lean and fat, trimmed to 0" fat, select, cooked, grilled 23217: [], # Beef, plate steak, boneless, inside skirt, separable lean and fat, trimmed to 0" fat, all grades, raw 23218: [], # Beef, plate steak, 
boneless, inside skirt, separable lean and fat, trimmed to 0" fat, choice, raw 23219: [], # Beef, plate steak, boneless, inside skirt, separable lean and fat, trimmed to 0" fat, select, raw 23220: [], # Beef, ground, unspecified fat content, cooked 23221: [], # Beef, plate steak, boneless, outside skirt, separable lean and fat, trimmed to 0" fat, all grades, cooked, grilled 23222: [], # Beef, plate steak, boneless, outside skirt, separable lean and fat, trimmed to 0" fat, choice, cooked, grilled 23223: [], # Beef, plate steak, boneless, outside skirt, separable lean and fat, trimmed to 0" fat, select, cooked, grilled 23224: [], # Beef, plate steak, boneless, outside skirt, separable lean and fat, trimmed to 0" fat, all grades, raw 23225: [], # Beef, plate steak, boneless, outside skirt, separable lean and fat, trimmed to 0" fat, choice, raw 23226: [], # Beef, plate steak, boneless, outside skirt, separable lean and fat, trimmed to 0" fat, select, raw 23227: [], # Beef, rib eye steak, boneless, lip off, separable lean and fat, trimmed to 0" fat, all grades, cooked, grilled 23228: [], # Beef, rib eye steak, boneless, lip off, separable lean and fat, trimmed to 0" fat, choice, cooked, grilled 23229: [], # Beef, rib eye steak, boneless, lip off, separable lean and fat, trimmed to 0" fat, select, cooked, grilled 23230: [], # Beef, rib eye steak, boneless, lip off, separable lean and fat, trimmed to 0" fat, all grades, raw 23231: [], # Beef, rib eye steak, boneless, lip off, separable lean and fat, trimmed to 0" fat, choice, raw 23232: [], # Beef, rib eye steak, boneless, lip off, separable lean and fat, trimmed to 0" fat, select, raw 23233: [], # Beef, rib, back ribs, bone-in, separable lean and fat, trimmed to 0" fat, all grades, cooked, braised 23234: [], # Beef, rib, back ribs, bone-in, separable lean and fat, trimmed to 0" fat, choice, cooked, braised 23235: [], # Beef, rib, back ribs, bone-in, separable lean and fat, trimmed to 0" fat, select, cooked, braised 
23236: [], # Beef, rib, back ribs, bone-in, separable lean and fat, trimmed to 0" fat, all grades, raw 23237: [], # Beef, rib, back ribs, bone-in, separable lean and fat, trimmed to 0" fat, choice, raw 23238: [], # Beef, rib, back ribs, bone-in, separable lean and fat, trimmed to 0" fat, select, raw 23239: [], # Beef, loin, top sirloin petite roast, boneless, separable lean only, trimmed to 0" fat, choice, cooked, roasted 23240: [], # Beef, loin, top sirloin petite roast/filet, boneless, separable lean only, trimmed to 0" fat, choice, raw 23241: [], # Beef, loin, top sirloin cap steak, boneless, separable lean only, trimmed to 1/8" fat, all grades, cooked, grilled 23242: [], # Beef, loin, top sirloin cap steak, boneless, separable lean only, trimmed to 1/8" fat, choice, cooked, grilled 23243: [], # Beef, loin, top sirloin cap steak, boneless, separable lean only, trimmed to 1/8" fat, select, cooked, grilled 23244: [], # Beef, loin, top sirloin cap steak, boneless, separable lean only, trimmed to 1/8" fat, all grades, raw 23245: [], # Beef, loin, top sirloin cap steak, boneless, separable lean only, trimmed to 1/8" fat, choice, raw 23246: [], # Beef, loin, top sirloin cap steak, boneless, separable lean only, trimmed to 1/8" fat, select, raw 23247: [], # Beef, top loin filet, boneless, separable lean only, trimmed to 1/8" fat, all grades, cooked, grilled 23248: [], # Beef, top loin filet, boneless, separable lean only, trimmed to 1/8" fat, choice, cooked, grilled 23249: [], # Beef, top loin filet, boneless, separable
import base64
import math
import os
import uuid
from datetime import datetime

from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models import Q

""" Project model."""


class Project(models.Model):
    """A pentest engagement that groups assessments and controls access."""
    name = models.CharField(max_length=100)
    introduction = models.TextField(blank=True, default="")
    conclusion = models.TextField(blank=True, default="")
    scope = models.TextField(blank=True, default="")
    added = models.DateTimeField(default=datetime.now)
    start_date = models.DateField(null=True)
    end_date = models.DateField(null=True)
    archived = models.BooleanField(default=False)
    # Pentesters get write access; viewers get read-only access (see
    # is_user_can_view / is_user_can_edit below).
    pentesters = models.ManyToManyField(User, related_name='%(class)s_pentesters')
    viewers = models.ManyToManyField(User, related_name='%(class)s_viewers')

    def __str__(self):
        return self.name

    def p1_hits(self):
        """Return all P1 hits for the project."""
        return self.hits_by_severity(1)

    def p2_hits(self):
        """Return all P2 hits for the project."""
        return self.hits_by_severity(2)

    def p3_hits(self):
        """Return all P3 hits for the project."""
        return self.hits_by_severity(3)

    def p4_hits(self):
        """Return all P4 hits for the project."""
        return self.hits_by_severity(4)

    def p5_hits(self):
        """Return all P5 hits for the project."""
        return self.hits_by_severity(5)

    def hits_by_severity(self, severity):
        """Filter hits by severity, aggregated across all of the project's assessments."""
        hits = []
        for assessment in self.assessment_set.all():
            hits.extend(assessment.hits_by_severity(severity))
        return hits

    def get_viewable(user):
        """Returns all viewable & non-archived projects.

        NOTE: defined without @staticmethod; call as Project.get_viewable(user).
        """
        return Project.objects.filter(Q(pentesters__in=[user]) | Q(viewers__in=[user])).filter(archived=False).distinct()

    def get_archived_viewable(user):
        """Returns all viewable & archived projects (filters archived=True)."""
        return Project.objects.filter(Q(pentesters__in=[user]) | Q(viewers__in=[user])).filter(archived=True).distinct()

    def is_user_can_view(self, user):
        """Return True if the user has read access (pentester or viewer) for this project."""
        result = False
        if user in self.pentesters.all() or user in self.viewers.all():
            result = True
        return result

    def is_user_can_edit(self, user):
        """Return True if the user has write access (pentester) for this project."""
        result = False
        if user in self.pentesters.all():
            result = True
        return result

    def is_user_can_create(self, user):
        """Return True: any user may create a project."""
        return True

    class Meta:
        ordering = ('name',)


"""Assesment model."""


class Assessment(models.Model):
    """One assessment (target/scope slice) within a project; owns hits and flags."""
    name = models.CharField(max_length=100)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    added = models.DateTimeField(default=datetime.now)

    def __str__(self):
        return self.name

    def displayable_hits(self):
        """Return only hits flagged as displayable."""
        return self.hit_set.filter(displayable=True)

    def p1_hits(self):
        """Return all P1 hits for the assessment."""
        return self.hits_by_severity(1)

    def p2_hits(self):
        """Return all P2 hits for the assessment."""
        return self.hits_by_severity(2)

    def p3_hits(self):
        """Return all P3 hits for the assessment."""
        return self.hits_by_severity(3)

    def p4_hits(self):
        """Return all P4 hits for the assessment."""
        return self.hits_by_severity(4)

    def p5_hits(self):
        """Return all P5 hits for the assessment."""
        return self.hits_by_severity(5)

    def hits_by_severity(self, severity):
        """Filter hits by severity for the assessment; returns a plain list."""
        hits = []
        for hit in self.hit_set.filter(severity=severity).all():
            hits.append(hit)
        return hits

    def open_flags(self):
        """Return all open (not done) flags for the assessment."""
        return self.flag_set.filter(done=False)

    def get_viewable(user):
        """Returns all assessments belonging to projects the user can view."""
        return Assessment.objects.filter(project__in=Project.get_viewable(user))

    def is_user_can_view(self, user):
        """Read access is delegated to the owning project."""
        return self.project.is_user_can_view(user)

    def is_user_can_edit(self, user):
        """Write access is delegated to the owning project."""
        return self.project.is_user_can_edit(user)

    def is_user_can_create(self, user):
        """Creation requires write access on the owning project."""
        return self.project.is_user_can_edit(user)

    class Meta:
        ordering = ('name',)


"""Label model."""


class Label(models.Model):
    """A global, staff-managed tag (title + display color) attachable to hits."""
    title = models.CharField(max_length=200)
    color = models.CharField(max_length=7)
    deprecated = models.BooleanField(default=False)

    def __str__(self):
        return self.title

    def get_viewable(user):
        """Returns all labels (labels are visible to everyone)."""
        return Label.objects.all()

    def get_not_deprecated(user):
        """Returns not deprecated labels."""
        return Label.objects.filter(deprecated=False)

    def is_user_can_view(self, user):
        """Everyone may read labels."""
        return True

    def is_user_can_edit(self, user):
        """Only staff may edit labels."""
        return user.is_staff

    def is_user_can_create(self, user):
        """Only staff may create labels."""
        return user.is_staff

    class Meta:
        ordering = ('pk',)


"""CvssV3.1 model"""


class Cvss(models.Model):
    """CVSS v3.1 base-metric vector plus its computed base score."""
    NALP_CHOICES = (
        ('N', 'Network'),
        ('A', 'Adjacent'),
        ('L', 'Local'),
        ('P', 'Physical')
    )
    LH_CHOICES = (
        ('L', 'Low'),
        ('H', 'High')
    )
    NLH_CHOICES = (
        ('N', 'None'),
        ('L', 'Low'),
        ('H', 'High')
    )
    NR_CHOICES = (
        ('N', 'None'),
        ('R', 'Required')
    )
    UC_CHOICES = (
        ('U', 'Unchanged'),
        ('C', 'Changed')
    )

    """CVSS String values"""
    attack_vector = models.CharField(max_length=1, choices=NALP_CHOICES)
    attack_complexity = models.CharField(max_length=1, choices=LH_CHOICES)
    privilege_required = models.CharField(max_length=1, choices=NLH_CHOICES)
    user_interaction = models.CharField(max_length=1, choices=NR_CHOICES)
    scope = models.CharField(max_length=1, choices=UC_CHOICES)
    confidentiality = models.CharField(max_length=1, choices=NLH_CHOICES)
    integrity = models.CharField(max_length=1, choices=NLH_CHOICES)
    availability = models.CharField(max_length=1, choices=NLH_CHOICES)

    """Values for usage"""
    # -1.0 marks "not yet computed"; set by compute_cvss_value().
    decimal_value = models.DecimalField(max_digits=3, decimal_places=1, default=-1.0)

    def __round_up(self, n, decimals=0):
        # CVSS "roundup": ceiling to the given number of decimal places.
        multiplier = 10 ** decimals
        return math.ceil(n * multiplier) / multiplier

    def __get_cia_value(self, value):
        # Shared impact weight for C/I/A metrics (H=0.56, L=0.22, N=0.0).
        if value == "H":
            return 0.56
        elif value == "L":
            return 0.22
        else:
            return 0.0

    def __get_confidentiality_value(self):
        return self.__get_cia_value(self.confidentiality)

    def __get_integrity_value(self):
        return self.__get_cia_value(self.integrity)

    def __get_availability_value(self):
        return self.__get_cia_value(self.availability)

    def __get_attack_vector_value(self):
        # AV weights: N=0.85, A=0.62, L=0.55, P=0.2.
        if self.attack_vector == "N":
            return 0.85
        elif self.attack_vector == "A":
            return 0.62
        elif self.attack_vector == "L":
            return 0.55
        else:
            return 0.2

    def __get_attack_complexity_value(self):
        # AC weights: L=0.77, H=0.44.
        if self.attack_complexity == "L":
            return 0.77
        else:
            return 0.44

    def __get_privilege_required_value(self):
        # PR weight depends on Scope (higher when scope is Changed).
        if self.privilege_required == "N":
            return 0.85
        elif self.privilege_required == "L":
            if self.scope == "U":
                return 0.62
            else:
                return 0.68
        else:
            if self.scope == "U":
                return 0.27
            else:
                return 0.50

    def __get_user_interaction_value(self):
        # UI weights: N=0.85, R=0.62.
        if self.user_interaction == "N":
            return 0.85
        else:
            return 0.62

    def __get_exploitability(self):
        return 8.22 * self.__get_attack_vector_value() * self.__get_attack_complexity_value() * self.__get_privilege_required_value() * self.__get_user_interaction_value()

    def __get_isc_base(self):
        # ISS = 1 - (1-C)(1-I)(1-A)
        return 1.0 - ((1.0 - self.__get_confidentiality_value()) * (1.0 - self.__get_integrity_value()) * (1.0 - self.__get_availability_value()))

    def __get_isc(self, isc_base):
        # Impact: 6.42*ISS when scope Unchanged, else the changed-scope polynomial.
        if self.scope == "U":
            return 6.42 * isc_base
        else:
            return 7.52 * (isc_base - 0.029) - 3.25 * (isc_base - 0.02)**15

    def compute_cvss_value(self):
        """Compute the base score into decimal_value (0.0 when impact is <= 0)."""
        isc_base = self.__get_isc_base()
        isc = self.__get_isc(isc_base)
        exploitability = self.__get_exploitability()
        if isc > 0.0:
            # (Exploitability is recomputed here in the original; harmless.)
            exploitability = self.__get_exploitability()
            if self.scope == "U":
                self.decimal_value = self.__round_up(min(isc + exploitability, 10.0), 1)
            else:
                self.decimal_value = self.__round_up(min(1.08 * (isc + exploitability), 10.0), 1)
        else:
            self.decimal_value = 0.0

    class Meta:
        ordering = ('decimal_value',)


"""Hit model."""


class Hit(models.Model):
    """A finding (vulnerability) within an assessment, with severity and CVSS."""
    title = models.CharField(max_length=200)
    body = models.TextField(blank=True, default="")
    remediation = models.TextField(blank=True, default="")
    asset = models.CharField(blank=True, max_length=256, default="")
    assessment = models.ForeignKey(Assessment, null=True, on_delete=models.CASCADE)
    added = models.DateTimeField(default=datetime.now)
    # Severity P0..P5 (lower number = more severe); 0 allowed by the validator.
    severity = models.IntegerField(default=5, validators=[MinValueValidator(0), MaxValueValidator(5)])
    # 0 = undefined, 1 = Hard, 2 = Moderate, 3 = Easy (see get_fix_complexity_str).
    fix_complexity = models.IntegerField(default=0, validators=[MinValueValidator(0), MaxValueValidator(3)])
    displayable = models.BooleanField(default=True)
    cvss = models.OneToOneField(Cvss, null=True, on_delete=models.SET_NULL)
    labels = models.ManyToManyField(Label)

    def __str__(self):
        return self.title

    def get_viewable(user):
        """Returns all hits belonging to assessments the user can view."""
        return Hit.objects.filter(assessment__in=Assessment.get_viewable(user))

    def is_user_can_view(self, user):
        """Read access is delegated to the owning assessment."""
        return self.assessment.is_user_can_view(user)

    def is_user_can_edit(self, user):
        """Write access is delegated to the owning assessment."""
        return self.assessment.is_user_can_edit(user)

    def is_user_can_create(self, user):
        """Creation requires write access on the owning assessment."""
        return self.assessment.is_user_can_edit(user)

    def get_unique_id(self):
        """Return a pretty value of the ID, ex: PTART-2022-<id>"""
        return "PTART-" + str(self.added.year) + "-" + str(self.id).zfill(5)

    def get_fix_complexity_str(self):
        """Human-readable fix complexity; 'N/D' when unset (0)."""
        value = "N/D"
        if self.fix_complexity == 1:
            value = "Hard"
        elif self.fix_complexity == 2:
            value = "Moderate"
        elif self.fix_complexity == 3:
            value = "Easy"
        return value

    def get_cvss_value(self):
        """Return the decimal value of the cvss, or '---' when no CVSS is attached."""
        if self.cvss is None:
            return "---"
        else:
            return self.cvss.decimal_value

    def get_cvss_string(self):
        """Return the CVSS v3.1 vector string, or '' when no CVSS is attached."""
        if self.cvss is None:
            return ""
        else:
            return "CVSS:3.1/AV:" + self.cvss.attack_vector + "/AC:" + self.cvss.attack_complexity + \
                "/PR:" + self.cvss.privilege_required + "/UI:" + self.cvss.user_interaction + \
                "/S:" + self.cvss.scope + "/C:" + self.cvss.confidentiality + \
                "/I:" + self.cvss.integrity + "/A:" + self.cvss.availability

    def delete(self, *args, **kwargs):
        """Delete the hit and its attached Cvss row (SET_NULL would orphan it)."""
        if self.cvss:
            self.cvss.delete()
        # NOTE(review): super(self.__class__, ...) breaks under subclassing;
        # kept as-is — confirm no Hit subclasses exist.
        return super(self.__class__, self).delete(*args, **kwargs)

    class Meta:
        ordering = ('severity', '-cvss', 'title',)


"""Comment model."""


class Comment(models.Model):
    """A user comment attached to a hit."""
    hit = models.ForeignKey(Hit, null=True, on_delete=models.CASCADE)
    text = models.CharField(max_length=1000, default="")
    author = models.ForeignKey(User, null=True, on_delete=models.PROTECT)
    added = models.DateTimeField(default=datetime.now)

    def get_viewable(user):
        """Returns all comments belonging to hits the user can view."""
        return Comment.objects.filter(hit__in=Hit.get_viewable(user))

    def is_user_can_view(self, user):
        """Read access is delegated to the owning hit."""
        return self.hit.is_user_can_view(user)

    def is_user_can_edit(self, user):
        """Write access is delegated to the owning hit."""
        return self.hit.is_user_can_edit(user)

    def is_user_can_create(self, user):
        """Creation requires write access on the owning hit."""
        return self.hit.is_user_can_edit(user)

    def __str__(self):
        return self.text

    class Meta:
        ordering = ('added',)


"""Screenshot model."""


class Screenshot(models.Model):
    """An image attached to a hit, stored under the screenshots/ media folder."""
    upload_folder = 'screenshots'
    hit = models.ForeignKey(Hit, null=True, on_delete=models.CASCADE)
    screenshot = models.ImageField(upload_to=upload_folder)
    caption = models.CharField(blank=True, max_length=256, default="")

    def get_data(self):
        """Get screenshot data in Base64 as a data: URI.

        NOTE(review): opens self.screenshot.url as a filesystem path — this
        only works when the media URL maps 1:1 onto a local path; also the
        extension keeps its leading dot in the MIME type. Confirm intended.
        """
        encoded_string = ''
        extension = os.path.splitext(self.screenshot.url)[1]
        with open(self.screenshot.url, 'rb') as img_f:
            encoded_string = base64.b64encode(img_f.read())
        return 'data:image/%s;base64,%s' % (extension, encoded_string.decode("utf-8"))

    def get_raw_data(self):
        """Get screenshot data in binary format."""
        result = ''
        with open(self.screenshot.url, 'rb') as img_f:
            result = img_f.read()
        return result

    def delete(self):
        """Delete file related to the screenshot, then the database row."""
        os.remove(self.screenshot.url)
        super(Screenshot, self).delete()

    def get_viewable(user):
        """Returns all screenshots belonging to hits the user can view."""
        return Screenshot.objects.filter(hit__in=Hit.get_viewable(user))

    def is_user_can_view(self, user):
        """Read access is delegated to the owning hit."""
        return self.hit.is_user_can_view(user)

    def is_user_can_edit(self, user):
        """Write access is delegated to the owning hit."""
        return self.hit.is_user_can_edit(user)

    def is_user_can_create(self, user):
        """Verify if the user can create this screenshot."""
        # NOTE(review): the source is truncated at this point in this view;
        # the method body (and any further code) is missing here.
# -*- encoding: utf-8 -*-
# repo: marvintau/congram
#
# Tiny terminal "graphics" toolkit: colored rectangles, grids, heatmaps and
# framed plots rendered with 24-bit ANSI escape sequences.
#
# NOTE(review): the original file was Python 2 (tuple-parameter lambda,
# `print` statement, `unicode`); this revision is Python 3 compatible.

import os
import sys
import itertools
import time

import numpy as np


def flatten(l):
    """Flatten one level of nesting.

    Returns [] for an empty list, and the list unchanged when its first
    element is not itself a list (callers only pass homogeneous input).
    """
    if l == []:
        return []
    elif not isinstance(l[0], list):
        return l
    else:
        return list(itertools.chain.from_iterable(l))


def group_by(lis, key):
    """Sort `lis` by `key` and return the list of groups of equal-key items."""
    groups = itertools.groupby(sorted(lis, key=key), key)
    return [list(dat) for _, dat in groups]


# Polynomial RGB color maps over a normalized [0, 1] input.
# BUG FIX: "BlueGreenYellow" used the Python-2-only tuple-parameter form
# `lambda (x): ...`; it is now a plain one-argument lambda like the others.
color_func = {
    "BlueGreenYellow": lambda x: Color(
        int((0.14628343 - 0.61295736 * x + 1.36894882 * x * x) * 127),
        int((0.01872288 + 1.65862067 * x - 0.8011199 * x * x) * 127),
        int((0.42712882 + 0.5047786 * x - 0.61649645 * x * x) * 127)),
    "Sandy": lambda x: Color(
        int((0.60107395 + 1.63435499 * x - 1.9800948 * x * x) * 127),
        int((0.25372145 + 1.98482627 * x - 1.93612357 * x * x) * 127),
        int((0.20537569 + 0.42332151 * x - 0.47753999 * x * x) * 127)),
    "Plum": lambda x: Color(
        int((0.136180 + 0.775009 * x + -0.133166 * x * x) * 127),
        int((0.036831 + 0.040629 * x + 0.781372 * x * x) * 127),
        int((-0.087716 + 1.345565 * x + -0.743961 * x * x) * 127)),
}


def full_color(color_scheme_name, val, minval, maxval):
    """Map `val` in [minval, maxval] to a FullColor (fore brightened by 127)."""
    normed_val = (val - minval) / (maxval - minval)
    color = color_func[color_scheme_name](normed_val)
    return FullColor(color + 127, color)


def ranged_color(color_func, val, minval, maxval):
    """Apply a color function to `val` normalized into [0, 1].

    The parameter deliberately shadows the module-level `color_func` dict:
    callers pass one of its entries.
    """
    return color_func((val - minval) / (maxval - minval))


class Pos:
    """A (row, col) coordinate / size pair on the character grid."""

    def __init__(self, row, col):
        self.row = row
        self.col = col

    def __add__(self, pos):
        return Pos(self.row + pos.row, self.col + pos.col)

    def __mul__(self, pos_time):
        # Multiply component-wise by a 2-tuple, or by another Pos (the Pos
        # form truncates to int, e.g. for 0.5 scaling of margins).
        if type(pos_time) is tuple:
            return Pos(self.row * pos_time[0], self.col * pos_time[1])
        else:
            return Pos(int(self.row * pos_time.row),
                       int(self.col * pos_time.col))

    def __str__(self):
        return "{%d, %d}" % (self.row, self.col)

    def t(self):
        """Transpose: swap row and col."""
        return Pos(self.col, self.row)

    def corners(self):
        """The four corners of a rect of this size, clockwise from origin."""
        return [Pos(0, 0), Pos(self.row, 0),
                Pos(self.row, self.col), Pos(0, self.col)]

    def center(self):
        return Pos(int(round(self.row * 0.5)), int(round(self.col * 0.5)))

    # The next two check that one rect encloses another: a pos is enclosed
    # when it lies on the bottom-right side of self.

    def deeper_than(self, pos):
        return self.row >= pos.row and self.col >= pos.col

    def shallower_than(self, pos):
        return self.row <= pos.row and self.col <= pos.col


class Color:
    """A 24-bit RGB color; components stored as ints."""

    def __init__(self, r, g, b):
        self.r = int(r)
        self.g = int(g)
        self.b = int(b)

    def __add__(self, inc):
        # BUG FIX: the original tested `type(inc) == inc`, which is never
        # true, so adding a 3-tuple always raised TypeError.
        if type(inc) == tuple and len(inc) == 3:
            return Color(self.r + inc[0], self.g + inc[1], self.b + inc[2])
        elif type(inc) == Color:
            return Color(self.r + inc.r, self.g + inc.g, self.b + inc.b)
        elif type(inc) == int:
            return Color(self.r + inc, self.g + inc, self.b + inc)
        else:
            raise TypeError("operand type must be either 3-tuple or Color")

    def __mul__(self, inc):
        if type(inc) == tuple and len(inc) == 3:
            return Color(self.r * inc[0], self.g * inc[1], self.b * inc[2])
        elif type(inc) == int:
            return Color(self.r * inc, self.g * inc, self.b * inc)
        else:
            raise TypeError("operand type must be either 3-tuple or int")

    def __str__(self):
        return "{%d, %d, %d}" % (self.r, self.g, self.b)


class FullColor:
    """A foreground + background Color pair."""

    def __init__(self, fore=None, back=None):
        if fore is None:
            self.fore = Color(0, 0, 0)
        elif type(fore) == tuple and len(fore) == 3:
            self.fore = Color(*fore)
        else:
            self.fore = fore
        if back is None:
            self.back = Color(0, 0, 0)
        elif type(back) == tuple and len(back) == 3:
            self.back = Color(*back)
        else:
            self.back = back

    def __add__(self, inc):
        # 2-tuple: (fore_inc, back_inc); 3-tuple or int: applied to both.
        if type(inc) == tuple:
            if len(inc) == 2:
                return FullColor(self.fore + inc[0], self.back + inc[1])
            elif len(inc) == 3:
                return FullColor(self.fore + inc, self.back + inc)
            else:
                raise TypeError("operand type must be tuple")
        elif type(inc) is int:
            return FullColor(self.fore + inc, self.back + inc)
        else:
            raise TypeError("operand type must be tuple")

    def __mul__(self, inc):
        if type(inc) == tuple:
            if len(inc) == 2:
                return FullColor(self.fore * inc[0], self.back * inc[1])
            elif len(inc) == 3:
                return FullColor(self.fore * inc, self.back * inc)
            else:
                raise TypeError("operand type must be tuple")
        elif type(inc) is int:
            return FullColor(self.fore * inc, self.back * inc)
        else:
            raise TypeError("operand type must be tuple")

    def __str__(self):
        return str(self.fore) + " " + str(self.back)


class Stroke:
    """A run of text at a grid position with a FullColor."""

    def __init__(self, pos, text, color):
        self.pos = pos
        self.color = color
        self.text = text

    def trunc(self, num, is_from_left=True):
        # Keep `num` characters.  From the left: drop the leading part and
        # advance pos accordingly; from the right: just cut the tail.
        if is_from_left:
            trunced = len(self.text) - num
            return Stroke(self.pos + Pos(0, trunced),
                          self.text[trunced:], self.color)
        else:
            return Stroke(self.pos, self.text[:num], self.color)

    def shaded_by(self, next):
        """Return the visible pieces of self after `next` is drawn on top.

        Cases: different row or no horizontal overlap -> self unchanged;
        fully covered -> nothing; `next` strictly inside -> two pieces;
        otherwise only the uncovered side remains.
        """
        self_l = self.pos.col
        self_r = self.pos.col + len(self.text)
        next_l = next.pos.col
        next_r = next.pos.col + len(next.text)

        l_shaded = next_l <= self_l
        r_shaded = next_r >= self_r
        dodged = (self.pos.row != next.pos.row or
                  next_r < self_l or next_l > self_r)

        if dodged:
            return [self]
        elif l_shaded and r_shaded:
            return []
        elif not (l_shaded or r_shaded):
            return [self.trunc(next_l - self_l, is_from_left=False),
                    self.trunc(self_r - next_r, is_from_left=True)]
        else:
            if l_shaded:
                return [self.trunc(self_r - next_r, is_from_left=True)]
            if r_shaded:
                return [self.trunc(next_l - self_l, is_from_left=False)]

    def __str__(self):
        # 24-bit ANSI color escapes; the \x01/\x02 wrappers presumably mark
        # the escapes as zero-width for readline-style width counting.
        COL_FORE = 38
        COL_BACK = 48
        COL_RESET = '\x01\x1b[0m\x02'
        COL_SEQ = '\x01\x1b[{z};2;{r};{g};{b}m\x02'
        c = self.color
        fore = COL_SEQ.format(z=COL_FORE, r=c.fore.r, g=c.fore.g, b=c.fore.b)
        back = COL_SEQ.format(z=COL_BACK, r=c.back.r, g=c.back.g, b=c.back.b)
        return fore + back + self.text + COL_RESET


class Rect:
    """A filled rectangle with centered text; may contain enclosed children."""

    render_time = 0
    render_count = 0

    def __init__(self, pos=None, size=None, text="text", color=None):
        # BUG FIX: the defaults used to be shared Pos/FullColor instances
        # (mutable default arguments); build fresh ones per call instead.
        self.pos = Pos(0, 0) if pos is None else pos
        self.size = Pos(10, 20) if size is None else size
        self.text = text
        self.color = (FullColor((240, 240, 240), (20, 20, 20))
                      if color is None else color)
        self.children = []

    def add_child(self, child):
        """Attach `child` only when it is fully enclosed by this rect."""
        self_bottom_right = self.pos + self.size
        child_bottom_right = child.pos + child.size
        if child.pos.deeper_than(self.pos) and \
           child_bottom_right.shallower_than(self_bottom_right):
            self.children.append(child)

    # Override this for more effective rendering.
    def render_rect(self, pos):
        """Strokes for this rect alone, offset by the parent position `pos`."""
        strokes = []
        for line in range(self.size.row):
            if line == int(round(self.size.row * 0.5)) - 1:
                stroke_text = self.text.center(self.size.col, " ")
            else:
                stroke_text = "".ljust(self.size.col, " ")
            stroke_pos = self.pos + pos + Pos(line, 0)
            strokes.append(Stroke(stroke_pos, stroke_text, self.color))
        return strokes

    def render(self, pos):
        """Strokes for this rect and, painted over it, all of its children."""
        strokes = self.render_rect(pos)
        for child in self.children:
            strokes.extend(child.render(self.pos + pos))
        return strokes

    def draw(self):
        """Resolve occlusion per row and write the strokes to stdout."""
        strokes = self.render(Pos(0, 0))
        strokes = group_by(strokes, lambda rs: rs.pos.row)
        for line in strokes:
            curr_line = [line[0]]
            for next_stroke in line[1:]:
                curr_line = flatten([curr.shaded_by(next_stroke)
                                     for curr in curr_line])
                curr_line.append(next_stroke)
            for rs in sorted(curr_line, key=lambda rs: rs.pos.col):
                # Python 3: `unicode` no longer exists; str() is equivalent.
                sys.stdout.write(str(rs))
            sys.stdout.write('\n')
        sys.stdout.flush()


class Canvas(Rect):
    """A Rect sized to the current terminal (queried via `stty size`)."""

    def __init__(self):
        rows, cols = os.popen('stty size', 'r').read().split()
        size = Pos(int(rows) - 1, int(cols))
        color = FullColor()
        Rect.__init__(self, Pos(0, 0), size, "", color)
        self.cursor = Pos(0, 0)


class Grid(Rect):
    """A table of Rect cells; `table` holds (text, FullColor) pairs."""

    def __init__(self, pos=None, table=None, grid_size=None, back_color=None):
        table = [[]] if table is None else table
        grid_size = Pos(3, 3) if grid_size is None else grid_size
        if back_color is None:
            back_color = FullColor((255, 255, 255), (127, 127, 127))

        # Widen the cells so the longest text fits with 1 char of padding.
        max_cell_size = 0
        for line in table:
            for cell, _ in line:
                if max_cell_size < len(cell):
                    max_cell_size = len(cell)
        # BUG FIX: `grid_width` was computed and then ignored; honor the
        # requested column width when it is already wide enough.
        grid_width = (max_cell_size + 2
                      if max_cell_size + 2 > grid_size.col else grid_size.col)
        grid_size = Pos(grid_size.row, grid_width)

        table_size = Pos(len(table), len(table[0])) * grid_size
        # NOTE(review): `pos` is accepted but unused (as in the original) —
        # children are stored parent-relative and add_child assumes
        # self.pos == (0, 0); preserved as-is.
        Rect.__init__(self, Pos(0, 0), table_size, "", back_color)

        for row, line in enumerate(table):
            for col, (cell, color) in enumerate(line):
                cell_pos = grid_size * Pos(row, col)
                self.add_child(Rect(cell_pos, grid_size, cell, color))


class Heatmap(Grid):
    """A Grid whose numeric cells are colored by a color scheme."""

    def __init__(self, pos=None, table=None, grid_size=None,
                 color_scheme="Sandy", back_color=None):
        table = [[]] if table is None else table
        minval = min([min(l) for l in table])
        maxval = max([max(l) for l in table])

        # BUG FIX: table_item used the comprehension variable `c` and the
        # closed-over minval/maxval instead of its own parameters, which
        # breaks under Python 3 comprehension scoping.
        def table_item(cell, min_val, max_val, color_scheme):
            return ("%1.2f" % cell,
                    full_color(color_scheme, cell, min_val, max_val))

        table = [[table_item(c, minval, maxval, color_scheme) for c in line]
                 for line in table]
        # NOTE(review): `pos` and `back_color` are unused in the original
        # call as well; preserved to keep the rendered output identical.
        Grid.__init__(self, Pos(0, 0), table, grid_size)


class Frame(Rect):
    """Draw an axes-style frame (with optional tick marks) around a Rect."""

    def __init__(self, pos=None, rect=None,
                 sides=('left', 'right', 'top', 'bottom'),
                 ticks=('left', 'bottom'),
                 frame_margin=None, tick_rep=None, tick_off=None,
                 corner_style='round'):
        # BUG FIX: `rect=Rect()` was a shared mutable default whose pos was
        # shifted on every Frame construction; create a fresh one instead.
        rect = Rect() if rect is None else rect
        frame_margin = Pos(2, 4) if frame_margin is None else frame_margin
        tick_rep = Pos(3, 6) if tick_rep is None else tick_rep
        tick_off = Pos(1, 5) if tick_off is None else tick_off

        Rect.__init__(self, text="", size=rect.size + frame_margin)
        self.frame_margin = frame_margin
        self.sides = sides
        self.ticks = ticks
        # Center the framed rect inside the margin.
        rect.pos = rect.pos + frame_margin * Pos(0.5, 0.5)
        self.add_child(rect)
        self.corner_style = corner_style
        self.tick_rep = tick_rep
        self.tick_off = tick_off

    def render_rect(self, pos):
        corner_styles = {
            'rect': [u"┌", u"└", u"┘", u"┐"],
            'round': [u"╭", u"╰", u"╯", u"╮"],
        }
        corners = corner_styles[self.corner_style]
        HORI_BAR = u"─"
        VERT_BAR = u"│"
        hori_tick = u"┴"
        vert_tick = u"├"  # NOTE(review): unused; left-axis ticks lost below

        size = self.size + Pos(-1, -1)
        pos = self.pos + pos
        margin = self.frame_margin * Pos(0.5, 0.5)

        hori_tick_pos = [p for p in range(size.col)
                         if (p - self.tick_off.col) % self.tick_rep.col == 0]
        # (removed a stray Python-2 debug `print hori_tick_pos`)

        strokes = []

        # Fill up the background.
        for line in range(0, size.row + 1):
            strokes.append(Stroke(pos + Pos(line, 0), " " * size.col,
                                  self.color))

        # Top and bottom axes (the bottom one may carry tick marks).
        if 'bottom' in self.sides:
            bottom_str = HORI_BAR * size.col
            if 'bottom' in self.ticks:
                for i in hori_tick_pos:
                    bottom_str = bottom_str[:i] + hori_tick + bottom_str[i + 1:]
            strokes.append(Stroke(pos + Pos(size.row, 0), bottom_str,
                                  self.color))
        if 'top' in self.sides:
            strokes.append(Stroke(pos, HORI_BAR * size.col, self.color))

        # Left and right axes.
        # NOTE(review): unlike the horizontal sides these ignore the `pos`
        # offset (Stroke(Pos(line, 0), ...)); preserved as in the original.
        if 'left' in self.sides:
            for line in range(1, size.row):
                strokes.append(Stroke(Pos(line, 0),
                                      VERT_BAR + (" " * (margin.col - 1)),
                                      self.color))
        if 'right' in self.sides:
            for line in range(1, size.row):
                strokes.append(Stroke(Pos(line, size.col - margin.col + 1),
                                      (" " * (margin.col - 1)) + VERT_BAR,
                                      self.color))

        # Corners.
        corner_cond = [('left', 'top'), ('left', 'bottom'),
                       ('right', 'bottom'), ('right', 'top')]
        # NOTE(review): the available source is truncated here — the loop
        # that places `corners` according to `corner_cond` is lost.  Return
        # what has been built so callers (Rect.render) still work.
        return strokes
#
# Deobfuscated local-interface discovery helpers.  The no-op arithmetic
# filler statements of the obfuscated original (`if 29 - 29: ...`) never
# executed and have been removed; names were mapped consistently.
#
# NOTE(review): `netifaces`, `commands`, `lprint`, `lisp_interface`,
# `lisp_address`, `lisp_mymacs`, `lisp_is_macos` and LISP_AFI_IPV4 are
# defined elsewhere in this module.  `commands` is Python-2 only; a
# `subprocess` equivalent is needed for a Python 3 port.
#

def lisp_get_local_interfaces():
    """Register a lisp_interface entry for every local network device."""
    for device in netifaces.interfaces():
        interface = lisp_interface(device)
        interface.add_interface()
    return


def lisp_get_loopback_address():
    """Return the first non-127.0.0.1 IPv4 address on "lo", else None."""
    for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
        if addr["peer"] == "127.0.0.1":
            continue
        return addr["peer"]
    return None


def lisp_is_mac_string(mac_str):
    """True if mac_str looks like "xxxx-xxxx-xxxx", optionally "/<len>"."""
    fields = mac_str.split("/")
    if len(fields) == 2:
        mac_str = fields[0]
    return len(mac_str) == 14 and mac_str.count("-") == 2


def lisp_get_local_macs():
    """Populate the global lisp_mymacs dict, mapping MAC -> [device names]."""
    for device in netifaces.interfaces():
        # Strip separators before the alphanumeric sanity check.
        # BUG FIX: the original applied both replacements to the raw name,
        # discarding the ":" removal; chain them instead.
        name = device.replace(":", "").replace("-", "")
        if not name.isalnum():
            continue

        # Some interfaces raise when queried (e.g. gif/stf); skip them.
        try:
            addresses = netifaces.ifaddresses(device)
        except Exception:
            continue

        # Python 3: dict.has_key() replaced by `in`.
        if netifaces.AF_LINK not in addresses:
            continue
        mac = addresses[netifaces.AF_LINK][0]["addr"].replace(":", "")

        # Skip pseudo interfaces with short link addresses.
        if len(mac) < 12:
            continue

        lisp_mymacs.setdefault(mac, []).append(device)

    lprint("Local MACs are: {}".format(lisp_mymacs))
    return


def lisp_get_local_rloc():
    """Return the IPv4 lisp_address of the default-route interface
    (an empty address when no default route or no address is found)."""
    out = commands.getoutput("netstat -rn | egrep 'default|0.0.0.0'")
    if out == "":
        return lisp_address(LISP_AFI_IPV4, "", 32, 0)

    # Interface name is the last field of the first default-route line.
    out = out.split("\n")[0]
    device = out.split()[-1]

    addr_str = ""
    macos = lisp_is_macos()
    if macos:
        out = commands.getoutput("ifconfig {} | egrep 'inet '".format(device))
        if out == "":
            return lisp_address(LISP_AFI_IPV4, "", 32, 0)
    else:
        cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
        out = commands.getoutput(cmd)
        if out == "":
            cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
            out = commands.getoutput(cmd)
        if out == "":
            return lisp_address(LISP_AFI_IPV4, "", 32, 0)

    # Return the first address found on the default-route interface.
    for line in out.split("\n"):
        address = line.split()[1]
        if not macos:
            address = address.split("/")[0]  # strip the prefix length
        return lisp_address(LISP_AFI_IPV4, address, 32, 0)

    return lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
# data_editor_no_chars/snippest.py
#
# Method snippets for the pyuic-generated UI class.  The functions take
# `self` explicitly: they are meant to be copied into / attached to the
# generated MainWindow UI class, which provides QtWidgets / QtCore / QtGui
# and the widget attributes referenced here.
#
# STEPS
# 0. python -m PyQt5.uic.pyuic -x data_editor_no_chars.ui -o aquarium.py
# 1. Copy the snippets (imports and methods) into the generated file.
# 2. Connect, e.g.: self.actionSave.triggered.connect(self.saveAll)
# 3. QTableView: self.productData = ProductDataTableView(self.mainWidgetContents)
#
# https://stackoverflow.com/questions/39488901/change-qsortfilterproxymodel-behaviour-for-multiple-column-filtering

import urllib.request
import shutil
import sys
import os

from PyQt5.QtSql import *


def changeState(self, state):
    """Switch the main view: 0 = details, 1 = product table, 2 = metadata."""
    # self.metaData.hide()  # Removed for the no_chars version
    self.productData.hide()
    if state == 0:
        self.state = 1
        self.detailsArea.raise_()
    else:
        # NOTE(review): indentation of the original was ambiguous here;
        # raise_() is grouped under state == 2, state is recorded for both.
        if state == 1:
            self.productData.show()
        elif state == 2:
            self.metaData.show()
            self.categoryArea.raise_()
        self.state = state


def switchDataViewInit(self):
    """Reset the category selection and show the details pane."""
    self.categories.selectionModel().clearSelection()
    self.changeState(0)


def switchDataViewCat(self, cat):
    """Show the product table filtered to category `cat`, creating the
    source model and filter proxy lazily on first use."""
    self.changeState(1)

    if not hasattr(self, "productsModel"):
        srcModel = QSqlTableModel(None, self.db)
        proxyModel = QtCore.QSortFilterProxyModel(None)
        srcModel.setTable("products")
        srcModel.select()
        srcModel.setEditStrategy(QSqlTableModel.OnManualSubmit)
        proxyModel.setSourceModel(srcModel)
        self.productsModel = proxyModel

        self.productData.setModel(self.productsModel)
        self.productData.selectionModel().selectionChanged.connect(
            self.changeProductData)
        # Column 0 (the id) is read-only.
        self.productData.setItemDelegateForColumn(
            0, ReadOnlyDelegate(self.productData))
        for i in [1, 2] + list(range(7, 12)):
            self.productData.hideColumn(i)
        # Filter on the category column.
        self.productsModel.setFilterKeyColumn(1)

        headers = {0: "Mã số", 3: "Tên", 4: "Giá 0", 5: "Giá 1",
                   6: "Liên kết ảnh", 12: "Liên kết ảnh tóm tắt"}
        for key in headers:
            self.productsModel.setHeaderData(
                key, QtCore.Qt.Horizontal, headers[key])
        sizes = {0: 60, 3: 120, 4: 60, 5: 60, 6: 120, 12: 120}
        for key in sizes:
            self.productData.setColumnWidth(key, sizes[key])

    # Anchored so category 1 does not also match 10, 11, ...
    self.productsModel.setFilterRegExp("^" + str(cat) + "$")

    if self.productsModel.rowCount() == 0:
        # Empty category: seed it with one blank row carrying the category,
        # inserted through the source model so the proxy picks it up.
        srcModel = self.productsModel.sourceModel()
        rowCount = srcModel.rowCount()
        srcModel.insertRows(rowCount, 1)
        index = srcModel.index(rowCount, 1)
        srcModel.setData(index, cat, QtCore.Qt.EditRole)

    self.productData.defaultValues = {1: cat}


def showCropImageDialog(self, o):
    """Open the crop dialog, sized by the product's brief-image scale and
    positioned at the stored crop origin (stored doubled in the model)."""
    if self.targetProduct is not None:
        index = self.targetProduct.siblingAtColumn(8)
        model = self.productsModel
        scale = model.data(index)
        div = 1
        for i in range(0, scale):
            div /= 2.0
        try:
            cropXidx = self.targetProduct.siblingAtColumn(9)
            posX = self.productData.model().data(cropXidx)
            cropYidx = self.targetProduct.siblingAtColumn(10)
            posY = self.productData.model().data(cropYidx)
            posX = int(posX / 2)
            posY = int(posY / 2)
        except Exception:
            # Missing/non-numeric crop position: start at the origin.
            posX = 0
            posY = 0
        self.cropImgDialog.openDialog(int(1800 * div), int(1200 * div),
                                      posX, posY)


def setupUiEx(self, MainWindow):
    """Extra initialisation to run after the generated setupUi()."""
    import tkinter as tk
    self.tkClipboard = tk.Tk()

    # Plain-text description editor, synced on every change.
    self.description.setAcceptRichText(False)
    self.description.textChanged.connect(self.onDescriptionTextChanged)

    # @TODO: pass two params to __init__: self and MainWindow
    self.cropImgDialog = CropImgDialog(MainWindow, self)
    self.dropBriefImg.clicked.connect(self.showCropImageDialog)

    # Cache of downloaded product images keyed by URL; "cur" is a counter.
    self.tmpImgs = {"cur": 1}
    if os.path.exists("./tmp"):
        shutil.rmtree("./tmp")
    os.makedirs("./tmp")

    self.curMetaDataRow = None

    databaseFileName = "simple-e-commerce.db"
    if not os.path.exists(databaseFileName):
        print("".join(["Không thấy tập tin ", databaseFileName, "."]))
        exit(0)
    catFolder = os.getcwd() + '/cat'
    if not os.path.exists(catFolder):
        print("Không thấy thư mục 'cat'.")
        exit(0)

    # Decorate MainWindow.
    MainWindow.showMaximized()

    # Categories tree backed by the ./cat folder structure.
    self.catModel = QtWidgets.QFileSystemModel()
    self.catModel.setRootPath(catFolder)
    self.categories.setModel(self.catModel)
    self.categories.setRootIndex(self.catModel.index(catFolder))
    self.categories.selectionModel().selectionChanged.connect(self.changeCat)
    for i in range(1, 4):
        self.categories.setColumnHidden(i, True)

    # Database connection and initial view.
    self.db = QSqlDatabase.addDatabase("QSQLITE")
    self.db.setDatabaseName(databaseFileName)
    self.db.open()
    self.switchDataViewInit()

    # Radio groups: active-image selector and brief-image scale.
    self.targetProduct = None
    self.activeImgLov = [self.prodImg, self.catImg]
    self.briefImgScaleLov = [self.briefFull, self.briefHalf,
                             self.briefQuarter, self.briefMin]
    self.prodImg.toggled.connect(lambda: self.onActiveImgChange(self.prodImg))
    self.catImg.toggled.connect(lambda: self.onActiveImgChange(self.catImg))
    self.briefFull.toggled.connect(
        lambda: self.onBriefImgScaleChange(self.briefFull))
    self.briefHalf.toggled.connect(
        lambda: self.onBriefImgScaleChange(self.briefHalf))
    self.briefQuarter.toggled.connect(
        lambda: self.onBriefImgScaleChange(self.briefQuarter))
    self.briefMin.toggled.connect(
        lambda: self.onBriefImgScaleChange(self.briefMin))


def onActiveImgChange(self, radio):
    """Persist which image (product/category) is active, column 7."""
    if radio.isChecked():
        try:
            activeImgVal = self.activeImgLov.index(radio)
            if self.targetProduct is not None:
                index = self.targetProduct.siblingAtColumn(7)
                self.productsModel.setData(index, activeImgVal,
                                           QtCore.Qt.EditRole)
        except Exception:
            pass  # best-effort: no product selected / model not built yet


def onBriefImgScaleChange(self, radio):
    """Persist the brief-image scale choice, column 8."""
    if radio.isChecked():
        try:
            briefImgScaleVal = self.briefImgScaleLov.index(radio)
            if self.targetProduct is not None:
                index = self.targetProduct.siblingAtColumn(8)
                self.productsModel.setData(index, briefImgScaleVal,
                                           QtCore.Qt.EditRole)
        except Exception:
            pass  # best-effort, as above


def saveAll(self):
    """Commit all pending edits and drop the model so it is rebuilt."""
    if hasattr(self, "productsModel"):
        if self.productsModel.sourceModel().submitAll():
            print("self.productsModel.sourceModel().submitAll(): SUCCESS")
        else:
            print("self.productsModel.sourceModel().submitAll(): FAILURE")
        delattr(self, "productsModel")
    # @TODO prevent all other submitAll is commited


def changeCat(self, cur, pre):
    """Category tree selection changed: filter the current view by it."""
    indexes = cur.indexes()
    if len(indexes) > 0:
        fileName = self.catModel.fileName(indexes[0])
        # Folder names look like "<id>_<label>".
        cat = fileName.split("_")
        if len(cat) > 0:
            cat = int(cat[0])
            if self.state == 1:
                self.switchDataViewCat(cat)
            elif self.state == 2:
                # self.switchMetaDataViewCat(cat)
                pass


def changeMetaData(self, cur, pre):
    """Remember the currently selected metadata row."""
    indexes = cur.indexes()
    if len(indexes) == 1:
        self.curMetaDataRow = indexes[0]


def showProductDetail(self, cur):
    """Load the selected product's image, image settings and description."""
    self.targetProduct = cur

    # Show (and cache) the product image; network errors are non-fatal.
    # (Removed the original's per-step debug prints.)
    try:
        imgUrl = self.productsModel.data(cur.siblingAtColumn(6))
        if imgUrl not in self.tmpImgs:
            response = urllib.request.urlopen(imgUrl)
            data = response.read()
            fn = "./tmp/" + str(self.tmpImgs["cur"]) + ".jpg"
            self.tmpImgs["cur"] += 1
            self.tmpImgs[imgUrl] = fn
            with open(fn, "wb") as f:  # was an unclosed open/write/close
                f.write(data)
        fn = self.tmpImgs[imgUrl]
        self.targetProductImgFn = fn
        pixMap = QtGui.QPixmap(fn)
        pixMap = pixMap.scaled(225, 150, QtCore.Qt.KeepAspectRatio)
        self.img.setPixmap(pixMap)
    except Exception:
        pass  # no/broken image URL: keep the previous pixmap

    # Image setting values (fall back to 0 when the columns are empty).
    activeImg = self.productsModel.data(cur.siblingAtColumn(7))
    briefImgScale = self.productsModel.data(cur.siblingAtColumn(8))
    activeImg = activeImg if isinstance(activeImg, int) else 0
    briefImgScale = briefImgScale if isinstance(briefImgScale, int) else 0
    self.activeImgLov[activeImg].setChecked(True)
    self.briefImgScaleLov[briefImgScale].setChecked(True)

    # Description.
    htmlDescription = self.productsModel.data(cur.siblingAtColumn(11))
    self.description.setPlainText(htmlDescription)


def onDescriptionTextChanged(self):
    """Write the edited description back into column 11."""
    if self.targetProduct is not None:
        idx = self.targetProduct.siblingAtColumn(11)
        data = self.description.toPlainText()
        self.productsModel.setData(idx, data, QtCore.Qt.EditRole)


def changeProductData(self, cur, pre):
    """Product table selection changed: show that product's detail pane."""
    indexes = cur.indexes()
    if len(indexes) == 1:
        self.showProductDetail(indexes[0])


def changeLov(self, cur, pre):
    """Category picked while a 'Danh mục' metadata row is selected:
    store the category id in the row's definition column (3)."""
    if self.curMetaDataRow is not None:
        indexes = cur.indexes()
        if len(indexes) > 0:
            fileName = self.catModel.fileName(indexes[0])
            cat = fileName.split("_")
            if len(cat) > 0:
                cat = int(cat[0])
                index = self.curMetaDataRow.siblingAtColumn(2)
                dataType = self.metaData.model().data(
                    index, QtCore.Qt.DisplayRole)
                if dataType == "Danh mục":
                    index = self.curMetaDataRow.siblingAtColumn(3)
                    self.metaData.model().setData(
                        index, str(cat), QtCore.Qt.EditRole)


def onCropFinished(self, position):
    """Crop dialog closed: persist the crop origin (stored doubled)."""
    if position is not None:
        cropXidx = self.targetProduct.siblingAtColumn(9)
        cropYidx = self.targetProduct.siblingAtColumn(10)
        self.productData.model().setData(cropXidx, position.x() * 2,
                                         QtCore.Qt.EditRole)
        self.productData.model().setData(cropYidx, position.y() * 2,
                                         QtCore.Qt.EditRole)


class ProductDataTableView(QtWidgets.QTableView):
    """Product table whose Return key walks the editable columns and
    appends a new pre-filled row after the last one."""

    # Per-view defaults written into newly appended rows ({column: value}).
    # Reassigned (not mutated) by switchDataViewCat, so the class-level
    # dict is never shared state in practice.
    defaultValues = {}

    def keyPressEvent(self, event):
        # 16777220 in the original == QtCore.Qt.Key_Return.
        if event.key() == QtCore.Qt.Key_Return:
            rowCount = self.model().rowCount()
            index = self.currentIndex()
            rowId = index.row()
            colId = index.column()
            if colId < 6:
                # Jump over the hidden columns 1-2: 0 -> 3 -> 4 -> 5 -> 6.
                try:
                    nextId = {0: 3, 3: 4, 4: 5, 5: 6}[colId]
                except KeyError:
                    nextId = colId + 1
                self.setCurrentIndex(index.siblingAtColumn(nextId))
            else:
                if rowId + 1 == rowCount:
                    # Last visible row: append a row carrying the defaults,
                    # through the source model so the proxy picks it up.
                    srcModel = self.model().sourceModel()
                    totalRowCount = srcModel.rowCount()
                    srcModel.insertRows(totalRowCount, 1)
                    for key in self.defaultValues:
                        data = self.defaultValues[key]
                        baseIndex = srcModel.index(totalRowCount, key)
                        srcModel.setData(baseIndex, data, QtCore.Qt.EditRole)
                # NOTE(review): original indentation ambiguous; moving to
                # the next row for both branches is the sensible reading.
                self.setCurrentIndex(
                    index.siblingAtRow(rowId + 1).siblingAtColumn(3))
        super(ProductDataTableView, self).keyPressEvent(event)


class SqlQueryModel(QSqlTableModel):
    """Table model that renders the integer 'type' column (2) as its label."""

    comboItems = ["", "Có/Không", "Khoảng", "Tham khảo", "Danh mục",
                  "Chữ", "Chiều"]

    def flags(self, index):
        fl = QSqlTableModel.flags(self, index)
        fl |= QtCore.Qt.ItemIsEditable
        return fl

    def data(self, index, o):
        # TODO generictized me FIXME
        idx = super().data(index, o)
        if index.column() == 2:
            if idx in range(0, len(self.comboItems)):
                return self.comboItems[idx]
        return idx


class TypeDelegate(QtWidgets.QItemDelegate):
    """Combo-box editor for the metadata 'type' column."""
    # @TODO fix bug: after switching the combo type, the description regexp
    # takes effect too late (needs clear & re-enter for the new affection).

    comboItems = ["", "Có/Không", "Khoảng", "Tham khảo", "Danh mục",
                  "Chữ", "Chiều"]

    def __init__(self, parent):
        self._parent = parent
        QtWidgets.QItemDelegate.__init__(self, parent)

    def createEditor(self, parent, option, index):
        combo = QtWidgets.QComboBox(parent)
        combo.addItems(self.comboItems)
        return combo

    def setModelData(self, combo, model, index):
        model.setData(index, combo.currentIndex())


class IdDelegate(QtWidgets.QItemDelegate):
    """Delegate that presents a plain label (no in-place editing)."""

    def __init__(self, parent):
        QtWidgets.QItemDelegate.__init__(self, parent)

    def createEditor(self, parent, option, index):
        self.label = QtWidgets.QLabel(parent)
        return self.label


class ReadOnlyDelegate(QtWidgets.QItemDelegate):
    """Like IdDelegate but also swallows writes back to the model."""

    def __init__(self, parent):
        QtWidgets.QItemDelegate.__init__(self, parent)

    def createEditor(self, parent, option, index):
        self.label = QtWidgets.QLabel(parent)
        return self.label

    def setModelData(self, label, model, index):
        pass  # read-only: never write back


class DefinitionDelegate(QtWidgets.QItemDelegate):
    """Line editor whose input validator depends on the row's type column."""

    def __init__(self, parent):
        self.model = parent.model()
        QtWidgets.QItemDelegate.__init__(self, parent)

    def createEditor(self, parent, option, index):
        self.text = super().createEditor(parent, option, index)
        cat = self.model.data(index.siblingAtColumn(2), QtCore.Qt.DisplayRole)
        if cat == "Khoảng":
            reg_ex = QtCore.QRegExp(r"#?-?[0-9]+,\s#?-?[0-9]+,\s.+")
        elif cat == "Danh mục":
            reg_ex = QtCore.QRegExp(r"[0-9]+n?")
        elif cat == "Chiều":
            reg_ex = QtCore.QRegExp(r"[0-9]+\s.+")
        else:
            reg_ex = QtCore.QRegExp(r"^")
        input_validator = QtGui.QRegExpValidator(reg_ex, self.text)
        self.text.setValidator(input_validator)
        return self.text


class CropImgRegion(QtWidgets.QGroupBox):
    """Draggable crop rectangle used inside the crop-image dialog."""

    def __init__(self, parent):
        self.dropAt = None
        # BUG FIX: the original called QtWidgets.QDialog.__init__ even
        # though this class derives from QGroupBox.
        QtWidgets.QGroupBox.__init__(self, parent)
        # Robustness: start disabled so the mouse handlers never hit an
        # undefined attribute before enable/disableDragDrop is called.
        self.allowDragDrop = False

    def enableDragDrop(self):
        self.allowDragDrop = True

    def disableDragDrop(self):
        self.allowDragDrop = False

    def mousePressEvent(self, event):
        if self.allowDragDrop:
            self.__mousePressPos = None
            self.__mouseMovePos = None
            if event.button() == QtCore.Qt.LeftButton:
                self.__mousePressPos = event.globalPos()
                self.__mouseMovePos = event.globalPos()
        super(CropImgRegion, self).mousePressEvent(event)

    def mouseMoveEvent(self, event):
        if self.allowDragDrop:
            if event.buttons() == QtCore.Qt.LeftButton:
                # Adjust offset from clicked point to origin of widget.
                currPos = self.mapToGlobal(self.pos())
                globalPos = event.globalPos()
                diff = globalPos - self.__mouseMovePos
                newPos = self.mapFromGlobal(currPos + diff)
                self.dropAt = newPos
                self.move(newPos)
                self.__mouseMovePos = globalPos
        super(CropImgRegion, self).mouseMoveEvent(event)

    def mouseReleaseEvent(self, event):
        if self.allowDragDrop:
            if self.__mousePressPos is not None:
                moved = event.globalPos() - self.__mousePressPos
                # NOTE(review): the available source is truncated at this
                # point; the completion below is the canonical Qt
                # drag-vs-click pattern this snippet is based on — confirm
                # against the original file.
                if moved.manhattanLength() > 3:
                    event.ignore()
                    return
        super(CropImgRegion, self).mouseReleaseEvent(event)
at .errdump ')
        print('Also the .lastdf contains .errdump, for inspecting ')
        self.print_eq_values(errvar,self.errdump,per=[self.periode])
        # If an iteration trace was being collected, finish it into .dumpdf so it
        # can also be inspected after the failure.
        if hasattr(self,'dumplist'):
            self.dumpdf= pd.DataFrame(self.dumplist)
            del self.dumplist
            self.dumpdf.columns= ['fair','per','iteration']+self.dump
        pass

    def newton1per(self, databank, start='', slut='', silent=1,samedata=0,alfa=1.0,stats=False,first_test=1,
                   antal=20,conv=[],absconv=0.01,relconv=0.00001,
                   nonlin=False ,timeit = False,reset=1,
                   dumpvar=[],ldumpvar=False,dumpwith=15,dumpdecimal=5,chunk=None,ljit=False,
                   fairopt={'fairantal':1},**kwargs):
        '''Solve this model period by period with a Newton iteration, from start to slut
        (slut means end in Danish).

        For each period the generated prolog/core/epilog functions
        (``pronew2d``/``solvenew2d``/``epinew2d``) are evaluated.  The core is iterated at
        most ``antal`` times; in every iteration the residual ``distance`` between two
        successive evaluations of the endogenous variables is fed to the cached
        per-period Newton solver (``self.newton_1per_solver``) and the correction is
        subtracted in place.  The solving functions are created by ``exec``-uting
        source text produced by ``self.outsolve2dcunk`` (optionally jit-compiled when
        ``ljit``) and cached on the instance, keyed on whether the databank's columns
        changed since the last call.

        Args:
            databank: DataFrame with the data; missing model variables are inserted
                (filled with 0.0) when its columns differ from the cached layout.
            start, slut: first and last period to solve, resolved by ``self.smpl``.
            silent: suppress progress printing when truthy.
            alfa: damping factor passed through to the generated solve functions.
            antal: maximum number of Newton iterations per period.
            conv: variable name(s) defining ``convplace`` (defaults to all endogenous).
            nonlin: if non-zero, recompute the Newton solver every ``nonlin``-th iteration.
            timeit: print timing for the solver-update step (via ``ttimer``).
            reset: force recreation of the cached Newton solver.
            dumpvar, ldumpvar: when ``ldumpvar`` is true, record a per-iteration trace of
                the selected variables; the trace ends up in ``self.dumpdf``.
            chunk: chunk size forwarded to the code generator.
            ljit: select the jit-compiled variant of the generated functions.
            stats: print timing and floating-point operation statistics at the end.
            fairopt, **kwargs: ``fairantal`` sets the number of outer Fair-Taylor passes.

        Returns:
            pd.DataFrame: a new DataFrame with the solved values (same index and
            columns as ``databank``).

        Note:
            ``samedata``, ``first_test``, ``absconv``, ``relconv``, ``dumpwith`` and
            ``dumpdecimal`` are currently unused in the live code path — the
            relative-convergence test that used them is commented out below.
        '''
#        print('new nwwton')
        starttimesetup=time.time()
        # Outer Fair-Taylor pass count; kwargs overrides the fairopt default.
        fairantal = {**fairopt,**kwargs}.get('fairantal',1)
        sol_periode = self.smpl(start,slut,databank)
        # Guard: all lagged values must exist in the databank before the first
        # solve period, otherwise bail out hard.
        if self.maxlag and not (self.current_per[0]+self.maxlag) in databank.index :
            print('***** Warning: You are solving the model before all lags are avaiable')
            print('Maxlag:',self.maxlag,'First solveperiod:',self.current_per[0],'First dataframe index',databank.index[0])
            sys.exit()
        if not silent : print ('Will start calculating: ' + self.name)
#        if not samedata or not hasattr(self,'new2d') :
#            if (not hasattr(self,'solvenew2d')) or (not self.eqcolumns(self.genrcolumns,databank.columns)):
#                databank=insertModelVar(databank,self)   # fill all Missing value with 0.0
#                for i in [j for j in self.allvar.keys() if self.allvar[j]['matrix']]:
#                    databank.loc[:,i]=databank.loc[:,i].astype('O')   #  Make sure columns with matrixes are of this type
#
#                self.make_new_text2d =  self.outsolve2dcunk(databank,chunk=chunk,
#                      ljit=ljit, debug=kwargs.get('debug',1),type='res')
#                exec(self.make_new_text2d,globals())  # creates the los function
#                self.pronew2d,self.solvenew2d,self.epinew2d  = make_los(self.funks,self.errfunk)
        # Regenerate bookkeeping only when the databank's column layout differs
        # from the one the cached solver functions were generated for.
        if not self.eqcolumns(self.genrcolumns,databank.columns):
            databank=insertModelVar(databank,self)   # fill all Missing value with 0.0
            for i in [j for j in self.allvar.keys() if self.allvar[j]['matrix']]:
                databank.loc[:,i]=databank.loc[:,i].astype('O')   #  Make sure columns with matrixes are of this type
            newdata = True
        else:
            newdata = False

        # Build (or reuse) the generated prolog/core/epilog functions.  The source
        # text is exec'ed into globals() and the resulting triple is cached per
        # jit/nojit flavour on the instance.  NOTE(review): exec of generated code is
        # project-internal by design here, not untrusted input.
        if ljit:
            if newdata or not hasattr(self,'pronew2d_jit'):
                if not silent: print(f'Create compiled solving function for {self.name}')
                self.make_newlos_text2d_jit = self.outsolve2dcunk(databank,chunk=chunk,ljit=ljit, debug=kwargs.get('debug',1),type='res')
                exec(self.make_newlos_text2d_jit,globals())  # creates the los function
                self.pronew2d_jit,self.solvenew2d_jit,self.epinew2d_jit = make_los(self.funks,self.errfunk)
            self.pronew2d,self.solvenew2d,self.epinew2d = self.pronew2d_jit,self.solvenew2d_jit,self.epinew2d_jit
        else:
            if newdata or not hasattr(self,'pronew2d_nojit'):
                if not silent: print(f'Create solving function for {self.name}')
                self.make_newlos_text2d_nojit = self.outsolve2dcunk(databank,chunk=chunk,ljit=ljit, debug=kwargs.get('debug',1),type='res')
                exec(self.make_newlos_text2d_nojit,globals())  # creates the los function
                self.pronew2d_nojit,self.solvenew2d_nojit,self.epinew2d_nojit = make_los(self.funks,self.errfunk)
            self.pronew2d,self.solvenew2d,self.epinew2d = self.pronew2d_nojit,self.solvenew2d_nojit,self.epinew2d_nojit

        values = databank.values.copy()          # working array updated in place
        outvalues = np.empty_like(values)        # scratch output for solvenew2d

        # Lazily create the numeric differentiator for the per-period Jacobian.
        # NOTE(review): the guard tests hasattr(self,'newton_diff') but the attribute
        # assigned is 'newton_1per_diff', so this branch likely runs on every call —
        # confirm whether the cache was intended to hit.
        if not hasattr(self,'newton_diff'):
            endovar = self.coreorder if self.use_preorder else self.solveorder
            self.newton_1per_diff = newton_diff(self,forcenum=1,df=databank,
                                                endovar = endovar, ljit=ljit,nchunk=chunk,onlyendocur=True )
        if not hasattr(self,'newton_1per_solver') or reset:
            # breakpoint()
            # Factorized solver built from the Jacobian at the first solve period.
            self.newton_1per_solver = self.newton_1per_diff.get_solve1per(df=databank,periode=[self.current_per[0]])[self.current_per[0]]

        # Column positions of the endogenous variables handled by the Newton step.
        newton_col = [databank.columns.get_loc(c) for c in self.newton_1per_diff.endovar]

        # Remember the layout the generated functions were built for (see eqcolumns above).
        self.genrcolumns = databank.columns.copy()
        self.genrindex   = databank.index.copy()

        convvar = [conv.upper()] if isinstance(conv,str) else [c.upper() for c in conv] if conv != [] else list(self.endogene)
        convplace=[databank.columns.get_loc(c) for c in convvar] # this is how convergence is measured
        # NOTE(review): convergence stays True forever — the relative-convergence test
        # below is commented out, so the 'not converged' message can never be shown.
        convergence = True

        if ldumpvar:
            self.dumplist = []
            self.dump = convvar if dumpvar == [] else [v for v in self.vlist(dumpvar) if v in self.endogene]
            dumpplac = [databank.columns.get_loc(v) for v in self.dump]

        ittotal = 0
        endtimesetup=time.time()

        starttime=time.time()
        for fairiteration in range(fairantal):
            if fairantal >=2:
                print(f'Fair-Taylor iteration: {fairiteration}')
            for self.periode in sol_periode:
                row=databank.index.get_loc(self.periode)

                if ldumpvar:
                    # Iteration 0 records the values before any Newton step.
                    self.dumplist.append([fairiteration,self.periode,int(0)]+[values[row,p]
                        for p in dumpplac])
                itbefore = [values[row,c] for c in convplace]
                self.pronew2d(values, values, row , alfa )
                for iteration in range(antal):
                    with ttimer(f'sim per:{self.periode} it:{iteration}',0) as xxtt:
                        before = values[row,newton_col]
                        self.solvenew2d(values, outvalues, row , alfa )
                        now = outvalues[row,newton_col]
                        # Newton residual: change of the endogenous variables over
                        # one evaluation of the model core.
                        distance = now-before
                        newton_conv =np.abs(distance).sum()
                        if not silent :
                            print(f'Iteration {iteration} sum of distances {newton_conv}')
                        if newton_conv <= 0.000001 :
                            break
                        # breakpoint()
                        # Optionally refresh the Jacobian/solver every nonlin-th
                        # iteration to follow nonlinearities.
                        if iteration != 0 and nonlin and not (iteration % nonlin):
                            with ttimer('Updating solver',timeit) as t3:
                                if not silent :print(f'Updating solver, iteration {iteration}')
                                df_now = pd.DataFrame(values,index=databank.index,columns=databank.columns)
                                self.newton_1per_solver = self.newton_1per_diff.get_solve1per(df=df_now,periode=[self.periode])[self.periode]
                        with ttimer('Update solution',0):
                            # update = self.solveinv(distance)
                            update = self.newton_1per_solver(distance)
                        # Newton step: x <- x - J^-1 * residual (update solved above).
                        values[row,newton_col] = before - update
                    ittotal += 1
                    if ldumpvar:
                        self.dumplist.append([fairiteration,self.periode, int(iteration+1)]+[values[row,p]
                            for p in dumpplac])
#                    if iteration > first_test:
#                        itafter=[values[row,c] for c in convplace]
#                        convergence = True
#                        for after,before in zip(itafter,itbefore):
##                            print(before,after)
#                            if before > absconv and abs(after-before)/abs(before) > relconv:
#                                convergence = False
#                                break
#                        if convergence:
#                            break
#                        else:
#                            itbefore=itafter
                self.epinew2d(values, values, row , alfa )

                if not silent:
                    if not convergence :
                        print(f'{self.periode} not converged in {iteration} iterations')
                    else:
                        print(f'{self.periode} Solved in {iteration} iterations')

        if ldumpvar:
            # Materialize the iteration trace; drop the 'fair' column when there
            # was effectively no Fair-Taylor outer loop.
            self.dumpdf= pd.DataFrame(self.dumplist)
            del self.dumplist
            self.dumpdf.columns= ['fair','per','iteration']+self.dump
            if fairantal<=2 : self.dumpdf.drop('fair',axis=1,inplace=True)

        outdf = pd.DataFrame(values,index=databank.index,columns=databank.columns)

        if stats:
            numberfloats = self.calculate_freq[-1][1]*ittotal
            endtime = time.time()
            self.simtime = endtime-starttime
            self.setuptime = endtimesetup - starttimesetup
            print(f'Setup time (seconds) :{self.setuptime:>15,.2f}')
            # NOTE(review): 'Foating' is a typo in the printed label (left unchanged here).
            print(f'Foating point operations :{self.calculate_freq[-1][1]:>15,}')
            print(f'Total iterations :{ittotal:>15,}')
            print(f'Total floating point operations :{numberfloats:>15,}')
            print(f'Simulation time (seconds) :{self.simtime:>15,.2f}')
            if self.simtime > 0.0:
                print(f'Floating point operations per second : {numberfloats/self.simtime:>15,.1f}')

        if not silent : print (self.name + ' solved ')
        return outdf

    def newtonstack(self, databank, start='', slut='', silent=1,samedata=0,alfa=1.0,stats=False,first_test=1,
                    antal=20,conv=[],absconv=0.01,relconv=0.00001,
                    dumpvar=[],ldumpvar=False,dumpwith=15,dumpdecimal=5,chunk=None,nchunk=None,ljit=False,nljit=0,
                    fairopt={'fairantal':1},debug=False,timeit=False,nonlin=False,nonlinfirst=0,
                    newtonalfa = 1.0 , newtonnodamp=0,forcenum=True,reset = False,
                    **kwargs):
        '''Evaluates this model on a databank from start to slut (means end in Danish).

        First it finds the values in the Dataframe, then creates the evaluater function through the *outeval* function
        (:func:`modelclass.model.fouteval`)
        then it evaluates the function and returns the values to a the Dataframe in the databank.

        The text for the evaluater function is placed in the model property **make_los_text**
        where it can be inspected in case of problems.
''' # print('new nwwton') ittotal = 0 diffcount = 0 starttimesetup=time.time() fairantal = {**fairopt,**kwargs}.get('fairantal',1) sol_periode = self.smpl(start,slut,databank) if self.maxlag and not (self.current_per[0]+self.maxlag) in databank.index : print('***** Warning: You are solving the model before all lags are avaiable') print('Maxlag:',self.maxlag,'First solveperiod:',self.current_per[0],'First dataframe index',databank.index[0]) sys.exit() if not silent : print ('Will start calculating: ' + self.name) # if not samedata or not hasattr(self,'solve2d') : # if (not hasattr(self,'solvestack2d')) or (not self.eqcolumns(self.genrcolumns,databank.columns)): # databank=insertModelVar(databank,self) # fill all Missing value with 0.0 # for i in [j for j in self.allvar.keys() if self.allvar[j]['matrix']]: # databank.loc[:,i]=databank.loc[:,i].astype('O') # Make sure columns with matrixes are of this type # # self.make_losstack_text2d = self.outsolve2dcunk(databank,chunk=chunk, # ljit=ljit, debug=debug,type='res') # exec(self.make_losstack_text2d,globals()) # creates the los function # self.prostack2d,self.solvestack2d,self.epistack2d = make_los(self.funks,self.errfunk) if not self.eqcolumns(self.genrcolumns,databank.columns): databank=insertModelVar(databank,self) # fill all Missing value with 0.0 for i in [j for j in self.allvar.keys() if self.allvar[j]['matrix']]: databank.loc[:,i]=databank.loc[:,i].astype('O') # Make sure columns with matrixes are of this type newdata = True else: newdata = False if ljit: if newdata or not hasattr(self,'pronew2d_jit'): if not silent: print(f'Create compiled solving function for {self.name}') self.make_newlos_text2d_jit = self.outsolve2dcunk(databank,chunk=chunk,ljit=ljit, debug=kwargs.get('debug',1),type='res') exec(self.make_newlos_text2d_jit,globals()) # creates the los function self.pronew2d_jit,self.solvenew2d_jit,self.epinew2d_jit = make_los(self.funks,self.errfunk) self.pronew2d,self.solvenew2d,self.epinew2d = 
self.pronew2d_jit,self.solvenew2d_jit,self.epinew2d_jit else: if newdata or not hasattr(self,'pronew2d_nojit'): if not silent: print(f'Create solving function for {self.name}') self.make_newlos_text2d_nojit = self.outsolve2dcunk(databank,chunk=chunk,ljit=ljit, debug=kwargs.get('debug',1),type='res') exec(self.make_newlos_text2d_nojit,globals()) # creates the los function self.pronew2d_nojit,self.solvenew2d_nojit,self.epinew2d_nojit = make_los(self.funks,self.errfunk) self.pronew2d,self.solvenew2d,self.epinew2d = self.pronew2d_nojit,self.solvenew2d_nojit,self.epinew2d_nojit values = databank.values.copy() outvalues = np.empty_like(values)# if not hasattr(self,'newton_diff_stack'): self.newton_diff_stack = newton_diff(self,forcenum=1,df=databank,ljit=nljit,nchunk=nchunk) if not hasattr(self,'stacksolver'): self.getsolver = self.newton_diff_stack.get_solvestacked diffcount += 1 self.stacksolver = self.getsolver(databank) print(f'Creating new derivatives and new solver') self.old_stack_periode = sol_periode.copy() elif reset or not all(self.old_stack_periode[[0,-1]] == sol_periode[[0,-1]]) : print(f'Creating new solver') diffcount += 1 self.stacksolver = self.getsolver(databank) self.old_stack_periode = sol_periode.copy() newton_col = [databank.columns.get_loc(c) for c in self.newton_diff_stack.endovar] self.newton_diff_stack.timeit = timeit self.genrcolumns = databank.columns.copy() self.genrindex = databank.index.copy() convvar = [conv.upper()] if isinstance(conv,str) else [c.upper() for c in conv] if conv != [] else list(self.endogene) convplace=[databank.columns.get_loc(c) for c in convvar] # this is how convergence is measured convergence = False if ldumpvar: self.dumplist = [] self.dump = convvar if dumpvar == [] else [v for v in self.vlist(dumpvar) if v in self.endogene] dumpplac = [databank.columns.get_loc(v) for v in self.dump] ittotal = 0 endtimesetup=time.time() starttime=time.time() self.stackrows=[databank.index.get_loc(p) for p in sol_periode] 
self.stackrowindex = np.array([[r]*len(newton_col) for r in self.stackrows]).flatten() self.stackcolindex = np.array([newton_col for r in self.stackrows]).flatten() # breakpoint() # if ldumpvar: # self.dumplist.append([fairiteration,self.periode,int(0)]+[values[row,p] # for p in dumpplac]) # itbefore = values[self.stackrows,convplace] # self.pro2d(values, values, row , alfa ) for iteration in range(antal): with ttimer(f'\nNewton it:{iteration}',timeit) as xxtt: before = values[self.stackrowindex,self.stackcolindex] with