# cotengra/path_greedy.py
import math
import heapq
import random
import functools
import itertools
import collections
from opt_einsum.paths import (
ssa_greedy_optimize,
register_path_fn,
ssa_to_linear,
)
from opt_einsum.path_random import thermal_chooser
from .core import (
jitter_dict,
ContractionTree,
get_hypergraph,
)
from .utils import oset
from .hyper import register_hyper_function
# ------------------------------ GREEDY HYPER ------------------------------- #
def cost_memory_removed_mod(
    size12, size1, size2, k12, k1, k2,
    costmod=1, usesizes=True,
):
    """The default heuristic cost, corresponding to the total reduction in
    memory of performing a contraction, with a tunable weighting ``costmod``
    on the removed input memory.

    Parameters
    ----------
    size12, size1, size2 : int
        Sizes of the output and the two input tensors.
    k12, k1, k2 : sequence
        Index sets of the output and the two input tensors.
    costmod : float, optional
        Weighting applied to the input contribution.
    usesizes : bool, optional
        If True score by tensor sizes, else by tensor ranks.
    """
    if not usesizes:
        # rank based score: output rank vs the larger input rank
        return len(k12) - costmod * max(len(k1), len(k2))
    # size based score: output size minus weighted sum of input sizes
    return size12 - costmod * (size1 + size2)
def trial_greedy(inputs, output, size_dict,
                 random_strength=0.1,
                 temperature=1.0,
                 rel_temperature=True,
                 costmod=1,
                 usesizes=True):
    """Run a single randomized greedy optimization and return the resulting
    contraction tree. Randomness comes from jittering the sizes and from
    Boltzmann sampling of candidate contractions.
    """
    # jitter the index sizes so repeated trials explore different paths
    noisy_sizes = jitter_dict(size_dict, random_strength)
    scorer = functools.partial(
        cost_memory_removed_mod, costmod=costmod, usesizes=usesizes)
    chooser = functools.partial(
        thermal_chooser, temperature=temperature,
        rel_temperature=rel_temperature)
    ssa_path = ssa_greedy_optimize(
        inputs, output, noisy_sizes, choose_fn=chooser, cost_fn=scorer)
    # note: the tree is built with the *unjittered* sizes
    return ContractionTree.from_path(
        inputs, output, size_dict, ssa_path=ssa_path)
# Register the randomized greedy trial with the hyper-optimizer, along with
# the hyper-parameter search space it should be sampled over.
register_hyper_function(
    name='greedy',
    ssa_func=trial_greedy,
    space={
        'random_strength': {'type': 'FLOAT_EXP', 'min': 0.01, 'max': 10.},
        'temperature': {'type': 'FLOAT_EXP', 'min': 0.01, 'max': 10.},
        'rel_temperature': {'type': 'BOOL'},
        'costmod': {'type': 'FLOAT', 'min': 0.0, 'max': 2.0},
        'usesizes': {'type': 'BOOL'},
    },
)
# --------------------------------------------------------------------------- #
def greconf_rf(inputs, output, size_dict, memory_limit=None):
    """Greedy-reconf path -- find a single greedy path then perform a round of
    cheap subtree reconfigurations to optimize it.
    """
    greedy_ssa = ssa_greedy_optimize(inputs, output, size_dict)
    tree = ContractionTree.from_path(
        inputs, output, size_dict, ssa_path=greedy_ssa)
    # cheap local optimization, targeting total flops
    tree.subtree_reconfigure_(subtree_size=6, minimize='flops')
    return tree.path()


register_path_fn('greedy-rf', greconf_rf)
def greconf_rw(inputs, output, size_dict, memory_limit=None):
    """Greedy-reconf path -- find a single greedy path then perform a round of
    cheap subtree reconfigurations to optimize it.
    """
    greedy_ssa = ssa_greedy_optimize(inputs, output, size_dict)
    tree = ContractionTree.from_path(
        inputs, output, size_dict, ssa_path=greedy_ssa)
    # cheap local optimization, targeting total write (memory) cost
    tree.subtree_reconfigure_(subtree_size=6, minimize='write')
    return tree.path()


register_path_fn('greedy-rw', greconf_rw)
def greconf_rc(inputs, output, size_dict, memory_limit=None):
    """Greedy-reconf path -- find a single greedy path then perform a round of
    cheap subtree reconfigurations to optimize it.
    """
    greedy_ssa = ssa_greedy_optimize(inputs, output, size_dict)
    tree = ContractionTree.from_path(
        inputs, output, size_dict, ssa_path=greedy_ssa)
    # cheap local optimization, targeting a combination of flops and write
    tree.subtree_reconfigure_(subtree_size=6, minimize='combo')
    return tree.path()


register_path_fn('greedy-rc', greconf_rc)
# --------------------------------------------------------------------------- #
def _binary_combine(func, x, y):
if func == 'sum':
return x + y
if func == 'mean':
return (x + y) / 2
if func == 'max':
return max(x, y)
if func == 'min':
return min(x, y)
if func == 'diff':
return abs(x - y)
def gumbel():
    """Sample a standard Gumbel variate, used to add 'thermal' noise to
    contraction scores (the Gumbel-max trick for Boltzmann-like sampling).
    """
    return -math.log(-math.log(random.uniform(0.0, 1.0)))


# JIT compile the sampler with numba when it is available; a plain-Python
# fallback is kept otherwise, so numba is an optional dependency.
try:
    import numba as nb
    gumbel = nb.njit(gumbel)
except ImportError:
    pass
class GreedyCompressed:
    """A greedy contraction path finder that takes into account the effect of
    compression, and can also make use of subgraph size and centrality.

    Parameters
    ----------
    chi : int
        The maximum bond size between nodes to compress to.
    coeff_size_compressed : float, optional
        When assessing contractions, how to weight the size of the output
        tensor, post compression.
    coeff_size : float, optional
        When assessing contractions, how to weight the size of the output
        tensor, pre compression.
    coeff_size_inputs : float, optional
        When assessing contractions, how to weight the maximum size of the
        inputs tensors.
    score_size_inputs : {'sum', 'mean', 'max', 'min', 'diff'}, optional
        When assessing contractions, how to score the combination of the two
        input tensor sizes.
    coeff_subgraph : float, optional
        When assessing contractions, how to weight the total subgraph size
        corresponding to the inputs tensors.
    score_subgraph : {'sum', 'mean', 'max', 'min', 'diff'}, optional
        When assessing contractions, how to score the combination of the two
        input subgraph sizes.
    coeff_centrality : float, optional
        When assessing contractions, how to weight the combined centrality
        of the inputs tensors.
    centrality_combine : {'sum', 'mean', 'max', 'min'}, optional
        When performing the contraction, how to combine the two input tensor
        centralities to produce a new one.
    score_centrality : {'sum', 'mean', 'max', 'min', 'diff'}, optional
        When assessing contractions, how to score the combination of the two
        input tensor centralities.
    temperature : float, optional
        A noise level to apply to the scores when choosing nodes to expand to.
    score_perm : str, optional
        If non-empty, a permutation of the single-letter score keys
        ('R', 'O', 'I', 'S', 'L', 'T') -- scores are then compared
        lexicographically as tuples in that order instead of being summed.
    """

    def __init__(
        self,
        chi,
        coeff_size_compressed=1.0,
        coeff_size=0.0,
        coeff_size_inputs=0.0,
        score_size_inputs='max',
        coeff_subgraph=0.0,
        score_subgraph='sum',
        coeff_centrality=0.0,
        centrality_combine='max',
        score_centrality='diff',
        temperature=0.0,
        score_perm='',
    ):
        self.chi = chi
        self.coeff_size_compressed = coeff_size_compressed
        self.coeff_size = coeff_size
        self.coeff_size_inputs = coeff_size_inputs
        self.score_size_inputs = score_size_inputs
        self.coeff_subgraph = coeff_subgraph
        self.score_subgraph = score_subgraph
        self.coeff_centrality = coeff_centrality
        self.centrality_combine = centrality_combine
        self.score_centrality = score_centrality
        self.temperature = temperature
        self.score_perm = score_perm

    def _score(self, i1, i2):
        """Score the candidate contraction of nodes ``i1`` and ``i2`` --
        *lower* is better, since candidates are popped from a min-heap.
        """
        # the two inputs tensors (with prior compressions)
        size1 = self.hg.node_size(i1)
        size2 = self.hg.node_size(i2)
        # the new tensor inds, plus indices that will be available to compress
        old_size = self.hg.candidate_contraction_size(i1, i2)
        new_size = self.hg.candidate_contraction_size(i1, i2, chi=self.chi)
        scores = {
            # weight the output size post- and pre-compression
            'R': self.coeff_size_compressed * math.log2(new_size),
            'O': self.coeff_size * math.log2(old_size),
            # weight some combination of the inputs sizes
            'I': self.coeff_size_inputs * _binary_combine(
                self.score_size_inputs, math.log2(size1), math.log2(size2)
            ),
            # weight some combination of the inputs subgraph sizes
            'S': self.coeff_subgraph * _binary_combine(
                self.score_subgraph,
                math.log(self.sgsizes[i1]),
                math.log(self.sgsizes[i2]),
            ),
            # weight some combination of the inputs centralities
            'L': self.coeff_centrality * _binary_combine(
                self.score_centrality, self.sgcents[i1], self.sgcents[i2]
            ),
            # randomize using boltzmann sampling trick
            'T': max(0.0, self.temperature) * gumbel(),
        }
        if self.score_perm == '':
            # combine as a single weighted sum
            return sum(scores.values())
        # compare lexicographically in the requested component order
        return tuple(scores[p] for p in self.score_perm)

    def ssa_path(self, inputs, output, size_dict):
        """Generate a contraction path, in single static assignment (SSA)
        form, greedily choosing the best scoring contraction at each step and
        compressing the new node's bonds to ``chi`` after each contraction.
        """
        self.candidates = []
        self.ssapath = []
        self.hg = get_hypergraph(inputs, output, size_dict, accel='auto')
        # compute hypergraph centralities to use heuristically
        self.sgcents = self.hg.simple_centrality()
        # each leaf starts as a subgraph of a single tensor
        self.sgsizes = {i: 1 for i in range(len(inputs))}
        # populate initial scores with contractions among leaves
        for _, edge_nodes in self.hg.edges.items():
            for nodes in itertools.combinations(edge_nodes, 2):
                candidate = (self._score(*nodes), *nodes)
                heapq.heappush(self.candidates, candidate)
        while self.hg.get_num_nodes() > 2:
            if not self.candidates:
                # this occurs with disconnected sub-graphs -> pick any two
                i1, i2, *_ = self.hg.nodes
            else:
                # get the next best score contraction
                _, i1, i2 = heapq.heappop(self.candidates)
                if not (self.hg.has_node(i1) and self.hg.has_node(i2)):
                    # invalid - either node already contracted
                    continue
            # perform contraction
            i12 = self.hg.contract(i1, i2)
            # compress all the new node's bonds down to chi
            self.hg.compress(chi=self.chi, edges=self.hg.get_node(i12))
            # build the path
            self.ssapath.append((i1, i2))
            # propagate some meta information up the contraction tree
            self.sgsizes[i12] = self.sgsizes.pop(i1) + self.sgsizes.pop(i2)
            self.sgcents[i12] = _binary_combine(self.centrality_combine,
                                                self.sgcents.pop(i1),
                                                self.sgcents.pop(i2))
            # assess / re-assess new and also neighboring contractions
            # n.b. duplicate scores should be lower and heap-popped first
            for e in self.hg.neighbor_edges(i12):
                for nodes in itertools.combinations(self.hg.get_edge(e), 2):
                    candidate = (self._score(*nodes), *nodes)
                    heapq.heappush(self.candidates, candidate)
        # contract the final two remaining nodes
        self.ssapath.append(tuple(self.hg.nodes))
        return self.ssapath

    def __call__(self, inputs, output, size_dict, memory_limit=None):
        """Return the contraction path in linear (``opt_einsum``) format."""
        return ssa_to_linear(self.ssa_path(inputs, output, size_dict))
def greedy_compressed(inputs, output, size_dict, memory_limit=None, **kwargs):
    """Find a compressed-greedy contraction path, with the compression bond
    dimension defaulting to the square of the largest index size.
    """
    default_chi = max(size_dict.values()) ** 2
    finder = GreedyCompressed(default_chi, **kwargs)
    return finder(inputs, output, size_dict)
def trial_greedy_compressed(inputs, output, size_dict, **kwargs):
    """Run a single compressed-greedy trial and return the contraction tree,
    with its surface (compression) ordering set from the found path.
    """
    optimizer = GreedyCompressed(**kwargs)
    path = optimizer.ssa_path(inputs, output, size_dict)
    tree = ContractionTree.from_path(inputs, output, size_dict, ssa_path=path)
    tree.set_surface_order_from_path(path)
    return tree
# Register the compressed-greedy trial with the hyper-optimizer, along with
# the search space for its scoring coefficients and compression chi.
register_hyper_function(
    name='greedy-compressed',
    ssa_func=trial_greedy_compressed,
    space={
        'coeff_size_compressed': {'type': 'FLOAT', 'min': 0.5, 'max': 2.0},
        'coeff_size': {'type': 'FLOAT', 'min': 0.0, 'max': 1.0},
        'coeff_size_inputs': {'type': 'FLOAT', 'min': -1.0, 'max': 1.0},
        'score_size_inputs': {
            'type': 'STRING',
            'options': ['min', 'max', 'mean', 'sum', 'diff']},
        'coeff_subgraph': {'type': 'FLOAT', 'min': -1.0, 'max': 1.0},
        'score_subgraph': {
            'type': 'STRING',
            'options': ['min', 'max', 'mean', 'sum', 'diff']},
        'coeff_centrality': {'type': 'FLOAT', 'min': -10.0, 'max': 10.0},
        'centrality_combine': {
            'type': 'STRING',
            'options': ['min', 'max', 'mean']},
        'score_centrality': {
            'type': 'STRING',
            'options': ['min', 'max', 'mean', 'diff']},
        'temperature': {'type': 'FLOAT', 'min': -0.1, 'max': 1.0},
        'chi': {'type': 'INT', 'min': 2, 'max': 128},
    },
)
# --------------------------------------------------------------------------- #
class GreedySpan:
"""A contraction path optimizer that greedily generates spanning trees out
of particular nodes, suitable for e.g. compressed contraction paths.
Parameters
----------
start : {'max', 'min'}, optional
Whether to start the span from the maximum or minimum centrality point.
coeff_connectivity : float, optional
When considering adding nodes to the span, how to weight the nodes
connectivity to the current span.
coeff_ndim : float, optional
When considering adding nodes to the span, how to weight the nodes
total rank.
coeff_distance : float, optional
When considering adding nodes to the span, how to weight the nodes
distance to the starting point.
coeff_next_centrality : float, optional
When considering adding nodes to the span, how to weight the nodes
centrality.
temperature : float, optional
A noise level to apply to the scores when choosing nodes to expand to.
"""
def __init__(
self,
start='max',
coeff_connectivity=1.0,
coeff_ndim=1.0,
coeff_distance=-1.0,
coeff_next_centrality=0.0,
connectivity_weight_bonds=True,
temperature=0.0,
score_perm='CNDLTI',
distance_p=1,
distance_steal='abs',
):
self.start = start
self.coeff_connectivity = coeff_connectivity
self.coeff_ndim = coeff_ndim
self.coeff_distance = coeff_distance
self.coeff_next_centrality = coeff_next_centrality
self.connectivity_weight_bonds = connectivity_weight_bonds
self.temperature = temperature
self.score_perm = score_perm
self.distance_p = distance_p
self.distance_steal = distance_steal
def ssa_path(self, inputs, output, size_dict):
self.hg = get_hypergraph(inputs, output, size_dict, accel='auto')
self.cents = self.hg.simple_centrality()
def region_choose_sorter(node):
return self.cents[node] + 1e-2 * random.random()
if output:
region = oset(self.hg.output_nodes())
elif self.start == 'max':
region = oset([max(self.cents.keys(), key=self.cents.__getitem__)])
elif self.start == 'min':
region = oset([min(self.cents.keys(), key=self.cents.__getitem__)])
else:
region = oset(self.start)
candidates = []
merges | |
# experiments/2_incident_points_6_ambo/amboworld/environment.py
import gym
from gym import spaces
import matplotlib.pyplot as plt
import numpy as np
import random
import simpy
from amboworld.ambulance import Ambulance
from amboworld.patient import Patient
from amboworld.utils import get_distance
class Env(gym.Env):
"""Custom Environment that follows gym interface.
A discrete event simulation environment using SimPy
Attributes
----------
action_number (int):
Number of possible actions
action_space (gym space - discrete):
Gym space listing discrete choice of dispatch points
ambo_speed (float):
Distance (km) covered in minute
ambos_assigned_to_dispatch_points (NumPy array):
Number of ambulances assigned to each dispatch points
ambulances (list, ambulance objects):
List of ambulance objects
calls (int):
Number of calls (demand)
completed_incidents (list of patients):
Completed patient objects
demand_met(int):
Number of patients ambulances respond to
dispatch_points (list of tuples):
List of x,y co-ordinate tuples of dispatch points
free_ambulances (list):
List of free ambulances
hospitals (list of tuples):
List of x,y co-ordinate tuples of hospitals points
incident_frequency (float):
Mean time between incidents
incident_points (list):
List of x,y co-ordinate tuples of centres of incident points
incident_range (int):
Limits of incidents around incident centre (plus/minus)
max_size (int):
Max x,y km co-ordinates
number_ambulances (int):
Number of ambulances in model
number_dispatch_points (int):
Number of dispatch points
number_epochs (int):
Number of epochs per day. Each epoch has a different set of incident
locations. Epochs are equally spaced throughout day.
number_hospitals (int):
Number of hospitals
number_incident_points (int):
Number of centre points where incidents may occur. Random range added
observation_size (int):
Number of observations returned
observation_space (gym space - box):
Gym space providing n-dimensional array of observation space.
Provides number of ambulances assigned to each dispatch point
patients_waiting_for_assignment (list)
List of patients waiting for ambulance assignment
patients_assignment_to_ambo_arrival (list)
List of patients waiting for ambulance arrival after assignment
patients_in_transit (list)
List of patients in transit to hospital
random_seed (int):
Random seed used to set hospital and dispatch point locations
render_env (bool):
Whether to render amboworld
render_grid_size (int):
Size (characters per line) of grid used to render environment
render_interval (int):
Time between rendering environment
reward_response_times (list):
List of ambulance response times in time between returning reward to
agent
results_assignment_to_ambo_arrival (list):
List of all times from ambo assignment to arrival with patient
results_call_to_ambo_arrival (list):
List of all times from call to ambo arrival with patient
sim_time_step (float):
Simulation time step (minutes)
unallocated_ambo (list):
List of ambulances waiting allocations (occurs each sim step)
Internal methods
----------------
__init__:
Constructor method
_calculate_reward:
Calculate reward
_get_observations:
Get current state observations
_set_dispatch_points:
Create list of x,y tuple dispatch points (using random uniform
distribution)
_set_hospital_locations:
Create list of x,y tuple hospitals (using random uniform distribution)
External facing methods
-----------------------
close
reset
step
render
"""
    def __init__(self, max_size=50,
                 number_ambulances=8,
                 number_dispatch_points=25,
                 number_epochs=2,
                 number_incident_points=4,
                 incident_range=0.0,
                 number_hospitals=1,
                 duration_incidents=1e5,
                 ambo_kph=60.0,
                 random_seed=42,
                 incident_interval=20,
                 time_step=1,
                 render_env=False,
                 print_output=False,
                 render_grid_size=25,
                 render_interval=10,
                 ambo_free_from_hospital=True):
        """Constructor class for amboworld.

        Sets model parameters, builds the gym action/observation spaces, and
        places the dispatch points, incident points and hospitals. Dynamic
        simulation state (patients, ambulances, SimPy env) is not created
        here -- presumably it is set up in ``reset`` (not visible in this
        chunk); verify before relying on attribute availability.
        """
        # Inherit from super class
        super(Env, self).__init__()
        # Set attributes
        self.action_number = int(number_dispatch_points)
        self.ambo_free_from_hospital = ambo_free_from_hospital
        # convert speed from km/h to km per simulated minute
        self.ambo_speed = ambo_kph / 60
        self.ambos_assigned_to_dispatch_points = \
            np.zeros(number_dispatch_points)
        # running id counters for created patients / ambulances
        self.counter_patients = 0
        self.counter_ambulances = 0
        self.dispatch_points = []
        self.hospitals = []
        self.incident_interval = incident_interval
        self.incident_range = incident_range
        self.incident_points = []
        self.max_size = int(max_size)
        # clamp counts to at least one
        self.number_ambulances = max(1, int(number_ambulances))
        self.number_dispatch_points = max(1, int(number_dispatch_points))
        self.number_epochs = number_epochs
        self.number_hospitals = max(1, int(number_hospitals))
        self.number_incident_points = number_incident_points
        # observations = per-dispatch-point counts + ambo x, y + time of day
        self.observation_size = number_dispatch_points + 3
        self.random_seed = int(random_seed)
        self.render_env = bool(render_env)
        self.render_grid_size = render_grid_size
        self.render_interval = render_interval
        self.sim_duration = duration_incidents
        self.sim_time_step = time_step
        self.step_count = 0
        # Set action space (as a choice from dispatch points)
        self.action_space = spaces.Discrete(self.number_dispatch_points)
        # Set observation space: number of ambos currently assigned to each
        # dispatch point + Location of ambo to be assigned (as fraction 0-1),
        # and time of day (as fraction 0-1)
        # NOTE(review): dtype=np.uint8 with high=number_ambulances - 1 does
        # not match the fractional (0-1) observation entries -- confirm
        # downstream agents tolerate this.
        self.observation_space = spaces.Box(
            low=0, high=number_ambulances - 1,
            shape=(number_dispatch_points + 3, 1), dtype=np.uint8)
        # Set up hospital, dispatch and incident point locations
        self._set_dispatch_points()
        self._set_incident_locations()
        self._set_hospital_locations()
        # During writing use printing of times
        self.print_output = print_output
def _assign_ambo(self, patient):
"""
Assign closest ambulance to patient
"""
# Get closest ambulance
best_distance = 9999999
for ambo in self.free_ambulances:
if ambo.at_dispatch_point or ambo.at_hospital:
ambo_x = ambo.current_x
ambo_y = ambo.current_y
else:
# Check if ambo may be assigned before reaching dispatch point
if self.ambo_free_from_hospital:
# Need to work out current location
time_elapsed = self.simpy_env.now - ambo.time_journey_start
fraction_travelled = time_elapsed / ambo.journey_time
ambo_x = (ambo.start_x + ((ambo.target_x - ambo.start_x) *
fraction_travelled))
ambo_y = (ambo.start_y + ((ambo.target_y - ambo.start_y) *
fraction_travelled))
# Get distance from patient to ambulance
distance = get_distance(patient.incident_x, patient.incident_y,
ambo_x, ambo_y)
if distance < best_distance:
# New best distance found
best_distance = distance
best_ambo = ambo
# Best ambo identified
ambo = best_ambo
# Move patient between lists and remove ambulance from free ambos list
self.patients_assignment_to_ambo_arrival.append(patient)
self.free_ambulances.remove(ambo)
ambo.travelling_to_patient = True
ambo.target_x = patient.incident_x
ambo.target_y = patient.incident_y
ambo.start_x = ambo.current_x
ambo.start_y = ambo.current_y
# Set time and calculate time to arrival
patient.time_ambo_assigned = self.simpy_env.now
ambo_travel_time = best_distance / self.ambo_speed
ambo.journey_time = ambo_travel_time
ambo.time_journey_start = self.simpy_env.now
ambo.free = False
if self.print_output:
print(f'Patient {patient.id} ambulance {ambo.ambulance_id} assigned: {self.simpy_env.now:0.1f}')
# SimPy timeout for ambulance travel
yield self.simpy_env.timeout(ambo_travel_time)
# Ambo has arrived with patient
self.demand_met += 1
ambo.travelling_to_patient = False
patient.time_ambo_arrive = self.simpy_env.now
ambo.current_x = patient.incident_x
ambo.current_y = patient.incident_y
if self.print_output:
print(f'Patient {patient.id} ambulance arrived: {self.simpy_env.now:0.1f}')
self.patients_assignment_to_ambo_arrival.remove(patient)
self.patients_in_transit.append(patient)
assigned_to_arrival = \
patient.time_ambo_arrive - patient.time_ambo_assigned
self.results_assignment_to_ambo_arrival.append(assigned_to_arrival)
call_to_arrival = patient.time_ambo_arrive - patient.time_call
self.results_call_to_ambo_arrival.append(call_to_arrival)
self.reward_response_times.append(assigned_to_arrival)
# Get closest hospital and set travel to hospital
ambo.travelling_to_hospital = True
best_distance = 9999999
for hospital_index in range(self.number_hospitals):
distance = get_distance(
patient.incident_x, patient.incident_y,
self.hospitals[hospital_index][0],
self.hospitals[hospital_index][1])
if distance < best_distance:
# New best distance found
best_distance = distance
best_hospital = hospital_index
patient.allocated_hospital = best_hospital
ambo.target_x = self.hospitals[best_hospital][0]
ambo.target_y = self.hospitals[best_hospital][1]
ambo_travel_time = distance / self.ambo_speed
# SimPy timeout for ambulance travel
ambo.time_journey_start = self.simpy_env.now
ambo.start_x = ambo.current_x
ambo.start_y = ambo.current_y
ambo.journey_time = ambo_travel_time
yield self.simpy_env.timeout(ambo_travel_time)
# Patient has arrived at hospital
ambo.travelling_to_hospital = False
ambo.current_x = self.hospitals[best_hospital][0]
ambo.current_y = self.hospitals[best_hospital][1]
if self.print_output:
print(f'Patient {patient.id} arrived at hospital: {self.simpy_env.now:0.1f}')
patient.time_arrive_at_hospital = self.simpy_env.now
self.completed_incidents.append(patient)
# Reset ambulance to wait for new dispatch point
self.free_ambulances.append(ambo)
ambo.time_journey_start = None
ambo.journey_time = None
ambo.start_x = None
ambo.start_y = None
ambo.at_hospital = True
self.ambos_assigned_to_dispatch_points[ambo.dispatch_point] -= 1
ambo.dispatch_point = None
self.unallocated_ambos.append(ambo)
def _calculate_reward(self):
"""
Calculate reward
"""
# Reward is negative of time to respond. Use oldest time each steo
reward = None
if len(self.reward_response_times) > 0:
reward = 0 - self.reward_response_times.pop(0)
reward = 0 - reward ** 2
return reward
def _check_for_unassigned_patients(self):
"""
Each minute check for unassigned patients and free ambulances
"""
while True:
yield self.simpy_env.timeout(1)
patients_waiting = len(self.patients_waiting_for_assignment)
ambos_free = len(self.free_ambulances)
patients_to_assign = min(patients_waiting, ambos_free)
if patients_to_assign > 0:
for i in range(patients_to_assign):
patient = self.patients_waiting_for_assignment.pop(0)
self.simpy_env.process(self._assign_ambo(patient))
def _generate_demand(self):
"""
Generate demand (in an infinite loop)
"""
while True:
# Sample time to next incident
time_out = random.expovariate(1 / self.incident_interval)
yield self.simpy_env.timeout(time_out)
# Generate patient
self.calls += 1
self.counter_patients += 1
patient = Patient(self.simpy_env, self.counter_patients,
self.number_incident_points, self.incident_points,
self.incident_range, self.max_size, self.number_epochs)
self.patients_waiting_for_assignment.append(patient)
if self.print_output:
print(f'Incident: {self.simpy_env.now:0.1f}')
def _get_observations(self, ambo):
"""
Return observations, including location of ambulance to assign dispatch
point
"""
# Get assigned dispatch points
obs = list(self.ambos_assigned_to_dispatch_points)
# Get x and y of ambo to be assigned
x = ambo.current_x / self.max_size
y = ambo.current_y / self.max_size
# Get time of day (0-1)
day = int(self.simpy_env.now / 1440)
time = self.simpy_env.now - (day * 1440)
time = time / 1440
obs.extend([x, y, time])
obs = np.array(obs)
return obs
def _set_dispatch_points(self):
"""
Set ambulance dispatch points using uniform random distribution
"""
# Get number of rows and cols to populate
rows_cols = int(self.number_dispatch_points ** 0.5)
add_random = self.number_dispatch_points - (rows_cols ** 2)
# Padding before any dispatch point is placed
pad = self.max_size / (rows_cols + 1) /2
points = np.linspace(pad, self.max_size - pad, rows_cols)
for x in points:
for y in points:
self.dispatch_points.append((x, y))
# Add any 'extra points' as random points
if add_random > 0:
random.seed(self.random_seed + 2)
for _ in range(self.add_random):
x = random.uniform(0, self.max_size)
y = random.uniform(0, self.max_size)
self.dispatch_points.append((x, y))
def | |
# Copyright 2015 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
from marathon.models import MarathonApp
import mock
from mock import patch
from pytest import raises
import marathon_tools
from utils import compose_job_id
from utils import DeploymentsJson
from utils import SystemPaastaConfig
class TestMarathonTools:
fake_marathon_app_config = marathon_tools.MarathonServiceConfig(
'servicename',
'instancename',
{
'instances': 3,
'cpus': 1,
'mem': 100,
'nerve_ns': 'fake_nerve_ns',
},
{
'docker_image': 'test_docker:1.0',
'desired_state': 'start',
'force_bounce': None,
}
)
fake_srv_config = {
'data': {},
'deploy': {},
'deployed_to': ['another-box'],
'lb_extras': {},
'monitoring': {},
'runs_on': ['some-box'],
'port': None,
'smartstack': {},
'vip': None,
}
fake_docker_registry = 'remote_registry.com'
fake_marathon_config = marathon_tools.MarathonConfig({
'cluster': 'test_cluster',
'url': 'http://test_url',
'user': 'admin',
'pass': '<PASSWORD>',
'docker_registry': fake_docker_registry,
'docker_volumes': [
{
'hostPath': '/var/data/a',
'containerPath': '/etc/a',
'mode': 'RO',
},
{
'hostPath': '/var/data/b',
'containerPath': '/etc/b',
'mode': 'RW',
},
],
}, '/some/fake/path/fake_file.json')
fake_service_namespace_config = marathon_tools.ServiceNamespaceConfig()
    def test_load_marathon_service_config_happy_path(self):
        """load_marathon_service_config should read the service and extra
        config once each and load deployments from the given soa_dir.
        """
        fake_name = 'jazz'
        fake_instance = 'solo'
        fake_cluster = 'amnesia'
        fake_dir = '/nail/home/sanfran'
        # contextlib.nested is Python 2 only; patches apply in listed order
        with contextlib.nested(
            mock.patch('marathon_tools.load_deployments_json', autospec=True),
            mock.patch('service_configuration_lib.read_service_configuration', autospec=True),
            mock.patch('service_configuration_lib.read_extra_service_information', autospec=True),
        ) as (
            mock_load_deployments_json,
            mock_read_service_configuration,
            mock_read_extra_service_information,
        ):
            # the instance must exist in the extra info for the happy path
            mock_read_extra_service_information.return_value = {fake_instance: {}}
            marathon_tools.load_marathon_service_config(
                fake_name,
                fake_instance,
                fake_cluster,
                soa_dir=fake_dir,
            )
            assert mock_read_service_configuration.call_count == 1
            assert mock_read_extra_service_information.call_count == 1
            mock_load_deployments_json.assert_called_once_with(fake_name, soa_dir=fake_dir)
    def test_load_marathon_service_config_bails_with_no_config(self):
        """load_marathon_service_config should raise
        NoConfigurationForServiceError when the instance has no entry in the
        extra service information.
        """
        fake_name = 'jazz'
        fake_instance = 'solo'
        fake_cluster = 'amnesia'
        fake_dir = '/nail/home/sanfran'
        with contextlib.nested(
            mock.patch('marathon_tools.load_deployments_json', autospec=True),
            mock.patch('service_configuration_lib.read_service_configuration', autospec=True),
            mock.patch('service_configuration_lib.read_extra_service_information', autospec=True),
        ) as (
            mock_load_deployments_json,
            mock_read_service_configuration,
            mock_read_extra_service_information,
        ):
            # empty dict -> the requested instance is missing
            mock_read_extra_service_information.return_value = {}
            with raises(marathon_tools.NoConfigurationForServiceError):
                marathon_tools.load_marathon_service_config(
                    fake_name,
                    fake_instance,
                    fake_cluster,
                    soa_dir=fake_dir,
                )
    def test_read_service_config(self):
        """With load_deployments=False the returned config should merge the
        base service config with the instance config and have an empty
        branch dict.
        """
        fake_name = 'jazz'
        fake_instance = 'solo'
        fake_cluster = 'amnesia'
        fake_dir = '/nail/home/sanfran'
        config_copy = self.fake_marathon_app_config.config_dict.copy()
        # expected config = base srv config overlaid with the instance config
        # (dict(a.items() + b.items()) is Python 2 only)
        expected = marathon_tools.MarathonServiceConfig(
            fake_name,
            fake_instance,
            dict(
                self.fake_srv_config.items() +
                self.fake_marathon_app_config.config_dict.items()
            ),
            {},
        )
        with contextlib.nested(
            mock.patch(
                'service_configuration_lib.read_service_configuration',
                autospec=True,
                return_value=self.fake_srv_config,
            ),
            mock.patch(
                'service_configuration_lib.read_extra_service_information',
                autospec=True,
                return_value={fake_instance: config_copy},
            ),
        ) as (
            read_service_configuration_patch,
            read_extra_info_patch,
        ):
            actual = marathon_tools.load_marathon_service_config(
                fake_name,
                fake_instance,
                fake_cluster,
                load_deployments=False,
                soa_dir=fake_dir,
            )
            assert expected.service == actual.service
            assert expected.instance == actual.instance
            assert expected.config_dict == actual.config_dict
            assert expected.branch_dict == actual.branch_dict
            assert read_service_configuration_patch.call_count == 1
            read_service_configuration_patch.assert_any_call(fake_name, soa_dir=fake_dir)
            assert read_extra_info_patch.call_count == 1
            # extra info is read from the cluster-specific marathon file
            read_extra_info_patch.assert_any_call(fake_name, "marathon-amnesia", soa_dir=fake_dir)
def test_read_service_config_and_deployments(self):
fake_name = 'jazz'
fake_instance = 'solo'
fake_cluster = 'amnesia'
fake_dir = '/nail/home/sanfran'
fake_docker = 'no_docker:9.9'
config_copy = self.fake_marathon_app_config.config_dict.copy()
fake_branch_dict = {'desired_state': 'stop', 'force_bounce': '12345', 'docker_image': fake_docker},
deployments_json_mock = mock.Mock(
spec=DeploymentsJson,
get_branch_dict=mock.Mock(return_value=fake_branch_dict),
)
with contextlib.nested(
mock.patch(
'service_configuration_lib.read_service_configuration',
autospec=True,
return_value=self.fake_srv_config,
),
mock.patch(
'service_configuration_lib.read_extra_service_information',
autospec=True,
return_value={fake_instance: config_copy},
),
mock.patch(
'marathon_tools.load_deployments_json',
autospec=True,
return_value=deployments_json_mock,
),
) as (
read_service_configuration_patch,
read_extra_info_patch,
load_deployments_json_patch,
):
expected = marathon_tools.MarathonServiceConfig(
fake_name,
fake_instance,
dict(
self.fake_srv_config.items() +
self.fake_marathon_app_config.config_dict.items()
),
fake_branch_dict,
)
actual = marathon_tools.load_marathon_service_config(
fake_name,
fake_instance,
fake_cluster,
load_deployments=True,
soa_dir=fake_dir,
)
assert expected.service == actual.service
assert expected.instance == actual.instance
assert expected.config_dict == actual.config_dict
assert expected.branch_dict == actual.branch_dict
deployments_json_mock.get_branch_dict.assert_called_once_with(fake_name, 'paasta-amnesia.solo')
assert read_service_configuration_patch.call_count == 1
read_service_configuration_patch.assert_any_call(fake_name, soa_dir=fake_dir)
assert read_extra_info_patch.call_count == 1
read_extra_info_patch.assert_any_call(fake_name, "marathon-amnesia", soa_dir=fake_dir)
    def test_load_marathon_config(self):
        """load_marathon_config should open the default path and return the
        parsed JSON contents.
        """
        expected = {'foo': 'bar'}
        # `file` is the Python 2 builtin file type
        file_mock = mock.MagicMock(spec=file)
        with contextlib.nested(
            mock.patch('marathon_tools.open', create=True, return_value=file_mock),
            mock.patch('json.load', autospec=True, return_value=expected)
        ) as (
            open_file_patch,
            json_patch
        ):
            assert marathon_tools.load_marathon_config() == expected
            open_file_patch.assert_called_once_with('/etc/paasta/marathon.json')
            # json.load receives the context-managed file object
            json_patch.assert_called_once_with(file_mock.__enter__())
    def test_load_marathon_config_path_dne(self):
        """A missing config file (IOError on open) should be re-raised as
        PaastaNotConfiguredError with a descriptive message.
        """
        fake_path = '/var/dir_of_fake'
        with contextlib.nested(
            # IOError(errno, strerror, filename)
            mock.patch('marathon_tools.open', create=True, side_effect=IOError(2, 'a', 'b')),
        ) as (
            open_patch,
        ):
            with raises(marathon_tools.PaastaNotConfiguredError) as excinfo:
                marathon_tools.load_marathon_config(fake_path)
            assert str(excinfo.value) == "Could not load marathon config file b: a"
def test_get_all_namespaces_for_service(self):
name = 'vvvvvv'
soa_dir = '^_^'
t1_dict = {'hollo': 'werld', 'smark': 'stact'}
t2_dict = {'vataman': 'witir', 'sin': 'chaps'}
fake_smartstack = {
'smartstack': {'t1': t1_dict, 't2': t2_dict},
}
expected = [('vvvvvv.t2', t2_dict), ('vvvvvv.t1', t1_dict)]
expected_short = [('t2', t2_dict), ('t1', t1_dict)]
with mock.patch('service_configuration_lib.read_service_configuration', autospec=True,
return_value=fake_smartstack) as read_service_configuration_patch:
actual = marathon_tools.get_all_namespaces_for_service(name, soa_dir)
read_service_configuration_patch.assert_any_call(name, soa_dir)
assert sorted(expected) == sorted(actual)
actual_short = marathon_tools.get_all_namespaces_for_service(name, soa_dir, False)
read_service_configuration_patch.assert_any_call(name, soa_dir)
assert sorted(expected_short) == sorted(actual_short)
def test_get_all_namespaces(self):
soa_dir = 'carbon'
namespaces = [[('aluminum', {'hydrogen': 1}), ('potassium', {'helium': 2})],
[('uranium', {'lithium': 3}), ('gold', {'boron': 5})]]
expected = [('uranium', {'lithium': 3}), ('gold', {'boron': 5}),
('aluminum', {'hydrogen': 1}), ('potassium', {'helium': 2})]
with contextlib.nested(
mock.patch('os.path.abspath', autospec=True, return_value='oxygen'),
mock.patch('os.listdir', autospec=True, return_value=['rid1', 'rid2']),
mock.patch('marathon_tools.get_all_namespaces_for_service',
autospec=True,
side_effect=lambda a, b: namespaces.pop())
) as (
abspath_patch,
listdir_patch,
get_namespaces_patch,
):
actual = marathon_tools.get_all_namespaces(soa_dir)
assert expected == actual
abspath_patch.assert_called_once_with(soa_dir)
listdir_patch.assert_called_once_with('oxygen')
get_namespaces_patch.assert_any_call('rid1', soa_dir)
get_namespaces_patch.assert_any_call('rid2', soa_dir)
assert get_namespaces_patch.call_count == 2
def test_get_proxy_port_for_instance(self):
name = 'thats_no_moon'
instance = 'thats_a_space_station'
cluster = 'shot_line'
soa_dir = 'drink_up'
namespace = 'thirsty_mock'
fake_port = 1234567890
fake_nerve = marathon_tools.ServiceNamespaceConfig({'proxy_port': fake_port})
with contextlib.nested(
mock.patch('marathon_tools.read_namespace_for_service_instance',
autospec=True, return_value=namespace),
mock.patch('marathon_tools.load_service_namespace_config',
autospec=True, return_value=fake_nerve)
) as (
read_ns_patch,
read_config_patch
):
actual = marathon_tools.get_proxy_port_for_instance(name, instance, cluster, soa_dir)
assert fake_port == actual
read_ns_patch.assert_called_once_with(name, instance, cluster, soa_dir)
read_config_patch.assert_called_once_with(name, namespace, soa_dir)
def test_get_proxy_port_for_instance_defaults_to_none(self):
name = 'thats_no_moon'
instance = 'thats_a_space_station'
cluster = 'shot_line'
soa_dir = 'drink_up'
namespace = 'thirsty_mock'
expected = None
with contextlib.nested(
mock.patch('marathon_tools.read_namespace_for_service_instance',
autospec=True, return_value=namespace),
mock.patch('marathon_tools.load_service_namespace_config',
autospec=True, return_value={})
) as (
read_ns_patch,
read_config_patch
):
actual = marathon_tools.get_proxy_port_for_instance(name, instance, cluster, soa_dir)
assert expected == actual
read_ns_patch.assert_called_once_with(name, instance, cluster, soa_dir)
read_config_patch.assert_called_once_with(name, namespace, soa_dir)
    def test_read_service_namespace_config_exists(self):
        """load_service_namespace_config passes the namespace's smartstack dict
        through, flattening 'routes' and 'extra_advertise' mappings into lists
        of (source, destination) tuples."""
        name = 'eman'
        namespace = 'ecapseman'
        soa_dir = 'rid_aos'
        mode = 'http'
        fake_uri = 'energy'
        fake_timeout = -10103
        fake_port = 777
        fake_retries = 9001
        fake_discover = 'myhabitat'
        fake_advertise = ['red', 'blue']
        fake_info = {
            'healthcheck_uri': fake_uri,
            'healthcheck_timeout_s': fake_timeout,
            'proxy_port': fake_port,
            'timeout_connect_ms': 192,
            'timeout_server_ms': 291,
            'timeout_client_ms': 912,
            'updown_timeout_s': 293,
            'retries': fake_retries,
            'mode': mode,
            'routes': [
                {
                    'source': 'oregon',
                    'destinations': ['indiana']
                },
                {
                    'source': 'florida', 'destinations': ['miami', 'beach']
                },
            ],
            'discover': fake_discover,
            'advertise': fake_advertise,
            'extra_advertise': {
                'alpha': ['beta'],
                'gamma': ['delta', 'epsilon'],
            },
            'extra_healthcheck_headers': {
                'Host': 'example.com'
            },
        }
        fake_config = {
            'smartstack': {
                namespace: fake_info,
            },
        }
        # each route/extra_advertise destination becomes its own (source, dest) tuple
        expected = {
            'healthcheck_uri': fake_uri,
            'healthcheck_timeout_s': fake_timeout,
            'proxy_port': fake_port,
            'timeout_connect_ms': 192,
            'timeout_server_ms': 291,
            'timeout_client_ms': 912,
            'updown_timeout_s': 293,
            'retries': fake_retries,
            'mode': mode,
            'routes': [
                ('oregon', 'indiana'), ('florida', 'miami'), ('florida', 'beach')
            ],
            'discover': fake_discover,
            'advertise': fake_advertise,
            'extra_advertise': [
                ('alpha', 'beta'), ('gamma', 'delta'), ('gamma', 'epsilon')
            ],
            'extra_healthcheck_headers': {
                'Host': 'example.com'
            },
        }
        with mock.patch('service_configuration_lib.read_service_configuration',
                        autospec=True,
                        return_value=fake_config) as read_service_configuration_patch:
            actual = marathon_tools.load_service_namespace_config(name, namespace, soa_dir)
            read_service_configuration_patch.assert_called_once_with(name, soa_dir)
            # NOTE(review): sorted() on a dict sorts its keys, so this only
            # compares key sets, not values — consider `assert actual == expected`
            assert sorted(actual) == sorted(expected)
def test_read_service_namespace_config_no_mode_with_no_smartstack(self):
name = 'eman'
namespace = 'ecapseman'
soa_dir = 'rid_aos'
fake_config = {}
with mock.patch('service_configuration_lib.read_service_configuration',
autospec=True,
return_value=fake_config) as read_service_configuration_patch:
actual = marathon_tools.load_service_namespace_config(name, namespace, soa_dir)
read_service_configuration_patch.assert_called_once_with(name, soa_dir)
assert actual.get('mode') is None
def test_read_service_namespace_config_no_mode_with_smartstack(self):
name = 'eman'
namespace = 'ecapseman'
soa_dir = 'rid_aos'
fake_config = {
'smartstack': {
namespace: {'proxy_port': 9001},
},
}
with mock.patch('service_configuration_lib.read_service_configuration',
autospec=True,
return_value=fake_config) as read_service_configuration_patch:
actual = marathon_tools.load_service_namespace_config(name, namespace, soa_dir)
read_service_configuration_patch.assert_called_once_with(name, soa_dir)
assert actual.get('mode') == 'http'
def test_read_service_namespace_config_no_file(self):
name = 'a_man'
namespace = 'a_boat'
soa_dir = 'an_adventure'
with mock.patch('service_configuration_lib.read_service_configuration',
side_effect=Exception) as read_service_configuration_patch:
with raises(Exception):
marathon_tools.load_service_namespace_config(name, namespace, soa_dir)
read_service_configuration_patch.assert_called_once_with(name, soa_dir)
@mock.patch('service_configuration_lib.read_extra_service_information', autospec=True)
def test_read_namespace_for_service_instance_has_value(self, read_info_patch):
name = 'dont_worry'
instance = 'im_a_professional'
cluster = 'andromeda'
namespace = 'spacename'
soa_dir = 'dirdirdir'
read_info_patch.return_value = {instance: {'nerve_ns': namespace}}
actual = marathon_tools.read_namespace_for_service_instance(name, instance, cluster, soa_dir)
assert actual == namespace
read_info_patch.assert_called_once_with(name, 'marathon-%s' % cluster, soa_dir)
@mock.patch('service_configuration_lib.read_extra_service_information', autospec=True)
def test_read_namespace_for_service_instance_no_value(self, read_info_patch):
name = 'wall_light'
instance = 'ceiling_light'
cluster = 'no_light'
soa_dir = 'warehouse_light'
read_info_patch.return_value = {instance: {'aaaaaaaa': ['bbbbbbbb']}}
actual = marathon_tools.read_namespace_for_service_instance(name, instance, cluster, soa_dir)
assert actual == instance
read_info_patch.assert_called_once_with(name, 'marathon-%s' % cluster, soa_dir)
    @mock.patch('marathon_tools.get_local_slave_state', autospec=True)
    def test_marathon_services_running_here(self, mock_get_local_slave_state):
        """Only TASK_RUNNING executors in marathon-* frameworks are reported,
        as (service, instance, first_port) tuples parsed from the executor id."""
        # executor ids encode "<service>.<instance>.<...>"
        id_1 = 'klingon.ships.detected.249qwiomelht4jioewglkemr'
        id_2 = 'fire.photon.torpedos.jtgriemot5yhtwe94'
        id_3 = 'dota.axe.cleave.482u9jyoi4wed'
        id_4 = 'mesos.deployment.is.hard'
        id_5 = 'how.to.fake.data'
        ports_1 = '[111-111]'
        ports_2 = '[222-222]'
        ports_3 = '[333-333]'
        ports_4 = '[444-444]'
        ports_5 = '[555-555]'
        mock_get_local_slave_state.return_value = {
            'frameworks': [
                {
                    'executors': [
                        {'id': id_1, 'resources': {'ports': ports_1},
                            'tasks': [{u'state': u'TASK_RUNNING'}]},
                        {'id': id_2, 'resources': {'ports': ports_2}, 'tasks': [{u'state': u'TASK_RUNNING'}]}
                    ],
                    'name': 'marathon-1111111'
                },
                {
                    'executors': [
                        {'id': id_3, 'resources': {'ports': ports_3}, 'tasks': [{u'state': u'TASK_RUNNING'}]},
                        {'id': id_4, 'resources': {'ports': ports_4}, 'tasks': [{u'state': u'TASK_RUNNING'}]},
                    ],
                    'name': 'marathon-3145jgreoifd'
                },
                {
                    # TASK_STAGED task: must be excluded from the result
                    'executors': [
                        {'id': id_5, 'resources': {'ports': ports_5}, 'tasks': [{u'state': u'TASK_STAGED'}]},
                    ],
                    'name': 'marathon-754rchoeurcho'
                },
                {
                    # framework name doesn't start with "marathon": excluded
                    'executors': [
                        {'id': 'bunk', 'resources': {'ports': '[65-65]'}, 'tasks': [{u'state': u'TASK_RUNNING'}]},
                    ],
                    'name': 'super_bunk'
                }
            ]
        }
        expected = [('klingon', 'ships', 111),
                    ('fire', 'photon', 222),
                    ('dota', 'axe', 333),
                    ('mesos', 'deployment', 444)]
        actual = marathon_tools.marathon_services_running_here()
        mock_get_local_slave_state.assert_called_once_with()
        assert expected == actual
def test_get_marathon_services_running_here_for_nerve(self):
cluster = 'edelweiss'
soa_dir = 'the_sound_of_music'
fake_marathon_services = [('no_test', 'left_behind', 1111),
('no_docstrings', 'forever_abandoned', 2222)]
namespaces = ['dos', 'uno']
nerve_dicts = [marathon_tools.ServiceNamespaceConfig({'binary': | |
# Repository: KarrLab/schema
""" Chemistry attributes
:Author: <NAME> <<EMAIL>>
:Date: 2017-05-10
:Copyright: 2017, Karr Lab
:License: MIT
"""
from .. import core
from wc_utils.util import chem
from wc_utils.util.enumerate import CaseInsensitiveEnum
import wc_utils.workbook.io
import bcforms
import bpforms
import bpforms.util
import lark
import math
import openbabel
import os.path
import pkg_resources
__all__ = [
'ChemicalFormulaAttribute',
'ChemicalStructure',
'ChemicalStructureFormat',
'ChemicalStructureAttribute',
'ReactionEquation',
'ReactionParticipant',
'ReactionEquationAttribute',
]
class ChemicalFormulaAttribute(core.LiteralAttribute):
    """ Chemical formula attribute """

    def __init__(self, default=None, none_value=None, verbose_name='', description="A chemical formula (e.g. 'H2O', 'CO2', or 'NaCl')",
                 primary=False, unique=False):
        """
        Args:
            default (:obj:`chem.EmpiricalFormula`, :obj:`dict`, :obj:`str`, or :obj:`None`, optional): default value
            none_value (:obj:`object`, optional): none value
            verbose_name (:obj:`str`, optional): verbose name
            description (:obj:`str`, optional): description
            primary (:obj:`bool`, optional): indicate if attribute is primary attribute
            unique (:obj:`bool`, optional): indicate if attribute value must be unique
        """
        # coerce string/dict defaults into an EmpiricalFormula up front
        if not isinstance(default, chem.EmpiricalFormula) and default is not None:
            default = chem.EmpiricalFormula(default)
        super(ChemicalFormulaAttribute, self).__init__(default=default, none_value=none_value,
                                                       verbose_name=verbose_name,
                                                       description=description,
                                                       primary=primary, unique=unique)
        if primary:
            self.type = chem.EmpiricalFormula
        else:
            self.type = (chem.EmpiricalFormula, None.__class__)

    def deserialize(self, value):
        """ Deserialize value

        Args:
            value (:obj:`str`): semantically equivalent representation

        Returns:
            :obj:`tuple`:

                * :obj:`chem.EmpiricalFormula`: cleaned value
                * :obj:`core.InvalidAttribute`: cleaning error
        """
        if value:
            try:
                return (chem.EmpiricalFormula(value), None)
            except ValueError as error:
                return (None, core.InvalidAttribute(self, [str(error)]))
        return (None, None)

    def validate(self, obj, value):
        """ Determine if :obj:`value` is a valid value

        Args:
            obj (:obj:`Model`): class being validated
            value (:obj:`chem.EmpiricalFormula`): value of attribute to validate

        Returns:
            :obj:`core.InvalidAttribute` or None: None if attribute is valid, other return
                list of errors as an instance of :obj:`core.InvalidAttribute`
        """
        errors = []
        if value is not None and not isinstance(value, chem.EmpiricalFormula):
            errors.append('Value must be an instance of `chem.EmpiricalFormula`')
        if self.primary and (not value or len(value) == 0):
            errors.append('{} value for primary attribute cannot be empty'.format(
                self.__class__.__name__))
        if errors:
            return core.InvalidAttribute(self, errors)
        return None

    def validate_unique(self, objects, values):
        """ Determine if the attribute values are unique

        Args:
            objects (:obj:`list` of :obj:`Model`): list of :obj:`Model` objects
            values (:obj:`list` of :obj:`chem.EmpiricalFormula`): list of values

        Returns:
            :obj:`core.InvalidAttribute` or None: None if values are unique, otherwise return a
                list of errors as an instance of :obj:`core.InvalidAttribute`
        """
        # compare serialized forms so equal formulas with distinct objects collide
        str_values = []
        for v in values:
            str_values.append(self.serialize(v))
        return super(ChemicalFormulaAttribute, self).validate_unique(objects, str_values)

    def serialize(self, value):
        """ Serialize string

        Args:
            value (:obj:`chem.EmpiricalFormula`): Python representation

        Returns:
            :obj:`str`: simple Python representation
        """
        if value is None:
            return ''
        return str(value)

    def to_builtin(self, value):
        """ Encode a value of the attribute using a simple Python representation (dict, list, str, float, bool, None)
        that is compatible with JSON and YAML

        Args:
            value (:obj:`chem.EmpiricalFormula`): value of the attribute

        Returns:
            :obj:`dict`: simple Python representation of a value of the attribute
        """
        if value:
            return dict(value)
        return None

    def from_builtin(self, json):
        """ Decode a simple Python representation (dict, list, str, float, bool, None) of a value of the attribute
        that is compatible with JSON and YAML

        Args:
            json (:obj:`dict`): simple Python representation of a value of the attribute

        Returns:
            :obj:`chem.EmpiricalFormula`: decoded value of the attribute
        """
        if json:
            return chem.EmpiricalFormula(json)
        return None

    def get_xlsx_validation(self, sheet_models=None, doc_metadata_model=None):
        """ Get XLSX validation

        Args:
            sheet_models (:obj:`list` of :obj:`Model`, optional): models encoded as separate sheets
            doc_metadata_model (:obj:`type`): model whose worksheet contains the document metadata

        Returns:
            :obj:`wc_utils.workbook.io.FieldValidation`: validation
        """
        validation = super(ChemicalFormulaAttribute, self).get_xlsx_validation(sheet_models=sheet_models,
                                                                               doc_metadata_model=doc_metadata_model)
        validation.type = wc_utils.workbook.io.FieldValidationType.any

        # fixed grammar: "a chemical formula" (was "an chemical formula")
        input_message = ['Enter a chemical formula (e.g. "H2O").']
        error_message = ['Value must be a chemical formula (e.g. "H2O").']

        if self.unique:
            input_message.append('Value must be unique.')
            error_message.append('Value must be unique.')

        if validation.input_message:
            validation.input_message += '\n\n'
        if input_message:
            if not validation.input_message:
                validation.input_message = ""
            validation.input_message += '\n\n'.join(input_message)

        if validation.error_message:
            validation.error_message += '\n\n'
        if error_message:
            if not validation.error_message:
                validation.error_message = ""
            validation.error_message += '\n\n'.join(error_message)

        return validation
class ChemicalStructureFormat(int, CaseInsensitiveEnum):
    """ Format of a chemical structure """
    inchi = 1  # IUPAC International Chemical Identifier
    smiles = 2  # Simplified Molecular-Input Line-Entry System
    bpforms = 3  # BpForms-encoded biopolymer
    bcforms = 4  # BcForms-encoded complex
class ChemicalStructure(object):
    """ A chemical structure

    The serialized string form of the value is computed lazily and cached in
    :obj:`_serialized_value`; mutating :obj:`value` or :obj:`serialized_format`
    invalidates the cache.

    Attributes:
        value (:obj:`openbabel.OBMol`, :obj:`bpforms.BpForm`, :obj:`bcforms.BcForm`): value
        serialized_value (:obj:`str`): serialized value
        serialized_format (:obj:`ChemicalStructureFormat`): serialized format

        _value (:obj:`openbabel.OBMol`, :obj:`bpforms.BpForm`, :obj:`bcforms.BcForm`): value
        _serialized_value (:obj:`str`): cached serialized value
        _serialized_format (:obj:`ChemicalStructureFormat`): serialized format
    """
    def __init__(self, value=None, serialized_format=None):
        """
        Args:
            value (:obj:`str`, :obj:`openbabel.OBMol`, :obj:`bpforms.BpForm`, :obj:`bcforms.BcForm`, optional): value
            serialized_format (:obj:`ChemicalStructureFormat`, optional): serialized format
        """
        self._value = None
        self._serialized_format = None
        self._serialized_value = None
        # the value setter may already fix the serialized format (e.g. for
        # BpForm/BcForm values); the explicit argument is only a fallback
        self.value = value
        self.serialized_format = serialized_format or self.serialized_format
    @property
    def value(self):
        """ Get the deserialized value """
        return self._value
    @value.setter
    def value(self, value):
        """ Set the value, updating the serialized format and invalidating the cache """
        if value is None:
            self._value = None
            self._serialized_value = None
            self._serialized_format = None
        elif isinstance(value, str):
            # strings are parsed as '<format>: <value>'; deserialize sets all state
            self.deserialize(value)
        elif isinstance(value, openbabel.OBMol):
            self._value = value
            self._serialized_value = None
            # keep an existing InChI/SMILES choice; otherwise default to SMILES
            if self.serialized_format not in [ChemicalStructureFormat.inchi, ChemicalStructureFormat.smiles]:
                self._serialized_format = ChemicalStructureFormat.smiles
        elif isinstance(value, bpforms.BpForm):
            if value.alphabet not in bpforms.util.get_alphabets().values():
                raise ValueError('BpForms must use one of the defined alphabets')
            self._value = value
            self._serialized_value = None
            self._serialized_format = ChemicalStructureFormat.bpforms
        elif isinstance(value, bcforms.BcForm):
            self._value = value
            self._serialized_value = None
            self._serialized_format = ChemicalStructureFormat.bcforms
        else:
            raise ValueError('Unable to set `value` to an instance of {}'.format(
                value.__class__.__name__))
    @property
    def serialized_format(self):
        """ Get the serialized format """
        return self._serialized_format
    @serialized_format.setter
    def serialized_format(self, value):
        """ Set the serialized format

        Only switching between InChI and SMILES is allowed; BpForms/BcForms
        formats are determined by the value itself.
        """
        if value in [ChemicalStructureFormat.inchi, ChemicalStructureFormat.smiles]:
            if self._serialized_format in [ChemicalStructureFormat.inchi, ChemicalStructureFormat.smiles]:
                if value != self._serialized_format:
                    self._serialized_format = value
                    # cached string is in the old format; drop it
                    self._serialized_value = None
            else:
                raise ValueError('`serialized_format` must be consistent with `value`')
        else:
            if value != self._serialized_format:
                raise ValueError('`serialized_format` must be consistent with `value`')
    @property
    def serialized_value(self):
        """ Get the cached serialized value (:obj:`None` until :obj:`to_dict` computes it) """
        return self._serialized_value
    def to_dict(self):
        """ Get a dictionary representation

        Returns:
            :obj:`dict`: dictionary representation
        """
        # serialize lazily and cache the result
        serialized_value = self.serialized_value
        if serialized_value is None and self.value is not None:
            if isinstance(self.value, openbabel.OBMol):
                conversion = openbabel.OBConversion()
                assert conversion.SetOutFormat(self.serialized_format.name)
                # NOTE(review): presumably 'c' requests canonical output — confirm
                # against the Open Babel format options
                conversion.SetOptions('c', conversion.OUTOPTIONS)
                serialized_value = conversion.WriteString(self.value, True)
            else:
                serialized_value = str(self.value)
            self._serialized_value = serialized_value
        if self.serialized_format:
            serialized_format = self.serialized_format.name
            # BpForms formats also record the alphabet, e.g. 'bpforms/dna'
            if self.serialized_format == ChemicalStructureFormat.bpforms:
                serialized_format += '/' + self.value.alphabet.id
        else:
            serialized_format = None
        return {
            "format": serialized_format,
            "value": serialized_value
        }
    def from_dict(self, dict_value):
        """ Set value from a dictionary representation

        Args:
            dict_value (:obj:`dict`): dictionary representation

        Returns:
            :obj:`ChemicalStructure`: self
        """
        format = dict_value.get('format', None)
        if format:
            # split e.g. 'bpforms/dna' into format and alphabet
            serialized_format, _, serialized_alphabet = format.partition('/')
            self._serialized_format = ChemicalStructureFormat[serialized_format.strip()]
            serialized_alphabet = serialized_alphabet.strip().lower()
        else:
            self._serialized_format = None
        value = dict_value.get('value', None)
        if self.serialized_format in [ChemicalStructureFormat.inchi, ChemicalStructureFormat.smiles]:
            self._value = openbabel.OBMol()
            conversion = openbabel.OBConversion()
            assert conversion.SetInFormat(self.serialized_format.name)
            conversion.ReadString(self.value, value or '')
        elif self.serialized_format == ChemicalStructureFormat.bpforms:
            alphabet = bpforms.util.get_alphabet(serialized_alphabet)
            self._value = bpforms.BpForm(alphabet=alphabet).from_str(value or '')
        elif self.serialized_format == ChemicalStructureFormat.bcforms:
            self._value = bcforms.BcForm().from_str(value or '')
        elif self.serialized_format is None:
            if value:
                raise ValueError('`format` key must be defined')
            else:
                value = None
        self._serialized_value = value
        return self
    def serialize(self):
        """ Generate a string representation

        Returns:
            :obj:`str`: string representation
        """
        dict_value = self.to_dict()
        return '{}: {}'.format(dict_value['format'], dict_value['value'])
    def deserialize(self, serialized_value):
        """ Set value from a string representation

        Args:
            serialized_value (:obj:`str`): string representation

        Returns:
            :obj:`ChemicalStructure`: self
        """
        if serialized_value:
            # split '<format>: <value>' on the first colon
            serialized_format, _, serialized_value = serialized_value.partition(':')
            serialized_format = serialized_format.strip()
            serialized_value = serialized_value.strip()
        else:
            serialized_format = None
            serialized_value = None
        self.from_dict({
            'format': serialized_format,
            'value': serialized_value,
        })
        return self
class ChemicalStructureAttribute(core.LiteralAttribute):
""" Attribute for the structures of chemical compounds """
    def __init__(self, verbose_name='',
                 description=("The InChI, SMILES-, BpForms, BcForms-encoded structure of a compound."
                              "\n"
                              "\nExamples:"
                              "\n  Small molecules (SMILES): C([N+])C([O-])=O"
                              "\n  DNA (BpForms/dna): A{m2C}GT"
                              "\n  RNA (BpForms/rna): AC{02G}U"
                              "\n  Protein (BpForms/protein): RNC{AA0037}E"
                              "\n  Complex (BcForms): 2 * subunit-A + subunit-B"),
                 primary=False, unique=False):
        """
        Args:
            verbose_name (:obj:`str`, optional): verbose name
            description (:obj:`str`, optional): description
            primary (:obj:`bool`, optional): indicate if attribute is primary attribute
            unique (:obj:`bool`, optional): indicate if attribute value must be unique
        """
        super(ChemicalStructureAttribute, self).__init__(default=None, none_value=None,
                                                         verbose_name=verbose_name,
                                                         description=description,
                                                         primary=primary, unique=unique)
        # primary attributes may not be None, so only they get the bare type
        if primary:
            self.type = ChemicalStructure
        else:
            self.type = (ChemicalStructure, None.__class__)
def deserialize(self, value):
""" Deserialize value
Args:
value (:obj:`str`): string representation of structure
Returns:
:obj:`tuple`:
* :obj:`str`: cleaned value
* :obj:`core.InvalidAttribute`: cleaning error
"""
if value:
if isinstance(value, str):
try:
return (ChemicalStructure().deserialize(value), None)
except Exception as error:
return (None, core.InvalidAttribute(self, [str(error)]))
else:
return (None, core.InvalidAttribute(self, ['Value must be a string']))
return (None, None)
def validate(self, obj, value):
""" Determine if :obj:`value` is a valid value
Args:
obj (:obj:`Model`): class being validated
value (:obj:`ChemicalStructure`): value of attribute to validate
Returns:
:obj:`core.InvalidAttribute` or None: None if attribute is valid, other return list of
errors as an instance of :obj:`core.InvalidAttribute`
"""
errors = []
if value is not None and not isinstance(value, ChemicalStructure):
errors.append('Value must be an instance of `ChemicalStructure` or `None`')
if self.primary and value is None:
errors.append('{} value for primary attribute cannot be `None`'.format(
self.__class__.__name__))
if errors:
return core.InvalidAttribute(self, errors)
return None
def validate_unique(self, objects, values):
""" Determine if | |
8287 5.35613932168486974801385239922774E-4990 2.67806966084243487400692619961387E-4990
8288 1.339034830421217437003463099806935E-4990 6.695174152106087185017315499034675E-4991
8289 3.347587076053043592508657749517338E-4991 1.673793538026521796254328874758669E-4991
8290 8.368967690132608981271644373793345E-4992 4.184483845066304490635822186896673E-4992
8291 2.092241922533152245317911093448337E-4992 1.046120961266576122658955546724168E-4992
8292 5.23060480633288061329477773362084E-4993 2.61530240316644030664738886681042E-4993
8293 1.30765120158322015332369443340521E-4993 6.53825600791610076661847216702605E-4994
8294 3.269128003958050383309236083513025E-4994 1.634564001979025191654618041756513E-4994
8295 8.172820009895125958273090208782565E-4995 4.086410004947562979136545104391283E-4995
8296 2.043205002473781489568272552195642E-4995 1.021602501236890744784136276097821E-4995
8297 5.108012506184453723920681380489105E-4996 2.554006253092226861960340690244553E-4996
8298 1.277003126546113430980170345122277E-4996 6.385015632730567154900851725611383E-4997
8299 3.192507816365283577450425862805692E-4997 1.596253908182641788725212931402846E-4997
8300 7.98126954091320894362606465701423E-4998 3.990634770456604471813032328507115E-4998
8301 1.995317385228302235906516164253558E-4998 9.976586926141511179532580821267788E-4999
8302 4.988293463070755589766290410633894E-4999 2.494146731535377794883145205316947E-4999
8303 1.247073365767688897441572602658474E-4999 6.235366828838444487207863013292368E-5000
8304 3.117683414419222243603931506646184E-5000 1.558841707209611121801965753323092E-5000
8305 7.79420853604805560900982876661546E-5001 3.89710426802402780450491438330773E-5001
8306 1.948552134012013902252457191653865E-5001 9.742760670060069511262285958269325E-5002
8307 4.871380335030034755631142979134663E-5002 2.435690167515017377815571489567331E-5002
8308 1.217845083757508688907785744783666E-5002 6.089225418787543444538928723918328E-5003
8309 3.044612709393771722269464361959164E-5003 1.522306354696885861134732180979582E-5003
8310 7.61153177348442930567366090489791E-5004 3.805765886742214652836830452448955E-5004
8311 1.902882943371107326418415226224478E-5004 9.514414716855536632092076131122388E-5005
8312 4.757207358427768316046038065561194E-5005 2.378603679213884158023019032780597E-5005
8313 1.189301839606942079011509516390299E-5005 5.946509198034710395057547581951493E-5006
8314 2.973254599017355197528773790975747E-5006 1.486627299508677598764386895487873E-5006
8315 7.433136497543387993821934477439365E-5007 3.716568248771693996910967238719683E-5007
8316 1.858284124385846998455483619359842E-5007 9.291420621929234992277418096799208E-5008
8317 4.645710310964617496138709048399604E-5008 2.322855155482308748069354524199802E-5008
8318 1.161427577741154374034677262099901E-5008 5.807137888705771870173386310499505E-5009
8319 2.903568944352885935086693155249753E-5009 1.451784472176442967543346577624876E-5009
8320 7.25892236088221483771673288812438E-5010 3.62946118044110741885836644406219E-5010
8321 1.814730590220553709429183222031095E-5010 9.073652951102768547145916110155475E-5011
8322 4.536826475551384273572958055077738E-5011 2.268413237775692136786479027538869E-5011
8323 1.134206618887846068393239513769435E-5011 5.671033094439230341966197568847173E-5012
8324 2.835516547219615170983098784423587E-5012 1.417758273609807585491549392211793E-5012
8325 7.088791368049037927457746961058965E-5013 3.544395684024518963728873480529483E-5013
8326 1.772197842012259481864436740264742E-5013 8.860989210061297409322183701323708E-5014
8327 4.430494605030648704661091850661854E-5014 2.215247302515324352330545925330927E-5014
8328 1.107623651257662176165272962665464E-5014 5.538118256288310880826364813327318E-5015
8329 2.769059128144155440413182406663659E-5015 1.384529564072077720206591203331830E-5015
8330 6.92264782036038860103295601665915E-5016 3.461323910180194300516478008329575E-5016
8331 1.730661955090097150258239004164788E-5016 8.653309775450485751291195020823938E-5017
8332 4.326654887725242875645597510411969E-5017 2.163327443862621437822798755205985E-5017
8333 1.081663721931310718911399377602993E-5017 5.408318609656553594556996888014963E-5018
8334 2.704159304828276797278498444007482E-5018 1.352079652414138398639249222003741E-5018
8335 6.760398262070691993196246110018705E-5019 3.380199131035345996598123055009353E-5019
8336 1.690099565517672998299061527504677E-5019 8.450497827588364991495307637523383E-5020
8337 4.225248913794182495747653818761692E-5020 2.112624456897091247873826909380846E-5020
8338 1.056312228448545623936913454690423E-5020 5.281561142242728119684567273452115E-5021
8339 2.640780571121364059842283636726058E-5021 1.320390285560682029921141818363029E-5021
8340 6.601951427803410149605709091815145E-5022 3.300975713901705074802854545907573E-5022
8341 1.650487856950852537401427272953787E-5022 8.252439284754262687007136364768933E-5023
8342 4.126219642377131343503568182384467E-5023 2.063109821188565671751784091192233E-5023
8343 1.031554910594282835875892045596117E-5023 5.157774552971414179379460227980583E-5024
8344 2.578887276485707089689730113990292E-5024 1.289443638242853544844865056995146E-5024
8345 6.44721819121426772422432528497573E-5025 3.223609095607133862112162642487865E-5025
8346 1.611804547803566931056081321243933E-5025 8.059022739017834655280406606219663E-5026
8347 4.029511369508917327640203303109832E-5026 2.014755684754458663820101651554916E-5026
8348 1.007377842377229331910050825777458E-5026 5.03688921188614665955025412888729E-5027
8349 2.518444605943073329775127064443645E-5027 1.259222302971536664887563532221823E-5027
8350 6.296111514857683324437817661109115E-5028 3.148055757428841662218908830554558E-5028
8351 1.574027878714420831109454415277279E-5028 7.870139393572104155547272076386395E-5029
8352 3.935069696786052077773636038193198E-5029 1.967534848393026038886818019096599E-5029
8353 9.837674241965130194434090095482995E-5030 4.918837120982565097217045047741498E-5030
8354 2.459418560491282548608522523870749E-5030 1.229709280245641274304261261935375E-5030
8355 6.148546401228206371521306309676875E-5031 3.074273200614103185760653154838438E-5031
8356 1.537136600307051592880326577419219E-5031 7.685683001535257964401632887096095E-5032
8357 3.842841500767628982200816443548048E-5032 1.921420750383814491100408221774024E-5032
8358 9.60710375191907245550204110887012E-5033 4.80355187595953622775102055443506E-5033
8359 2.40177593797976811387551027721753E-5033 1.200887968989884056937755138608765E-5033
8360 6.004439844949420284688775693043825E-5034 3.002219922474710142344387846521913E-5034
8361 1.501109961237355071172193923260957E-5034 7.505549806186775355860969616304783E-5035
8362 3.752774903093387677930484808152392E-5035 1.876387451546693838965242404076196E-5035
8363 9.38193725773346919482621202038098E-5036 4.69096862886673459741310601019049E-5036
8364 2.345484314433367298706553005095245E-5036 1.172742157216683649353276502547623E-5036
8365 5.863710786083418246766382512738115E-5037 2.931855393041709123383191256369058E-5037
8366 1.465927696520854561691595628184529E-5037 7.329638482604272808457978140922645E-5038
8367 3.664819241302136404228989070461323E-5038 1.832409620651068202114494535230661E-5038
8368 9.162048103255341010572472676153305E-5039 4.581024051627670505286236338076653E-5039
8369 2.290512025813835252643118169038327E-5039 1.145256012906917626321559084519163E-5039
8370 5.726280064534588131607795422595815E-5040 2.863140032267294065803897711297908E-5040
8371 1.431570016133647032901948855648954E-5040 7.15785008066823516450974427824477E-5041
8372 3.578925040334117582254872139122385E-5041 1.789462520167058791127436069561193E-5041
8373 8.947312600835293955637180347805965E-5042 4.473656300417646977818590173902983E-5042
8374 2.236828150208823488909295086951492E-5042 1.118414075104411744454647543475746E-5042
8375 5.59207037552205872227323771737873E-5043 2.796035187761029361136618858689365E-5043
8376 1.398017593880514680568309429344683E-5043 6.990087969402573402841547146723413E-5044
8377 3.495043984701286701420773573361707E-5044 1.747521992350643350710386786680853E-5044
8378 8.737609961753216753551933933404265E-5045 4.368804980876608376775966966702133E-5045
8379 2.184402490438304188387983483351067E-5045 1.092201245219152094193991741675533E-5045
8380 5.461006226095760470969958708377665E-5046 2.730503113047880235484979354188833E-5046
8381 1.365251556523940117742489677094417E-5046 6.826257782619700588712448385472083E-5047
8382 3.413128891309850294356224192736042E-5047 1.706564445654925147178112096368021E-5047
8383 8.532822228274625735890560481840105E-5048 4.266411114137312867945280240920053E-5048
8384 2.133205557068656433972640120460027E-5048 1.066602778534328216986320060230013E-5048
8385 5.333013892671641084931600301150065E-5049 2.666506946335820542465800150575033E-5049
8386 1.333253473167910271232900075287517E-5049 6.666267365839551356164500376437583E-5050
8387 3.333133682919775678082250188218792E-5050 1.666566841459887839041125094109396E-5050
8388 8.33283420729943919520562547054698E-5051 4.16641710364971959760281273527349E-5051
8389 2.083208551824859798801406367636745E-5051 1.041604275912429899400703183818373E-5051
8390 5.208021379562149497003515919091865E-5052 2.604010689781074748501757959545933E-5052
8391 1.302005344890537374250878979772967E-5052 6.510026724452686871254394898864833E-5053
8392 3.255013362226343435627197449432417E-5053 1.627506681113171717813598724716208E-5053
8393 8.13753340556585858906799362358104E-5054 4.06876670278292929453399681179052E-5054
8394 2.03438335139146464726699840589526E-5054 1.01719167569573232363349920294763E-5054
8395 5.08595837847866161816749601473815E-5055 2.542979189239330809083748007369075E-5055
8396 1.271489594619665404541874003684538E-5055 6.357447973098327022709370018422688E-5056
8397 3.178723986549163511354685009211344E-5056 1.589361993274581755677342504605672E-5056
8398 7.94680996637290877838671252302836E-5057 3.97340498318645438919335626151418E-5057
8399 1.98670249159322719459667813075709E-5057 9.93351245796613597298339065378545E-5058
8400 4.966756228983067986491695326892725E-5058 2.483378114491533993245847663446363E-5058
8401 1.241689057245766996622923831723182E-5058 6.208445286228834983114619158615908E-5059
8402 3.104222643114417491557309579307954E-5059 1.552111321557208745778654789653977E-5059
8403 7.760556607786043728893273948269885E-5060 3.880278303893021864446636974134943E-5060
8404 1.940139151946510932223318487067472E-5060 9.700695759732554661116592435337358E-5061
8405 4.850347879866277330558296217668679E-5061 2.425173939933138665279148108834340E-5061
8406 1.212586969966569332639574054417170E-5061 6.06293484983284666319787027208585E-5062
8407 3.031467424916423331598935136042925E-5062 1.515733712458211665799467568021463E-5062
8408 7.578668562291058328997337840107315E-5063 3.789334281145529164498668920053658E-5063
8409 1.894667140572764582249334460026829E-5063 9.473335702863822911246672300134145E-5064
8410 4.736667851431911455623336150067073E-5064 2.368333925715955727811668075033536E-5064
8411 1.184166962857977863905834037516768E-5064 5.92083481428988931952917018758384E-5065
8412 2.96041740714494465976458509379192E-5065 1.48020870357247232988229254689596E-5065
8413 7.4010435178623616494114627344798E-5066 3.7005217589311808247057313672399E-5066
8414 1.85026087946559041235286568361995E-5066 9.25130439732795206176432841809975E-5067
8415 4.625652198663976030882164209049875E-5067 2.312826099331988015441082104524938E-5067
8416 1.156413049665994007720541052262469E-5067 5.782065248329970038602705261312345E-5068
8417 2.891032624164985019301352630656173E-5068 1.445516312082492509650676315328086E-5068
8418 7.22758156041246254825338157664043E-5069 3.613790780206231274126690788320215E-5069
8419 1.806895390103115637063345394160108E-5069 9.034476950515578185316726970800538E-5070
8420 4.517238475257789092658363485400269E-5070 2.258619237628894546329181742700135E-5070
8421 1.129309618814447273164590871350068E-5070 5.646548094072236365822954356750338E-5071
8422 2.823274047036118182911477178375169E-5071 1.411637023518059091455738589187585E-5071
8423 7.058185117590295457278692945937925E-5072 3.529092558795147728639346472968963E-5072
8424 1.764546279397573864319673236484482E-5072 8.822731396987869321598366182422408E-5073
8425 4.411365698493934660799183091211204E-5073 2.205682849246967330399591545605602E-5073
8426 1.102841424623483665199795772802801E-5073 5.514207123117418325998978864014005E-5074
8427 2.757103561558709162999489432007003E-5074 1.378551780779354581499744716003501E-5074
8428 6.892758903896772907498723580017505E-5075 3.446379451948386453749361790008753E-5075
8429 1.723189725974193226874680895004377E-5075 8.615948629870966134373404475021883E-5076
8430 4.307974314935483067186702237510942E-5076 2.153987157467741533593351118755471E-5076
8431 1.076993578733870766796675559377736E-5076 5.384967893669353833983377796888678E-5077
8432 2.692483946834676916991688898444339E-5077 1.346241973417338458495844449222170E-5077
8433 6.73120986708669229247922224611085E-5078 3.365604933543346146239611123055425E-5078
8434 1.682802466771673073119805561527713E-5078 8.414012333858365365599027807638563E-5079
8435 4.207006166929182682799513903819282E-5079 2.103503083464591341399756951909641E-5079
8436 1.051751541732295670699878475954821E-5079 5.258757708661478353499392379774103E-5080
8437 2.629378854330739176749696189887052E-5080 1.314689427165369588374848094943526E-5080
8438 6.57344713582684794187424047471763E-5081 3.286723567913423970937120237358815E-5081
8439 1.643361783956711985468560118679408E-5081 8.216808919783559927342800593397038E-5082
8440 4.108404459891779963671400296698519E-5082 2.054202229945889981835700148349260E-5082
8441 1.027101114972944990917850074174630E-5082 5.13550557486472495458925037087315E-5083
8442 2.567752787432362477294625185436575E-5083 1.283876393716181238647312592718288E-5083
8443 6.41938196858090619323656296359144E-5084 3.20969098429045309661828148179572E-5084
8444 1.60484549214522654830914074089786E-5084 8.0242274607261327415457037044893E-5085
8445 4.01211373036306637077285185224465E-5085 2.006056865181533185386425926122325E-5085
8446 1.003028432590766592693212963061163E-5085 5.015142162953832963466064815305813E-5086
8447 2.507571081476916481733032407652907E-5086 1.253785540738458240866516203826453E-5086
8448 6.268927703692291204332581019132265E-5087 3.134463851846145602166290509566133E-5087
8449 1.567231925923072801083145254783067E-5087 7.836159629615364005415726273915333E-5088
8450 3.918079814807682002707863136957667E-5088 1.959039907403841001353931568478833E-5088
8451 9.795199537019205006769657842394165E-5089 4.897599768509602503384828921197083E-5089
8452 2.448799884254801251692414460598542E-5089 1.224399942127400625846207230299271E-5089
8453 6.121999710637003129231036151496355E-5090 3.060999855318501564615518075748178E-5090
8454 1.530499927659250782307759037874089E-5090 7.652499638296253911538795189370445E-5091
8455 3.826249819148126955769397594685223E-5091 1.913124909574063477884698797342611E-5091
8456 9.565624547870317389423493986713055E-5092 4.782812273935158694711746993356528E-5092
8457 2.391406136967579347355873496678264E-5092 1.195703068483789673677936748339132E-5092
8458 5.97851534241894836838968374169566E-5093 2.98925767120947418419484187084783E-5093
8459 1.494628835604737092097420935423915E-5093 7.473144178023685460487104677119575E-5094
8460 3.736572089011842730243552338559788E-5094 1.868286044505921365121776169279894E-5094
8461 9.34143022252960682560888084639947E-5095 4.670715111264803412804440423199735E-5095
8462 2.335357555632401706402220211599868E-5095 1.167678777816200853201110105799934E-5095
8463 5.83839388908100426600555052899967E-5096 2.919196944540502133002775264499835E-5096
8464 1.459598472270251066501387632249918E-5096 7.297992361351255332506938161249588E-5097
8465 3.648996180675627666253469080624794E-5097 1.824498090337813833126734540312397E-5097
8466 9.122490451689069165633672701561985E-5098 4.561245225844534582816836350780993E-5098
8467 2.280622612922267291408418175390497E-5098 1.140311306461133645704209087695248E-5098
8468 5.70155653230566822852104543847624E-5099 2.85077826615283411426052271923812E-5099
8469 1.42538913307641705713026135961906E-5099 7.1269456653820852856513067980953E-5100
8470 3.56347283269104264282565339904765E-5100 1.781736416345521321412826699523825E-5100
8471 8.908682081727606607064133497619125E-5101 4.454341040863803303532066748809563E-5101
8472 2.227170520431901651766033374404782E-5101 1.113585260215950825883016687202391E-5101
8473 5.567926301079754129415083436011955E-5102 2.783963150539877064707541718005978E-5102
8474 1.391981575269938532353770859002989E-5102 6.959907876349692661768854295014945E-5103
8475 3.479953938174846330884427147507473E-5103 1.739976969087423165442213573753736E-5103
8476 8.69988484543711582721106786876868E-5104 4.34994242271855791360553393438434E-5104
8477 2.17497121135927895680276696719217E-5104 1.087485605679639478401383483596085E-5104
8478 5.437428028398197392006917417980425E-5105 2.718714014199098696003458708990213E-5105
8479 1.359357007099549348001729354495107E-5105 6.796785035497746740008646772475533E-5106
8480 3.398392517748873370004323386237767E-5106 1.699196258874436685002161693118883E-5106
8481 8.495981294372183425010808465594415E-5107 4.247990647186091712505404232797208E-5107
8482 2.123995323593045856252702116398604E-5107 1.061997661796522928126351058199302E-5107
8483 5.30998830898261464063175529099651E-5108 2.654994154491307320315877645498255E-5108
8484 1.327497077245653660157938822749128E-5108 6.637485386228268300789694113745638E-5109
8485 3.318742693114134150394847056872819E-5109 1.659371346557067075197423528436410E-5109
8486 8.29685673278533537598711764218205E-5110 4.148428366392667687993558821091025E-5110
8487 2.074214183196333843996779410545513E-5110 1.037107091598166921998389705272756E-5110
8488 5.18553545799083460999194852636378E-5111 2.59276772899541730499597426318189E-5111
8489 1.296383864497708652497987131590945E-5111 6.481919322488543262489935657954725E-5112
8490 3.240959661244271631244967828977363E-5112 1.620479830622135815622483914488681E-5112
8491 8.102399153110679078112419572443405E-5113 4.051199576555339539056209786221703E-5113
8492 2.025599788277669769528104893110852E-5113 1.012799894138834884764052446555426E-5113
8493 5.06399947069417442382026223277713E-5114 2.531999735347087211910131116388565E-5114
8494 1.265999867673543605955065558194283E-5114 6.329999338367718029775327790971413E-5115
8495 3.164999669183859014887663895485707E-5115 1.582499834591929507443831947742853E-5115
8496 7.912499172959647537219159738714265E-5116 3.956249586479823768609579869357133E-5116
8497 1.978124793239911884304789934678567E-5116 9.890623966199559421523949673392833E-5117
8498 4.945311983099779710761974836696417E-5117 2.472655991549889855380987418348208E-5117
8499 1.236327995774944927690493709174104E-5117 6.18163997887472463845246854587052E-5118
8500 3.09081998943736231922623427293526E-5118 1.54540999471868115961311713646763E-5118
8501 7.72704997359340579806558568233815E-5119 3.863524986796702899032792841169075E-5119
8502 1.931762493398351449516396420584538E-5119 9.658812466991757247581982102922688E-5120
8503 4.829406233495878623790991051461344E-5120 2.414703116747939311895495525730672E-5120
8504 1.207351558373969655947747762865336E-5120 6.03675779186984827973873881432668E-5121
8505 3.01837889593492413986936940716334E-5121 1.50918944796746206993468470358167E-5121
8506 7.54594723983731034967342351790835E-5122 3.772973619918655174836711758954175E-5122
8507 1.886486809959327587418355879477088E-5122 9.432434049796637937091779397385438E-5123
8508 4.716217024898318968545889698692719E-5123 2.358108512449159484272944849346360E-5123
8509 1.179054256224579742136472424673180E-5123 5.89527128112289871068236212336590E-5124
8510 2.94763564056144935534118106168295E-5124 1.473817820280724677670590530841475E-5124
8511 7.369089101403623388352952654207375E-5125 3.684544550701811694176476327103688E-5125
8512 1.842272275350905847088238163551844E-5125 9.21136137675452923544119081775922E-5126
8513 4.60568068837726461772059540887961E-5126 2.302840344188632308860297704439805E-5126
8514 1.151420172094316154430148852219903E-5126 5.757100860471580772150744261099513E-5127
8515 2.878550430235790386075372130549757E-5127 1.439275215117895193037686065274878E-5127
8516 7.19637607558947596518843032637439E-5128 3.598188037794737982594215163187195E-5128
8517 1.799094018897368991297107581593598E-5128 8.995470094486844956485537907967988E-5129
8518 4.497735047243422478242768953983994E-5129 2.248867523621711239121384476991997E-5129
8519 1.124433761810855619560692238495999E-5129 5.622168809054278097803461192479993E-5130
8520 2.811084404527139048901730596239997E-5130 1.405542202263569524450865298119998E-5130
8521 7.02771101131784762225432649059999E-5131 3.513855505658923811127163245299995E-5131
8522 1.756927752829461905563581622649998E-5131 8.784638764147309527817908113249988E-5132
8523 4.392319382073654763908954056624994E-5132 2.196159691036827381954477028312497E-5132
8524 1.098079845518413690977238514156249E-5132 5.490399227592068454886192570781243E-5133
8525 2.745199613796034227443096285390622E-5133 1.372599806898017113721548142695311E-5133
8526 6.862999034490085568607740713476555E-5134 3.431499517245042784303870356738278E-5134
8527 1.715749758622521392151935178369139E-5134 8.578748793112606960759675891845695E-5135
8528 4.289374396556303480379837945922848E-5135 2.144687198278151740189918972961424E-5135
8529 1.072343599139075870094959486480712E-5135 5.36171799569537935047479743240356E-5136
8530 2.68085899784768967523739871620178E-5136 1.34042949892384483761869935810089E-5136
8531 6.70214749461922418809349679050445E-5137 3.351073747309612094046748395252225E-5137
8532 1.675536873654806047023374197626113E-5137 8.377684368274030235116870988130563E-5138
8533 4.188842184137015117558435494065282E-5138 2.094421092068507558779217747032641E-5138
8534 1.047210546034253779389608873516321E-5138 5.236052730171268896948044367581603E-5139
8535 2.618026365085634448474022183790802E-5139 1.309013182542817224237011091895401E-5139
8536 6.545065912714086121185055459477005E-5140 3.272532956357043060592527729738503E-5140
8537 1.636266478178521530296263864869252E-5140 8.181332390892607651481319324346258E-5141
8538 4.090666195446303825740659662173129E-5141 2.045333097723151912870329831086565E-5141
8539 1.022666548861575956435164915543283E-5141 5.113332744307879782175824577716413E-5142
8540 2.556666372153939891087912288858207E-5142 1.278333186076969945543956144429103E-5142
8541 6.391665930384849727719780722145515E-5143 3.195832965192424863859890361072758E-5143
8542 1.597916482596212431929945180536379E-5143 7.989582412981062159649725902681895E-5144
8543 3.994791206490531079824862951340948E-5144 1.997395603245265539912431475670474E-5144
8544 9.98697801622632769956215737835237E-5145 4.993489008113163849781078689176185E-5145
8545 2.496744504056581924890539344588093E-5145 1.248372252028290962445269672294046E-5145
8546 6.24186126014145481222634836147023E-5146 3.120930630070727406113174180735115E-5146
8547 1.560465315035363703056587090367558E-5146 7.802326575176818515282935451837788E-5147
8548 3.901163287588409257641467725918894E-5147 1.950581643794204628820733862959447E-5147
8549 9.752908218971023144103669314797235E-5148 4.876454109485511572051834657398618E-5148
8550 2.438227054742755786025917328699309E-5148 1.219113527371377893012958664349655E-5148
8551 6.095567636856889465064793321748275E-5149 3.047783818428444732532396660874138E-5149
8552 1.523891909214222366266198330437069E-5149 7.619459546071111831330991652185345E-5150
8553 3.809729773035555915665495826092673E-5150 1.904864886517777957832747913046336E-5150
8554 9.52432443258888978916373956523168E-5151 4.76216221629444489458186978261584E-5151
8555 2.38108110814722244729093489130792E-5151 1.19054055407361122364546744565396E-5151
8556 5.9527027703680561182273372282698E-5152 2.9763513851840280591136686141349E-5152
8557 1.48817569259201402955683430706745E-5152 7.44087846296007014778417153533725E-5153
8558 3.720439231480035073892085767668625E-5153 1.860219615740017536946042883834313E-5153
8559 9.301098078700087684730214419171565E-5154 4.650549039350043842365107209585783E-5154
8560 2.325274519675021921182553604792892E-5154 1.162637259837510960591276802396446E-5154
8561 5.81318629918755480295638401198223E-5155 2.906593149593777401478192005991115E-5155
8562 1.453296574796888700739096002995558E-5155 7.266482873984443503695480014977788E-5156
8563 3.633241436992221751847740007488894E-5156 1.816620718496110875923870003744447E-5156
8564 9.083103592480554379619350018722235E-5157 4.541551796240277189809675009361118E-5157
8565 2.270775898120138594904837504680559E-5157 1.135387949060069297452418752340280E-5157
8566 5.67693974530034648726209376170140E-5158 2.83846987265017324363104688085070E-5158
8567 1.41923493632508662181552344042535E-5158 7.09617468162543310907761720212675E-5159
8568 3.548087340812716554538808601063375E-5159 1.774043670406358277269404300531688E-5159
8569 8.87021835203179138634702150265844E-5160 4.43510917601589569317351075132922E-5160
8570 2.21755458800794784658675537566461E-5160 1.108777294003973923293377687832305E-5160
8571 5.543886470019869616466888439161525E-5161 2.771943235009934808233444219580763E-5161
8572 1.385971617504967404116722109790382E-5161 6.929858087524837020583610548951908E-5162
8573 3.464929043762418510291805274475954E-5162 1.732464521881209255145902637237977E-5162
8574 8.662322609406046275729513186189885E-5163 4.331161304703023137864756593094943E-5163
8575 2.165580652351511568932378296547472E-5163 1.082790326175755784466189148273736E-5163
8576 5.41395163087877892233094574136868E-5164 2.70697581543938946116547287068434E-5164
8577 1.35348790771969473058273643534217E-5164 6.76743953859847365291368217671085E-5165
8578 3.383719769299236826456841088355425E-5165 1.691859884649618413228420544177713E-5165
8579 8.459299423248092066142102720888565E-5166 4.229649711624046033071051360444283E-5166
8580 2.114824855812023016535525680222142E-5166 1.057412427906011508267762840111071E-5166
8581 5.287062139530057541338814200555355E-5167 2.643531069765028770669407100277678E-5167
8582 1.321765534882514385334703550138839E-5167 6.608827674412571926673517750694195E-5168
8583 3.304413837206285963336758875347098E-5168 1.652206918603142981668379437673549E-5168
8584 8.261034593015714908341897188367745E-5169 4.130517296507857454170948594183873E-5169
8585 2.065258648253928727085474297091937E-5169 1.032629324126964363542737148545968E-5169
8586 5.16314662063482181771368574272984E-5170 2.58157331031741090885684287136492E-5170
8587 1.29078665515870545442842143568246E-5170 6.4539332757935272721421071784123E-5171
8588 3.22696663789676363607105358920615E-5171 1.613483318948381818035526794603075E-5171
8589 8.067416594741909090177633973015375E-5172 4.033708297370954545088816986507688E-5172
8590 2.016854148685477272544408493253844E-5172 1.008427074342738636272204246626922E-5172
8591 5.04213537171369318136102123313461E-5173 2.521067685856846590680510616567305E-5173
8592 1.260533842928423295340255308283653E-5173 6.302669214642116476701276541418263E-5174
8593 3.151334607321058238350638270709132E-5174 1.575667303660529119175319135354566E-5174
8594 7.87833651830264559587659567677283E-5175 3.939168259151322797938297838386415E-5175
8595 1.969584129575661398969148919193208E-5175 9.847920647878306994845744595966038E-5176
8596 4.923960323939153497422872297983019E-5176 2.461980161969576748711436148991510E-5176
8597 1.230990080984788374355718074495755E-5176 6.154950404923941871778590372478775E-5177
8598 3.077475202461970935889295186239388E-5177 1.538737601230985467944647593119694E-5177
8599 7.69368800615492733972323796559847E-5178 3.846844003077463669861618982799235E-5178
8600 1.923422001538731834930809491399618E-5178 9.617110007693659174654047456998088E-5179
8601 4.808555003846829587327023728499044E-5179 2.404277501923414793663511864249522E-5179
8602 1.202138750961707396831755932124761E-5179 6.010693754808536984158779660623805E-5180
8603 3.005346877404268492079389830311903E-5180 1.502673438702134246039694915155951E-5180
8604 7.513367193510671230198474575779755E-5181 3.756683596755335615099237287889878E-5181
8605 1.878341798377667807549618643944939E-5181 9.391708991888339037748093219724695E-5182
8606 4.695854495944169518874046609862348E-5182 2.347927247972084759437023304931174E-5182
| |
= 1
# self.ship_status = ShipStatus.backhome
# self.back_home()
# if self.low_dump_energy_warnning:
# # 记录是因为按了低电量判断为返航
# self.back_home()
#
# def back_home(self):
# """
# 紧急情况下返航
# :return:
# """
# # 有返航点下情况下返回返航点,没有则停止
# if self.home_lng_lat is None:
# if not config.home_debug:
# self.pi_main_obj.stop()
# else:
# print('back home')
# self.points_arrive_control(self.home_lng_lat, self.home_lng_lat, True, True)
# # self.pi_main_obj.stop()
#
# def smooth_path(self):
# """
# 平滑路径
# :return:平滑路径线路
# """
# smooth_path_lng_lat = []
# distance_matrix = []
# for index, target_lng_lat in enumerate(self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps):
# if index == 0:
# theta = lng_lat_calculate.angleFromCoordinate(self.lng_lat[0],
# self.lng_lat[1],
# target_lng_lat[0],
# target_lng_lat[1])
# distance = lng_lat_calculate.distanceFromCoordinate(self.lng_lat[0],
# self.lng_lat[1],
# target_lng_lat[0],
# target_lng_lat[1])
# if distance < config.smooth_path_ceil_size:
# smooth_path_lng_lat.append(target_lng_lat)
# else:
# for i in range(1, int((distance / config.smooth_path_ceil_size) + 1)):
# cal_lng_lat = lng_lat_calculate.one_point_diatance_to_end(self.lng_lat[0],
# self.lng_lat[1],
# theta,
# config.smooth_path_ceil_size * i)
# smooth_path_lng_lat.append(cal_lng_lat)
# smooth_path_lng_lat.append(target_lng_lat)
# else:
# theta = lng_lat_calculate.angleFromCoordinate(
# self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps[index - 1][0],
# self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps[index - 1][1],
# target_lng_lat[0],
# target_lng_lat[1])
# distance = lng_lat_calculate.distanceFromCoordinate(
# self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps[index - 1][0],
# self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps[index - 1][1],
# target_lng_lat[0],
# target_lng_lat[1])
# if distance < config.smooth_path_ceil_size:
# smooth_path_lng_lat.append(target_lng_lat)
# else:
# for i in range(1, int(distance / config.smooth_path_ceil_size + 1)):
# cal_lng_lat = lng_lat_calculate.one_point_diatance_to_end(
# self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps[index - 1][0],
# self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps[index - 1][1],
# theta,
# config.smooth_path_ceil_size * i)
# smooth_path_lng_lat.append(cal_lng_lat)
# smooth_path_lng_lat.append(target_lng_lat)
# for smooth_lng_lat_i in smooth_path_lng_lat:
# distance_list = []
# for sampling_points_gps_i in self.server_data_obj.mqtt_send_get_obj.sampling_points_gps:
# s_d = lng_lat_calculate.distanceFromCoordinate(sampling_points_gps_i[0],
# sampling_points_gps_i[1],
# smooth_lng_lat_i[0],
# smooth_lng_lat_i[1])
# distance_list.append(s_d)
# distance_matrix.append(distance_list)
# a_d_m = np.asarray(distance_matrix)
# for k in range(len(distance_matrix[0])):
# temp_a = a_d_m[:, k]
# temp_list = temp_a.tolist()
# index_l = temp_list.index(min(temp_list))
# self.smooth_path_lng_lat_index.append(index_l)
# return smooth_path_lng_lat
#
# def calc_target_lng_lat(self, index_):
# """
# 根据当前点和路径计算下一个经纬度点
# :return:
# """
# # 离散按指定间距求取轨迹点数量
# if not self.smooth_path_lng_lat:
# self.smooth_path_lng_lat = self.smooth_path()
# # 搜索最临近的路点
# distance_list = []
# start_index = self.smooth_path_lng_lat_index[index_]
# if index_ == 0:
# self.search_list = copy.deepcopy(self.smooth_path_lng_lat[:start_index])
# else:
# self.search_list = copy.deepcopy(
# self.smooth_path_lng_lat[self.smooth_path_lng_lat_index[index_ - 1]:start_index])
# for target_lng_lat in self.search_list:
# distance = lng_lat_calculate.distanceFromCoordinate(self.lng_lat[0],
# self.lng_lat[1],
# target_lng_lat[0],
# target_lng_lat[1])
# distance_list.append(distance)
# # 如果没有可以去路径
# if len(distance_list) == 0:
# return self.server_data_obj.mqtt_send_get_obj.sampling_points_gps[index_]
# index = distance_list.index(min(distance_list))
# # if index + 1 == len(self.search_list):
# # return self.server_data_obj.mqtt_send_get_obj.sampling_points_gps[index_]
# lng_lat = self.search_list[index]
# index_point_distance = lng_lat_calculate.distanceFromCoordinate(self.lng_lat[0],
# self.lng_lat[1],
# lng_lat[0],
# lng_lat[1])
# while config.smooth_path_ceil_size > index_point_distance and (index + 1) < len(
# self.search_list):
# lng_lat = self.search_list[index]
# index_point_distance = lng_lat_calculate.distanceFromCoordinate(self.lng_lat[0],
# self.lng_lat[1],
# lng_lat[0],
# lng_lat[1])
# index += 1
# return self.search_list[index]
#
# # 构建障碍物地图
# def build_obstacle_map(self):
# """
# 根据超声波距离构建障碍物地图
# :return: 障碍物位置举证
# """
# method = 1
# if method == 0:
# map_size = int(20 / 0.5)
# obstacle_map = np.zeros((map_size, map_size))
# # 判断前方距离是否有障碍物,根据障碍物改变目标点
# for k, v in self.pi_main_obj.distance_dict.items():
# v = min(v, 20)
# row = int(map_size - math.ceil(math.cos(math.radians(k)) * v / 0.5))
# col = int((map_size / 2) - 1 - math.ceil(math.sin(math.radians(k)) * v / 0.5))
# for row_index in range(row):
# obstacle_map[row_index, col] = 1
# else:
# obstacle_map = [0] * len(self.pi_main_obj.distance_dict.items())
# for k, v in self.pi_main_obj.distance_dict.items():
# if v < 5:
# obstacle_map[10 + int(k / 0.9)] = 1
# return obstacle_map
#
# # 计算障碍物下目标点
# def get_avoid_obstacle_point(self, path_planning_point_gps=None):
# """
# 根据障碍物地图获取下一个运动点
# :return: 下一个目标点,是否需要紧急停止
# """
# next_point_lng_lat = copy.deepcopy(path_planning_point_gps)
# if config.b_millimeter_wave:
# print('config.obstacle_avoid_type', config.obstacle_avoid_type)
# # 不避障
# if config.obstacle_avoid_type == 0:
# return path_planning_point_gps, False
# # 避障停止
# elif config.obstacle_avoid_type == 1:
# if 1 in self.pi_main_obj.obstacle_list[
# int(self.pi_main_obj.cell_size / 2) - 3:int(self.pi_main_obj.cell_size / 2) + 3]:
# return next_point_lng_lat, True
# else:
# return path_planning_point_gps, False
# # 避障绕行,根据障碍物计算下一个目标点
# elif config.obstacle_avoid_type == 2:
# angle_point = lng_lat_calculate.angleFromCoordinate(self.lng_lat[0],
# self.lng_lat[1],
# path_planning_point_gps[0],
# path_planning_point_gps[1])
# if angle_point > 180:
# angle_point_temp = angle_point - 360
# else:
# angle_point_temp = angle_point
# point_angle_index = angle_point_temp // self.pi_main_obj.view_cell + 9
# # 目标区域超出避障范围,当前正在转弯不必进行避障
# if point_angle_index < 0 or point_angle_index >= len(self.pi_main_obj.obstacle_list):
# return next_point_lng_lat, False
# index_i = 0
# value_list = []
# while index_i < self.pi_main_obj.cell_size:
# kr = index_i
# index_j = index_i
# while index_j < self.pi_main_obj.cell_size and self.pi_main_obj.obstacle_list[index_j] == 0:
# kl = index_j
# if kl - kr >= config.ceil_max: # 判断是否是宽波谷
# print(self.pi_main_obj.obstacle_list, round(kl - config.ceil_max // 2))
# v = round((kl + kr) / 2)
# value_list.append(v)
# break
# index_j = index_j + 1
# index_i += 1
# print('self.pi_main_obj.obstacle_list', self.pi_main_obj.obstacle_list, )
# # 没有可以通过通道
# if len(value_list) == 0:
# return next_point_lng_lat, True
# else:
# how = []
# for value_i in value_list:
# howtemp = abs(value_i - point_angle_index)
# how.append(howtemp)
# ft = how.index(min(how))
# kb = value_list[int(ft)]
# angle = kb * config.view_cell - config.field_of_view / 2
# if angle < 0:
# angle += 360
# next_point_lng_lat = lng_lat_calculate.one_point_diatance_to_end(self.lng_lat[0],
# self.lng_lat[1],
# angle,
# config.min_steer_distance)
# print('angle', angle)
# return next_point_lng_lat, False
# else:
# return path_planning_point_gps, False
#
# # 控制到达目标点
# def points_arrive_control(self, target_lng_lat_gps, sample_lng_lat_gps, b_force_arrive=False, b_back_home=False):
# """
# :param target_lng_lat_gps: 目标点真实经纬度
# :param sample_lng_lat_gps: 下一个采样点真实经纬度
# :param b_force_arrive: 是否约束一定要到达
# :param b_back_home 是否是正在返航
# :return:
# """
# distance = lng_lat_calculate.distanceFromCoordinate(
# self.lng_lat[0],
# self.lng_lat[1],
# sample_lng_lat_gps[0],
# sample_lng_lat_gps[1])
# self.distance_p = distance
# if distance < config.arrive_distance:
# return True
# while distance >= config.arrive_distance:
# if not b_back_home and not self.b_at_home:
# self.check_backhome()
# distance_sample = lng_lat_calculate.distanceFromCoordinate(
# self.lng_lat[0],
# self.lng_lat[1],
# sample_lng_lat_gps[0],
# sample_lng_lat_gps[1])
# self.distance_p = distance_sample
# # 避障判断下一个点
# b_stop = False
# if not config.home_debug:
# target_lng_lat_gps, b_stop = self.get_avoid_obstacle_point(target_lng_lat_gps)
# all_distance = lng_lat_calculate.distanceFromCoordinate(
# self.lng_lat[0], self.lng_lat[1], target_lng_lat_gps[0],
# target_lng_lat_gps[1])
# # 当前点到目标点角度
# point_theta = lng_lat_calculate.angleFromCoordinate(self.lng_lat[0],
# self.lng_lat[1],
# target_lng_lat_gps[0],
# target_lng_lat_gps[1])
# theta_error = point_theta - self.current_theta
# if abs(theta_error) > 180:
# if theta_error > 0:
# theta_error = theta_error - 360
# else:
# theta_error = 360 + theta_error
# self.theta_error = theta_error
# left_pwm, right_pwm = self.path_track_obj.pid_pwm_2(distance=all_distance,
# theta_error=theta_error)
# self.last_left_pwm = left_pwm
# self.last_right_pwm = right_pwm
# # 在家调试模式下预测目标经纬度
# if config.home_debug:
# time.sleep(0.1)
# # 计算当前行驶里程
# if self.last_lng_lat:
# speed_distance = lng_lat_calculate.distanceFromCoordinate(self.last_lng_lat[0],
# self.last_lng_lat[1],
# self.lng_lat[0],
# self.lng_lat[1])
# self.run_distance += speed_distance
# left_delta_pwm = int(self.last_left_pwm + left_pwm) / 2 - config.stop_pwm
# right_delta_pwm = int(self.last_right_pwm + right_pwm) / 2 - config.stop_pwm
# steer_power = left_delta_pwm - right_delta_pwm
# forward_power = left_delta_pwm + right_delta_pwm
# delta_distance = forward_power * 0.002
# delta_theta = steer_power * 0.08
# self.last_lng_lat = copy.deepcopy(self.lng_lat)
# if self.current_theta is not None:
# self.current_theta = (self.current_theta - delta_theta / 2) % 360
# self.lng_lat = lng_lat_calculate.one_point_diatance_to_end(self.lng_lat[0],
# self.lng_lat[1],
# self.current_theta,
# delta_distance)
# else:
# # 判断是否需要避障处理
# print('b_stop', b_stop)
# # self.pi_main_obj.set_pwm(left_pwm, right_pwm)
# if b_stop:
# self.obstacle_info = '1'
# self.pi_main_obj.stop()
# # 记录是因为按了暂停按钮而终止
# self.b_stop_path_track = True
# return False
# else:
# self.obstacle_info = '0'
# self.pi_main_obj.set_pwm(left_pwm, right_pwm)
#
# # 清空规划点
# if int(self.server_data_obj.mqtt_send_get_obj.control_move_direction) == -1:
# # 记录是因为按了暂停按钮而终止
# self.b_stop_path_track = True
# return False
# if not config.home_debug and self.pi_main_obj.b_start_remote:
# # 记录是因为按了遥控而终止
# self.b_stop_path_track = True
# break
# # 如果目标点改变并且不是强制到达 b_force_arrive
# if not b_force_arrive:
# break
# else:
# if distance_sample < config.arrive_distance:
# if b_back_home:
# self.b_at_home = 1
# return True
#
# # 处理电机控制 必须使用线程
# def move_control(self):
# # 记录上次手动发送
# last_control = None
# # 记录发送的经纬度
# b_log_points = 1
# while True:
# time.sleep(config.pi2com_timeout)
# # 检查是否需要返航
# if not self.b_at_home:
# self.check_backhome()
# # 判断当前是手动控制还是自动控制
# d = int(self.server_data_obj.mqtt_send_get_obj.control_move_direction)
# if d in [-2, -1, 0, 90, 180, 270]:
# self.ship_status = ShipStatus.computer_control
# # 改变状态不再重复发送指令
# self.server_data_obj.mqtt_send_get_obj.control_move_direction = -2
# if d in [-1, 0, 90, 180, 270]:
# # 手动控制下设置在家状态为0
# self.b_at_home = 0
# b_log_points = 1
# # 使用路径规划
# if len(self.server_data_obj.mqtt_send_get_obj.path_planning_points) > 0:
# # 此时为自动模式清除d控制状态
# self.server_data_obj.mqtt_send_get_obj.control_move_direction = -2
# self.ship_status = ShipStatus.auto
# # 使用遥控器 调试模式下无法使用
# if not config.home_debug and \
# config.current_platform == config.CurrentPlatform.pi and \
# self.pi_main_obj.b_start_remote:
# remote_left_pwm, remote_right_pwm = self.pi_main_obj.check_remote_pwm()
# self.pi_main_obj.set_pwm(set_left_pwm=remote_left_pwm, set_right_pwm=remote_right_pwm)
# # 手动模式
# elif self.ship_status == ShipStatus.computer_control:
# if config.obstacle_avoid_type == 3:
# if 1 in self.pi_main_obj.obstacle_list[
# int(self.pi_main_obj.cell_size / 2) - 3:int(self.pi_main_obj.cell_size / 2) + 3]:
# print('self.pi_main_obj.obstacle_list', self.pi_main_obj.obstacle_list)
# d = -1
# # 使用飞控
# if config.b_use_pix:
# if d == 0:
# pwm_data = {'1': 1900, '3': 1900}
# elif d == 90:
# pwm_data = {'1': 1900, '3': 1100}
# elif d == 180:
# pwm_data = {'1': 1100, '3': 1100}
# elif d == 270:
# pwm_data = {'1': 1100, '3': 1900}
# elif d == -1:
# pwm_data = {'1': 1500, '3': 1500}
# else:
# pwm_data = None
# if last_control is None or last_control != pwm_data:
# last_control = pwm_data
# self.drone_obj.channel_control(pwm_data)
# self.com_data_send_logger.info({'com pwm data': pwm_data})
# # 使用树莓派
# elif config.b_use_pi:
# if not config.home_debug:
# if d == 0:
# self.control_info = '向前'
# self.pi_main_obj.forward()
# elif d == 90:
# self.control_info = '向左'
# self.pi_main_obj.left()
# elif d == 180:
# self.control_info = '向后'
# self.pi_main_obj.backword()
# elif d == 270:
# self.control_info = '向右'
# self.pi_main_obj.right()
# elif d == -1:
# self.control_info = '停止'
# self.pi_main_obj.stop()
# # 手动模式下判断是否抽水
# if not config.home_debug:
# if config.b_pin_stc or os.path.exists(config.stc_port):
# self.draw()
# # 自动模式计算角度
# elif self.ship_status == ShipStatus.auto:
# self.control_info = ''
# if self.lng_lat is None:
# self.logger.error('无当前GPS,不能自主巡航')
# time.sleep(0.5)
# self.clear_status()
# continue
# if self.plan_start_time is None:
# self.plan_start_time = time.time()
# # 设置自动路径搜索为False
# self.b_stop_path_track = False
# if b_log_points:
# self.logger.info({'点击地点': self.server_data_obj.mqtt_send_get_obj.path_planning_points})
# # 船起始运行距离
# start_distance = self.run_distance
# # 判断是否是寻点模式点了寻点但是还没点开始
# if self.server_data_obj.mqtt_send_get_obj.row_gap:
# if not self.server_data_obj.mqtt_send_get_obj.b_start:
# b_log_points = 0
# time.sleep(0.2)
# continue
# # 计算总里程
# for index, gaode_lng_lat in enumerate(self.server_data_obj.mqtt_send_get_obj.path_planning_points):
# if index == 0:
# distance_p = lng_lat_calculate.distanceFromCoordinate(
# self.gaode_lng_lat[0],
# self.gaode_lng_lat[1],
# gaode_lng_lat[0],
# gaode_lng_lat[1])
# self.totle_distance += distance_p
# else:
# distance_p = lng_lat_calculate.distanceFromCoordinate(
# self.server_data_obj.mqtt_send_get_obj.path_planning_points[index - 1][0],
# self.server_data_obj.mqtt_send_get_obj.path_planning_points[index - 1][1],
# gaode_lng_lat[0],
# gaode_lng_lat[1])
# self.totle_distance += distance_p
# self.logger.info({'全部距离': self.totle_distance})
# # 将目标点转换为真实经纬度
# self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps = []
# self.server_data_obj.mqtt_send_get_obj.sampling_points_gps = []
# # 如果勾选了返航且存在返航点
# if self.server_data_obj.mqtt_send_get_obj.back_home:
# if self.server_data_obj.mqtt_send_get_obj.set_home_gaode_lng_lat:
# self.server_data_obj.mqtt_send_get_obj.path_planning_points.append(
# self.server_data_obj.mqtt_send_get_obj.set_home_gaode_lng_lat)
# self.server_data_obj.mqtt_send_get_obj.sampling_points.append(
# self.server_data_obj.mqtt_send_get_obj.set_home_gaode_lng_lat)
# self.server_data_obj.mqtt_send_get_obj.sampling_points_status.append(0)
# if config.home_debug:
# self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps = copy.deepcopy(
# self.server_data_obj.mqtt_send_get_obj.path_planning_points)
# self.server_data_obj.mqtt_send_get_obj.sampling_points_gps = copy.deepcopy(
# self.server_data_obj.mqtt_send_get_obj.sampling_points)
# else:
# for path_planning_point in self.server_data_obj.mqtt_send_get_obj.path_planning_points:
# path_planning_point_gps = lng_lat_calculate.gps_gaode_to_gps(self.lng_lat,
# self.gaode_lng_lat,
# path_planning_point)
# self.server_data_obj.mqtt_send_get_obj.path_planning_points_gps.append(path_planning_point_gps)
# for sampling_point in self.server_data_obj.mqtt_send_get_obj.sampling_points:
# sampling_point_gps = lng_lat_calculate.gps_gaode_to_gps(self.lng_lat,
# self.gaode_lng_lat,
# sampling_point)
# self.server_data_obj.mqtt_send_get_obj.sampling_points_gps.append(sampling_point_gps)
# self.path_info = [0, len(self.server_data_obj.mqtt_send_get_obj.sampling_points)]
# print('self.server_data_obj.mqtt_send_get_obj.sampling_points_status',
# self.server_data_obj.mqtt_send_get_obj.sampling_points_status)
# while self.server_data_obj.mqtt_send_get_obj.sampling_points_status.count(0) > 0:
# for index, sampling_point_gps in enumerate(
# self.server_data_obj.mqtt_send_get_obj.sampling_points_gps):
# | |
# <gh_stars>1-10
# MINLP written by GAMS Convert at 04/21/18 13:54:18
#
# Equation counts
# Total E G L N X C B
# 1051 51 0 1000 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 1021 1001 20 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 4021 3021 1000 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()

# Nonnegative continuous variables x1..x372, each initialized at zero.
# In Pyomo, assigning a component to a model attribute registers it under
# that attribute name, so setattr here produces exactly the same model as
# writing out `m.xN = Var(within=Reals, bounds=(0, None), initialize=0)`
# once per variable.
for _idx in range(1, 373):
    setattr(
        m,
        'x%d' % _idx,
        Var(within=Reals, bounds=(0, None), initialize=0),
    )
m.x373 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x374 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x375 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x376 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x377 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x378 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x379 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x380 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x381 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x382 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x383 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x384 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x385 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x386 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x387 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x388 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x389 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x390 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x391 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x392 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x393 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x394 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x395 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x396 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x397 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x398 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x399 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x400 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x401 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x402 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x403 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x404 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x405 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x406 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x407 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x408 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x409 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x410 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x411 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x412 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x413 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x414 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x415 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x416 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x417 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x418 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x419 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x420 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x421 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x422 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x423 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x424 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x425 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x426 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x427 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x428 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x429 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x430 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x431 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x432 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x433 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x434 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x435 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x436 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x437 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x438 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x439 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x440 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x441 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x442 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x443 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x444 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x445 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x446 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x447 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x448 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x449 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x450 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x451 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x452 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x453 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x454 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x455 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x456 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x457 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x458 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x459 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x460 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x461 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x462 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x463 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x464 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x465 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x466 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x467 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x468 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x469 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x470 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x471 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x472 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x473 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x474 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x475 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x476 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x477 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x478 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x479 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x480 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x481 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x482 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x483 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x484 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x485 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x486 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x487 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x488 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x489 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x490 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x491 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x492 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x493 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x494 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x495 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x496 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x497 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x498 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x499 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x500 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x501 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x502 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x503 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x504 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x505 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x506 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x507 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x508 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x509 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x510 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x511 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x512 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x513 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x514 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x515 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x516 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x517 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x518 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x519 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x520 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x521 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x522 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x523 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x524 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x525 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x526 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x527 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x528 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x529 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x530 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x531 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x532 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x533 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x534 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x535 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x536 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x537 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x538 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x539 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x540 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x541 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x542 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x543 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x544 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x545 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x546 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x547 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x548 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x549 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x550 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x551 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x552 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x553 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x554 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x555 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x556 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x557 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x558 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x559 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x560 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x561 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x562 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x563 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x564 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x565 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x566 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x567 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x568 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x569 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x570 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x571 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x572 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x573 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x574 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x575 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x576 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x577 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x578 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x579 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x580 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x581 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x582 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x583 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x584 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x585 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x586 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x587 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x588 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x589 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x590 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x591 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x592 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x593 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x594 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x595 = Var(within=Reals,bounds=(0,None),initialize=0)
m.x596 | |
+ ... + b_(n-1)*xn + bn*xn^2 ))")
temporalRow = []
temporalRow.append(predictionAcurracy)
temporalRow.append(currentCoefficients)
temporalRow.append(self.x_samplesList)
allAccuracies.append(temporalRow)
# ------------------------------------------------------------------ #
# ------------------------------------------------------------------ #
# if we couldn't obtain a fitting model at all, try again, but this time
# without trying to find a perfect Gaussian form in the resulting model
# equation
if (len(bestModelingResults)==0):
# Re-evaluate every obtained model trained through the Multiple
# Polynomial Regression but this time determining the best fitting
# model by recalculating each of their accuracies but this time with
# the right math equation, which would be the gaussian function.
bestModelingResults = []
for currentModelingResults in range(0, len(allModeledAccuracies)):
currentCoefficients = allModeledAccuracies[currentModelingResults][1]
# We determine the accuracy of the obtained coefficients
predictedData = []
orderOfThePolynomial = 2
numberOfIndependentVariables = (len(currentCoefficients)-1)
for row in range(0, numberOfRows):
temporalRow = []
actualIc = currentCoefficients[0][0]
currentOrderOfThePolynomial = 1
currentVariable = 0
for currentIndependentVariable in range(0, numberOfIndependentVariables):
if (currentOrderOfThePolynomial == (orderOfThePolynomial+1)):
currentOrderOfThePolynomial = 1
currentVariable = currentVariable + 1
actualIc = actualIc + currentCoefficients[currentIndependentVariable+1][0]*self.x_samplesList[row][currentVariable]**(currentOrderOfThePolynomial)
currentOrderOfThePolynomial = currentOrderOfThePolynomial + 1
temporalRow.append(math.exp(-(actualIc)))
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = numberOfRows
for row in range(0, numberOfDataPoints):
n2 = modifiedSamplesList_y[row][0]
n1 = predictedData[row][0]
if ((n1<0.2) and (n2<0.051)):
newAcurracyValueToAdd = 1-n1
else:
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + newAcurracyValueToAdd
predictionAcurracy = predictionAcurracy/numberOfDataPoints*100
if (len(bestModelingResults) == 0):
# We save the first best fitting modeling result
bestModelingResults = []
bestModelingResults.append(currentCoefficients)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2 ))")
allAccuracies = []
temporalRow = []
temporalRow.append(bestModelingResults[1])
temporalRow.append(bestModelingResults[0])
temporalRow.append(self.x_samplesList)
allAccuracies.append(temporalRow)
else:
if (predictionAcurracy > bestModelingResults[1]):
bestModelingResults = []
bestModelingResults.append(currentCoefficients)
bestModelingResults.append(predictionAcurracy)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2 ))")
temporalRow = []
temporalRow.append(predictionAcurracy)
temporalRow.append(currentCoefficients)
temporalRow.append(self.x_samplesList)
allAccuracies.append(temporalRow)
if (len(bestModelingResults)==0):
# We save the first best fitting modeling result
bestModelingResults = []
temporalRow = []
currentCoefficients = []
for row in range(0, len(allModeledAccuracies[0][1])):
temporalRow.append(0)
currentCoefficients.append(temporalRow)
temporalRow = []
predictedData = []
for row in range(0, numberOfRows):
temporalRow.append(0)
predictedData.append(temporalRow)
bestModelingResults.append(currentCoefficients)
bestModelingResults.append(0)
bestModelingResults.append(predictedData)
bestModelingResults.append("Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2 ))")
allAccuracies = []
temporalRow = []
temporalRow.append(bestModelingResults[1])
temporalRow.append(bestModelingResults[0])
temporalRow.append(self.x_samplesList)
allAccuracies.append(temporalRow)
# We include all the reports of all the models studied to the reporting
# variable that contains the report of the best fitting model and we
# then return it
bestModelingResults.append(allAccuracies)
return bestModelingResults
"""
getLinearLogisticRegression(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired")
This method returns the best fitting Logistic Regression model to be able
to predict a classification problem that can have any number of
independent variables (x).
CODE EXAMPLE:
matrix_x = [
[0,2],
[1,3],
[2,4],
[3,5],
[4,6],
[5,7],
[6,8],
[7,9],
[8,10],
[9,11]
]
matrix_y = [
[0],
[0],
[1],
[0],
[1],
[1],
[1],
[1],
[1],
[1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getLinearLogisticRegression(evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[4.395207586412653], [5.985854141495452], [-4.395207586412653]]
accuracyFromTraining =
80.02122762886552
predictedData =
[[0.012185988957723588],
[0.05707820342364075],
[0.22900916243958236],
[0.5930846789223594],
[0.8773292738274195],
[0.9722944298625625],
[0.9942264149220237],
[0.9988179452639562],
[0.9997588776328182],
[0.9999508513195541]]
coefficientDistribution =
'Coefficients distribution is as follows: p = (exp(bo + b1*x1 + b2*x2 + ... + bn*xn))/(1 + exp(bo + b1*x1 + b2*x2 + ... + bn*xn))'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getLinearLogisticRegression(self, evtfbmip=True):
    """Train a linear Logistic Regression model for a binary (0/1)
    classification dataset with any number of independent variables (x).

    The dependent samples are squashed to 0.001/0.999, pushed through the
    logit transform and fitted with a Multiple Linear Regression; the
    obtained coefficients are then scored back through the sigmoid
    (probability) equation.

    evtfbmip -- True to allow eliminating variables while searching for a
                better fitting model (forwarded to the underlying Multiple
                Linear Regression).

    Returns a list laid out as: [coefficients, accuracy, predictedData,
    coefficientDistribution, allAccuracies].
    """
    from . import MortrackML_Library as mSL
    import math
    sampleCount = len(self.y_samplesList)
    matrix_x = self.x_samplesList
    # Validate that y only holds 0/1 labels, and squash them away from the
    # asymptotes so the logit transform below stays finite.
    modifiedSamplesList_y = []
    for sampleIndex in range(0, sampleCount):
        label = self.y_samplesList[sampleIndex][0]
        if ((label != 1) and (label != 0)):
            raise Exception('ERROR: One of the dependent (y) data points doesnt have a 1 or a 0 as value.')
        modifiedSamplesList_y.append([0.999 if (label == 1) else 0.001])
    # Logit transform: log(p / (1-p)) linearizes the sigmoid relationship.
    matrix_y = [[math.log(p[0] / (1 - p[0]))] for p in modifiedSamplesList_y]
    regression = mSL.Regression(matrix_x, matrix_y)
    modelingResults = regression.getMultipleLinearRegression(evtfbmip=evtfbmip)
    coefficients = modelingResults[0]
    # Score the obtained coefficients with the actual probability equation
    # of the Logistic Regression (the sigmoid of the linear term).
    independentVariableCount = len(matrix_x[0])
    predictedData = []
    for sampleIndex in range(0, len(matrix_y)):
        linearTerm = coefficients[0][0]
        for variableIndex in range(0, independentVariableCount):
            linearTerm = linearTerm + coefficients[variableIndex + 1][0] * matrix_x[sampleIndex][variableIndex]
        expTerm = math.exp(linearTerm)
        predictedData.append([expTerm / (1 + expTerm)])
    accuracySum = 0
    for sampleIndex in range(0, sampleCount):
        expected = self.y_samplesList[sampleIndex][0]
        predicted = predictedData[sampleIndex][0]
        if (expected == 0):
            # Avoid dividing by zero: class-0 samples are scored against
            # 0.001, except that confident low predictions get credited
            # directly with (1 - prediction).
            expected = 0.001
            if (predicted < 0.2):
                contribution = 1 - predicted
            else:
                contribution = 1 - (abs(expected - predicted) / abs(expected))
        else:
            contribution = 1 - (abs(expected - predicted) / abs(expected))
        if (contribution < 0):
            contribution = 0
        accuracySum = accuracySum + contribution
    predictionAcurracy = accuracySum / sampleCount * 100
    # Package the results in the same layout used by the other trainers.
    bestModelingResults = [
        coefficients,
        predictionAcurracy,
        predictedData,
        "Coefficients distribution is as follows: p = (exp(bo + b1*x1 + b2*x2 + ... + bn*xn))/(1 + exp(bo + b1*x1 + b2*x2 + ... + bn*xn))",
    ]
    allAccuracies = [[predictionAcurracy, coefficients, self.x_samplesList]]
    bestModelingResults.append(allAccuracies)
    return bestModelingResults
"""
getLinearRegression(isClassification="set to True if you are solving a classification problem. False if otherwise")
Returns the best fitting model to predict the behavior of a dataset through
a regular Linear Regression model. Note that this method can only solve
regression problems that have 1 independent variable (x).
CODE EXAMPLE:
matrix_x = [
[0],
[1],
[2],
[3],
[4],
[5],
[6],
[7],
[8],
[9]
]
matrix_y = [
[8.5],
[9.7],
[10.7],
[11.5],
[12.1],
[14],
[13.3],
[16.2],
[17.3],
[17.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getLinearRegression(isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[8.470909090909096], [1.0242424242424237]]
accuracyFromTraining =
97.05959379759686
predictedData =
[[8.470909090909096],
[9.49515151515152],
[10.519393939393943],
[11.543636363636367],
[12.56787878787879],
[13.592121212121214],
[14.616363636363639],
[15.640606060606062],
[16.664848484848484],
[17.689090909090908]]
coefficientDistribution =
'Coefficients distribution is as follows: y = b + m*x'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
def getLinearRegression(self, isClassification=True):
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
matrixMath = mLAL.MatrixMath()
x_samples = matrixMath.getTransposedMatrix(self.x_samplesList)
y_samples = matrixMath.getTransposedMatrix(self.y_samplesList)
x_length = len(x_samples[0])
y_length = len(y_samples[0])
if x_length != y_length:
raise Exception('Dependent Variable has a different vector size than Independent Variable')
x_mean = 0
x_squared_mean = 0
y_mean = 0
xy_mean = 0
for n in range (0, x_length):
x_mean += x_samples[0][n]
x_squared_mean += x_samples[0][n]*x_samples[0][n]
y_mean += y_samples[0][n]
xy_mean += x_samples[0][n]*y_samples[0][n]
x_mean = x_mean/x_length
x_squared_mean = x_squared_mean/x_length
y_mean = y_mean/y_length
xy_mean = xy_mean/x_length
m = ( (x_mean*y_mean - xy_mean) / (x_mean**2 - x_squared_mean) )
# m = ( (mean(xs)*mean(ys) - mean(xs*ys)) / (mean(xs)*mean(xs) - mean(xs*xs)) )
b = y_mean - m*x_mean
matrix_b = [[b], [m]]
# We determine the accuracy of the obtained coefficients
predictedData = []
bothMatrixRowLength = len(self.y_samplesList)
for row in range(0, bothMatrixRowLength):
temporalRow = []
actualIc = matrix_b[0][0] + matrix_b[1][0]*self.x_samplesList[row][0]
temporalRow.append(actualIc)
predictedData.append(temporalRow)
predictionAcurracy = 0
numberOfDataPoints = bothMatrixRowLength
for row in range(0, numberOfDataPoints):
n2 = self.y_samplesList[row][0]
n1 = predictedData[row][0]
if (isClassification == False):
if (((n1*n2) != 0)):
newAcurracyValueToAdd = (1-(abs(n2-n1)/abs(n2)))
if (newAcurracyValueToAdd < 0):
newAcurracyValueToAdd = 0
predictionAcurracy = predictionAcurracy + | |
# <gh_stars>10-100
import json
import time
import pytest
from amqp.exceptions import NotFound
from kombu import Connection
from kombu.messaging import Exchange, Queue
from kombu.pools import connections
from kombu.serialization import register, unregister
from mock import ANY, patch
from nameko.amqp.publish import UndeliverableMessage
from nameko.constants import AMQP_URI_CONFIG_KEY
from nameko.extensions import DependencyProvider
from nameko.testing.services import entrypoint_waiter
from nameko.testing.utils import get_extension
from nameko.utils.retry import retry
from nameko_amqp_retry import Backoff, BackoffPublisher
from nameko_amqp_retry.backoff import get_backoff_queue_name
from nameko_amqp_retry.messaging import consume
from nameko_amqp_retry.rpc import rpc
from test import NAMEKO3
class QuickBackoff(Backoff):
    # retry exactly once, after 100 ms
    schedule = (100,)
class SlowBackoff(Backoff):
    # retry exactly once, after 500 ms
    schedule = (500,)
class TestPublisher(object):
    """Backoff messages must be routed to per-delay backoff queues."""

    @pytest.fixture
    def container(self, container_factory, rabbit_config, queue):
        class Service(object):
            name = "service"

            @consume(queue)
            def backoff(self, delay):
                # build a backoff class whose schedule comes from the
                # message payload, so every message picks its own delay
                class DynamicBackoff(Backoff):
                    schedule = (delay,)
                raise DynamicBackoff()

        service_container = container_factory(Service, rabbit_config)
        service_container.start()
        return service_container

    def test_routing(
        self, container, publish_message, exchange, queue, counter,
        rabbit_config, rabbit_manager, queue_info
    ):
        """ Queues should be dynamically created for each unique delay.
        Messages should be routed to the appropriate queue based on their
        delay value.
        """
        ttls = [10000, 20000, 20000, 30000, 30000, 30000]

        def every_message_handled(worker_ctx, res, exc_info):
            return counter.increment() == len(ttls)

        # publish one message per delay, raising several distinct backoffs
        with entrypoint_waiter(
            container, 'backoff', callback=every_message_handled
        ):
            for ttl in ttls:
                publish_message(exchange, ttl, routing_key=queue.routing_key)

        # each unique delay gets its own backoff queue, holding exactly the
        # messages that were published with that delay
        @retry
        def assert_queue_contents(ttl):
            info = queue_info(get_backoff_queue_name(ttl))
            assert info.message_count == ttls.count(ttl)

        for ttl in set(ttls):
            assert_queue_contents(ttl)
class TestQueueExpiry(object):
    """Backoff queues must expire once their messages have been
    redelivered, and republishing must reset the expiry clock."""

    # NOTE(review): this autouse fixture only yields and patches nothing --
    # it looks vestigial (presumably it once shortened Backoff schedules);
    # confirm before removing.
    @pytest.yield_fixture(autouse=True)
    def fast_backoff(self):
        yield

    @pytest.yield_fixture(autouse=True)
    def fast_expire(self):
        # shrink the grace period after which empty backoff queues expire,
        # so expiry can be observed within the test run; yields the value
        # in milliseconds
        exp = 500
        with patch('nameko_amqp_retry.backoff.EXPIRY_GRACE_PERIOD', new=exp):
            yield exp

    @pytest.fixture
    def container(self, container_factory, rabbit_config, queue):
        class Service(object):
            name = "service"

            @consume(queue)
            def backoff(self, delay):
                # schedule comes from the message payload; limit = 1 makes
                # the backoff expire after a single retry
                class DynamicBackoff(Backoff):
                    schedule = (delay,)
                    limit = 1
                raise DynamicBackoff()

        container = container_factory(Service, rabbit_config)
        container.start()
        return container

    def test_queues_removed(
        self, container, publish_message, exchange, queue, counter,
        rabbit_config, rabbit_manager, fast_expire, queue_info
    ):
        """ Backoff queues should be removed after their messages are
        redelivered.
        """
        delays = [50, 100, 100, 100, 50]

        def all_expired(worker_ctx, res, exc_info):
            # only count workers that failed with Backoff.Expired
            if not issubclass(exc_info[0], Backoff.Expired):
                return
            if counter.increment() == len(delays):
                return True

        # cause multiple unique backoffs to be raised
        with entrypoint_waiter(container, 'backoff', callback=all_expired):
            for delay in delays:
                publish_message(exchange, delay, routing_key=queue.routing_key)

        # wait for long enough for the queues to expire
        time.sleep((max(delays) + fast_expire) / 1000.0)

        # verify the queues have been removed
        for delay in set(delays):
            with pytest.raises(NotFound):
                queue_info(get_backoff_queue_name(delay))

    def test_republishing_redeclares(
        self, container, publish_message, exchange, queue, counter,
        rabbit_config, rabbit_manager, fast_expire
    ):
        """ Queue expiry must be reset when a new message is published to
        the backoff queue
        """
        delays = [100] * 3

        def all_expired(worker_ctx, res, exc_info):
            # only count workers that failed with Backoff.Expired
            if not issubclass(exc_info[0], Backoff.Expired):
                return
            if counter.increment() == len(delays):
                return True

        # cause multiple unique backoffs to be raised, but wait for
        # EXPIRY_GRACE_PERIOD between each publish.
        with entrypoint_waiter(container, 'backoff', callback=all_expired):
            for delay in delays:
                publish_message(exchange, delay, routing_key=queue.routing_key)
                time.sleep(fast_expire / 1000.0)

        # the entrypoint waiter blocks until every published message expires
        # (after exactly one backoff). if the subsequent publishes didn't
        # redeclare the queue, the later messages would be lost when the queue
        # was removed (50ms + fast_expire after the first publish)
class TestMandatoryDelivery:
    """Backoff publishing uses mandatory delivery: an unroutable backoff
    message must surface as an error rather than being silently dropped."""

    @pytest.fixture
    def container(self, container_factory, rabbit_config, queue):
        class Service(object):
            name = "service"

            @consume(queue)
            def backoff(self, delay):
                raise Backoff()

        service_container = container_factory(Service, rabbit_config)
        service_container.start()
        return service_container

    def test_failed_delivery(
        self, container, publish_message, exchange, queue, rabbit_config
    ):
        backoff_publisher = get_extension(container, BackoffPublisher)
        original_make_queue = backoff_publisher.make_queue

        def unroutable_queue(_expiration):
            # return a queue whose binding no backoff message will match,
            # forcing the republished message to be unroutable
            return original_make_queue(999999)

        # patch make_queue so that the return value does not have
        # a matching binding; this forces an unroutable messsage
        with patch.object(
            backoff_publisher, 'make_queue', new=unroutable_queue
        ):
            publish_message(
                exchange, "", routing_key=queue.routing_key
            )

            # when the backoff publisher fails, the error should bubble up
            # to the container
            with pytest.raises(UndeliverableMessage):
                container.wait()
class TestMultipleMessages(object):

    @pytest.fixture
    def container(self, container_factory, rabbit_config, counter):
        class Service(object):
            name = "service"

            backoff = BackoffPublisher()

            @rpc
            def slow(self):
                # back off exactly once, then succeed
                if counter["slow"].increment() > 1:
                    return "slow"
                raise SlowBackoff()

            @rpc
            def quick(self):
                # back off exactly once, then succeed
                if counter["quick"].increment() > 1:
                    return "quick"
                raise QuickBackoff()

        svc_container = container_factory(Service, rabbit_config)
        svc_container.start()
        return svc_container

    def test_messages_can_leapfrog(
        self, container, entrypoint_tracker, rpc_proxy, wait_for_result
    ):
        """ Messages with short TTLs should be able to leapfrog messages with
        long TTLs that are also in the "wait" queue
        """
        # wait for both entrypoints to generate a result
        with entrypoint_waiter(
            container, 'quick', callback=wait_for_result
        ) as result_quick, entrypoint_waiter(
            container, 'slow', callback=wait_for_result
        ) as result_slow:
            # wait for "slow" to fire once before calling "quick",
            # to make absolutely sure its backoff is dispatched first
            with entrypoint_waiter(container, 'slow'):
                rpc_proxy.service.slow.call_async()
            rpc_proxy.service.quick.call_async()

        assert result_quick.get() == "quick"
        assert result_slow.get() == "slow"

        # "quick" should return a result before "slow" because it has a
        # shorter backoff interval, even though "slow" raises first
        assert entrypoint_tracker.get_results() == (
            [None, None, "quick", "slow"]
        )
        assert entrypoint_tracker.get_exceptions() == (
            [(SlowBackoff, ANY, ANY), (QuickBackoff, ANY, ANY), None, None]
        )
class TestNegativeExpiration(object):

    @pytest.fixture
    def container(self, container_factory, rabbit_config, counter):
        class Service(object):
            name = "service"

            backoff = BackoffPublisher()

            @rpc
            def bad(self):
                class BadBackoff(Backoff):
                    schedule = (-10, )
                raise BadBackoff()

        svc_container = container_factory(Service, rabbit_config)
        svc_container.start()
        return svc_container

    def test_negative_expiration_coerced_to_zero(
        self, container, entrypoint_tracker, rpc_proxy, wait_for_result
    ):
        # a negative schedule entry must behave like an immediate (zero)
        # expiry rather than failing outright
        with entrypoint_waiter(container, 'bad', callback=wait_for_result):
            rpc_proxy.service.bad.call_async()

        # entrypoint fired more than once, i.e. the backoff was delivered
        assert len(entrypoint_tracker.get_results()) > 1
class TestCallStack(object):
    """ Verify that each backoff round extends the worker call id stack
    with a ``<call id>.backoff`` entry.

    The three transports (RPC, events, messaging) expect the same shape of
    stack; the expected literals are built by :meth:`expected_stacks`
    instead of being triplicated inline.
    """

    @pytest.fixture
    def container(self, container_factory, rabbit_config, service_cls):
        class CallStack(DependencyProvider):
            """ Exposes the call stack directly to the service
            """
            def get_dependency(self, worker_ctx):
                return worker_ctx.context_data['call_id_stack']

        class Service(service_cls):
            call_stack = CallStack()

        container = container_factory(Service, rabbit_config)
        container.start()
        return container

    @staticmethod
    def expected_stacks(root, first_call_id, count):
        """ Build the call stacks expected after `count` executions of
        ``service.method`` starting at call id `first_call_id`, where every
        execution except the last ends in a backoff.
        """
        stacks = []
        for attempt in range(count):
            stack = [root]
            stack.extend(
                'service.method.{}.backoff'.format(call_id)
                for call_id in range(first_call_id, first_call_id + attempt)
            )
            stack.append('service.method.{}'.format(first_call_id + attempt))
            stacks.append(stack)
        return stacks

    @staticmethod
    def capture_stacks(call_stacks):
        """ Return an entrypoint_waiter callback that records each worker's
        call id stack into `call_stacks` and stops waiting on the first
        successful (non-exception) result.
        """
        def callback(worker_ctx, result, exc_info):
            call_stacks.append(worker_ctx.call_id_stack)
            if exc_info is None:
                return True
        return callback

    @pytest.mark.usefixtures('predictable_call_ids')
    def test_rpc_call_stack(self, container, rpc_proxy):
        """ RPC backoff extends call stack
        """
        call_stacks = []
        with entrypoint_waiter(
            container, 'method', callback=self.capture_stacks(call_stacks)
        ):
            rpc_proxy.service.method("msg")

        # rpc proxy itself consumes call id 0, so the service starts at 1
        expected = self.expected_stacks('standalone_rpc_proxy.call.0', 1, 4)
        if NAMEKO3:  # pragma: no cover
            # nameko 3 renamed the standalone proxy and its call prefix
            for stack in expected:
                stack[0] = stack[0].replace("proxy", "client").replace("call", "0")
        assert call_stacks == expected

    @pytest.mark.usefixtures('predictable_call_ids')
    def test_events_call_stack(self, container, dispatch_event):
        """ Event handler backoff extends call stack
        """
        call_stacks = []
        with entrypoint_waiter(
            container, 'method', callback=self.capture_stacks(call_stacks)
        ):
            dispatch_event(
                "src_service",
                "event_type",
                {},
                headers={
                    'nameko.call_id_stack': ['event.dispatch']
                }
            )
        assert call_stacks == self.expected_stacks('event.dispatch', 0, 4)

    @pytest.mark.usefixtures('predictable_call_ids')
    def test_messaging_call_stack(
        self, container, publish_message, exchange, queue
    ):
        """ Message consumption backoff extends call stack
        """
        call_stacks = []
        with entrypoint_waiter(
            container, 'method', callback=self.capture_stacks(call_stacks)
        ):
            publish_message(
                exchange,
                "msg",
                routing_key=queue.routing_key,
                headers={
                    'nameko.call_id_stack': ['message.publish']
                }
            )
        assert call_stacks == self.expected_stacks('message.publish', 0, 4)
class TestSerialization(object):

    @pytest.yield_fixture(autouse=True)
    def custom_serializer(self, rabbit_config):
        # toy serializer: JSON, but upper-cased on the wire
        def encode(value):
            return json.dumps(value).upper()

        def decode(value):
            return json.loads(value.lower())

        # register new serializer
        register(
            "upperjson", encode, decode, "application/x-upper-json", "utf-8"
        )
        # update config so consumers expect it
        rabbit_config['serializer'] = "upperjson"
        yield
        unregister("upperjson")

    def test_custom_serialization(
        self, container, publish_message, exchange, queue, wait_for_result
    ):
        """ Backoff can be used with a custom AMQP message serializer
        """
        with entrypoint_waiter(
            container, 'method', callback=wait_for_result
        ) as result:
            publish_message(
                exchange,
                "msg",
                serializer="upperjson",
                routing_key=queue.routing_key
            )
        assert result.get() == "result"
class TestDeadLetteredMessages(object):
@pytest.yield_fixture(autouse=True)
def limited_backoff(self, backoff_count):
# allow exactly `backoff_count` backoffs
limit = backoff_count
with patch.object(Backoff, 'limit', new=limit):
yield limit
@pytest.fixture
def deadlettering_exchange(self, rabbit_config, exchange, queue):
conn = Connection(rabbit_config[AMQP_URI_CONFIG_KEY])
with connections[conn].acquire(block=True) as connection:
deadletter_exchange = Exchange(name="deadletter", type="topic")
deadletter_exchange.maybe_bind(connection)
deadletter_exchange.declare()
deadletter_queue = Queue(
name="deadletter",
exchange=deadletter_exchange,
routing_key="#",
queue_arguments={
'x-dead-letter-exchange': exchange.name
}
)
deadletter_queue.maybe_bind(connection)
deadletter_queue.declare()
return deadletter_exchange
def test_backoff_works_on_previously_deadlettered_message(
self, container, publish_message, deadlettering_exchange,
queue, exchange, wait_for_result, entrypoint_tracker, limited_backoff
):
""" Backoff can be used even if the original message has previously
been deadlettered
"""
with entrypoint_waiter(
container, 'method', callback=wait_for_result
) as result:
# dispatch a message to the deadlettering exchange.
# it will be deadlettered into the normal `exchange`
# and should afterwards be processed as "normal" message
publish_message(
deadlettering_exchange,
"msg",
routing_key=queue.routing_key,
expiration=1.0
)
# the initial deadlettering should not count towards the backoff limit,
# so we shouldn't see Backoff.Expired here
assert result.get() == "result"
assert entrypoint_tracker.get_results() == (
[None] * limited_backoff + ["result"]
)
assert entrypoint_tracker.get_exceptions() == | |
'charisma':16,
},
'class_features':{
'Feat_Inspiring_Leader':True,
'Feat_Mounted_Combatant':True,
'Fighting_Style_Protection':True,
'Second_Wind':True,
'Action_Surge':True,
'Martial_Archetype_Champion':True,
'Champion_Improved_Critical':True,
'Feat_Heavy_Armor_Master':True,
'Extra_Attack':True,
'Remarkable_Athlete':True,
},
'race':'Human-hero',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
#'Infusion of Regeneration':1,
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Rune of Absorbtion':1,
'Rune of Shielding':1,
'Plate Armor':1,
'Heavy Shield':1,
'Sword of Life-Stealing':1,
#'Longsword +1':1,
'Lance':1,
'Pilum':6,
},
'mount_combat':True,
'mount_type':'Warhorse',
'equipment_mount':{
'Horse Scale Mail':1,
},
}
#----
# Legionnaires (army) (<NAME>):
# NOTE(review): equipment keys (e.g. 'Rune of Absorbtion', 'Pilum') are
# presumably catalog keys defined elsewhere -- do not "fix" spellings here.
metadict_chars['Warrior 1 lvl (Katorjnik) (манипуларий)'] = {
# Historically they carried 2 pilums each, but six is weightier (only the infantry cannot carry that many).
'level':1,
'char_class':'Warrior',
'hit_dice':'1d8',
'behavior':'warrior',
'race':'Human-common',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Potion of Antidote':1,
'Studded Leather':1,
'Heavy Shield':1,
'Shortsword':1,
'Pilum':7,
},
}
metadict_chars['Warrior 2 lvl (Katorjnik) (ветеран) (кольчуга)'] = {
'level':2,
'char_class':'Warrior',
'hit_dice':'1d8',
'behavior':'warrior',
'class_features':{
'Fighting_Style_Protection':True,
},
'race':'Human-common',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Potion of Antidote':1,
'Chain Shirt':1,
'Heavy Shield':1,
'Shortsword':1,
'Pilum':7,
},
}
metadict_chars['Warrior 2 lvl (Katorjnik) (ветеран)'] = {
'level':2,
'char_class':'Warrior',
'hit_dice':'1d8',
'behavior':'elite_warrior',
'class_features':{
'Fighting_Style_Protection':True,
},
'race':'Human-common',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Potion of Antidote':1,
'Scale Mail':1,
'Heavy Shield':1,
'Shortsword':1,
'Pilum':2,
},
}
metadict_chars['Warrior 3 lvl (Katorjnik) (урагос)'] = {
# Squad leader (decanus, "uragos")
'level':3,
'char_class':'Warrior',
'hit_dice':'1d8',
'behavior':'commander',
'class_features':{
'Fighting_Style_Protection':True,
},
'race':'Human-common',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Breastplate':1,
'Heavy Shield':1,
'Shortsword':1,
'Pilum':7,
},
}
metadict_chars['Warrior 4 lvl (Katorjnik) (опцион)'] = {
'level':4,
'char_class':'Warrior-officer',
'hit_dice':'1d8',
'behavior':'commander',
'class_features':{
'Fighting_Style_Defence':True,
'Feat_Inspiring_Leader':True,
},
'race':'Human-common',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Rune of Shielding':1,
'Breastplate':1,
'Heavy Shield':1,
'Shortsword':1,
'Pilum':7,
},
#'mount_combat':False,
#'mount_type':'Riding Horse',
#'equipment_mount':{
#    },
}
metadict_chars['Warrior 5 lvl (Katorjnik) (центурион)'] = {
'level':5,
'char_class':'Warrior-officer',
'hit_dice':'1d8',
#'carefull_AI':True,
'behavior':'commander',
'class_features':{
'Fighting_Style_Defence':True,
'Feat_Inspiring_Leader':True,
'Extra_Attack':True,
},
'race':'Human-common',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Rune of Absorbtion':1,
'Rune of Shielding':1,
'Half Plate':1,
'Heavy Shield':1,
'Shortsword':1,
'Pilum':3,
},
#'mount_combat':False,
#'mount_type':'Riding Horse',
#'equipment_mount':{
#    },
}
#----
# Rangers (retinue) (Sakatr Ka'Alen):
metadict_chars['Ranger 2 lvl (Gogan) (следопыт Сакатра)'] = {
'level':2,
'char_class':'Ranger',
'hit_dice':'1d10',
'behavior':'archer',
'class_features':{
'Feat_Sharpshooter':True,
'Favored_Enemy':['humans'],
'Natural_Explorer':['sea'],
'Spellcasting':True,
'Spells':[
('1_lvl', 'Fog_Cloud'),
('1_lvl', 'Hail_of_Thorns'),
],
'Fighting_Style_Archery':True,
},
'race':'Human-hero',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Goodberry':5,
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Infusion of Longstrider':1,
'Rune of Absorbtion':1,
'Breastplate':1,
'Shield':1,
'Shortsword':1,
'Longbow':1,
'Arrow':40,
},
#'mount_combat':True,
#'mount_type':'Light Warhorse',
#'equipment_mount':{
#    },
}
metadict_chars['Ranger 7 lvl (Gogan) (Сакатр Ка-Ален)'] = {
'level':7,
'brave_AI':True,
'archer_AI':True,
#'killer_AI':True,
'char_class':'Ranger',
'hit_dice':'1d10',
'behavior':'commander',
'hitpoints_medial':True,
'abilityes':{
'strength':11,
'dexterity':20,
'constitution':14,
'intelligence':14,
'wisdom':16,
'charisma':17,
},
'class_features':{
# TODO: implement Hunter_Steel_Will -- advantage on saving throws against fear
'Feat_Sharpshooter':True,
'Favored_Enemy':['humans', 'sea_monsters'],
'Natural_Explorer':['sea', 'coast'],
'Spellcasting':True,
'Spells':[
# TODO: implement Spike_Growth
('ritual', 'Animal_Messenger'),
('1_lvl', 'Goodberry'),
('1_lvl', 'Fog_Cloud'),
('1_lvl', 'Hail_of_Thorns'),
('2_lvl', 'Pass_Without_Trace'),
#('2_lvl', 'Spike_Growth'),
],
'Fighting_Style_Archery':True,
'Primeval_Awareness':True,
'Ranger_Archetype_Hunter':True,
'Hunter_Horde_Breaker':True,
'Hunter_Steel_Will':True,
'Extra_Attack':True,
},
'race':'Human-hero',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Goodberry':15,
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Infusion of Longstrider':1,
'Rune of Shielding':1,
'Studded Leather':1,
'Shield':1,
'Scimitar':1,
'Longbow +1':1,
'Arrow':40,
},
#'mount_combat':True,
#'mount_type':'Light Warhorse',
#'equipment_mount':{
#    },
}
#----
# Pirates (army) (<NAME>):
metadict_chars['Warrior 1 lvl (Gogan) (кимерийский пират)'] = {
'level':1,
'char_class':'Warrior-pirate',
'hit_dice':'1d8',
'behavior':'warrior',
'race':'Human-common',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Potion of Antidote':1,
'Studded Leather':1,
'Shield':1,
'Shortsword':1,
'Shortbow':1,
'Arrow':40,
},
}
metadict_chars['Warrior 2 lvl (Gogan) (кимерийский пират-ветеран)'] = {
'level':2,
'char_class':'Warrior-pirate',
'hit_dice':'1d8',
'behavior':'elite_warrior',
'class_features':{
'Fighting_Style_Archery':True,
},
'race':'Human-common',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Potion of Antidote':1,
'Scale Mail':1,
'Shield':1,
'Rapier':1,
'Dagger':1,
'Shortbow':1,
'Arrow':40,
},
}
metadict_chars['Warrior 3 lvl (Gogan) (кимерийский пират-сержант)'] = {
'level':3,
'char_class':'Warrior-pirate',
'hit_dice':'1d8',
'behavior':'commander',
'class_features':{
'Fighting_Style_Archery':True,
},
'race':'Human-common',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Breastplate':1,
'Shield':1,
'Rapier':1,
'Dagger':1,
'Shortbow':1,
'Arrow':40,
},
}
metadict_chars['Warrior 4 lvl (Gogan) (кимерийский пират-лейтенант)'] = {
'level':4,
'char_class':'Warrior-pirate',
'hit_dice':'1d8',
'behavior':'commander',
'class_features':{
'Fighting_Style_Archery':True,
'Feat_Inspiring_Leader':True,
},
'race':'Human-common',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Rune of Shielding':1,
'Breastplate':1,
'Shield':1,
'Rapier':1,
'Dagger':1,
'Shortbow':1,
'Arrow':40,
},
#'mount_combat':False,
#'mount_type':'Riding Horse',
#'equipment_mount':{
#    },
}
metadict_chars['Warrior 5 lvl (Gogan) (кимерийский пират-капитан)'] = {
'level':5,
#'volley_AI':True,
'char_class':'Warrior-pirate',
'hit_dice':'1d8',
'behavior':'commander',
'class_features':{
'Fighting_Style_Archery':True,
'Feat_Inspiring_Leader':True,
'Extra_Attack':True,
},
'race':'Human-common',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Potion of Antidote':1,
'Infusion of Vitality':1,
'Rune of Shielding':1,
'Breastplate':1,
'Shield':1,
'Rapier':1,
'Dagger':1,
'Shortbow':1,
'Arrow':40,
},
#'mount_combat':False,
#'mount_type':'Riding Horse',
#'equipment_mount':{
#    },
}
#----
# Enemies (heroes) (Kara'Yam):
metadict_chars['Warlock 1 lvl (колдун Кара\'Яма)'] = {
'level':1,
'char_class':'Warlock',
'hit_dice':'1d8',
'behavior':'archer',
'class_features':{
'Feat_Mounted_Combatant':True,
'Otherworldly_Patron':'Fiend',
'Pact_Magic':True,
'Spells':[
('cantrip', 'Eldritch_Blast'),
('cantrip', 'Create_Bonfire'),
('ritual', 'Unseen_Servant'),
('1_lvl', 'Burning_Hands'),
('1_lvl', 'Hex'),
],
'Dark_One\'s_Blessing':True,
},
'race':'Human-hero',
'weapon_skill':['simple'],
'armor_skill':['light'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Infusion of Vitality':1,
'Rune of Armor':1,
'Dagger':1,
'Shortbow':1,
'Arrow':40,
},
'mount_combat':True,
'mount_type':'Horseclaw',
'equipment_mount':{
},
}
metadict_chars['Warlock 5 lvl (Кара\'Ям)'] = {
# Freely disguises himself: Invocation_Mask_of_Many_Faces
# Ignores fire resistance: Feat_Elemental_Adept
'level':5,
'fireball_AI':True,
'char_class':'Warlock',
'hit_dice':'1d8',
'behavior':'commander',
'hitpoints_medial':True,
'abilityes':{
'strength':10,
'dexterity':18,
'constitution':14,
'intelligence':16,
'wisdom':8,
'charisma':18,
},
'class_features':{
'Feat_Mounted_Combatant':True,
'Otherworldly_Patron':'Fiend',
'Pact_Magic':True,
'Spells':[
# Unseen_Servant for mine clearing. For apprentices.
# Burning_Hands for apprentices.
# TODO: implement Green_Flame_Blade.
('cantrip', 'Eldritch_Blast'),
('cantrip', 'Create_Bonfire'),
('cantrip', 'Minor_Illusion'),
('ritual', 'Detect_Magic'),
('ritual', 'Unseen_Servant'),
('3_lvl', 'Burning_Hands'),
('3_lvl', 'Invisibility'),
('3_lvl', 'Suggestion'),
('3_lvl', 'Earthbind'),
('3_lvl', 'Fireball'),
('3_lvl', 'Fly'),
],
'Dark_One\'s_Blessing':True,
'Eldritch_Invocations':True,
'Invocation_Eldritch_Spear':True,
'Invocation_Agonizing_Blast':True,
'Invocation_Mask_of_Many_Faces':True,
'Pact_Boon':True,
'Pact_of_the_Blade':True,
'Feat_Elemental_Adept':'fire',
},
'race':'Human-hero',
'weapon_skill':['simple','martial'],
'armor_skill':['light'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
# Goodberries from the druid Tik-Bo:
# Pact of the Blade, +1 weapon.
'Goodberry':30,
'Rune of Absorbtion':1,
'Rune of Shielding':1,
'Rune of Armor':1,
'Shortsword +1':1,
'Shortbow +1':1,
'Arrow':40,
},
'mount_combat':True,
'mount_type':'Horseclaw',
'equipment_mount':{
},
}
#----
# Enemies (heroes) (Kema'Esh):
metadict_chars['Warlock 1 lvl (колдун Кема\'Эша)'] = {
'level':1,
'char_class':'Warlock',
'hit_dice':'1d8',
'behavior':'archer',
'class_features':{
'Feat_Mounted_Combatant':True,
'Otherworldly_Patron':'Fiend',
'Pact_Magic':True,
'Spells':[
('cantrip', 'Eldritch_Blast'),
('cantrip', 'Create_Bonfire'),
('1_lvl', 'Charm_Person'),
('1_lvl', 'Burning_Hands'),
('1_lvl', 'Hex'),
],
'Dark_One\'s_Blessing':True,
},
'race':'Human-hero',
'weapon_skill':['simple'],
'armor_skill':['light'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Infusion of Vitality':1,
'Rune of Armor':1,
'Dagger':1,
'Shortbow':1,
'Arrow':40,
},
'mount_combat':True,
'mount_type':'Horseclaw',
'equipment_mount':{
},
}
metadict_chars['Warlock 5 lvl (Кема\'Эш)'] = {
# Attacks from afar: Invocation_Eldritch_Spear.
# Inspires his troops: Feat_Inspiring_Leader.
# Relays orders using Dancing_Lights and Message.
'level':5,
'fireball_AI':True,
'char_class':'Warlock',
'hit_dice':'1d8',
'behavior':'commander',
'hitpoints_medial':True,
'abilityes':{
'strength':10,
'dexterity':16,
'constitution':12,
'intelligence':14,
'wisdom':16,
'charisma':18,
},
'class_features':{
'Feat_Mounted_Combatant':True,
'Otherworldly_Patron':'Fiend',
'Pact_Magic':True,
'Spells':[
('cantrip', 'Eldritch_Blast'),
('cantrip', 'Minor_Illusion'),
('cantrip', 'Prestidigitation'),
('cantrip', 'Dancing_Lights'),
('cantrip', 'Message'),
('ritual', 'Detect_Magic'),
('ritual', 'Identify'),
('3_lvl', 'Charm_Person'),
('3_lvl', 'Hex'),
('3_lvl', 'Burning_Hands'),
('3_lvl', 'Summon_Lesser_Demons'),
('3_lvl', 'Fly'),
],
'Dark_One\'s_Blessing':True,
'Eldritch_Invocations':True,
'Invocation_Agonizing_Blast':True,
'Invocation_Eldritch_Spear':True,
'Invocation_Book_of_Ancient_Secrets':True,
'Pact_Boon':True,
'Pact_of_the_Tome':True,
'Feat_Inspiring_Leader':True,
},
'race':'Human-hero',
'weapon_skill':['simple','martial'],
'armor_skill':['light'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Goodberry':30,
'Rune of Absorbtion':1,
'Rune of Shielding':1,
'Rune of Armor':1,
'Dagger':1,
'Shortbow +1':1,
'Arrow':40,
},
'mount_combat':True,
'mount_type':'Horseclaw',
'equipment_mount':{
},
}
#----
# Enemies (heroes) (Enzif):
metadict_chars['Ranger 1 lvl (следопыт Энзифа)'] = {
'level':1,
'char_class':'Ranger',
'hit_dice':'1d10',
'behavior':'archer',
'class_features':{
'Feat_Sharpshooter':True,
'Favored_Enemy':['humans'],
'Natural_Explorer':['forest'],
},
'race':'Human-hero',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Infusion of Vitality':1,
'Studded Leather':1,
'Shield':1,
'Shortsword':1,
'Longbow':1,
'Arrow':40,
},
#'mount_combat':True,
#'mount_type':'Light Warhorse',
#'equipment_mount':{
#    },
}
metadict_chars['Ranger 5 lvl (Энзиф «Ходи-гора»)'] = {
'level':5,
'char_class':'Ranger',
'hit_dice':'1d10',
'behavior':'commander',
'hitpoints_medial':True,
'abilityes':{
'strength':18,
'dexterity':18,
'constitution':16,
'intelligence':8,
'wisdom':12,
'charisma':14,
},
'class_features':{
'Feat_Sharpshooter':True,
'Favored_Enemy':['humans'],
'Natural_Explorer':['forest'],
'Spellcasting':True,
'Spells':[
('ritual', 'Animal_Messenger'),
('1_lvl', 'Absorb_Elements'),
('2_lvl', 'Pass_Without_Trace'),
('2_lvl', 'Find_Traps'),
('2_lvl', 'Spike_Growth'),
],
'Fighting_Style_Archery':True,
'Primeval_Awareness':True,
'Ranger_Archetype_Hunter':True,
'Hunter_Horde_Breaker':True,
'Feat_Great_Weapon_Master':True,
'Extra_Attack':True,
},
'race':'Human-hero',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Infusion of Vitality':1,
'Rune of Absorbtion':1,
'Rune of Shielding':1,
'Studded Leather':1,
'Shield':1,
'Shortsword':1,
'Longbow +1':1,
'Arrow':40,
},
#'mount_combat':True,
#'mount_type':'Light Warhorse',
#'equipment_mount':{
#    },
}
#----
# Enemies (heroes) (Magor):
metadict_chars['Paladin 1 lvl (паладин Магора)'] = {
'level':1,
'char_class':'Paladin',
'hit_dice':'1d10',
'behavior':'elite_warrior',
'class_features':{
'Feat_Tough':True,
'Feat_Heavy_Armor_Master':True,
'Ability_Score_Improvement':{
'strength':+1,
},
'Divine_Sense':True,
'Lay_on_Hands':True,
},
'race':'Human-hero-big',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
# NOTE(review): 'Flait' is presumably the catalog spelling -- keep as is.
'Infusion of Vitality':1,
'Splint Armor':1,
'Heavy Shield':1,
'Flait':1,
'Long Spear':1,
},
#'mount_combat':False,
#'mount_type':'Riding Horse',
#'equipment_mount':{
#    },
}
metadict_chars['Paladin 5 lvl (Магор «Детоед»)'] = {
'level':5,
'char_class':'Paladin',
'hit_dice':'1d10',
'behavior':'commander',
#'fearless_AI':True,
'killer_AI':True,
'hitpoints_medial':True,
'abilityes':{
'strength':18,
'dexterity':10,
'constitution':18,
'intelligence':10,
'wisdom':12,
'charisma':18,
},
'class_features':{
'Feat_Tough':True,
'Feat_Heavy_Armor_Master':True,
'Ability_Score_Improvement':{
'strength':+3,
},
'Divine_Sense':True,
'Lay_on_Hands':True,
'Fighting_Style_Defence':True,
'Divine_Smite':True,
'Spellcasting':True,
'Spells':[
('channel', 'Sacred_Weapon'),
('1_lvl', 'Divine_Smite'),
('2_lvl', 'Divine_Smite'),
('1_lvl', 'Bless'),
('1_lvl', 'Heroism'),
('1_lvl', 'Shield_of_Faith'),
('1_lvl', 'Protection_from_Evil_and_Good'),
('1_lvl', 'Sanctuary'),
('1_lvl', 'Command'),
('2_lvl', 'Lesser_Restoration'),
('2_lvl', 'Zone_of_Truth'),
('2_lvl', 'Find_Steed'),
],
'Divine_Health':True,
'Oath_of_Devotion':True,
'Channel_Turn_The_Unholy':True,
'Channel_Sacred_Weapon':True,
'Extra_Attack':True,
},
'race':'Human-hero-big',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Infusion of Vitality':2,
'Rune of Absorbtion':1,
'Splint Armor':1,
'Heavy Shield':1,
'Flait +1':1,
'Long Spear':1,
},
#'mount_combat':False,
#'mount_type':'Riding Horse',
#'equipment_mount':{
#    },
}
#----
# Enemies (heroes) (Khana'Vam):
metadict_chars['Fighter 1 lvl (снайпер Хана\'Вама)'] = {
# Snipers; they spot for the militia's "hail of arrows" volleys.
'level':1,
'char_class':'Battlemaster',
'abilityes_choice':['dexterity','constitution','strength','charisma'],
'hit_dice':'1d10',
'behavior':'archer',
'class_features':{
'Feat_Sharpshooter':True,
'Fighting_Style_Archery':True,
'Second_Wind':True,
},
'race':'Human-hero',
'weapon_skill':['simple','martial'],
'armor_skill':['light','medium','heavy','shield'],
'skills':[
'Perception',
],
'equipment_supply':soldier_supply,
'equipment_backpack':soldiers_pack,
'equipment_weapon':{
'Rune of Armor':1,
'Shield':1,
'Shortsword':1,
'Longbow':1,
'Arrow':40,
},
'mount_combat':True,
'mount_type':'Riding Horse',
'equipment_mount':{
},
}
metadict_chars['Fighter 5 lvl (Хана\'Вам)'] = {
# Лучник-чемпион
'level':5,
'char_class':'Battlemaster',
'abilityes_choice':['dexterity','constitution','strength','charisma'],
'hit_dice':'1d10',
'behavior':'commander',
'killer_AI':True,
'hitpoints_medial':True,
'abilityes':{
'strength':12,
'dexterity':20,
'constitution':12,
'intelligence':16,
'wisdom':10,
'charisma':16,
},
'class_features':{
| |
from aiomysql import Pool, DictCursor
from utils.db_api.sql import db_pool
import json, io
class DBCommands:
"""Async MySQL data-access helper for the ticketing Telegram bot.

Holds raw SQL templates (filled in with ``str.format``) and the coroutine
methods that execute them against the shared aiomysql pool.

NOTE(review): every template below is interpolated with ``str.format``
rather than driver-side parameters -- safe only while the substituted
values are trusted integers; prefer parameterized queries.
"""
# shared aiomysql connection pool, created in utils.db_api.sql
pool: Pool = db_pool
#885533024
# attach a telegram user to an MFO branch and activate them
TG_USER_ACTIVATE = "UPDATE crudapp_telegramuser SET mfo_id = {0}, status = 1 WHERE uid = {1}"
# all MFO branches
MFO_LIST = "select * from crudapp_mfostruct"
# ATMs visible to a telegram user, joined with their model info
GET_ATMS = "SELECT crudapp_atm.id as atm_id, crudapp_atm.name as atm_name,crudapp_telegramuser.uid as uid,crudapp_telegramuser.name as tg_name,crudapp_atmmodel.name as model_name,crudapp_atmmodel.company as model_company FROM crudapp_atm INNER JOIN crudapp_telegramuser ON crudapp_telegramuser.mfo_id=crudapp_atm.mfo_id INNER JOIN crudapp_atmmodel ON crudapp_atmmodel.id=crudapp_atm.atmModelId_id WHERE uid={0}"
# open a new ticket ({0}=atm id, {1}=user id); status 0 == new
CREATE_TICKET = "INSERT INTO crudapp_ticket(status, atm_id, dataClosed, dataCreated, user_id, edited) VALUES (0, {0}, UTC_TIMESTAMP(), UTC_TIMESTAMP(), {1}, 0)"
# attach / detach / query broken-category links on a ticket
SET_TICKET_BROKEN = "INSERT INTO crudapp_ticket_broken(ticket_id, brokencategory_id) VALUES ({0}, {1})"
REMOVE_TICKET_BROKEN = "DELETE FROM crudapp_ticket_broken WHERE ticket_id = {0} AND brokencategory_id={1}"
EXIST_TICKET_BROKEN = "SELECT * FROM crudapp_ticket_broken WHERE ticket_id = {0} AND brokencategory_id={1}"
EXIST_BROKEN = "SELECT * FROM crudapp_ticket_broken WHERE ticket_id = {0}"
# reference list of breakage categories
GET_PROBLEMS = "select * from crudapp_brokencategory"
# count of the user's still-editable (draft) tickets
CAN_CREATE_TICKET = "SELECT count(*) as count FROM crudapp_ticket INNER JOIN crudapp_telegramuser ON crudapp_telegramuser.mfo_id=crudapp_ticket.user_id WHERE crudapp_ticket.edited=0 and crudapp_telegramuser.uid={0}"
GET_EDIT_TICKET = "SELECT crudapp_ticket.id, crudapp_ticket.STATUS, crudapp_ticket.atm_id, crudapp_ticket.user_id, crudapp_ticket.edited, crudapp_telegramuser.uid FROM crudapp_ticket INNER JOIN crudapp_telegramuser ON crudapp_telegramuser.id = crudapp_ticket.user_id WHERE crudapp_ticket.edited = {1} and crudapp_telegramuser.uid = {0} ORDER BY crudapp_ticket.id DESC"
SET_EDIT_TICKET = "UPDATE crudapp_ticket SET edited = {1} WHERE id = {0}"
SET_STATUS_TICKET = "UPDATE crudapp_ticket SET status = {1} WHERE id = {0}"
# chat message inserts; ADD_MSG_FILE uses %s placeholders for caption/text
ADD_MSG = "INSERT INTO crudapp_telegrammsg ( json, t_id, category, download, operator, show, text ) VALUES ( '{0}', {1}, '{2}', 0, 0, 0, '{3}' )"
ADD_MSG_FILE = "INSERT INTO crudapp_telegrammsg ( json, t_id, category, dt, path, download, operator, crudapp_telegrammsg.show, caption, text) VALUES ( '{0}', {1}, '{2}', NOW(), '{3}', {4}, {5}, {6}, %s, %s )"
# user lookups by telegram uid (GET_USER / EXIST_USER_IN_DB / GET_USER_STAGE
# are intentionally identical queries used from different call sites)
GET_USER = "select * from crudapp_telegramuser where uid={0}"
EXIST_USER_IN_DB = "select * from crudapp_telegramuser where uid={0}"
GET_USER_STAGE = "SELECT * FROM crudapp_telegramuser WHERE uid = {0}"
SET_USER_STAGE = "UPDATE crudapp_telegramuser SET stage = '{1}' WHERE uid={0}"
GET_TICKET_MSG = "select * from crudapp_telegrammsg where t_id = {0}"
REGISTRATION = "INSERT INTO crudapp_telegramuser(uid,name,stage,status,json_info,language) VALUES({0}, '{1}','',0,'{2}',0)"
GET_STATUS = "\
SELECT\
crudapp_ticket.id AS ticket_id,\
GROUP_CONCAT( crudapp_brokencategory.title SEPARATOR '; ' ) AS details,\
crudapp_ticket.STATUS AS ticket_status,\
crudapp_ticket.dataCreated AS created,\
crudapp_ticket.dataClosed AS closed,\
crudapp_ticket.description,\
crudapp_ticket.operator,\
crudapp_ticket.edited,\
crudapp_telegramuser.uid AS user_id,\
crudapp_telegramuser.NAME AS username,\
crudapp_atm.NAME AS atm_name,\
crudapp_atm.id AS atm_id,\
crudapp_atm.serialNumber AS serial_number,\
crudapp_atm.terminalId AS terminal_id,\
crudapp_atm.lat,\
crudapp_atm.long,\
crudapp_atmmodel.image,\
crudapp_atmmodel.NAME AS atm_model_name,\
crudapp_atmmodel.company \
FROM\
crudapp_ticket\
JOIN crudapp_telegramuser ON crudapp_telegramuser.id = crudapp_ticket.user_id\
JOIN crudapp_atm ON crudapp_atm.id = crudapp_ticket.atm_id\
JOIN crudapp_atmmodel ON crudapp_atm.atmModelId_id = crudapp_atmmodel.id\
JOIN crudapp_ticket_broken ON crudapp_ticket.id = crudapp_ticket_broken.ticket_id\
JOIN crudapp_brokencategory ON crudapp_brokencategory.id = crudapp_ticket_broken.brokencategory_id \
WHERE\
crudapp_telegramuser.uid = {0} \
AND crudapp_ticket.STATUS IN ( {1} ) \
GROUP BY\
crudapp_ticket.id \
ORDER BY\
crudapp_ticket.id DESC\
LIMIT 3"
GET_TICKET_OF_ID = "SELECT\
crudapp_ticket.id AS ticket_id,\
GROUP_CONCAT( crudapp_brokencategory.title SEPARATOR '; ' ) AS details,\
crudapp_ticket.STATUS AS ticket_status,\
crudapp_ticket.description,\
crudapp_ticket.operator,\
crudapp_ticket.edited,\
crudapp_telegramuser.uid AS user_id,\
crudapp_telegramuser.NAME AS username,\
crudapp_atm.NAME AS atm_name,\
crudapp_atm.id AS atm_id,\
crudapp_atm.serialNumber AS serial_number,\
crudapp_atm.terminalId AS terminal_id,\
crudapp_atm.lat,\
crudapp_atm.LONG,\
crudapp_atmmodel.image,\
crudapp_atmmodel.NAME AS atm_model_name,\
crudapp_atmmodel.company \
FROM\
crudapp_ticket\
JOIN crudapp_telegramuser ON crudapp_telegramuser.id = crudapp_ticket.user_id\
JOIN crudapp_atm ON crudapp_atm.id = crudapp_ticket.atm_id\
JOIN crudapp_atmmodel ON crudapp_atm.atmModelId_id = crudapp_atmmodel.id\
JOIN crudapp_ticket_broken ON crudapp_ticket.id = crudapp_ticket_broken.ticket_id\
JOIN crudapp_brokencategory ON crudapp_brokencategory.id = crudapp_ticket_broken.brokencategory_id \
WHERE\
crudapp_ticket.id = {0} limit 1"
HISTORY_TICKET_CHAT = "\
SELECT \
json,t_id,crudapp_ticket.operator as opername,uid,name,text,caption \
FROM \
crudapp_telegrammsg \
INNER JOIN crudapp_ticket ON crudapp_ticket.id = crudapp_telegrammsg.t_id \
INNER JOIN crudapp_telegramuser ON crudapp_ticket.user_id = crudapp_telegramuser.id \
WHERE crudapp_ticket.id = {0}"
# per-chat bookkeeping of bot messages ({0}=chat, {1}=user, {2}=message id)
HISTORY_MSG_FIX = "INSERT INTO crudapp_chathistory(chat, user, message_id) VALUES ({0}, {1}, {2})"
HISTORY_OF_USER = "SELECT * FROM crudapp_chathistory where chat = {0} and user = {1}"
# FIX: the 'user' condition previously reused placeholder {0} (the chat id),
# so the user argument was silently ignored; it now binds {1} like the
# sibling HISTORY_* queries above.
HISTORY_CLEAR = "DELETE FROM crudapp_chathistory WHERE chat = {0} and user = {1}"
# owner uid of a ticket ({0}=ticket id)
GET_USER_OF_TICKET = "SELECT\
 crudapp_ticket.id AS tid,\
 crudapp_telegramuser.uid AS uid \
FROM\
 crudapp_ticket\
 INNER JOIN crudapp_telegramuser ON crudapp_ticket.user_id = crudapp_telegramuser.id \
WHERE\
 crudapp_ticket.id = {0}"
# assign an operator name to a ticket ({0}=ticket id, {1}=operator)
SET_TICKET_ADMIN = "UPDATE crudapp_ticket SET operator = '{1}' WHERE id = {0}"
# notification feeds: new tickets (status 0) / tickets not yet closed
LIST_NEW_TICKETS_NOTIF = 'select crudapp_ticket.id as tid, crudapp_ticket.status, crudapp_ticket.dataClosed, crudapp_ticket.dataCreated, crudapp_ticket.description, crudapp_ticket.operator, crudapp_telegramuser.uid, crudapp_telegramuser.name as username, crudapp_atm.id as atm_id, crudapp_atm.name as atm_name, crudapp_atm.service_id from crudapp_ticket inner join crudapp_atm on crudapp_atm.id = crudapp_ticket.atm_id inner join crudapp_telegramuser on crudapp_telegramuser.id = crudapp_ticket.user_id where crudapp_ticket.status = 0'
LIST_NOT_CLOSED_NOTIF = 'select crudapp_ticket.id as tid, crudapp_ticket.status, crudapp_ticket.dataClosed, crudapp_ticket.dataCreated, crudapp_ticket.description, crudapp_ticket.operator, crudapp_telegramuser.uid, crudapp_telegramuser.name as username, crudapp_atm.id as atm_id, crudapp_atm.name as atm_name, crudapp_atm.service_id from crudapp_ticket inner join crudapp_atm on crudapp_atm.id = crudapp_ticket.atm_id inner join crudapp_telegramuser on crudapp_telegramuser.id = crudapp_ticket.user_id where crudapp_ticket.status in (0,3,4)'
# dashboard counters (results come back under the literal 'COUNT(*)' key)
NOTIF_MONTH ='SELECT COUNT(*) FROM crudapp_ticket WHERE MONTH(crudapp_ticket.dataCreated) = MONTH(NOW())'
NOTIF_READ ='SELECT COUNT(*) FROM crudapp_ticket WHERE crudapp_ticket.status not in (1,0,2,5)'
NOTIF_NOT_OPEN ='SELECT COUNT(*) FROM crudapp_ticket WHERE crudapp_ticket.status = 0'
NOTIF_CLOSED ='SELECT COUNT(*) FROM crudapp_ticket WHERE crudapp_ticket.status = 1'
# unfiltered ticket feed for the notification loop
LIST_TICKETS_NOTIF = 'select crudapp_ticket.id as tid, crudapp_ticket.status, crudapp_ticket.dataClosed, crudapp_ticket.dataCreated, crudapp_ticket.description, crudapp_ticket.operator, crudapp_telegramuser.uid, crudapp_telegramuser.name as username, crudapp_atm.id as atm_id, crudapp_atm.name as atm_name, crudapp_atm.service_id from crudapp_ticket inner join crudapp_atm on crudapp_atm.id = crudapp_ticket.atm_id inner join crudapp_telegramuser on crudapp_telegramuser.id = crudapp_ticket.user_id'
async def get_ticket_msgs(self, tid):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.GET_TICKET_MSG.format(tid))
return await cur.fetchall()
async def branchList(self):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.MFO_LIST)
return await cur.fetchall()
async def userActivateBranch(self, branch, uid):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.TG_USER_ACTIVATE.format(branch,uid))
return await cur.fetchall()
async def getObjects(self, uid):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.GET_ATMS.format(uid))
return await cur.fetchall()
async def get_problems(self):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.GET_PROBLEMS)
return await cur.fetchall()
async def create_ticket(self, atmid, uid):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.CREATE_TICKET.format(atmid, uid))
await cur.fetchone()
return cur.lastrowid
async def set_ticket_broken(self, tid, bid):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.SET_TICKET_BROKEN.format(tid, bid))
await cur.fetchone()
return cur.lastrowid
async def set_ticket_admin(self, tid, name):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.SET_TICKET_ADMIN.format(tid, name))
return await cur.fetchone()
async def stats_summary(self):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
dt = {}
await cur.execute(self.NOTIF_MONTH)
t = await cur.fetchone()
dt['month'] = t['COUNT(*)']
await cur.execute(self.NOTIF_READ)
t = await cur.fetchone()
dt['read'] = t['COUNT(*)']
await cur.execute(self.NOTIF_NOT_OPEN)
t = await cur.fetchone()
dt['news'] = t['COUNT(*)']
await cur.execute(self.NOTIF_CLOSED)
t = await cur.fetchone()
dt['closed'] = t['COUNT(*)']
#(number_of_rows,)= await cur.fetchone()
# await cur.execute(self.LIST_NEW_TICKETS_NOTIF)
# dt['list_new'] = await cur.fetchall()
# await cur.execute(self.LIST_NOT_CLOSED_NOTIF)
# dt['list'] = await cur.fetchall()
await cur.execute(self.LIST_TICKETS_NOTIF)
dt['list'] = await cur.fetchall()
return {'list':dt['list'],'month':dt['month'],'read':dt['read'],'news':dt['news'],'closed':dt['closed']}
#return cur.rowcount
async def exist_ticket_broken(self, tid, bid):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.EXIST_TICKET_BROKEN.format(tid, bid))
await cur.fetchall()
#(number_of_rows,)= await cur.fetchone()
return True if cur.rowcount > 0 else False
#return cur.rowcount
async def exist_ticket_broken_two(self, tid):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.EXIST_BROKEN.format(tid))
await cur.fetchall()
#(number_of_rows,)= await cur.fetchone()
return True if cur.rowcount > 0 else False
#return cur.rowcount
async def remove_ticket_broken(self, tid, bid):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.REMOVE_TICKET_BROKEN.format(tid, bid))
return await cur.fetchone()
async def get_user(self, uid):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.GET_USER.format(uid))
return await cur.fetchone()
async def get_user_of_ticket(self, tid):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.GET_USER_OF_TICKET.format(tid))
tmp = await cur.fetchone()
return tmp['uid']
async def get_edit_ticket(self, uid,edited):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.GET_EDIT_TICKET.format(uid, edited))
return await cur.fetchone()
async def userStage(self, uid):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.GET_USER_STAGE.format(uid))
tmp = await cur.fetchone()
return tmp['stage']
async def registration(self, uid, name, json):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.GET_USER.format(uid))
await cur.fetchall()
if cur.rowcount > 0:
return False
else:
await cur.execute(self.REGISTRATION.format(uid,name,json,0))
await cur.fetchone()
return True
async def activateStatus(self, uid):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.GET_USER.format(uid))
a = await cur.fetchone()
return False if a['status'] == 0 else True
async def setUserStage(self, uid, stage):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.SET_USER_STAGE.format(uid, stage))
return await cur.fetchone()
async def set_edit_ticket(self, tid,status):
async with self.pool.acquire() as conn:
async with conn.cursor(DictCursor) as cur:
await cur.execute(self.SET_EDIT_TICKET.format(tid,status))
return await cur.fetchone()
| |
<reponame>chenbinghui1/DSL<filename>mmdet/runner/hooks/unlabel_pred_hook.py<gh_stars>1-10
import os
import time
import json
import math
import copy
import mmcv
import torch
import numpy as np
from mmcv.runner import Hook, HOOKS
from mmdet.datasets.pipelines import Compose
from mmcv.parallel import collate, scatter
#from mmdet.ops.nms import nms_wrapper
from mmcv.ops import nms
import torch.distributed as dist
from mmdet.datasets.api_wrappers import COCO
import json
import os,sys
def parse_det_results(results, score_thr, reverse_mapper=None):
    """Flatten per-class detector output into a list of instance dicts.

    Args:
        results: sequence indexed by class id; each entry is an iterable of
            (xmin, ymin, xmax, ymax, score) rows.
        score_thr (float): detections scoring below this value are dropped.
        reverse_mapper (dict or None): optional mapping from str(class_id)
            to a readable category name; unmapped ids keep the integer id
            as "category".

    Returns:
        list of dicts with keys "category", "category_index", "score"
        (rounded to 6 decimals) and "bbox" (coordinates truncated to int).
    """
    new_results = []
    for class_id, res_perclass in enumerate(results):
        for xmin, ymin, xmax, ymax, score in res_perclass:
            if score < score_thr:
                continue
            # Fix: membership-test the dict directly — the original built
            # set(reverse_mapper.keys()) anew for every kept detection,
            # which is pure overhead.
            if reverse_mapper is not None and str(class_id) in reverse_mapper:
                category = reverse_mapper[str(class_id)]
            else:
                category = class_id
            new_results.append({
                "category": category,
                "category_index": class_id,
                "score": round(float(score), 6),
                "bbox": [int(xmin), int(ymin), int(xmax), int(ymax)],
            })
    return new_results
def gen_save_json_dict(info_dict, save_root, reverse_mapper=None, save_polygon=False):
    """Build the serializable per-image result dict.

    Copies every key of *info_dict* except "result", then replaces it with
    "infer_results": the parsed detections, filtered by the (non-negative)
    score threshold and sorted by descending score. Only the 'Det' task is
    supported; anything else raises.
    """
    task_type = info_dict["task_type"]
    out = {key: value for key, value in info_dict.items() if key != "result"}
    if task_type != 'Det':
        raise Exception()
    score_thr = 0
    if "infer_score_thre" in info_dict:
        score_thr = max(info_dict["infer_score_thre"], score_thr)
    parsed = parse_det_results(info_dict["result"], score_thr, reverse_mapper)
    out["infer_results"] = sorted(parsed, key=lambda s: s["score"], reverse=True)
    return out
def get_image_list_from_list(list_file, root, anno_root):
    """Collect usable unlabeled image paths from a COCO-style list file.

    An image qualifies when both sides are at least 32 px and its side-car
    annotation json (anno_root/<file_name>.json) reports targetNum > 0.
    Exits the process when nothing qualifies.
    """
    coco = COCO(list_file)
    image_path_list = []
    for img_id in coco.get_img_ids():
        info = coco.load_imgs([img_id])[0]
        anno_file = os.path.join(anno_root, info['file_name'] + '.json')
        with open(anno_file, 'r') as f:
            anno = json.load(f)
        if min(info['width'], info['height']) >= 32 and anno['targetNum'] > 0:
            image_path_list.append(os.path.join(root, info['file_name']))
    if len(image_path_list) == 0:
        print("[ERROR][ModelInfer] Found no image in {}".format(root))
        sys.exit()
    return image_path_list
def create_dir(path):
    """Create *path* (and parents) if missing, then return it.

    Fix: the original guarded os.makedirs with os.path.exists — a racy
    LBYL check that exist_ok=True already makes redundant.
    """
    os.makedirs(path, exist_ok=True)
    return path
def save_results2file(result, image_path, image_height, image_width,
                      save_file_format, checkpoint_name, infer_score_thre,
                      id2cat, cat2id, image_root_path, save_root_path,
                      task_type, vis=False, save_polygon=False, anno_root_path=None, iou=0.1, fuse=False, first_ignore=False):
    """Write pseudo-label detections for one image to a json file.

    Mirrors the image's sub-directory layout under save_root_path, parses
    the raw detector output, optionally fuses it with the previous
    annotations (anno_root_path/<sub_path>.json) and runs per-class NMS
    before dumping the final json.
    Note: `vis` and `save_polygon` are only threaded through to
    gen_save_json_dict / unused here.
    """
    # zhoujing.zjh: use abspath so a trailing '/' on image_root_path cannot
    # skew the sub-directory count below (which would drop the dataset
    # name from the saved results).
    image_root_path = os.path.abspath(image_root_path)
    image_name = image_path.split('/')[-1]
    sub_path = image_path.replace(image_root_path, "")
    sub_dirs = sub_path.split("/")[:-1]
    #assert 3 > len(sub_dirs) > 0, \
    #    "[Error][ModelInfer] Please check image_root_path for inference."
    # Recreate each nesting level under the save root.
    # NOTE(review): if sub_dirs is empty, save_path is never bound and the
    # json branch below would raise NameError — confirm image paths always
    # live at least one level under image_root_path.
    for sub_dir in sub_dirs:
        #image_name = os.path.join(sub_dir, image_name)
        save_path = os.path.join(save_root_path, sub_dir)
        create_dir(save_path)
    if save_file_format == "json":
        save_path = os.path.join(save_path, image_name + ".json")
        info_dict = dict()
        if len(sub_dirs) == 2:
            info_dict["dataset_name"] = sub_dirs[-1]
        else:
            info_dict["dataset_name"] = ''
        info_dict["checkpoint_name"] = checkpoint_name
        info_dict["image_name"] = image_name
        info_dict["image_height"] = image_height
        info_dict["image_width"] = image_width
        info_dict["result"] = result
        info_dict["task_type"] = task_type
        info_dict["infer_score_thre"] = infer_score_thre
        # Parse + sort the raw detections into serializable dicts.
        dict_save = gen_save_json_dict(info_dict, save_root_path,
                                       reverse_mapper=id2cat,
                                       save_polygon=save_polygon)
        # bhchen fuse with old bboxes
        # Repack the fresh detections (dropping categories unknown to
        # cat2id) into parallel lists for fusion.
        new_info=dict()
        new_info['targetNum'] = len(dict_save['infer_results'])
        new_info['rects'] = []
        new_info['scores'] = []
        new_info['tags'] = []
        new_info['cid'] = []
        for i in range(int(new_info['targetNum'])):
            if dict_save['infer_results'][i]['category'] not in cat2id.keys():
                continue
            new_info['rects'].append(dict_save['infer_results'][i]['bbox'])
            new_info['scores'].append(dict_save['infer_results'][i]['score'])
            new_info['tags'].append(dict_save['infer_results'][i]['category'])
            new_info['cid'].append(dict_save['infer_results'][i]['category_index'])
        # load the old bboxes info
        with open(os.path.join(anno_root_path, sub_path[1:])+".json", 'r') as f:
            old_info = json.load(f)
        old_info['cid'] = []
        for i in range(int(old_info['targetNum'])):
            old_info['cid'].append(cat2id[old_info['tags'][i]])
        # to numpy array
        # fuse=True concatenates old + new boxes (unless first_ignore
        # discards the old ones on the first round).
        if fuse:
            if first_ignore:
                bboxes = np.array(new_info['rects'], dtype=np.float32)
                scores = np.array(new_info['scores'], dtype=np.float32)
                cids = np.array(new_info['cid'], dtype=np.float32)
            else:
                bboxes = np.array(old_info['rects'] + new_info['rects'], dtype=np.float32)
                scores = np.array(old_info['scores'] + new_info['scores'], dtype=np.float32)
                cids = np.array(old_info['cid'] + new_info['cid'], dtype=np.float32)
        else:
            bboxes = np.array(new_info['rects'], dtype=np.float32)
            scores = np.array(new_info['scores'], dtype=np.float32)
            cids = np.array(new_info['cid'], dtype=np.float32)
        #nms_op = getattr(nms_wrapper, 'nms')
        # fuse starting
        # Per-class NMS over the (possibly fused) box set.
        # NOTE(review): the loop covers class ids 0 .. len(id2cat)-2 only;
        # confirm the last id is intentionally excluded (background?).
        final_bboxes = []
        final_scores = []
        final_cids = []
        final_mask = []
        for i in range(0,len(id2cat)-1):
            tmp_scores = scores[cids==i]
            if len(tmp_scores) == 0:
                continue
            tmp_bboxes = bboxes[cids==i,:]
            #cls_dets = np.concatenate((tmp_bboxes,tmp_scores[:,None]),axis=1)
            #cls_dets, _ = nms_op(cls_dets, iou_thr=iou)
            cls_dets, _ = nms(tmp_bboxes, tmp_scores, iou_threshold=iou, score_threshold=0.1)
            final_cids.extend([i]*cls_dets.shape[0])
            final_bboxes.extend(cls_dets[:,0:4].tolist())
            final_scores.extend(cls_dets[:,-1].tolist())
        # Assemble and dump the fused annotation file.
        final_info=dict()
        final_info["imageName"]=old_info["imageName"]
        final_info["targetNum"]=len(final_scores)
        final_info["rects"]=final_bboxes
        final_info["tags"]=[id2cat[str(i)] for i in final_cids]
        final_info["masks"]=[[] for j in range(len(final_scores))]
        final_info["scores"]=final_scores
        with open(save_path, "w", encoding='utf-8') as fopen:
            json.dump(final_info, fopen, indent=4, ensure_ascii=False)
class LoadImage(object):
    """Minimal first pipeline stage: read the image and seed meta fields.

    Accepts either a file path or an already-loaded array under
    results['img']; records the filename (None for arrays) and initialises
    img_shape / ori_shape / pad_shape to the loaded image's shape.
    """
    def __call__(self, results):
        source = results['img']
        # Keep the originating filename; a raw array has none.
        results['filename'] = source if isinstance(source, str) else None
        img = mmcv.imread(source)
        results['img'] = img
        for shape_key in ('img_shape', 'ori_shape', 'pad_shape'):
            # liangting.zl 08.20: pad_shape gets a default here too, so
            # later stages need not special-case its absence.
            results[shape_key] = img.shape
        results['ori_filename'] = results['filename']
        return results
def inference_model(model, img, config, task_type, iou):
    """Run one image through the model using the unlabel_pred pipeline.

    Args:
        model (nn.Module): the loaded detector/segmentor.
        img (str or ndarray): image file path or loaded image.
        config: mmcv config; config.data.unlabel_pred supplies the test
            pipeline and the optional "eval_flip" switch.
        task_type (str): 'Det'/'Sem' or 'Cls'; anything else raises.
        iou: unused here (kept for call-site compatibility).

    Returns:
        (result, image_height, image_width) where height/width come from
        the pipeline's ori_shape metadata.
    """
    # build the data pipeline
    test_pipeline = [LoadImage()] + config.data.unlabel_pred.pipeline[1:]
    test_pipeline = Compose(test_pipeline)
    ## bhchen add mirror testing 06/02/2021
    flip = config.data.unlabel_pred.get("eval_flip", False)
    # prepare data
    data = dict(img=img)
    data = test_pipeline(data)
    image_height, image_width, _ = data['img_metas'][0].data['ori_shape']
    # Collate to a batch of one and move it onto the current GPU.
    data = scatter(
        collate([data], samples_per_gpu=1),
        [torch.cuda.current_device()])[0]
    # forward the model
    with torch.no_grad():
        if task_type in {'Det', 'Sem'}:
            if flip:
                # Build a 4-view batch: original, horizontal mirror,
                # mirror+vertical flip, vertical flip; metas are duplicated
                # to match the enlarged batch.
                data_mirror = torch.flip(data['img'][0], [3])
                data_mirror_flop = torch.flip(data_mirror, [2])
                data_flop = torch.flip(data_mirror_flop, [3])
                data['img_metas'][0].append(data['img_metas'][0][0])
                data['img_metas'][0].append(data['img_metas'][0][0])
                data['img_metas'][0].append(data['img_metas'][0][0])
                data['img'][0] = torch.cat([data['img'][0], data_mirror, data_mirror_flop, data_flop], dim=0)
                result_tmp = model(return_loss = False, rescale = True, **data)
                result = result_tmp[0]
                # NOTE(review): the three flipped predictions below are
                # computed but never merged or returned — only the
                # unflipped `result` leaves this function; confirm whether
                # a fusion step was intended.
                result_mirror = result_tmp[1]
                result_mirror_flop = result_tmp[2]
                result_flop = result_tmp[3]
            else:
                result = model(return_loss=False, rescale=True, **data)[0]
        elif task_type == 'Cls':
            result = model(return_loss=False, **data)
        else:
            raise Exception()
    return result, image_height, image_width
def single_gpu_test(model, task_type, image_list,
                    image_root_path, config, id2cat, cat2id,
                    checkpoint_name, infer_score_thre,
                    save_root_path, save_file_format, anno_root_path=None,
                    vis=False, save_polygon=False, ema_model=None, iou=0.1, fuse=False, first_ignore=False):
    """Run pseudo-label inference over *image_list* on a single GPU.

    For every image: forward through `inference_model`, then persist the
    (optionally history-fused, NMS'd) results via `save_results2file`.
    When *ema_model* is given it replaces *model* for inference.
    """
    if ema_model is not None:
        model = ema_model
    model.eval()
    prog_bar = mmcv.ProgressBar(len(image_list))
    for idx in range(len(image_list)):
        image_path = image_list[idx]
        result, image_height, image_width = inference_model(model, image_path, config, task_type, iou=iou)
        save_results2file(result, image_path, image_height, image_width,
                          save_file_format, checkpoint_name, infer_score_thre,
                          id2cat, cat2id, image_root_path, save_root_path,
                          task_type, vis, save_polygon, anno_root_path=anno_root_path, iou=iou, fuse=fuse, first_ignore=first_ignore)
        # One image per step; keep the progress bar in lockstep.
        batch_size = 1
        for _ in range(batch_size):
            prog_bar.update()
def multi_gpu_test(model, task_type, image_list,
                   image_root_path, config, id2cat,cat2id,
                   checkpoint_name, infer_score_thre,
                   save_root_path, save_file_format,
                   rank, world_size, anno_root_path=None, vis=False, save_polygon=False, ema_model=None, iou=0.1, fuse=False, first_ignore=False):
    """Distributed variant of `single_gpu_test`.

    Each rank processes the strided slice image_list[rank::world_size] and
    writes its results independently; a barrier at the end synchronizes all
    ranks. The progress bar lives on rank 0 only (and only for lists longer
    than 8 images) and advances world_size steps per local image.
    """
    if ema_model is not None:
        model = ema_model
    model.eval()
    lens = len(image_list)
    if rank == 0:
        if lens >8:
            prog_bar = mmcv.ProgressBar(len(image_list))
    for idx in range(rank, len(image_list), world_size):
        image_path = image_list[idx]
        result, image_height, image_width = inference_model(model, image_path, config, task_type, iou=iou)
        save_results2file(result, image_path, image_height, image_width,
                          save_file_format, checkpoint_name, infer_score_thre,
                          id2cat, cat2id, image_root_path, save_root_path,
                          task_type, vis, save_polygon, anno_root_path=anno_root_path, iou=iou, fuse=fuse, first_ignore=first_ignore)
        # Each local image stands for world_size images globally.
        batch_size = world_size
        if rank == 0:
            if lens >8:
                for _ in range(batch_size):
                    prog_bar.update()
    # Wait for every rank before anyone consumes the written files.
    dist.barrier()
def adathres(rank, flag, filename, id2cat, cat2id, input_list, input_path, settings):
    """Adaptively recompute per-class weights and score thresholds.

    Scans the pseudo-label jsons for *input_list*, accepts detections above
    the current per-class threshold (0.3 bootstrap when *filename* does not
    exist yet; a class absent from the history is accepted unconditionally),
    then writes to *filename* a json with:
      - 'cat' / 'id': per-class weights (avg_count / count-weighted score)
        ** gamma2, keyed by category name / integer id,
      - 'thres': per-class thresholds clipped into [ranges[0], ranges[1]].
    Only rank 0 runs (it writes the shared file); no-op otherwise.

    Fixes vs. original: `flag == False` anti-idiom; the history json was
    re-read from disk for every tag of every file (it cannot change until
    the final write, so it is loaded once); the triple-duplicated
    accumulation code is a single helper; dead `cnt`/`cnt_` counters
    removed.
    """
    if rank != 0 or not flag:
        return
    ranges = settings.get("ranges", [0.3, 0.35])
    gamma1 = settings.get("gamma1", 0.05)
    gamma2 = settings.get("gamma2", 0.6)
    base = settings.get("base", 0.3)

    # Load previous thresholds once; the file is only rewritten at the very
    # end, so its existence/content is constant throughout the scan.
    history = None
    if os.path.exists(filename):
        with open(filename, 'r') as hist:
            history = json.load(hist)["thres"]

    dis = {}  # per-class count of accepted detections
    cum = {}  # per-class cumulative score of accepted detections

    def _accept(tag, score):
        # Fold one accepted detection into the per-class statistics.
        name = id2cat[str(cat2id[tag])]
        if name not in dis:
            dis[name] = 1
            cum[name] = score
        else:
            dis[name] += 1
            cum[name] += score

    for files in input_list:
        file = files.split('/')[-1]
        with open(os.path.join(input_path, file.strip() + '.json'), 'r') as f:
            data = json.load(f)
        for j, tag in enumerate(data['tags']):
            if tag not in cat2id:
                continue
            if history is None:
                # No history yet: bootstrap with a fixed 0.3 threshold.
                if data['scores'][j] >= 0.3:
                    _accept(tag, data['scores'][j])
            elif tag not in history:
                # Class unseen so far: accept unconditionally.
                _accept(tag, data['scores'][j])
            elif data['scores'][j] >= history[tag]:
                _accept(tag, data['scores'][j])

    # For class weights: classes with a low cumulative score are up-weighted.
    # (All comprehensions are empty when nothing was accepted, so the
    # divisions below cannot trigger on an empty `dis` — same as before.)
    avg = sum(dis.values())
    per = {c: (avg / len(dis) / cum[c]) ** gamma2 for c in dis}
    final = {c: per[c] for c in sorted(per)}
    id_final = {int(cat2id[c]): final[c] for c in final}
    # For class thresholds, clipped into [ranges[0], ranges[1]].
    thres = {c: max(min((cum[c] / (avg / len(dis))) ** gamma1 * base, ranges[1]), ranges[0])
             for c in dis}
    # output
    Final = dict()
    Final['cat'] = final
    Final['id'] = id_final
    Final['thres'] = thres
    with open(filename, 'w') as f:
        json.dump(Final, f, indent=4, ensure_ascii=False)
@HOOKS.register_module()
class UnlabelPredHook(Hook):
def __init__(self, kwargs, config, task_type, interval_mode='epoch', interval=1):
self.dataset_type = kwargs["type"]
self.num_gpus = kwargs["num_gpus"]
self.image_root_path = kwargs["image_root_path"]
self.image_list_file = kwargs["image_list_file"]
self.anno_root_path = kwargs["anno_root_path"]
self.start_point = int(kwargs.get("start_point", 0))
self.fuse = kwargs.get("fuse_history", False)
self.iter_fuse_flag = False
self.first_ignore = True if not kwargs.get("first_fuse", True) else False
self.config = config
self.use_ema = kwargs["use_ema"]
self.category_info_path = kwargs["category_info_path"]
# zhoujing.zjh 2021.01.28 兼容config中dict输入
if isinstance(self.category_info_path, str):
category_info_file = open(kwargs["category_info_path"])
category_info = json.load(category_info_file)
category_info_file.close()
self.id2cat = category_info["id2cat"]
self.cat2id = category_info["cat2id"]
elif isinstance(self.category_info_path, dict):
if "id2cat" in self.category_info_path:
self.id2cat = self.category_info_path["id2cat"]
self.cat2ed = self.category_info_path["cat2id"]
else:
raise RuntimeError('[UnlabelPredHook] train_config \"category_info_path\" is a dict, but not found \"id2cat\".')
else:
raise RuntimeError('[UnlabelPredHook] train_config \"category_info_path\" is not str or dict')
self.infer_score_thre = kwargs["infer_score_thre"] \
if task_type in ('Det', 'Det_Sem') else None
self.first_score_thre = kwargs.get("first_score_thre", None)
if self.first_score_thre == None:
self.first_score_thre = self.config.get("infer_score_thre",0.1)
self.save_file_format = kwargs["save_file_format"]
self.save_dir = config.work_dir
self.image_list = get_image_list_from_list(self.image_list_file, self.image_root_path, self.anno_root_path)
self.interval_mode = interval_mode
self.interval = interval
self.eval_config = kwargs["eval_config"] \
if task_type in ('Det', 'Det_Sem') else None
self.eval_img_path = kwargs.get("img_path")
self.eval_img_resize_size = kwargs.get("img_resize_size")
self.eval_low_level_scale = kwargs.get("low_level_scale")
self.task_type = task_type
save_polygon = kwargs.get("save_polygon")
self.prefile = ""
self.preload_num = kwargs.get("preload",10)
# bhchen add | |
#example X arrays and how its formatted:
# X = [423 30 10 0 ] read from left to right theres a ribosome in position 423 30 and 10, with a 0 kept as a buffer for simulation
t = t_array[0] #time point
Nt = len(t_array) #number of time points to record over
tf = t_array[-1] #final time point
N_rib = 200 #Maximum number of ribosomes on a single mRNA (hard limit for the simulation not a physical constant)
col = np.zeros((1,N_rib))
X_array = np.zeros((N_rib, Nt)) #recording array that records the ribosome posistions over time array points
NR = 0 #number of ribosomes bound
it = 1 #number of iterations
Sn_p = np.eye(max(NR+1, 2), dtype=int) #stoichiometry for the SSA
wn_p = np.zeros((X.shape[0], 1)) # propensities for the SSA
T = np.array([0, 0], dtype=float)
ribtimes = np.array([[0,0]],dtype=float)
col_points = []
#wn_p = np.zeros((1,X.shape[0])).flatten()
wshape = len(wn_p)
Inhibit_condition = 1 #set up inhibitor flags
while t < tf:
if Inhibitor == True:
if t >= inhibit_time:
Inhibit_condition = 0
else:
Inhibit_condition = 1
else:
Inhibit_condition = 1
if FRAP == True : #if the Photobleaching is happening, "remove" ribosomes
if t >= inhibit_time and t < inhibit_time + 20:
#X = np.array([0, 0])
a=1
#T = np.array([0,0])
oldNR = NR
#other options for NR calc
#NR = len(np.where(X>0)[0])
#NR = len(np.where(X!=0)[0])
#NR = len(np.argwhere(X))
#NR = np.nonzero(X)[0].shape[0]
#NR = max(0,len(X)-1)
#NR = np.sum(X!=0)
#NR = np.where(X!=0)[0][-1]+1
#NR = np.flatnonzero(X).shape[0]
NR = len(np.flatnonzero(X)) #each iteration get the number of ribosomes on the mRNA
if X.shape[0] < NR+1: #if the last reaction added a ribosome put a 0 on the end of X vec
X = np.append(X, [0])
T = np.append(T, [0])
T[-2] = t
X[-1] = 0
T[-1] = 0
X = X[0:max(NR, 1)+1] #clear any additional 0's on the end
T = T[0:max(NR, 1)+1]
if oldNR != NR: #if the number of ribosomes has changed reallocate the sizes of stoich and propensities
Sn_p = np.eye(max(NR+1, 2), dtype=int)
wn_p = np.zeros((X.shape[0], 1))
wshape = len(wn_p)
Sn = Sn_p
wn = wn_p
#get indices of where X vecs are > 0 ie where the ribosome values are
inds = X > 0
wn[inds] = kelong[X[inds]-1] #update propensities
if X[0] == N: #if the ribosome in the 0 position is at the end of the mRNA set propensities to the reaction for completion
Sn[:, 0] = (np.append(X[1:], np.array([0]))-X[0:])
wn[0] = kcompl
#if there are no ribosomes or when there is enough room for a new ribosome to bind add the propensity for binding
if NR == 0:
wn[NR] = kbind*Inhibit_condition
if NR > 0 and X[NR-1] > R:
wn[NR] = kbind*Inhibit_condition
REST = np.less(X[1:]+10, X[0:-1]) #apply the footprint condition ie set any propensities where it violates the > 10 codons apart rule to 0
wn[1:] = (wn[1:].T*REST).T #apply that logical^ to propensities
w0 = sum(wn.flat) #get the sum of propensities
randnum = np.random.random_sample(2) #update time to point of next reaction (exponential waiting time distb)
t = (t-np.log(randnum[0])/w0)
while it < Nt and t > t_array[it]: #record state if past timepoint
X_array[0:len(X), it] = X
it += 1
if t < tf: #if still running simulation pick which reaction happened via random number and propensity sum
r2 = w0*randnum[1]
tmp = 0
for i in range(wshape):
tmp = tmp + wn[i]
if tmp >= r2:
event = i
break
X = (X + Sn[:, event].T) #update X vector for new ribosome state
if np.sum(Sn[:,event]) < 0 :
ribtimes = np.vstack((ribtimes,[T[0],t]))
T[:-1] = T[1:]
Ncol = np.append(Ncol,col[0][0] )
col = np.atleast_2d(np.append(col[:,1:],[0]))
else:
if X[event-1] == X[event] + R:
col[0][event] +=1
col_points.append( (X[event],t) )
return X_array,ribtimes[1:,:],Ncol,col_points #return the completed simulation
def get_acc2(self, data, trunc=False):
'''
Get autocorrelation function
*NOT* multi-tau
'''
N = len(data)
fvi = np.fft.fft(data, n=2*N)
acf = fvi*np.conjugate(fvi)
acf = np.fft.ifft(acf)
acf = np.real(acf[:N])/float(N)
if trunc:
acf[acf < 0]=0
for i in range(1, len(acf)):
if acf[i] > acf[i-1]:
acf[i] = acf[i-1]
return acf
#
# def get_cc(self, data1,data2, trunc=False):
# '''
# Get crosscorrelation function fft version
#
# data1, data2 are 1xN vectors of signals to correlate
#
# '''
# N = len(data1)
# fvi_1 = np.fft.fft(data1, n=2*N)
# fvi_2 = np.fft.fft(data2, n=2*N)
#
# ccf = fvi_1*np.conjugate(fvi_2)
# ccf = np.fft.ifft(ccf)
# ccf = np.real(ccf)/float(N)
# #ccf = np.hstack((ccf[::-1][:-1],ccf))
#
# if trunc:
# ccf[ccf < 0]=0
# for i in range(1, len(ccf)):
# if ccf[i] > ccf[i-1]:
# ccf[i] = ccf[i-1]
# return ccf
    def elongation_animation(self, ti=0, tf=1000, tstep=1000, cell_radius=50, imagesize=5, dpi=90, filename='simulated_cell', ssa_obj=None, fcolor='#00FF00' ,rnacolor='#FF0000', xkcd=False):
        '''
        Create an mRNA-translation animation and save it as 'elong.gif'.

        Draws the mRNA as a white box with epitope positions marked,
        animates ribosome positions (red circles) with their nascent
        chains, and plots the running intensity trace. If *ssa_obj* is not
        supplied a fresh single-trajectory SSA is run with tf/tstep.
        NOTE(review): `filename` is overwritten with 'elong.gif' below, so
        the parameter (and fcolor/rnacolor/ti/cell_radius) currently has no
        effect — confirm whether that is intentional.
        '''
        custom_cmap = ['#69dd42', '#e5361b', '#db11c7']
        # Rotate points (x, y) by *angle* radians about the origin.
        def rpts(x, y, angle):
            nx = np.cos(angle)*x - np.sin(angle)*y
            ny = np.sin(angle)*x + np.cos(angle)*y
            return nx, ny
        def update_line(num, xpos, ypos, line): #function for the FuncAnimation
            if num != 0:
                ax.get_lines()[-1].remove()
            for child in ax.get_children(): #remove the previous patch collection (green spots)
                if isinstance(child, PatchCollection):
                    child.remove()
            patches = []
            gp = []
            ep = []
            radi = np.ones(xpos[:, inds[num]].shape)*4 #create a max radius of 3 for intensity vecs
            ypos = np.ones(xpos[:, inds[num]].shape)*(ytop+3)
            x = xpos[:, inds[num]]
            # Park unoccupied slots (position 0) far off-screen at -300.
            x[np.where(x == 0)] = x[np.where(x == 0)] - 300
            for x1, y1, r in zip(xpos[:, inds[num]], ypos, radi): #make circle objects of radius based on ivec
                circle = mpatches.Circle((x1, y1), r, facecolor='#FF0000', edgecolor='k')
                patches.append(circle)
            pcolor = custom_cmap[0]
            # Draw each ribosome's nascent chain as a rotated sine "squiggle";
            # residues at epitope positions are drawn larger and colored.
            for i in range(len(x.flatten())):
                if x[i] > 0:
                    xpts = np.linspace(0, int(x[i])-1, int(x[i]))
                    ypts = 5*np.sin(1/10*np.linspace(0, int(x[i])-1, int(x[i])))
                    xpts, ypts = rpts(ypts, xpts, 1)
                    ypts = ypts+ytop+3
                    xpts = xpts+x[i]
                    radi = np.ones(xpts.shape)*1
                    k = 0
                    ypts = np.fliplr(np.atleast_2d(ypts))
                    ypts = ypts.flatten()
                    xpts = np.fliplr(np.atleast_2d(xpts))
                    xpts = xpts.flatten()
                    for x2, y2, r2 in zip(xpts, ypts, radi):
                        probloc = False
                        j = 0
                        for key in epitopes.keys():
                            if k in epitopes[key]:
                                probloc = True
                                pcolor = custom_cmap[j]
                            j += 1
                        # Small random jitter so overlapping residues stay visible.
                        rx = np.random.rand()*2
                        ry = np.random.rand()*2
                        if probloc == False:
                            circle = mpatches.Circle((x2+rx, y2+ry), r2, facecolor='#0000FF', edgecolor='#FFFFFF', lw=2, ls='solid')
                            gp.append(circle)
                        else:
                            circle = mpatches.Circle((x2+rx, y2+ry), r2*3, facecolor='#00FF00', edgecolor='#000000', lw=2, ls='solid')
                            ep.append(circle)
                        k += 1
                        #fig.gca().add_artist(circle)
            '''
            xs = np.flip(np.sort(xpos[:,inds[num]][0].flatten()),axis=0)
            for i in range(max_ribs):
                line.set_data(xpos[:,inds[num]],ypos[inds[num]])
                line.set_linewidth(0)
                line.set_marker('o')
                line.set_markersize(3)
            '''
            p = PatchCollection(patches, facecolors=('#FF0000',), zorder=5) #create a patch collection to add to axis
            m = PatchCollection(gp, facecolors=('#0000FF',), lw=2, zorder=3) #create a patch collection to add to axis
            e = PatchCollection(ep, facecolors=(pcolor,), zorder=4)
            n = num
            # Running intensity trace plus a "fluorescence" dot whose size
            # tracks the current intensity.
            ax.plot(np.linspace(0, tag_length, len(ssa_obj.time_vec_fixed[ssa_obj.start_time:]))[:n], 3*ssa_obj.intensity_vec.flatten()[:n]+total_length, color=pcolor)
            fldot = mpatches.Ellipse((total_length-30, total_length+40), width=ssa_obj.intensity_vec.flatten()[n], height=ssa_obj.intensity_vec.flatten()[n]*1.0, color=pcolor)
            f = [fldot]
            fe = PatchCollection(f, facecolors=(pcolor,), zorder=4)
            ax.add_collection(p) #adds the circles to axis
            ax.add_collection(m) #adds the circles to axis
            ax.add_collection(e)
            ax.add_collection(fe)
            plt.xlabel(str(inds[num])) #update time label
            return line,
        if ssa_obj == None:
            ssa_obj = self.ssa_solver(n_traj=1, tf=tf, tstep=tstep)
        if xkcd == True:
            plt.xkcd()
        fig1 = plt.figure(figsize=(imagesize+5, imagesize), dpi=dpi) #make figure
        fig1.tight_layout()
        # NOTE(review): string form add_subplot('111') is rejected by newer
        # matplotlib releases — confirm the pinned version.
        ax = fig1.add_subplot('111')
        ax.set_aspect(1)
        tag_length = self.POI.tag_length
        total_length = self.POI.total_length
        epitopes = self.POI.tag_epitopes
        # Recompute tag_length from the gene length (overrides the value above).
        tag_length = total_length - self.POI.gene_length
        ax.cla()
        # White box spanning the mRNA, from codon 0 to total_length.
        ybot = 90
        ytop = 110
        ax.plot([0, total_length], [ybot, ybot], color='white', zorder=3)
        ax.plot([0, total_length], [ytop, ytop], color='white', zorder=3)
        ax.plot([0, 0], [ybot, ytop], color='white', zorder=3)
        ax.plot([total_length, total_length], [ybot, ytop], color='white', zorder=3)
        ax.axis([-10, total_length+10, 80, total_length+np.max(ssa_obj.intensity_vec)*3+20])
        ax.plot([tag_length, tag_length], [ybot, ytop], color='white', linewidth=1, zorder=3)
        k = 0
        for key in epitopes.keys():
            for i in range(len(epitopes[key])):
                ax.plot([epitopes[key][i], epitopes[key][i]], [ybot, ytop], color=custom_cmap[k], linewidth=2, zorder=3)
            rect = mpatches.Rectangle(xy=(tag_length, ybot), width=total_length-tag_length, height=ytop-ybot, color='#0000FF')
            #ax.fill_between([tag_length,tag_length,total_length,total_length],[ybot,ytop,ytop,ybot],color='#00FF00')
            ax.add_patch(rect)
            k += 1
        ticks = np.linspace(0, total_length, 10).astype(int)
        ax.set_xticks(ticks)
        ax.set_xlabel('Codon Position')
        ax.get_yaxis().set_visible(False)
        ax.set_facecolor('k')
        filename = 'elong.gif'
        Writer = animation.writers['pillow']
        print('making movie...')
        # Only slots that are ever occupied need animating.
        max_ribs = np.max(np.nonzero(ssa_obj.solutions[0])[0])
        l, = plt.plot([], [], 'r-')
        t = ssa_obj.time_vec_fixed[ssa_obj.start_time:]
        inds = np.linspace(0, len(t)-1, len(t)).astype(int)
        xpos = np.zeros((max_ribs, len(ssa_obj.time_vec_fixed[ssa_obj.start_time:])))
        ypos = np.ones((1, len(ssa_obj.time_vec_fixed[ssa_obj.start_time:]))).flatten()
        xpos[:, :] = ssa_obj.solutions[0][:max_ribs, ssa_obj.start_time:len(ssa_obj.time_vec_fixed)]
        writer = Writer(fps=30, metadata=dict(artist='Me'), bitrate=1800)
        line_ani = animation.FuncAnimation(fig1, update_line, len(ssa_obj.time_vec_fixed[ssa_obj.start_time:]), fargs=(xpos, ypos, l),
                                           interval=50, blit=True)
        line_ani.save((filename), writer=writer) #save the animation
def simulate_cell(self, diffusion_constant, kon, koff, kRNA, kdecay, ti=0, tf=1000, tstep=1000, cell_radius=50, imagesize=5, dpi=90, filename='simulated_cell', ssa_obj=None, fcolor='#00FF00', rnacolor='#FF0000'):
'''
[DNA] ==kRNA==> [RNA] <==koff== [RNA*] ==translation simulation==> [Protein]===> null
// || /\
|| `'=====kon====='`
| |
'm/s' and 'ft/s'.
The altitude may be in feet ('ft'), metres ('m'), kilometres ('km'),
statute miles, ('sm') or nautical miles ('nm').
The temperature may be in deg C, F, K or R.
If the units are not specified, the units in default_units.py are used.
This first version only works for TAS < 661.48 kt.
"""
P = SA.alt2press(altitude, alt_units, press_units='pa')
press_ratio = SA.alt2press_ratio(altitude, alt_units)
temp_ratio = U.temp_conv(temp, from_units=temp_units, to_units='K')\
/ 288.15
density_ratio = press_ratio / temp_ratio
Rho = Rho0 * density_ratio
tas = _dp2speed(dp, P, Rho, press_units, speed_units)
return tas
# #############################################################################
#
# speed to delta pressure
#
# CAS to delta pressure
#
# EAS and altitude to delta pressure
#
# TAS, altitude and temperature to delta pressure
#
# #############################################################################
def _speed2dp(
    speed,
    Pref,
    Rhoref,
    press_units=default_press_units,
    speed_units=default_speed_units,
):
    """Return the pitot-minus-static differential pressure for *speed*.

    Subsonic compressible-flow form. The reference pressure *Pref* and
    density *Rhoref* select CAS/EAS/TAS behaviour in the callers.
    """
    v = U.speed_conv(speed, from_units=speed_units, to_units='m/s')
    ratio = (Rhoref * v ** 2.) / (7. * Pref) + 1.
    dp_pa = Pref * (ratio ** 3.5 - 1.)
    return U.press_conv(dp_pa, from_units='pa', to_units=press_units)
def _super_cas2dp(mcas):
    """Return the pitot/static differential pressure for a supersonic CAS.

    Input in m/s, output in Pa. Only valid for CAS > 661.48 kt
    (i.e. mcas > A0).
    """
    speed_ratio = mcas / A0
    dp_over_P0 = (F * speed_ratio ** 7.) \
        / (7. * speed_ratio ** 2. - 1.) ** 2.5 - 1.
    return dp_over_P0 * P0
def cas2dp(cas, speed_units=default_speed_units,
           press_units=default_press_units):
    """
    Return the differential pressure (difference between pitot and static
    pressures) for a given CAS.

    The speed units may be 'kt', 'mph', 'km/h', 'm/s' and 'ft/s'.
    The pressure units may be in inches of HG, mm of HG, psi, lb/ft^2,
    hpa and mb. The units are specified as: 'in HG', 'mm HG', 'psi',
    'lb/in**2', 'psf', 'lb/ft**2 'hpa', 'mb' or 'pa'.
    If the units are not specified, the units in default_units.py are used.
    """
    # decide the regime by comparing against the speed of sound A0 (m/s)
    mcas = U.speed_conv(cas, from_units=speed_units, to_units='m/s')
    if mcas > A0:
        # supersonic branch
        dp_pa = _super_cas2dp(mcas)
        return U.press_conv(dp_pa, from_units='pa', to_units=press_units)
    # subsonic branch, referenced to sea-level standard conditions
    return _speed2dp(cas, P0, Rho0, press_units=press_units,
                     speed_units=speed_units)
def eas2dp(
    eas,
    altitude,
    speed_units=default_speed_units,
    alt_units=default_alt_units,
    press_units=default_press_units,
):
    """
    Return the differential pressure (difference between pitot and static
    pressures) for a given EAS.

    The speed units may be 'kt', 'mph', 'km/h', 'm/s' and 'ft/s'.
    The pressure units may be in inches of HG, mm of HG, psi, lb/ft^2,
    hpa and mb. The units are specified as: 'in HG', 'mm HG', 'psi',
    'lb/in**2', 'psf', 'lb/ft**2 'hpa', 'mb' or 'pa'.
    The altitude may be in feet ('ft'), metres ('m'), kilometres ('km'),
    statute miles, ('sm') or nautical miles ('nm').
    If the units are not specified, the units in default_units.py are used.
    This first version only works for CAS < 661.48 kt.
    """
    # guard the subsonic-only equation
    keas = U.speed_conv(eas, from_units=speed_units, to_units='kt')
    if keas > 661.48:
        raise ValueError('The function eas2dp only works if the eas is less than or equal to 661.48 kt')
    static_press = SA.alt2press(altitude, alt_units=alt_units, press_units='pa')
    return _speed2dp(eas, static_press, Rho0, press_units=press_units,
                     speed_units=speed_units)
def tas2dp(
    tas,
    altitude,
    temp,
    speed_units=default_speed_units,
    alt_units=default_alt_units,
    temp_units=default_temp_units,
    press_units=default_press_units,
):
    """
    Return the differential pressure (difference between pitot and static
    pressures) for a given TAS.

    The speed units may be 'kt', 'mph', 'km/h', 'm/s' and 'ft/s'.
    The pressure units may be in inches of HG, mm of HG, psi, lb/ft^2,
    hpa and mb. The units are specified as: 'in HG', 'mm HG', 'psi',
    'lb/in**2', 'psf', 'lb/ft**2 'hpa', 'mb' or 'pa'.
    The altitude may be in feet ('ft'), metres ('m'), kilometres ('km'),
    statute miles, ('sm') or nautical miles ('nm').
    The temperature may be in deg C, F, K or R.
    If the units are not specified, the units in default_units.py are used.
    This first version only works for CAS < 661.48 kt.
    """
    # guard the subsonic-only equation
    if U.speed_conv(tas, from_units=speed_units, to_units='kt') > 661.48:
        raise ValueError('The function tas2dp only works if the tas is less than or equal to 661.48 kt')
    static_press = SA.alt2press(altitude, alt_units=alt_units, press_units='pa')
    # density ratio sigma = (pressure ratio) / (temperature ratio vs 288.15 K)
    temp_ratio = U.temp_conv(temp, from_units=temp_units, to_units='K') / 288.15
    sigma = SA.alt2press_ratio(altitude, alt_units) / temp_ratio
    return _speed2dp(tas, static_press, Rho0 * sigma,
                     press_units=press_units, speed_units=speed_units)
def cas2eas(
    cas,
    altitude,
    speed_units=default_speed_units,
    alt_units=default_alt_units,
):
    """
    Return the EAS for a given CAS, pressure altitude and temperature.

    The speed units may be 'kt', 'mph', 'km/h', 'm/s' and 'ft/s'.
    The altitude may be in feet ('ft'), metres ('m'), kilometres ('km'),
    statute miles, ('sm') or nautical miles ('nm').
    If the units are not specified, the units in default_units.py are used.
    """
    # CAS -> delta pressure -> EAS at the given altitude
    delta_p = cas2dp(cas, speed_units)
    return dp2eas(delta_p, altitude, alt_units=alt_units,
                  speed_units=speed_units)
def i_cas2eas(data_items):
    """
    Return the EAS for a given CAS, pressure altitude and temp, with
    interactive input from the user.

    Prompts (via the ``_get_*`` helpers) for any values missing from
    ``data_items``, stores the gathered inputs and the computed EAS back
    into ``data_items`` and prints a summary.  Returns None; the result is
    available as ``data_items['eas']``.
    """
    # version that goes interactive, if required
    data_items['cas'] = _get_CAS(data_items)
    cas = data_items['cas']
    data_items['speed_units'] = _get_speed_units(data_items)
    speed_units = data_items['speed_units']
    data_items['altitude'] = _get_alt(data_items)
    altitude = data_items['altitude']
    data_items['alt_units'] = _get_alt_units(data_items)
    alt_units = data_items['alt_units']
    # fix: a bare ``print`` is a no-op expression on Python 3 -- call it to
    # actually emit the blank line
    print()
    print('CAS = {0} - {1}'.format(cas, speed_units))
    print('Altitude = {0} - {1}'.format(altitude, alt_units))
    print()
    eas = cas2eas(cas, altitude, speed_units, alt_units)
    data_items['eas'] = eas
    return_string = 'EAS = ' + str(eas) + ' ' + speed_units
    print(return_string)
def cas2tas(
    cas,
    altitude,
    temp='std',
    speed_units=default_speed_units,
    alt_units=default_alt_units,
    temp_units=default_temp_units,
):
    """
    Return the TAS for a given CAS, pressure altitude and temperature.

    The speed units may be 'kt', 'mph', 'km/h', 'm/s' and 'ft/s'.
    The altitude may be in feet ('ft'), metres ('m'), kilometres ('km'),
    statute miles, ('sm') or nautical miles ('nm').
    The temperature may be in deg C, F, K or R. The temperature defaults to
    std temperature if it is not input.
    If the units are not specified, the units in default_units.py are used.
    """
    # default to standard atmosphere temperature at this altitude
    if temp == 'std':
        temp = SA.alt2temp(altitude, temp_units=temp_units,
                           alt_units=alt_units)
    # CAS -> delta pressure -> TAS at the given altitude and temperature
    return dp2tas(
        cas2dp(cas, speed_units),
        altitude,
        temp,
        speed_units=speed_units,
        alt_units=alt_units,
        temp_units=temp_units,
    )
def i_cas2tas(data_items):
    """
    Return the TAS for a given CAS, pressure altitude and temp, with
    interactive input from the user.

    Prompts (via the ``_get_*`` helpers) for any values missing from
    ``data_items``, stores the gathered inputs and the computed TAS back
    into ``data_items`` and prints a summary.  Returns None; the result is
    available as ``data_items['tas']``.
    """
    # version that goes interactive, if required
    data_items['cas'] = _get_CAS(data_items)
    cas = data_items['cas']
    data_items['speed_units'] = _get_speed_units(data_items)
    speed_units = data_items['speed_units']
    data_items['altitude'] = _get_alt(data_items)
    altitude = data_items['altitude']
    data_items['alt_units'] = _get_alt_units(data_items)
    alt_units = data_items['alt_units']
    data_items['temp_units'] = _get_temp_units(data_items)
    temp_units = data_items['temp_units']
    data_items['temp'] = _get_temp(data_items)
    temp = data_items['temp']
    # fix: bare ``print`` is a no-op expression on Python 3 -- call it to
    # actually emit the blank lines
    print()
    print('CAS = ', cas, speed_units)
    print('Altitude = ', altitude, alt_units)
    print('Temperature = ', temp, 'deg', temp_units)
    print()
    tas = cas2tas(
        cas,
        altitude,
        temp,
        speed_units,
        alt_units,
        temp_units,
    )
    data_items['tas'] = tas
    return_string = 'TAS = ' + str(tas) + ' ' + speed_units
    print(return_string)
def eas2tas(
    eas,
    altitude,
    temp='std',
    speed_units=default_speed_units,
    alt_units=default_alt_units,
    temp_units=default_temp_units,
):
    """
    Return the TAS for a given EAS, pressure altitude and temperature.

    The speed units may be 'kt', 'mph', 'km/h', 'm/s' and 'ft/s'.
    The altitude may be in feet ('ft'), metres ('m'), kilometres ('km'),
    statute miles, ('sm') or nautical miles ('nm').
    The temperature may be in deg C, F, K or R. The temperature defaults to
    std temperature if it is not input.
    If the units are not specified, the units in default_units.py are used.
    """
    # default to standard atmosphere temperature at this altitude
    if temp == 'std':
        temp = SA.alt2temp(altitude, temp_units=temp_units,
                           alt_units=alt_units)
    # EAS -> delta pressure -> TAS at the given altitude and temperature
    return dp2tas(
        eas2dp(eas, altitude, speed_units, alt_units),
        altitude,
        temp,
        speed_units=speed_units,
        alt_units=alt_units,
        temp_units=temp_units,
    )
def i_eas2tas(data_items):
"""
Return the TAS for a given EAS, pressure altitude and temp, with
interactive input from the user.
"""
# version that goes interactive, if | |
except KeyError:
involved = oset.union(*self.node_to_terms(node))
keep = self.get_keep(node)
legs = involved & keep
self.info[node]['legs'] = legs
return legs
def get_involved(self, node):
"""Get all the indices involved in the formation of subgraph ``node``.
"""
try:
involved = self.info[node]['involved']
except KeyError:
if len(node) == 1:
involved = oset()
else:
sub_legs = map(self.get_legs, self.children[node])
involved = oset.union(*sub_legs)
self.info[node]['involved'] = involved
return involved
def get_removed(self, node):
"""Get the indices that will be removed by the creation of ``node``.
"""
try:
removed = self.info[node]['removed']
except KeyError:
removed = self.get_involved(node) - self.get_legs(node)
self.info[node]['removed'] = removed
return removed
def get_size(self, node):
"""Get the tensor size of ``node``.
"""
try:
size = self.info[node]['size']
except KeyError:
size = compute_size_by_dict(self.get_legs(node), self.size_dict)
self.info[node]['size'] = size
return size
def get_flops(self, node):
"""Get the FLOPs for the pairwise contraction that will create
``node``.
"""
try:
flops = self.info[node]['flops']
except KeyError:
if len(node) == 1:
flops = 0
else:
involved = self.get_involved(node)
removed = self.get_removed(node)
flops = flop_count(involved, removed, 2, self.size_dict)
self.info[node]['flops'] = flops
return flops
def total_flops(self):
"""Sum the flops contribution from every node in the tree.
"""
if self._track_flops:
return self.multiplicity * self._flops
self._flops = 0
for node, _, _ in self.traverse():
self._flops += self.get_flops(node)
self._track_flops = True
return self.multiplicity * self._flops
def total_write(self):
"""Sum the total amount of memory that will be created and operated on.
"""
if self._track_write:
return self.multiplicity * self._write
self._write = 0
for node, _, _ in self.traverse():
self._write += self.get_size(node)
self._track_write = True
return self.multiplicity * self._write
def max_size(self):
"""The size of the largest intermediate tensor.
"""
if self._track_size:
return self._sizes.max()
self._sizes = MaxCounter()
for node, _, _ in self.traverse():
self._sizes.add(self.get_size(node))
self._track_size = True
return self._sizes.max()
def peak_size(self, order=None):
"""Get the peak concurrent size of tensors needed - this depends on the
traversal order, i.e. the exact contraction path, not just the
contraction tree.
"""
tot_size = sum(self.get_size(node) for node in self.gen_leaves())
peak = tot_size
for p, l, r in self.traverse(order=order):
tot_size -= self.get_size(l)
tot_size -= self.get_size(r)
tot_size += self.get_size(p)
peak = max(peak, tot_size)
return peak
def arithmetic_intensity(self):
"""The ratio of total flops to total write - the higher the better for
extracting good computational performance.
"""
return self.total_flops() / self.total_write()
    def remove_ind(self, ind, inplace=False):
        """Remove (slice) index ``ind`` from this contraction tree, updating
        every node's cached information (involved/legs/removed, sizes, flops)
        and multiplying the tree's ``multiplicity`` by the removed dimension.

        Parameters
        ----------
        ind : str
            The index to remove.
        inplace : bool, optional
            Whether to modify this tree or a copy of it.

        Returns
        -------
        ContractionTree
            The tree with ``ind`` sliced out.
        """
        tree = self if inplace else self.copy()
        # make sure the running totals exist before adjusting them incrementally
        tree.total_flops()
        tree.total_write()
        tree.max_size()
        d = tree.size_dict[ind]
        s_ind = oset([ind])
        for node, node_info in tree.info.items():
            # if ind doesn't feature in this node (contraction) nothing to do
            involved = tree.get_involved(node)
            # inputs can have leg indices that are not involved so
            legs = tree.get_legs(node)
            if (ind not in involved) and (ind not in legs):
                continue
            # else update all the relevant information about this node
            node_info['involved'] = involved - s_ind
            removed = tree.get_removed(node)
            if ind in legs:
                # ind appears on this tensor -> its size shrinks by factor d
                node_info['legs'] = legs - s_ind
                old_size = tree.get_size(node)
                tree._sizes.discard(old_size)
                new_size = old_size // d
                tree._sizes.add(new_size)
                node_info['size'] = new_size
                tree._write += (-old_size + new_size)
                # modifying keep not stricly necessarily as its only called as
                # ``legs = keep & involved`` ?
                keep = tree.get_keep(node)
                node_info['keep'] = keep - s_ind
            else:
                # ind is summed over at this contraction -> flops shrink by d
                node_info['removed'] = removed - s_ind
                old_flops = tree.get_flops(node)
                new_flops = old_flops // d
                if len(removed) == 1:
                    # if ind was the last contracted index then have outer product
                    new_flops //= 2
                node_info['flops'] = new_flops
                tree._flops += (-old_flops + new_flops)
        def term_without(t):
            # drop ``ind`` from a term, if present
            if ind in t:
                return t - s_ind
            return t
        tree.output = term_without(tree.output)
        tree.inputs = tuple(map(term_without, tree.inputs))
        tree.already_optimized.clear()
        # slicing ind means the remaining contraction is repeated d times
        tree.multiplicity = tree.multiplicity * d
        tree.sliced_inds = tree.sliced_inds + (ind,)
        return tree
    # in-place alias
    remove_ind_ = functools.partialmethod(remove_ind, inplace=True)
def contract_pair(self, x, y, check=False):
"""Contract node ``x`` with node ``y`` in the tree to create a new
parent node.
"""
parent = x | y
# make sure info entries exist for all (default dict)
for node in (x, y, parent):
self.add_node(node, check=check)
# enforce left ordering of 'heaviest' subtrees
nx, ny = len(x), len(y)
# deterministically break ties
hx, hy = hash(x), hash(y)
if (nx, hx) > (ny, hy):
lr = (x, y)
else:
lr = (y, x)
self.children[parent] = lr
if self.track_childless:
self.childless.discard(parent)
if x not in self.children and nx > 1:
self.childless.add(x)
if y not in self.children and ny > 1:
self.childless.add(y)
if self._track_flops:
self._flops += self.get_flops(parent)
if self._track_write:
self._write += self.get_size(parent)
if self._track_size:
self._sizes.add(self.get_size(parent))
return parent
    def contract(self, nodes, optimize='auto-hq', check=False):
        """Contract an arbitrary number of ``nodes`` in the tree to build up a
        subtree. The root of this subtree (a new intermediate) is returned.

        Parameters
        ----------
        nodes : sequence of frozenset
            The subgraphs to contract together.
        optimize : str or callable, optional
            Path finder used to fill in the intermediate pairwise
            contractions when more than two nodes are given.
        check : bool, optional
            Whether to perform validity checks while adding nodes.
        """
        if len(nodes) == 1:
            # nothing to contract
            return next(iter(nodes))
        if len(nodes) == 2:
            # a single pairwise contraction
            return self.contract_pair(*nodes, check=check)
        # create the bottom and top nodes
        grandparent = frozenset.union(*nodes)
        self.add_node(grandparent, check=check)
        for node in nodes:
            self.add_node(node, check=check)
        # if more than two nodes need to find the path to fill in between
        #        \
        #         GN            <- 'grandparent'
        #        /  \
        #      ?????????
        #    ?????????????      <- to be filled with 'temp nodes'
        #   /  \   /  /  \
        #  N0  N1 N2 N3  N4     <- ``nodes``, or, subgraphs
        #  /    \ /  /    \
        path_inputs = [oset(self.get_legs(x)) for x in nodes]
        path_output = oset(self.get_legs(grandparent))
        if isinstance(optimize, str):
            path_fn = get_path_fn(optimize)
        else:
            path_fn = optimize
        path = path_fn(path_inputs, path_output, self.size_dict)
        # now we have path create the nodes in between
        temp_nodes = list(nodes)
        for p in path:
            # pop in reverse order so the earlier indices stay valid
            to_contract = [
                frozenset(temp_nodes.pop(i)) for i in sorted(p, reverse=True)
            ]
            temp_nodes.append(
                self.contract(to_contract, optimize=optimize, check=check)
            )
        parent, = temp_nodes
        if check:
            # final remaining temp input should be the 'grandparent'
            assert parent == grandparent
        return parent
def is_complete(self):
"""Check every node has two children, unless it is a leaf.
"""
too_many_nodes = len(self.info) > 2 * self.N - 1
too_many_branches = len(self.children) > self.N - 1
if too_many_nodes or too_many_branches:
raise ValueError("Contraction tree seems to be over complete!")
queue = [self.root]
while queue:
x = queue.pop()
if len(x) == 1:
continue
try:
queue.extend(self.children[x])
except KeyError:
return False
return True
    def traverse(self, order=None):
        """Generate, in order, all the node merges in this tree. Non-recursive!
        This ensures children are always visited before their parent.

        Parameters
        ----------
        order : None or callable
            How to order the contractions within the tree. If a callable is
            given (which should take a node as its argument), try to contract
            nodes that maximize this function first.

        Returns
        -------
        generator[tuple[frozenset[frozenset[str]]]]
            The bottom up ordered sequence of tree merges, each a
            tuple of ``(parent, left_child, right_child)``.

        See Also
        --------
        descend
        """
        queue = [self.root]
        if order is not None:
            from bisect import bisect
            # keep ``queue`` sorted by score, in lockstep with ``scores``
            scores = [order(self.root)]
            def add_to_queue(node):
                score = order(node)
                i = bisect(scores, score)
                scores.insert(i, score)
                queue.insert(i, node)
        else:
            def add_to_queue(node):
                queue.append(node)
        ready = set(self.gen_leaves())
        seen = set()
        # ``check`` indexes backwards from the end (highest score) of ``queue``
        check = -1
        while queue:
            node = queue[check]
            l, r = self.children[node]
            # both node's children are ready -> we can yield this contraction
            if (l in ready) and (r in ready):
                queue.pop(check)
                ready.add(node)
                yield node, l, r
                check = -1  # reset
                continue
            if node not in seen:
                # add the node's children to the queue to be processed
                if r not in ready:
                    add_to_queue(r)
                if l not in ready:
                    add_to_queue(l)
                seen.add(node)
                check = -1  # reset
                continue
            # node is not ready and we have already added its children ->
            # move onto the next highest scoring node to check
            check -= 1
def descend(self, mode='dfs'):
"""Generate, from root to leaves, all the node merges in this tree.
Non-recursive! This ensures parents are visited before their children.
Parameters
----------
mode : {'dfs', bfs}, optional
How expand from a parent.
Returns
-------
generator[tuple[frozenset[frozenset[str]]]]
The top down ordered sequence of tree merges, each a
tuple of ``(parent, left_child, right_child)``.
See Also
--------
traverse
"""
queue = [self.root]
while queue:
if mode == 'dfs':
parent = queue.pop(-1)
elif mode == 'bfs':
parent = queue.pop(0)
l, r = self.children[parent]
yield parent, l, r
if len(l) > 1:
queue.append(l)
if len(r) > 1:
queue.append(r)
def get_subtree(self, node, size, search='bfs'):
"""Get a subtree spanning down from | |
id):
"""寻找指定id的网格"""
if id == EMPTY_STRING or len(id) <1:
return
if direction == DIRECTION_LONG:
for x in self.dnGrids[:]:
if x.id == id:
self.writeCtaLog(u'找到下网格[open={},close={},stop={},volume={}]'.format(x.openPrice,x.closePrice,x.stopPrice,x.volume))
return x
if direction == DIRECTION_SHORT:
for x in self.upGrids[:]:
if x.id == id:
self.writeCtaLog(u'找到上网格[open={},close={},stop={},volume={}]'.format(x.openPrice,x.closePrice,x.stopPrice,x.volume))
return x
return None
def getPosition(self,direction, type=EMPTY_STRING):
"""获取特定类型的网格持仓"""
if direction == DIRECTION_LONG:
long_vol = [x.volume-x.tradedVolume for x in self.dnGrids if x.openStatus and x.type == type]
return sum(long_vol)
if direction == DIRECTION_SHORT:
short_vol = [x.volume - x.tradedVolume for x in self.upGrids if x.openStatus and x.type == type]
return sum(short_vol)
def updateOrderRef(self, direction, openPrice, orderRef):
"""更新网格的orderId"""
if direction == DIRECTION_LONG:
for x in self.dnGrids:
if x.openPrice == openPrice:
x.orderRef = orderRef
x.orderStatus = True
if direction == DIRECTION_SHORT:
for x in self.upGrids:
if x.openPrice == openPrice:
x.orderRef = orderRef
x.orderStatus = True
def cancelOrderRef(self,direction, openPrice):
"""网格撤单"""
if direction == DIRECTION_LONG:
for x in self.dnGrids:
if x.openPrice == openPrice and x.orderRef != EMPTY_STRING and x.orderStatus==True and x.openStatus==False:
x.orderRef = EMPTY_STRING
x.orderStatus = False
self.writeCtaLog(u'下网格撤单[{0}]'.format(x.openPrice))
if direction == DIRECTION_SHORT:
for x in self.upGrids:
if x.openPrice == openPrice and x.orderRef != EMPTY_STRING and x.orderStatus==True and x.openStatus==False:
x.orderRef = EMPTY_STRING
x.orderStatus = False
self.writeCtaLog(u'上网格撤单[{0}]'.format(x.openPrice))
def getGridbyOpenPrice(self, direction, openPrice, orderRef = EMPTY_STRING):
"""通过开仓价和委托状态获取网格"""
if direction == DIRECTION_LONG:
for x in self.dnGrids:
# 优先匹配价格
if x.orderRef == orderRef and x.openPrice == openPrice:
return x
if direction == DIRECTION_SHORT:
for x in self.upGrids:
# 优先匹配价格
if x.orderRef == orderRef and x.openPrice == openPrice:
return x
self.writeCtaLog(u'异常,getGridbyOpenPrice找不到网格[{0},openprice={1},orderRef={2}]'.format(direction, openPrice, orderRef))
return None
def getGrid(self, direction, openPrice=EMPTY_FLOAT, closePrice=EMPTY_FLOAT, orderRef=EMPTY_STRING, t=EMPTY_STRING):
"""获取网格"""
if direction == DIRECTION_LONG:
for x in self.dnGrids:
# 优先匹配价格
if t == u'OpenPrice' and x.openPrice == openPrice:
return x
elif t == u'ClosePrice' and x.closePrice == closePrice:
return x
elif t == u'OrderRef' and x.orderRef == orderRef:
return x
if direction == DIRECTION_SHORT:
for x in self.upGrids:
# 优先匹配价格
if t == u'OpenPrice' and x.openPrice == openPrice:
return x
elif t == u'ClosePrice' and x.closePrice == closePrice:
return x
elif t == u'OrderRef' and x.orderRef == orderRef:
return x
self.writeCtaLog(u'异常,getGrid找不到网格[direction={0},oepnPrice={1},closePrice={2},orderRef={3},t={4}]'.format(direction, openPrice, closePrice, orderRef, t))
return None
def getFirstLastGrid(self, direction,type = EMPTY_STRING):
"""获取最前/后一个的网格"""
# 做空网格:,first =开仓价最高一个,last= 最低一个
if direction == DIRECTION_SHORT:
short_grids = self.getGridsWithTypes(direction=direction, types=[type])
if short_grids is None or len(short_grids) ==0 :
return None, None
if len(short_grids) == 1:
return short_grids[0],short_grids[0]
# 价格由低至高排列
sortedGrids = sorted(short_grids, key=lambda g:g.openPrice)
return sortedGrids[-1], sortedGrids[0]
# 做多网格: first =最低一个,last= 开仓价最高一个
if direction == DIRECTION_LONG:
long_grids = self.getGridsWithTypes(direction=direction, types=[type])
if long_grids is None or len(long_grids) ==0:
return None, None
if len(long_grids) == 1:
return long_grids[0], long_grids[0]
sortedGrids = sorted(long_grids, key=lambda g: g.openPrice)
return sortedGrids[0], sortedGrids[-1]
return None,None
def getLastOpenedGrid(self, direction,type = EMPTY_STRING, orderby_asc=True):
"""获取最后一个开仓的网格"""
# highest_short_price_grid = getLastOpenedGrid(DIRECTION_SHORT
if direction == DIRECTION_SHORT:
opened_short_grids = self.getGrids(direction=direction, opened=True,type=type)
if opened_short_grids is None or len(opened_short_grids) ==0 :
return None
if len(opened_short_grids) > 1:
sortedGrids = sorted(opened_short_grids, key=lambda g:g.openPrice)
if orderby_asc:
# 取价格最高的一格
opened_short_grids = sortedGrids[-1:]
else:
# 取价格最低的一格
opened_short_grids = sortedGrids[0:1]
return opened_short_grids[0]
if direction == DIRECTION_LONG:
opened_long_grids = self.getGrids(direction=direction, opened=True,type=type)
if opened_long_grids is None or len(opened_long_grids) ==0:
return None
if len(opened_long_grids) > 1:
sortedGrids = sorted(opened_long_grids, key=lambda g: g.openPrice)
if orderby_asc:
# 取价格最低的一格
opened_long_grids = sortedGrids[0:1]
else:
# 取价格最高的一格
opened_long_grids = sortedGrids[-1:]
return opened_long_grids[0]
def closeGrid(self, direction, closePrice, closeVolume):
"""网格交易结束"""
if direction == DIRECTION_LONG:
for x in self.dnGrids:
if x.closePrice == closePrice and x.openStatus and x.volume == closeVolume:
self.writeCtaLog(u'下网格交易结束[{0}->{1}],仓位:{2},移除网格'.format(x.openPrice, x.closePrice,closeVolume))
self.dnGrids.remove(x)
return
if x.closePrice == closePrice and x.openStatus and x.volume > closeVolume:
self.writeCtaLog(u'下网格交易部分结束[{0}->{1}],减少仓位:{2}'.format(x.openPrice, x.closePrice,closeVolume))
x.volume = x.volume - closeVolume
if x.closePrice == closePrice and x.openStatus and x.volume < closeVolume:
self.writeCtaLog(u'下网格交易结束[{0}->{1}],移除网格,剩余仓位:{2}'.format(x.openPrice, x.closePrice, closeVolume-x.volume))
closeVolume = closeVolume - x.volume
self.dnGrids.remove(x)
if direction == DIRECTION_SHORT:
for x in self.upGrids:
if x.closePrice == closePrice and x.openStatus and x.volume == closeVolume:
self.writeCtaLog(u'上网格交易结束[{0}->{1}],仓位:{2},移除网格'.format(x.openPrice, x.closePrice,closeVolume))
self.upGrids.remove(x)
return
if x.closePrice == closePrice and x.openStatus and x.volume > closeVolume:
self.writeCtaLog(u'上网格交易结束[{0}->{1}],仓位减少:{2}'.format(x.openPrice, x.closePrice,closeVolume))
x.volume = x.volume - closeVolume
if x.closePrice == closePrice and x.openStatus and x.volume < closeVolume:
self.writeCtaLog(u'上网格交易结束[{0}->{1}],移除网格,剩余仓位:{2}'.format(x.openPrice, x.closePrice,closeVolume-x.volume))
closeVolume = closeVolume - x.volume
self.upGrids.remove(x)
def removeGridById(self,direction, id):
"""移除指定id的网格"""
if id == EMPTY_STRING or len(id) <1:
return
if direction == DIRECTION_LONG:
for x in self.dnGrids[:]:
if x.id == id:
self.writeCtaLog(u'清除下网格[open={},close={},stop={},volume={}]'.format(x.openPrice,x.closePrice,x.stopPrice,x.volume))
self.dnGrids.remove(x)
if direction == DIRECTION_SHORT:
for x in self.upGrids[:]:
if x.id == id:
self.writeCtaLog(u'清除上网格[open={},close={},stop={},volume={}]'.format(x.openPrice,x.closePrice,x.stopPrice,x.volume))
self.upGrids.remove(x)
def removeGrids(self, direction, priceline, type=EMPTY_STRING):
"""清除价格线以下的网格"""
if direction == DIRECTION_LONG:
for x in self.dnGrids[:]:
if x.openPrice > priceline and not x.orderStatus and not x.openStatus and not x.closeStatus and x.type==type:
self.writeCtaLog(u'清除下网格[open={0}]'.format(x.openPrice))
self.dnGrids.remove(x)
if direction == DIRECTION_SHORT:
for x in self.upGrids[:]:
if x.openPrice < priceline and not x.orderStatus and not x.openStatus and not x.closeStatus and x.type==type:
self.writeCtaLog(u'清除上网格[open={0}]'.format(x.openPrice))
self.upGrids.remove(x)
def moveGrids(self, direction, pricedelta, type=EMPTY_STRING):
"""按pricedelta平移所有网格"""
if direction == DIRECTION_LONG:
for x in self.dnGrids[:]:
x.openPrice += pricedelta # 开仓价格
x.closePrice += pricedelta # 平仓价格
x.stopPrice += pricedelta # 止损价格
x.type = type # 网格类型标签
# self.openPrices = {} # 套利使用,开仓价格,symbol:price
if direction == DIRECTION_SHORT:
for x in self.upGrids[:]:
x.openPrice += pricedelta # 开仓价格
x.closePrice += pricedelta # 平仓价格
x.stopPrice += pricedelta # 止损价格
x.type = type # 网格类型标签
# self.openPrices = {} # 套利使用,开仓价格,symbol:price
    def rebuildGrids(self, direction, upline=EMPTY_FLOAT, dnline=EMPTY_FLOAT, midline=EMPTY_FLOAT, upRate=1, dnRate=1, reuse=False, useVariableSteps=False):
        """Rebuild the grid ladder on one side.

        Removes grids with no pending order / open position, keeps opened
        ones, and re-places fresh grids from the upper band (shorts) or the
        lower band (longs).

        upRate : height ratio for the upper-band grids
        dnRate : height ratio for the lower-band grids
        """
        self.writeCtaLog(u'重新拉网:direction:{},upline:{},dnline:{}'.format(direction, upline, dnline))
        # clamp the up/down height ratios: neither may drop below 0.5
        if upRate < 0.5 or dnRate < 0.5:
            upRate = max(0.5, upRate)
            dnRate = max(0.5, dnRate)
        # pre-compute the cumulative height of every grid slot; with variable
        # steps the step grows by (self.gridHeight / 2) every 5 grids
        gridSteps = [0]*self.maxLots
        for i in range(1, self.maxLots, 1):
            if useVariableSteps == False:
                gridSteps[i] = self.gridHeight * i
            else:
                j = int(i / 5)
                gridSteps[i] = gridSteps[i-1] + self.gridHeight + self.gridHeight / 2 * j
        # rebuild the lower (long) grids: remove idle ones, keep opened ones,
        # and only add new grids below the lowest kept open price
        if direction == DIRECTION_LONG:
            min_long_price = midline
            remove_grids = []
            opened_grids = []
            # drop lower grids that have no pending order or position
            for x in self.dnGrids[:]:
                if not x.orderStatus and not x.openStatus and not x.closeStatus:
                    remove_grids.append(u'{}=>{}'.format(x.openPrice, x.closePrice))
                    self.dnGrids.remove(x)
                else:
                    opened_grids.append(u'{}=>{}'.format(x.openPrice, x.closePrice))
                    if x.openPrice < min_long_price:
                        min_long_price = x.openPrice
            if len(remove_grids) > 0:
                self.writeCtaLog(u'清除下网格[{}]'.format(remove_grids))
            if len(opened_grids) > 0:
                self.writeCtaLog(u'保留下网格[{}]'.format(opened_grids))
            # number of grid slots still to be rebuilt
            remainLots = len(self.dnGrids)
            lots = self.maxLots - remainLots
            dnline = min(dnline, min_long_price-self.gridHeight*dnRate)
            self.writeCtaLog(u'需要重建的网格数量:{0},起点:{1}'.format(lots, dnline))
            if lots > 0:
                for i in range(0, lots, 1):
                    # long side: open at lower band - step*i, close at
                    # open + take-profit height, default volume; prices are
                    # snapped down to a multiple of minDiff
                    open_price = int((dnline - gridSteps[i+remainLots] * dnRate) / self.minDiff ) * self.minDiff
                    close_price = int((open_price + self.gridWin* dnRate)/self.minDiff) * self.minDiff
                    grid = CtaGrid(direction=DIRECTION_LONG,
                                   openprice=open_price,
                                   closeprice=close_price,
                                   volume=self.volume*self.getVolumeRate(remainLots + i))
                    grid.reuse = reuse
                    self.dnGrids.append(grid)
                self.writeCtaLog(u'重新拉下网格:[{0}~{1}]'.format(dnline, dnline - gridSteps[-1] * dnRate))
        # rebuild the upper (short) grids: remove idle ones, keep opened ones,
        # and only add new grids above the highest kept open price
        if direction == DIRECTION_SHORT:
            max_short_price = midline  # highest short-open price seen so far
            remove_grids = []  # descriptions of removed grids
            opened_grids = []  # descriptions of kept (opened) grids
            # drop upper grids that have no pending order or position
            for x in self.upGrids[:]:
                if not x.orderStatus and not x.openStatus and not x.closeStatus:
                    remove_grids.append(u'{}=>{}'.format(x.openPrice, x.closePrice))
                    self.upGrids.remove(x)
                else:
                    opened_grids.append(u'{}=>{}'.format(x.openPrice, x.closePrice))
                    if x.openPrice > max_short_price:
                        max_short_price = x.openPrice
            if len(remove_grids) > 0:
                self.writeCtaLog(u'清除上网格[{}]'.format(remove_grids))
            if len(opened_grids) > 0:
                self.writeCtaLog(u'保留上网格[{}]'.format(opened_grids))
            # number of grid slots still to be rebuilt
            remainLots = len(self.upGrids)
            lots = self.maxLots - remainLots
            upline = max(upline, max_short_price+self.gridHeight*upRate)
            self.writeCtaLog(u'需要重建的网格数量:{0},起点:{1}'.format(lots, upline))
            if lots > 0:
                # short side: open at upper band + step*i, close at
                # open - take-profit height, default volume
                for i in range(0, lots, 1):
                    open_price = int((upline + gridSteps[i+remainLots] * upRate) / self.minDiff) * self.minDiff
                    close_price = int((open_price - self.gridWin * upRate) / self.minDiff) * self.minDiff
                    grid = CtaGrid(direction=DIRECTION_SHORT,
                                   openprice=open_price,
                                   closeprice=close_price,
                                   volume=self.volume*self.getVolumeRate(remainLots + i))
                    grid.reuse = reuse
                    self.upGrids.append(grid)
                self.writeCtaLog(u'重新拉上网格:[{0}~{1}]'.format(upline, upline + gridSteps[-1] * upRate))
def recount_avg_open_price(self):
"""计算网格的平均开仓价"""
up_open_list = [x for x in self.upGrids if x.openStatus]
self.max_up_open_price = 0 - sys.maxsize
self.avg_up_open_price = 0 - sys.maxsize
self.min_dn_open_price = sys.maxsize
self.avg_dn_open_price = sys.maxsize
total_price = EMPTY_FLOAT
total_volume = EMPTY_INT
for x in up_open_list:
self.max_up_open_price = max(self.max_up_open_price, x.openPrice)
total_price += x.openPrice*x.volume
total_volume += x.volume
if total_volume > 0:
self.avg_up_open_price = total_price/total_volume
total_price = EMPTY_FLOAT
total_volume = EMPTY_INT
dn_open_list = [x for x in self.dnGrids if x.openStatus]
for x in dn_open_list:
self.min_dn_open_price = min(self.min_dn_open_price,x.openPrice)
total_price += x.openPrice*x.volume
total_volume += x.volume
if total_volume > 0:
self.avg_dn_open_price = total_price/total_volume
def count_avg_open_price(self, grid_list):
"""计算平均开仓价"""
total_price = EMPTY_FLOAT
total_volume = EMPTY_INT
avg_price = EMPTY_FLOAT
for g in grid_list:
total_price += g.openPrice * g.volume
total_volume += g.volume
if total_volume > EMPTY_INT:
avg_price = total_price / total_volume
return avg_price
def combineOpenedGrids(self,direction,type=EMPTY_STRING):
"""合并已开仓的网格"""
total_open_price = EMPTY_FLOAT
total_close_price = EMPTY_FLOAT
total_volume = EMPTY_INT
saved_grid = None
if direction == DIRECTION_SHORT:
opened_short_grids = self.getGrids(direction=direction, opened=True, ordered=False, type | |
# Repository: par2/lamana (1-10 stars)
#------------------------------------------------------------------------------
'''Confirm output of general models.'''
import logging
import itertools as it
import matplotlib as mpl
mpl.use('Agg') # required to prevent DISPLAY error; must be before pyplot (REF 050)
import matplotlib.pyplot as plt
import nose.tools as nt
import lamana as la
from lamana.utils import tools as ut
from lamana.utils import plottools as upt
from lamana.models import Wilson_LT as wlt
from lamana.lt_exceptions import PlottingError
# Setup -----------------------------------------------------------------------
dft = wlt.Defaults()
# TESTS -----------------------------------------------------------------------
# TODO: _cycle_depth
def test_cycler_depth1():
    '''Check cycler repeats for a given depth.

    Notes
    -----
    Testing an infinite generator is not straight-forward. It must be consumed.
    We will use itertools.islice() to consume up to an arbitrary index, e.g. 10.
    '''
    # Depth: 1 2 3 4 5 6
    iter_ = ['A', 'B', 'C', 'D', 'E', 'F']
    cycler1 = la.output_._cycle_depth(iter_, depth=None)
    cycler2 = la.output_._cycle_depth(iter_, depth=2)
    cycler3 = la.output_._cycle_depth(iter_, depth=3)
    # Consume the infinite generator with islice.
    actual1 = list(it.islice(cycler1, 10))
    actual2 = list(it.islice(cycler2, 10))
    actual3 = list(it.islice(cycler3, 10))
    expected1 = ['A', 'B', 'C', 'D', 'E', 'F', 'A', 'B', 'C', 'D']
    expected2 = ['A', 'B', 'A', 'B', 'A', 'B', 'A', 'B', 'A', 'B']
    expected3 = ['A', 'B', 'C', 'A', 'B', 'C', 'A', 'B', 'C', 'A']
    # fix: assert_almost_equals on lists only "worked" via the first == second
    # short-circuit; on mismatch it raised TypeError (lists don't subtract)
    # instead of an assertion failure. Lists are compared exactly.
    nt.assert_equal(actual1, expected1)
    nt.assert_equal(actual2, expected2)
    nt.assert_equal(actual3, expected3)
# Single Plots ----------------------------------------------------------------
# TODO: need to add kw in distribplot to turn off plot window; shut down plt.show()
@nt.raises(PlottingError)
def test_distribplot_unnormalized_error1():
    '''Check raises PlottingError if geometry > 1 for unnormalized plot.'''
    multi_case = ut.laminator(['400-200-800', '400-400-400'])[0]
    axes = la.output_._distribplot(multi_case.LMs, normalized=False)
    plt.close()
# TODO: release after moking mock model with no stress columns; needed for LM
#@nt.raises(InputError)
#def test_distribplot_input_error1():
# '''Check raises InputError if x override does not have 'stress' in the name.'''
# x_col = 'sterss'
# case = ut.laminator(['400-200-800', '400-400-400'])[0]
# plot = la.output_._distribplot(case.LMs, x=x_col)
#
# plt.close()
def test_distribplot_input_error2():
    '''Check a stress column is still located when a bad x column name is given.'''
    bad_name = 'bad_column_name'
    single_case = ut.laminator(['400-200-800'])[0]
    axes = la.output_._distribplot(single_case.LMs, x=bad_name)
    nt.assert_is_instance(axes, mpl.axes.Axes)
    plt.close()
def test_distribplot_instance1():
    '''Check _distribplot hands back a matplotlib Axes instance.'''
    single_case = ut.laminator(['400-200-800'])[0]
    axes = la.output_._distribplot(single_case.LMs, normalized=True, extrema=True)
    nt.assert_is_instance(axes, mpl.axes.Axes)
    plt.close()
def test_distribplot_annotate1():
    '''Check if text exists on the plot when annotate=True.'''
    case = ut.laminator(['400-200-800'])[0]
    plot = la.output_._distribplot(case.LMs, annotate=True)
    actual = upt.has_annotations(plot.texts)    # truthy when text artists exist
    nt.assert_true(actual)
    plt.close()
def test_distribplot_annotate2():
    '''Check if text exists; return False when annotate=False.'''
    case = ut.laminator(['400-200-800'])[0]
    plot = la.output_._distribplot(case.LMs, annotate=False)
    actual = upt.has_annotations(plot.texts)    # falsy when no text artists
    nt.assert_false(actual)
    plt.close()
# TODO: Consider sublclassing from PlotTestCase to close plots
class TestDistribplotDimensions():
    '''Check plots dimensions of rectangle patches correctly.

    `_distribplot` is the source of distribution plots; backbone to
    `distributions`.

    Notes
    -----
    - Required creating new axes to prevent plot clobbering
    - Some methods use class-level plots; some use internal plots.
      Internals that make axes need closing.

    '''
    # Set up cases; built once at class-definition time and shared by tests.
    case1 = ut.laminator(['400-200-800'])[0]
    case2 = ut.laminator(dft.geos_standard)
    case3 = ut.laminator(dft.geo_inputs['7-ply'])

    # Sample plots
    plot1 = la.output_._distribplot(case1.LMs, normalized=True, extrema=True)
    fig2, ax2 = plt.subplots()  # make new, separate axes; prevent infinite loop of plt.gca()
    plot2 = la.output_._distribplot(case1.LMs, normalized=False, extrema=True, ax=ax2)

    # TODO: randomize cases for this test
    def test_distribplot_patches_count1(self):
        '''Check number of rectangle patches equals number of plies.'''
        case = self.case1
        LM = case.LMs[0]
        npatches = len(self.plot1.artists)
        nplies = LM.nplies
        actual = npatches
        expected = nplies
        nt.assert_equal(actual, expected)

    def test_distribplot_patches_normalized_dimensions1(self):
        '''Check position and dimensions of normalized patches, statically.

        Static implies the plot is supplied with fixed, pre-determined values.
        '''
        for i, rec in enumerate(self.plot1.artists):
            y_i = float(i + 1)  # fixed y positions
            h_i = 1.0           # fixed heights
            # Rectangle Dimensions
            x = rec.get_x()
            y = rec.get_y()
            width = rec.get_width()
            height = rec.get_height()
            logging.debug('x: {}, y: {}, w: {}, h: {}'.format(x, y, width, height))
            # Rectangle Attributes
            #fcolor = rec.get_facecolor()
            # For normalized plots, we expect fixed x positions, widths and
            # heights; only y positions change.
            actual = (x, y, width, height)
            expected = (-0.378730662983, y_i, 0.757461325966, h_i)
            logging.debug('y_i: {}, h_i: {}'.format(y_i, h_i))
            nt.assert_almost_equal(actual[0], expected[0])
            nt.assert_almost_equal(actual[1], expected[1])
            nt.assert_almost_equal(actual[2], expected[2])
            nt.assert_almost_equal(actual[3], expected[3])

    # TODO: add randomized cases
    # NOTE: can accept random cases of equivalent plies, e.g.
    # case = ut.laminator(dft.geo_inputs['5-ply'])
    # case = ut.laminator(dft.geo_inputs['4-ply'])
    # case = ut.laminator(dft.geo_inputs['7-ply'])
    def test_distribplot_patches_normalized_dimensions2(self):
        '''Check position and dimensions of normalized patches, dynamically.

        Notes
        -----
        Iterating cases, artists and LaminateModels.  The LMs must have the
        same nplies.  Extrema forced True.

        '''
        for case_ in self.case3.values():
            fig, ax = plt.subplots()
            # FIX: route the plot onto the freshly created axes (as the
            # unnormalized sibling test does).  `ax` was created but unused,
            # so the plot landed on whichever axes happened to be current.
            plot = la.output_._distribplot(
                case_.LMs, normalized=True, extrema=True, ax=ax
            )
            # Calculations on all LMs in a case
            x_max = max(LM.max_stress.max() for LM in case_.LMs)
            x_min = min(LM.max_stress.min() for LM in case_.LMs)
            w_i = abs(x_max - x_min)
            # Dynamic implies the dimensional values are unknown and thus not fixed
            for i, rec in enumerate(plot.artists):
                # Rectangle Dimensions
                x = rec.get_x()
                y = rec.get_y()
                width = rec.get_width()
                height = rec.get_height()
                logging.debug('x: {}, y: {}, w: {}, h: {}'.format(x, y, width, height))
                # Rectangle Attributes
                #fcolor = rec.get_facecolor()
                y_i = float(i + 1)
                h_i = 1.0  # heights equal for normalized; actually k
                logging.debug('x_i: {}, y_i: {}, w_i: {}, h_i: {}'.format(x_min, y_i, w_i, h_i))
                # x positions and widths are fixed; y positions and height change.
                actual = (x, y, width, height)
                expected = (x_min, y_i, w_i, h_i)
                nt.assert_almost_equal(actual[0], expected[0])
                nt.assert_almost_equal(actual[1], expected[1])
                nt.assert_almost_equal(actual[2], expected[2])
                nt.assert_almost_equal(actual[3], expected[3])
            plt.close()

    def test_distribplot_patches_unnormalized_dimensions1(self):
        '''Check position and dimensions of unnormalized patches, statically.'''
        # Static implies the plot is supplied with fixed, pre-determined values:
        # expected y positions and heights per patch index.
        ys = {
            0: 0.0,
            1: 0.0004,
            2: 0.0006,
            3: 0.0014,
            4: 0.0016,
        }
        hs = {
            0: 0.0004,
            1: 0.0002,
            2: 0.0008,
            3: 0.0002,
            4: 0.0004,
        }
        for i, rec in enumerate(self.plot2.artists):
            # Rectangle Dimensions
            x = rec.get_x()
            y = rec.get_y()
            width = rec.get_width()
            height = rec.get_height()
            logging.debug('x: {}, y: {}, w: {}, h: {}'.format(x, y, width, height))
            # Rectangle Attributes
            #fcolor = rec.get_facecolor()
            # x positions and widths are fixed; y positions and height change.
            actual = (x, y, width, height)
            expected = (-0.378730662983, ys[i], 0.757461325966, hs[i])
            nt.assert_almost_equal(actual[0], expected[0])
            nt.assert_almost_equal(actual[1], expected[1])
            nt.assert_almost_equal(actual[2], expected[2])
            nt.assert_almost_equal(actual[3], expected[3])

    def test_distribplot_patches_unnormalized_dimensions2(self):
        '''Check position and dimensions of unnormalized patches, dynamically.

        Notes
        -----
        Iterating cases, artists and LaminateModels.  Can only handle single
        geometries.  Extrema are forced True.

        '''
        for case_ in self.case2.values():
            fig, ax = plt.subplots()
            plot = la.output_._distribplot(
                case_.LMs, normalized=False, extrema=True, ax=ax
            )
            # Calculations on all LMs in a case
            x_max = max(LM.max_stress.max() for LM in case_.LMs)
            x_min = min(LM.max_stress.min() for LM in case_.LMs)
            w_i = abs(x_max - x_min)
            # Dynamic implies the dimensional values are unknown and thus not fixed
            for i, rec in enumerate(plot.artists):
                # Rectangle Dimensions
                x = rec.get_x()
                y = rec.get_y()
                width = rec.get_width()
                height = rec.get_height()
                logging.debug('x: {}, y: {}, w: {}, h: {}'.format(x, y, width, height))
                # Rectangle Attributes
                #fcolor = rec.get_facecolor()
                # Extract from DataFrames (assume normalized have equal patch dimensions)
                df = case_.LMs[0].LMFrame
                y_i = df[df['label'] == 'interface']['d(m)'].reset_index(drop=True)[i]
                ##h_i = case_.snapshots[0]['t(um)'][i]/1e6
                h_i = case_.LMs[0].stack_order[i + 1][1] / 1e6  # stack layer thickness; /1e6 presumably um -> m
                logging.debug('x_i: {}, y_i: {}, w_i: {}, h_i: {}'.format(x_min, y_i, w_i, h_i))
                # x positions and widths are fixed; y positions and height change.
                actual = (x, y, width, height)
                expected = (x_min, y_i, w_i, h_i)
                nt.assert_almost_equal(actual[0], expected[0])
                nt.assert_almost_equal(actual[1], expected[1])
                nt.assert_almost_equal(actual[2], expected[2])
                nt.assert_almost_equal(actual[3], expected[3])
            plt.close()
# TODO: case randomizer here
# TODO: since similar methods, consider abstracting plot_df_data_extractor into plottools
class TestDistribplotLines():
'''Check accuracy of plot lines.'''
# Various cases
#case1 = ut.laminator(['400-200-800',], ps=[3,])
#cases2 = ut.laminator(['400-200-800',], ps=[3, 3])
#cases2 = ut.laminator(['400-200-800',], ps=[4, 4])
cases2 = ut.laminator(['400-200-800'], ps=[3, 4])
cases3 = ut.laminator(['400-200-800', '400-400-400', '100-100-100'], ps=[3, 4])
def test_distribplot_lines_count1(self):
'''Check number of lines equals number of case size (# geo_strings).'''
cases = self.cases3
for case_ in cases.values():
fig, ax = plt.subplots()
plot = la.output_._distribplot(case_.LMs, ax=ax)
nlines = len(plot.lines)
ngeo_strings = len(case_.LMs)
ncases = case_.size
actual1 = nlines
expected1 = ngeo_strings
expected2 = ncases
nt.assert_equal(actual1, expected1)
nt.assert_equal(actual1, expected2)
plt.close()
def test_distribplot_lines_normalized_data1(self):
'''Check plot data agrees with LaminateModel data; normalized=True.
Compares lists of zipped (x,y) datapoints.
Notes
-----
- Supports normalize triggering; normalized=True|False
- Supports non-fixed | |
dataset.set_filelist(filelist)
dataset.load_into_memory()
dataset.preprocess_instance()
"""
self.dataset.preprocess_instance()
    def set_current_phase(self, current_phase):
        """
        Set the current phase in train; useful for unit testing.

        Args:
            current_phase(int): 1 for join, 0 for update.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              dataset.set_current_phase(1)

        """
        # Delegate directly to the underlying dataset object.
        self.dataset.set_current_phase(current_phase)
    def postprocess_instance(self):
        """
        Divide pv instance and convey it to input_channel.

        Typically called after training on data prepared by
        ``preprocess_instance`` (see the example below).

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              dataset.preprocess_instance()
              exe.train_from_dataset(dataset)
              dataset.postprocess_instance()

        """
        # Delegate directly to the underlying dataset object.
        self.dataset.postprocess_instance()
    @deprecated(
        since="2.0.0",
        update_to="paddle.distributed.InMemoryDataset._set_fleet_send_batch_size"
    )
    def set_fleet_send_batch_size(self, fleet_send_batch_size=1024):
        """
        Set fleet send batch size, default is 1024.

        The value is only recorded here; it is pushed down to the underlying
        dataset by ``global_shuffle``.

        Args:
            fleet_send_batch_size(int): fleet send batch size

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              dataset.set_fleet_send_batch_size(800)

        """
        self.fleet_send_batch_size = fleet_send_batch_size
    @deprecated(
        since="2.0.0",
        update_to="paddle.distributed.InMemoryDataset._set_fleet_send_sleep_seconds"
    )
    def set_fleet_send_sleep_seconds(self, fleet_send_sleep_seconds=0):
        """
        Set fleet send sleep time, default is 0.

        The value is only recorded here; it is pushed down to the underlying
        dataset by ``global_shuffle``.

        Args:
            fleet_send_sleep_seconds(int): fleet send sleep time

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              dataset.set_fleet_send_sleep_seconds(2)

        """
        self.fleet_send_sleep_seconds = fleet_send_sleep_seconds
    @deprecated(
        since="2.0.0",
        update_to="paddle.distributed.InMemoryDataset._set_merge_by_lineid")
    def set_merge_by_lineid(self, merge_size=2):
        """
        Set merge by line id; instances of the same line id will be merged
        after shuffle.  You should parse the line id in your data generator.

        Also enables instance-id parsing (``parse_ins_id``) so line ids are
        available for merging.

        Args:
            merge_size(int): ins size to merge. default is 2.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              dataset.set_merge_by_lineid()

        """
        self.dataset.set_merge_by_lineid(merge_size)
        self.merge_by_lineid = True
        self.parse_ins_id = True
    @deprecated(
        since="2.0.0",
        update_to="paddle.distributed.InMemoryDataset._set_generate_unique_feasigns"
    )
    def set_generate_unique_feasigns(self, generate_uni_feasigns, shard_num):
        """
        Enable or disable generation of unique feature signs and record the
        local shard count.

        Args:
            generate_uni_feasigns: whether to generate unique feasigns
                (forwarded to the underlying dataset; presumably bool --
                confirm against the C++ API).
            shard_num: number of local shards (stored in
                ``local_shard_num``).
        """
        self.dataset.set_generate_unique_feasigns(generate_uni_feasigns)
        self.gen_uni_feasigns = generate_uni_feasigns
        self.local_shard_num = shard_num
    @deprecated(
        since="2.0.0",
        update_to="paddle.distributed.InMemoryDataset._generate_local_tables_unlock"
    )
    def generate_local_tables_unlock(self, table_id, fea_dim, read_thread_num,
                                     consume_thread_num, shard_num):
        """
        Generate local tables without locking; all arguments are forwarded
        unchanged to the underlying dataset object.

        Args:
            table_id: id of the table to generate.
            fea_dim: feature dimension.
            read_thread_num: number of reader threads.
            consume_thread_num: number of consumer threads.
            shard_num: number of shards.
        """
        self.dataset.generate_local_tables_unlock(
            table_id, fea_dim, read_thread_num, consume_thread_num, shard_num)
    @deprecated(
        since="2.0.0",
        update_to="paddle.distributed.InMemoryDataset.load_into_memory")
    def load_into_memory(self):
        """
        Load data into memory.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()

        """
        # Configure feed description, threads and readers before loading.
        self._prepare_to_run()
        self.dataset.load_into_memory()
@deprecated(
since="2.0.0",
update_to="paddle.distributed.InMemoryDataset.preload_into_memory")
def preload_into_memory(self, thread_num=None):
"""
Load data into memory in async mode
Args:
thread_num(int): preload thread num
Examples:
.. code-block:: python
import paddle.fluid as fluid
dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
filelist = ["a.txt", "b.txt"]
dataset.set_filelist(filelist)
dataset.preload_into_memory()
dataset.wait_preload_done()
"""
self._prepare_to_run()
if thread_num is None:
thread_num = self.thread_num
self.dataset.set_preload_thread_num(thread_num)
self.dataset.create_preload_readers()
self.dataset.preload_into_memory()
    @deprecated(
        since="2.0.0",
        update_to="paddle.distributed.InMemoryDataset.wait_preload_done")
    def wait_preload_done(self):
        """
        Wait until ``preload_into_memory`` is done, then release the preload
        readers.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.preload_into_memory()
              dataset.wait_preload_done()

        """
        self.dataset.wait_preload_done()
        # Preload readers are no longer needed once loading completes.
        self.dataset.destroy_preload_readers()
    @deprecated(
        since="2.0.0",
        update_to="paddle.distributed.InMemoryDataset.local_shuffle")
    def local_shuffle(self):
        """
        Shuffle the in-memory data locally (within this process only).

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              dataset.local_shuffle()

        """
        self.dataset.local_shuffle()
    @deprecated(
        since="2.0.0",
        update_to="paddle.distributed.InMemoryDataset.global_shuffle")
    def global_shuffle(self, fleet=None, thread_num=12):
        """
        Global shuffle.

        Global shuffle can be used only in distributed mode, i.e. multiple
        processes on a single machine or multiple machines training together.
        If you run in distributed mode, you should pass fleet instead of None.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              dataset.global_shuffle(fleet)

        Args:
            fleet(Fleet): fleet singleton. Default None.
            thread_num(int): shuffle thread num. Default is 12.

        """
        if fleet is not None:
            # Synchronize workers before reading the trainer count.
            fleet._role_maker.barrier_worker()
            if self.trainer_num == -1:
                self.trainer_num = fleet.worker_num()
        # Fall back to defaults when the send knobs were never set.
        if self.fleet_send_batch_size is None:
            self.fleet_send_batch_size = 1024
        if self.fleet_send_sleep_seconds is None:
            self.fleet_send_sleep_seconds = 0
        self.dataset.register_client2client_msg_handler()
        self.dataset.set_trainer_num(self.trainer_num)
        self.dataset.set_fleet_send_batch_size(self.fleet_send_batch_size)
        self.dataset.set_fleet_send_sleep_seconds(self.fleet_send_sleep_seconds)
        if fleet is not None:
            # Ensure every worker is fully configured before shuffling starts.
            fleet._role_maker.barrier_worker()
        self.dataset.global_shuffle(thread_num)
        if fleet is not None:
            # Wait for all workers to finish the shuffle exchange.
            fleet._role_maker.barrier_worker()
        if self.merge_by_lineid:
            # Merge instances sharing a line id (see set_merge_by_lineid).
            self.dataset.merge_by_lineid()
        if fleet is not None:
            fleet._role_maker.barrier_worker()
    @deprecated(
        since="2.0.0",
        update_to="paddle.distributed.InMemoryDataset.release_memory")
    def release_memory(self):
        """
        :api_attr: Static Graph

        Release InMemoryDataset memory data, when data will not be used again.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              dataset.global_shuffle(fleet)
              exe = fluid.Executor(fluid.CPUPlace())
              exe.run(fluid.default_startup_program())
              exe.train_from_dataset(fluid.default_main_program(), dataset)
              dataset.release_memory()

        """
        self.dataset.release_memory()
    # NOTE(review): unlike the sibling accessors, this method carries no
    # @deprecated decorator -- confirm whether that is intentional.
    def get_pv_data_size(self):
        """
        Get memory data size of Pv, user can call this function to know the pv
        num of ins in all workers after load into memory.

        Note:
            This function may cause bad performance, because it has barrier.

        Returns:
            The size of memory pv data.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              print(dataset.get_pv_data_size())

        """
        return self.dataset.get_pv_data_size()
    @deprecated(
        since="2.0.0",
        update_to="paddle.distributed.InMemoryDataset.get_memory_data_size")
    def get_memory_data_size(self, fleet=None):
        """
        Get memory data size, user can call this function to know the num
        of ins in all workers after load into memory.

        Note:
            This function may cause bad performance, because it has barrier.

        Args:
            fleet(Fleet): Fleet Object.

        Returns:
            The size of memory data.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              print(dataset.get_memory_data_size(fleet))

        """
        import numpy as np
        local_data_size = self.dataset.get_memory_data_size()
        local_data_size = np.array([local_data_size])
        if fleet is not None:
            # Aggregate (presumably summing) the per-worker sizes via
            # all-reduce -- confirm against role_maker.all_reduce_worker.
            global_data_size = local_data_size * 0
            fleet._role_maker.all_reduce_worker(local_data_size,
                                                global_data_size)
            return global_data_size[0]
        return local_data_size[0]
    @deprecated(
        since="2.0.0",
        update_to="paddle.distributed.InMemoryDataset.get_shuffle_data_size")
    def get_shuffle_data_size(self, fleet=None):
        """
        Get shuffle data size, user can call this function to know the num
        of ins in all workers after local/global shuffle.

        Note:
            This function may cause bad performance to local shuffle,
            because it has barrier. It does not affect global shuffle.

        Args:
            fleet(Fleet): Fleet Object.

        Returns:
            The size of shuffle data.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
              dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset")
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.load_into_memory()
              dataset.global_shuffle(fleet)
              print(dataset.get_shuffle_data_size(fleet))

        """
        import numpy as np
        local_data_size = self.dataset.get_shuffle_data_size()
        local_data_size = np.array([local_data_size])
        if fleet is not None:
            # Aggregate (presumably summing) the per-worker sizes via
            # all-reduce -- confirm against role_maker.all_reduce_worker.
            global_data_size = local_data_size * 0
            fleet._role_maker.all_reduce_worker(local_data_size,
                                                global_data_size)
            return global_data_size[0]
        return local_data_size[0]
class QueueDataset(DatasetBase):
    """
    QueueDataset processes data in a streaming fashion.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          dataset = fluid.DatasetFactory().create_dataset("QueueDataset")
    """

    def __init__(self):
        """Initialize QueueDataset; instances should come from DatasetFactory."""
        super(QueueDataset, self).__init__()
        self.proto_desc.name = "MultiSlotDataFeed"

    @deprecated(
        since="2.0.0",
        update_to="paddle.distributed.QueueDataset._prepare_to_run")
    def _prepare_to_run(self):
        """
        Configure the data feed description, thread num and filelist before a
        run; users do not need to call this directly.
        """
        # Never use more threads than files, and always use at least one.
        file_count = len(self.filelist)
        if self.thread_num > file_count:
            self.thread_num = file_count
        if self.thread_num == 0:
            self.thread_num = 1
        self.dataset.set_thread_num(self.thread_num)
        self.dataset.set_filelist(self.filelist)
        self.dataset.set_data_feed_desc(self.desc())
        self.dataset.create_readers()

    def local_shuffle(self):
        """
        Local shuffle is not supported by QueueDataset.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("QueueDataset")
              dataset.local_shuffle()

        Raises:
            NotImplementedError: always; use InMemoryDataset instead.
        """
        raise NotImplementedError(
            "QueueDataset does not support local shuffle, "
            "please use InMemoryDataset for local_shuffle")

    def global_shuffle(self, fleet=None):
        """
        Global shuffle is not supported by QueueDataset.

        Args:
            fleet(Fleet): fleet singleton. Default None.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              from paddle.fluid.incubate.fleet.parameter_server.pslib import fleet
              dataset = fluid.DatasetFactory().create_dataset("QueueDataset")
              dataset.global_shuffle(fleet)

        Raises:
            NotImplementedError: always; use InMemoryDataset instead.
        """
        raise NotImplementedError(
            "QueueDataset does not support global shuffle, "
            "please use InMemoryDataset for global_shuffle")
class FileInstantDataset(DatasetBase):
    """
    FileInstantDataset processes data in a streaming fashion.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          dataset = fluid.DatasetFactory().create_dataset("FileInstantDataset")
    """

    def __init__(self):
        """Initialize FileInstantDataset; instances should come from DatasetFactory."""
        super(FileInstantDataset, self).__init__()
        self.proto_desc.name = "MultiSlotFileInstantDataFeed"

    def local_shuffle(self):
        """
        Local shuffle is not supported by FileInstantDataset; always raises
        NotImplementedError.
        """
        raise NotImplementedError(
            "FileInstantDataset does not support local shuffle, "
            "please use InMemoryDataset for local_shuffle")

    def global_shuffle(self, fleet=None):
        """
        Global shuffle is not supported by FileInstantDataset; always raises
        NotImplementedError.
        """
        raise NotImplementedError(
            "FileInstantDataset does not support global shuffle, "
            "please use InMemoryDataset for global_shuffle")
class BoxPSDataset(InMemoryDataset):
"""
BoxPSDataset: derived from InMemoryDataset.
Examples:
.. code-block:: python
import paddle.fluid as fluid
dataset = fluid.DatasetFactory().create_dataset("BoxPSDataset")
"""
    def __init__(self):
        """
        Initialize BoxPSDataset.

        This class should be created by DatasetFactory.  Wraps the base
        in-memory dataset with a ``core.BoxPS`` handle and switches the feed
        to PaddleBoxDataFeed.
        """
        super(BoxPSDataset, self).__init__()
        self.boxps = core.BoxPS(self.dataset)
        self.proto_desc.name = "PaddleBoxDataFeed"
def set_date(self, date):
"""
Workaround for date
"""
year = int(date[:4])
month = int(date[4:6])
day = int(date[6:])
self.boxps.set_date(year, month, day)
    def begin_pass(self):
        """
        Begin Pass.

        Notify BoxPS to load sparse parameters of next pass to GPU Memory.

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("BoxPSDataset")
              dataset.begin_pass()
        """
        self.boxps.begin_pass()
    def end_pass(self, need_save_delta):
        """
        End Pass.

        Notify BoxPS that the current pass ended.

        Args:
            need_save_delta: forwarded unchanged to ``BoxPS.end_pass``
                (presumably whether to persist the pass delta -- confirm
                against the BoxPS API).

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("BoxPSDataset")
              dataset.end_pass(True)
        """
        self.boxps.end_pass(need_save_delta)
    def wait_preload_done(self):
        """
        Wait async preload done.

        Blocks until the BoxPS feed pass is finished (overrides the base
        implementation, which waits on the dataset readers instead).

        Examples:
            .. code-block:: python

              import paddle.fluid as fluid
              dataset = fluid.DatasetFactory().create_dataset("BoxPSDataset")
              filelist = ["a.txt", "b.txt"]
              dataset.set_filelist(filelist)
              dataset.preload_into_memory()
              dataset.wait_preload_done()
        """
        self.boxps.wait_feed_pass_done()
def load_into_memory(self):
"""
Load | |
4.20927452889608*m.b555 >= -1.90668943590203)
def _add_bigM_group(first_con, first_x, coef, rhs, k_lo=0, k_hi=10):
    """Add one group of (up to) eleven big-M linking constraints to ``m``.

    Each group spans eleven consecutive variable triples with a shared
    coefficient and right-hand side:

        -x(first_x + k) + x(278 + k) - coef * b(549 + k) >= rhs,  k = k_lo..k_hi
    """
    for k in range(k_lo, k_hi + 1):
        setattr(m, 'c%d' % (first_con + k), Constraint(
            expr=-getattr(m, 'x%d' % (first_x + k))
                 + getattr(m, 'x%d' % (278 + k))
                 - coef * getattr(m, 'b%d' % (549 + k)) >= rhs))

# c555-c558: tail of the group starting at c548 (its head lies above this chunk).
_add_bigM_group(548, 86, 4.20927452889608, -1.90668943590203, k_lo=7)
_add_bigM_group(559, 98, 3.92641174288025, -1.6238266498862)
_add_bigM_group(570, 110, 4.20927452889608, -1.90668943590203)
_add_bigM_group(581, 122, 3.98613097758187, -1.68354588458782)
_add_bigM_group(592, 134, 4.04964438330419, -1.74705929031015)
_add_bigM_group(603, 146, 4.04964438330419, -1.74705929031015)
_add_bigM_group(614, 158, 3.81671282562382, -1.51412773262977)
_add_bigM_group(625, 170, 4.35385575770719, -2.05127066471314)
_add_bigM_group(636, 182, 4.20927452889608, -1.90668943590203)
_add_bigM_group(647, 194, 4.20927452889608, -1.90668943590203)
# c658-c666: head of the group starting at c658 (its tail lies below this chunk).
_add_bigM_group(658, 206, 3.92641174288025, -1.6238266498862, k_hi=8)
m.c667 = Constraint(expr= - m.x215 + m.x287 - 3.92641174288025*m.b558 >= -1.6238266498862)
m.c668 = Constraint(expr= - m.x216 + m.x288 - 3.92641174288025*m.b559 >= -1.6238266498862)
m.c669 = Constraint(expr= - m.x218 + m.x278 - 4.20927452889608*m.b549 >= -1.90668943590203)
m.c670 = Constraint(expr= - m.x219 + m.x279 - 4.20927452889608*m.b550 >= -1.90668943590203)
m.c671 = Constraint(expr= - m.x220 + m.x280 - 4.20927452889608*m.b551 >= -1.90668943590203)
m.c672 = Constraint(expr= - m.x221 + m.x281 - 4.20927452889608*m.b552 >= -1.90668943590203)
m.c673 = Constraint(expr= - m.x222 + m.x282 - 4.20927452889608*m.b553 >= -1.90668943590203)
m.c674 = Constraint(expr= - m.x223 + m.x283 - 4.20927452889608*m.b554 >= -1.90668943590203)
m.c675 = Constraint(expr= - m.x224 + m.x284 - 4.20927452889608*m.b555 >= -1.90668943590203)
m.c676 = Constraint(expr= - m.x225 + m.x285 - 4.20927452889608*m.b556 >= -1.90668943590203)
m.c677 = Constraint(expr= - m.x226 + m.x286 - 4.20927452889608*m.b557 >= -1.90668943590203)
m.c678 = Constraint(expr= - m.x227 + m.x287 - 4.20927452889608*m.b558 >= -1.90668943590203)
m.c679 = Constraint(expr= - m.x228 + m.x288 - 4.20927452889608*m.b559 >= -1.90668943590203)
m.c680 = Constraint(expr= - m.x230 + m.x278 - 3.98613097758187*m.b549 >= -1.68354588458782)
m.c681 = Constraint(expr= - m.x231 + m.x279 - 3.98613097758187*m.b550 >= -1.68354588458782)
m.c682 = Constraint(expr= - m.x232 + m.x280 - | |
old_y = self.program.last_y
if self.state == DRIVER_STATE_PROGRAM:
if cut:
self.program.cut_abs(x, y)
else:
self.program.move_abs(x, y)
else:
# DRIVER_STATE_RASTER
if x == self.current_x and y == self.current_y:
return
if cut:
if x == self.current_x:
self.program.cut_vertical_abs(y=y)
if y == self.current_y:
self.program.cut_horizontal_abs(x=x)
else:
if x == self.current_x:
self.program.move_vertical_abs(y=y)
if y == self.current_y:
self.program.move_horizontal_abs(x=x)
self.current_x = x
self.current_y = y
self.context.signal("driver;position", (old_x, old_y, x, y))
def cut(self, x, y):
    """Cut to (x, y), relative or absolute depending on ``is_relative``."""
    target = self.cut_relative if self.is_relative else self.cut_absolute
    target(x, y)
    self.ensure_rapid_mode()
    self.push_program()
def cut_absolute(self, x, y):
    """Cut to the absolute position (x, y) and update the tracked location."""
    self.ensure_program_or_raster_mode(x, y)
    self.program.cut_abs(x, y)
    old_x, old_y = self.current_x, self.current_y
    self.current_x, self.current_y = x, y
    self.context.signal("driver;position", (old_x, old_y, x, y))
def cut_relative(self, dx, dy):
    """Cut to the position offset by (dx, dy) from the current laser position."""
    self.cut_absolute(self.current_x + dx, self.current_y + dy)
def rapid_jog(self, x, y, **kwargs):
    """Perform a rapid jog; in Moshiboard terms this is merely a move."""
    self.ensure_program_or_raster_mode(x, y)
    start = (self.program.last_x, self.program.last_y)
    self.program.move_abs(x, y)
    self.current_x, self.current_y = x, y
    end = (self.program.last_x, self.program.last_y)
    self.context.signal("driver;position", start + end)
def move(self, x, y):
    """Move to (x, y), relative or absolute depending on ``is_relative``."""
    handler = self.move_relative if self.is_relative else self.move_absolute
    handler(x, y)
    self.ensure_rapid_mode()
def move_absolute(self, x, y):
    """
    Move to the absolute position (x, y) and update the tracked location.

    Fix: removed the redundant re-read of ``current_x``/``current_y`` into
    ``x``/``y`` right after they were assigned from ``x``/``y`` (dead code in
    the original); the emitted signal is unchanged.
    """
    self.ensure_program_or_raster_mode(x, y)
    old_x = self.current_x
    old_y = self.current_y
    self.program.move_abs(x, y)
    self.current_x = x
    self.current_y = y
    self.context.signal("driver;position", (old_x, old_y, x, y))
def move_relative(self, dx, dy):
    """Move to the position offset by (dx, dy) from the current laser position."""
    self.move_absolute(self.current_x + dx, self.current_y + dy)
def set_speed(self, speed=None):
    """Set the driver speed; flag a mode change if a program/raster is active."""
    if self.settings.speed == speed:
        return
    self.settings.speed = speed
    if self.state in (DRIVER_STATE_PROGRAM, DRIVER_STATE_RASTER):
        self.state = DRIVER_STATE_MODECHANGE
def set_step(self, step=None):
    """Set the raster step; flag a mode change if a program/raster is active."""
    if self.settings.raster_step == step:
        return
    self.settings.raster_step = step
    if self.state in (DRIVER_STATE_PROGRAM, DRIVER_STATE_RASTER):
        self.state = DRIVER_STATE_MODECHANGE
def calc_home_position(self):
    """Return the adjusted (x, y) home position for the configured home corner."""
    context = self.context
    bed = context.root
    bed.setting(int, "bed_width", 310)
    bed.setting(int, "bed_height", 210)
    home_x = context.home_adjust_x
    home_y = context.home_adjust_y
    if context.home_right:
        home_x += int(bed.bed_width * MILS_IN_MM)
    if context.home_bottom:
        home_y += int(bed.bed_height * MILS_IN_MM)
    return home_x, home_y
def home(self, *values):
    """
    Home the device; for Moshiboards this is merely a move to the home position.

    Optional positional values override the computed x and/or y coordinates
    when they can be parsed as integers.
    """
    coords = list(self.calc_home_position())
    for idx in (0, 1):
        try:
            coords[idx] = int(values[idx])
        except (ValueError, IndexError):
            pass
    x, y = coords
    self.ensure_rapid_mode()
    self.settings.speed = 40
    self.ensure_program_mode(x, y, x, y)
    self.ensure_rapid_mode()
    self.current_x = x
    self.current_y = y
def lock_rail(self):
    """No-op; this driver implements no rail-lock command."""
    pass
def unlock_rail(self, abort=False):
    """
    Unlock the Rail or send a "FreeMotor" command.

    :param abort: accepted for interface compatibility; unused here.
    """
    self.ensure_rapid_mode()
    try:
        self.output.unlock_rail()
    except AttributeError:
        # Output without unlock_rail support: silently skip.
        pass
def abort(self):
    """Abort the current work by issuing an emergency stop to the output."""
    self.ensure_rapid_mode()
    try:
        self.output.estop()
    except AttributeError:
        # Output without estop support: silently skip.
        pass
@property
def type(self):
    """Return the driver type identifier string."""
    return "moshi"
class MoshiController:
    """
    The MoshiController takes data programs built by the MoshiDriver and sends
    them to the Moshiboard according to established moshi protocols.

    The output device is concerned with sending the moshiblobs to the control
    board and control events to the CH341 chip on the Moshiboard. We use the
    same ch341 driver as the Lhystudios boards, giving us access to both libusb
    drivers and windll drivers.

    The protocol for sending rasters is as follows:
        Check processing-state of board, seeking 205
        Send Preamble.
        Check processing-state of board, seeking 205
        Send bulk data of moshiblob. No checks between packets.
        Send Epilogue.
        While Check processing-state is 207:
            wait 0.2 seconds
        Send Preamble
        Send 0,0 offset 0,0 move.
        Send Epilogue

    Checks done before the Epilogue will have 205 state.
    """
def __init__(self, context, name, channel=None, *args, **kwargs):
    """Create a controller bound to the ``moshi/output/<name>`` context."""
    context = context.get_context("moshi/output/%s" % name)
    self.context = context
    self.name = name
    self.state = STATE_UNKNOWN
    self.is_shutdown = False
    self.next = None
    self.prev = None
    self._thread = None
    self._buffer = (
        bytearray()
    )  # Threadsafe buffered commands to be sent to controller.
    self._programs = []  # Programs to execute.
    self.context._buffer_size = 0
    self._main_lock = threading.Lock()
    self._status = [0] * 6
    self._usb_state = -1
    self._connection = None
    self.max_attempts = 5
    self.refuse_counts = 0
    self.connection_errors = 0
    self.count = 0
    self.abort_waiting = False
    # Diagnostic/logging channels exposed through the kernel context.
    self.pipe_channel = context.channel("%s/events" % name)
    self.usb_log = context.channel("%s/usb" % name, buffer_size=500)
    self.usb_send_channel = context.channel("%s/usb_send" % name)
    self.recv_channel = context.channel("%s/recv" % name)
    self.ch341 = self.context.open("module/ch341", log=self.usb_log)
    self.usb_log.watch(lambda e: context.signal("pipe;usb_status", e))
    # Persistent USB/device settings with their defaults.
    context.setting(int, "usb_index", -1)
    context.setting(int, "usb_bus", -1)
    context.setting(int, "usb_address", -1)
    context.setting(int, "usb_version", -1)
    context.setting(bool, "mock", False)
    context.setting(int, "packet_count", 0)
    context.setting(int, "rejected_count", 0)
    # Start the send thread once the kernel reports ready.
    self.context.root.listen("lifecycle;ready", self.on_controller_ready)
def viewbuffer(self):
    """
    Return a human-readable dump of the working buffer and queued programs.

    Used by the BufferView class; without this method BufferView shows nothing.
    Optional for any output device.
    """
    lines = ["Current Working Buffer: %s\n" % str(self._buffer)]
    lines.extend("%s\n" % str(p.data) for p in self._programs)
    return "".join(lines)
def on_controller_ready(self, origin, *args):
    # Kernel "lifecycle;ready" listener: start the send thread once ready.
    self.start()
def finalize(self, *args, **kwargs):
    """Detach the lifecycle listener and flag a live send thread for shutdown."""
    self.context.root.unlisten("lifecycle;ready", self.on_controller_ready)
    if self._thread is not None:
        self.is_shutdown = True
def __repr__(self):
    """Debug representation; the controller is identified by type only."""
    return "MoshiController()"
def __len__(self):
    """Length of pending data: the working buffer plus all queued programs."""
    total = len(self._buffer)
    for program in self._programs:
        total += len(program)
    return total
def realtime_read(self):
    """
    Send the realtime read command (the a7xx values used before the AC01
    commands). Also seen randomly about 3.2 seconds apart; possibly a
    keep-alive.
    """
    self.pipe_channel("Realtime: Read...")
    self.realtime_pipe(swizzle_table[MOSHI_READ][0])
def realtime_prologue(self):
    """Send the prologue command, issued before a jump / program / turn-on."""
    self.pipe_channel("Realtime: Prologue")
    self.realtime_pipe(swizzle_table[MOSHI_PROLOGUE][0])
def realtime_epilogue(self):
    """
    Send the epilogue command, issued after a jump / program.

    Observed board status sequence: 205 before, 207 while processing,
    then 205 when done.
    """
    self.pipe_channel("Realtime: Epilogue")
    self.realtime_pipe(swizzle_table[MOSHI_EPILOGUE][0])
def realtime_freemotor(self):
    """Send the FreeMotor command, releasing the stepper motors."""
    self.pipe_channel("Realtime: FreeMotor")
    self.realtime_pipe(swizzle_table[MOSHI_FREEMOTOR][0])
def realtime_laser(self):
    """Send the laser command toggle."""
    self.pipe_channel("Realtime: Laser Active")
    self.realtime_pipe(swizzle_table[MOSHI_LASER][0])
def realtime_stop(self):
    """Send the stop command (likely the same effect as FreeMotor)."""
    self.pipe_channel("Realtime: Stop")
    self.realtime_pipe(swizzle_table[MOSHI_ESTOP][0])
def realtime_pipe(self, data):
    """Write a realtime command directly to the USB connection, if one exists."""
    connection = self._connection
    if connection is None:
        self.pipe_channel("Not connected")
        return
    try:
        connection.write_addr(data)
    except ConnectionError:
        self.pipe_channel("Connection error")

realtime_write = realtime_pipe
def open(self):
    """
    Open the CH341 connection using the context's USB settings; no-op if
    already connected.

    :raises ConnectionRefusedError: if ch341 connect returns no connection.

    Fix: the ``None`` check now happens *before* the connection is used;
    previously a failed connect dereferenced ``None`` (AttributeError on the
    mock/open calls) before the intended ConnectionRefusedError could fire.
    """
    self.pipe_channel("open()")
    if self._connection is not None:
        return
    connection = self.ch341.connect(
        driver_index=self.context.usb_index,
        chipv=self.context.usb_version,
        bus=self.context.usb_bus,
        address=self.context.usb_address,
        mock=self.context.mock,
    )
    if connection is None:
        raise ConnectionRefusedError("ch341 connect did not return a connection.")
    self._connection = connection
    if self.context.mock:
        # Mock connections fake the board status replies.
        connection.mock_status = 205
        connection.mock_finish = 207
    else:
        connection.open()
def close(self):
    """Close and drop the USB connection; raise ConnectionError if not open."""
    self.pipe_channel("close()")
    if self._connection is None:
        raise ConnectionError
    self._connection.close()
    self._connection = None
def push_program(self, program):
    """Queue a finished moshiblob program and ensure the send thread is running."""
    self.pipe_channel("Pushed: %s" % str(program.data))
    self._programs.append(program)
    self.start()
def unlock_rail(self):
    """Send FreeMotor, but only while the main send lock is not held."""
    self.pipe_channel("Control Request: Unlock")
    if not self._main_lock.locked():
        self.realtime_freemotor()
def start(self):
    """
    Controller state change to Started: spawn the data-send thread if none
    is alive and mark the pipe state as INITIALIZE.
    """
    if self._thread is None or not self._thread.is_alive():
        self._thread = self.context.threaded(
            self._thread_data_send,
            thread_name="MoshiPipe(%s)" % self.context.path,
            result=self.stop,  # stop() runs when the thread function returns.
        )
        self.update_state(STATE_INITIALIZE)
def pause(self):
    """
    Hold the controller from sending any additional packets.

    When called from INITIALIZE this first starts processing; otherwise the
    transition is honored from ACTIVE or IDLE.
    """
    if self.state == STATE_INITIALIZE:
        self.start()
        self.update_state(STATE_PAUSE)
    if self.state in (STATE_ACTIVE, STATE_IDLE):
        self.update_state(STATE_PAUSE)
def resume(self):
    """Resume processing; only honored from the PAUSE state."""
    if self.state != STATE_PAUSE:
        return
    self.update_state(STATE_ACTIVE)
def estop(self):
    """
    Emergency stop: abort the current buffer and program queue, send the
    realtime stop command, and mark the pipe as terminated.
    """
    self._buffer = bytearray()
    self._programs.clear()
    self.context.signal("pipe;buffer", 0)
    self.realtime_stop()
    self.update_state(STATE_TERMINATE)
    self.pipe_channel("Control Request: Stop")
def stop(self, *args):
    """Join the send thread (if any) and forget it; safe from the thread itself."""
    thread = self._thread
    if thread is not None:
        try:
            thread.join()  # Wait until stop completes before continuing.
        except RuntimeError:
            # join() raised because we are the thread being stopped.
            pass
        self._thread = None
def update_state(self, state):
    """Record a new pipe state and broadcast it, skipping no-op updates."""
    if state == self.state:
        return
    self.state = state
    context = self.context
    if context is not None:
        context.signal("pipe;thread", self.state)
def update_buffer(self):
    """Publish the current working-buffer size to listeners via the context."""
    context = self.context
    if context is None:
        return
    context._buffer_size = len(self._buffer)
    context.signal("pipe;buffer", context._buffer_size)
| |
barcodes to quickly recognize high quality barcodes which fill almost whole image. Mode helps to quickly recognize generated barcodes from Internet. # noqa: E501
:return: The allow_one_d_fast_barcodes_detector of this ReaderParams. # noqa: E501
:rtype: bool
"""
return self._allow_one_d_fast_barcodes_detector
@allow_one_d_fast_barcodes_detector.setter
def allow_one_d_fast_barcodes_detector(self, allow_one_d_fast_barcodes_detector):
    """Sets the allow_one_d_fast_barcodes_detector of this ReaderParams.

    Allows engine for 1D barcodes to quickly recognize high quality barcodes which fill almost whole image. Mode helps to quickly recognize generated barcodes from Internet. # noqa: E501

    :param allow_one_d_fast_barcodes_detector: The allow_one_d_fast_barcodes_detector of this ReaderParams. # noqa: E501
    :type: bool
    """
    self._allow_one_d_fast_barcodes_detector = allow_one_d_fast_barcodes_detector
@property
def allow_one_d_wiped_bars_restoration(self):
    """Gets the allow_one_d_wiped_bars_restoration of this ReaderParams. # noqa: E501

    Allows engine for 1D barcodes to recognize barcodes with single wiped/glued bars in pattern. # noqa: E501

    :return: The allow_one_d_wiped_bars_restoration of this ReaderParams. # noqa: E501
    :rtype: bool
    """
    return self._allow_one_d_wiped_bars_restoration

@allow_one_d_wiped_bars_restoration.setter
def allow_one_d_wiped_bars_restoration(self, allow_one_d_wiped_bars_restoration):
    """Sets the allow_one_d_wiped_bars_restoration of this ReaderParams.

    Allows engine for 1D barcodes to recognize barcodes with single wiped/glued bars in pattern. # noqa: E501

    :param allow_one_d_wiped_bars_restoration: The allow_one_d_wiped_bars_restoration of this ReaderParams. # noqa: E501
    :type: bool
    """
    self._allow_one_d_wiped_bars_restoration = allow_one_d_wiped_bars_restoration
@property
def allow_qr_micro_qr_restoration(self):
    """Gets the allow_qr_micro_qr_restoration of this ReaderParams. # noqa: E501

    Allows engine for QR/MicroQR to recognize damaged MicroQR barcodes. # noqa: E501

    :return: The allow_qr_micro_qr_restoration of this ReaderParams. # noqa: E501
    :rtype: bool
    """
    return self._allow_qr_micro_qr_restoration

@allow_qr_micro_qr_restoration.setter
def allow_qr_micro_qr_restoration(self, allow_qr_micro_qr_restoration):
    """Sets the allow_qr_micro_qr_restoration of this ReaderParams.

    Allows engine for QR/MicroQR to recognize damaged MicroQR barcodes. # noqa: E501

    :param allow_qr_micro_qr_restoration: The allow_qr_micro_qr_restoration of this ReaderParams. # noqa: E501
    :type: bool
    """
    self._allow_qr_micro_qr_restoration = allow_qr_micro_qr_restoration
@property
def allow_regular_image(self):
    """Gets the allow_regular_image of this ReaderParams. # noqa: E501

    Allows engine to recognize regular image without any restorations as main scan. Mode to recognize image as is. # noqa: E501

    :return: The allow_regular_image of this ReaderParams. # noqa: E501
    :rtype: bool
    """
    return self._allow_regular_image

@allow_regular_image.setter
def allow_regular_image(self, allow_regular_image):
    """Sets the allow_regular_image of this ReaderParams.

    Allows engine to recognize regular image without any restorations as main scan. Mode to recognize image as is. # noqa: E501

    :param allow_regular_image: The allow_regular_image of this ReaderParams. # noqa: E501
    :type: bool
    """
    self._allow_regular_image = allow_regular_image
@property
def allow_salt_and_pepper_filtering(self):
    """Gets the allow_salt_and_pepper_filtering of this ReaderParams. # noqa: E501

    Allows engine to recognize barcodes with salt and pepper noise type. Mode can remove small noise with white and black dots. # noqa: E501

    :return: The allow_salt_and_pepper_filtering of this ReaderParams. # noqa: E501
    :rtype: bool
    """
    return self._allow_salt_and_pepper_filtering

@allow_salt_and_pepper_filtering.setter
def allow_salt_and_pepper_filtering(self, allow_salt_and_pepper_filtering):
    """Sets the allow_salt_and_pepper_filtering of this ReaderParams.

    Allows engine to recognize barcodes with salt and pepper noise type. Mode can remove small noise with white and black dots. # noqa: E501

    :param allow_salt_and_pepper_filtering: The allow_salt_and_pepper_filtering of this ReaderParams. # noqa: E501
    :type: bool
    """
    self._allow_salt_and_pepper_filtering = allow_salt_and_pepper_filtering
@property
def allow_white_spots_removing(self):
    """Gets the allow_white_spots_removing of this ReaderParams. # noqa: E501

    Allows engine to recognize image without small white spots as additional scan. Mode helps to recognize noised image as well as median smoothing filtering. # noqa: E501

    :return: The allow_white_spots_removing of this ReaderParams. # noqa: E501
    :rtype: bool
    """
    return self._allow_white_spots_removing

@allow_white_spots_removing.setter
def allow_white_spots_removing(self, allow_white_spots_removing):
    """Sets the allow_white_spots_removing of this ReaderParams.

    Allows engine to recognize image without small white spots as additional scan. Mode helps to recognize noised image as well as median smoothing filtering. # noqa: E501

    :param allow_white_spots_removing: The allow_white_spots_removing of this ReaderParams. # noqa: E501
    :type: bool
    """
    self._allow_white_spots_removing = allow_white_spots_removing
@property
def check_more1_d_variants(self):
    """Gets the check_more1_d_variants of this ReaderParams. # noqa: E501

    Allows engine to recognize 1D barcodes with checksum by checking more recognition variants. Default value: False. # noqa: E501

    :return: The check_more1_d_variants of this ReaderParams. # noqa: E501
    :rtype: bool
    """
    return self._check_more1_d_variants

@check_more1_d_variants.setter
def check_more1_d_variants(self, check_more1_d_variants):
    """Sets the check_more1_d_variants of this ReaderParams.

    Allows engine to recognize 1D barcodes with checksum by checking more recognition variants. Default value: False. # noqa: E501

    :param check_more1_d_variants: The check_more1_d_variants of this ReaderParams. # noqa: E501
    :type: bool
    """
    self._check_more1_d_variants = check_more1_d_variants
@property
def fast_scan_only(self):
    """Gets the fast_scan_only of this ReaderParams. # noqa: E501

    Allows engine for 1D barcodes to quickly recognize middle slice of an image and return result without using any time-consuming algorithms. Default value: False. # noqa: E501

    :return: The fast_scan_only of this ReaderParams. # noqa: E501
    :rtype: bool
    """
    return self._fast_scan_only

@fast_scan_only.setter
def fast_scan_only(self, fast_scan_only):
    """Sets the fast_scan_only of this ReaderParams.

    Allows engine for 1D barcodes to quickly recognize middle slice of an image and return result without using any time-consuming algorithms. Default value: False. # noqa: E501

    :param fast_scan_only: The fast_scan_only of this ReaderParams. # noqa: E501
    :type: bool
    """
    self._fast_scan_only = fast_scan_only
@property
def region_likelihood_threshold_percent(self):
    """Gets the region_likelihood_threshold_percent of this ReaderParams. # noqa: E501

    Sets threshold for detected regions that may contain barcodes. Value 0.7 means that bottom 70% of possible regions are filtered out and not processed further. Region likelihood threshold must be between [0.05, 0.9] Use high values for clear images with few barcodes. Use low values for images with many barcodes or for noisy images. Low value may lead to a bigger recognition time. # noqa: E501

    :return: The region_likelihood_threshold_percent of this ReaderParams. # noqa: E501
    :rtype: float
    """
    return self._region_likelihood_threshold_percent

@region_likelihood_threshold_percent.setter
def region_likelihood_threshold_percent(self, region_likelihood_threshold_percent):
    """Sets the region_likelihood_threshold_percent of this ReaderParams.

    Sets threshold for detected regions that may contain barcodes. Value 0.7 means that bottom 70% of possible regions are filtered out and not processed further. Region likelihood threshold must be between [0.05, 0.9] Use high values for clear images with few barcodes. Use low values for images with many barcodes or for noisy images. Low value may lead to a bigger recognition time. # noqa: E501

    :param region_likelihood_threshold_percent: The region_likelihood_threshold_percent of this ReaderParams. # noqa: E501
    :type: float
    """
    self._region_likelihood_threshold_percent = region_likelihood_threshold_percent
@property
def scan_window_sizes(self):
    """Gets the scan_window_sizes of this ReaderParams. # noqa: E501

    Scan window sizes in pixels. Allowed sizes are 10, 15, 20, 25, 30. Scanning with small window size takes more time and provides more accuracy but may fail in detecting very big barcodes. Combining of several window sizes can improve detection quality. # noqa: E501

    :return: The scan_window_sizes of this ReaderParams. # noqa: E501
    :rtype: list[int]
    """
    return self._scan_window_sizes

@scan_window_sizes.setter
def scan_window_sizes(self, scan_window_sizes):
    """Sets the scan_window_sizes of this ReaderParams.

    Scan window sizes in pixels. Allowed sizes are 10, 15, 20, 25, 30. Scanning with small window size takes more time and provides more accuracy but may fail in detecting very big barcodes. Combining of several window sizes can improve detection quality. # noqa: E501

    :param scan_window_sizes: The scan_window_sizes of this ReaderParams. # noqa: E501
    :type: list[int]
    """
    self._scan_window_sizes = scan_window_sizes
@property
def similarity(self):
    """Gets the similarity of this ReaderParams. # noqa: E501

    Similarity coefficient depends on how homogeneous barcodes are. Use high value for clear barcodes. Use low values to detect barcodes that are partly damaged or not lighten evenly. Similarity coefficient must be between [0.5, 0.9] # noqa: E501

    :return: The similarity of this ReaderParams. # noqa: E501
    :rtype: float
    """
    return self._similarity

@similarity.setter
def similarity(self, similarity):
    """Sets the similarity of this ReaderParams.

    Similarity coefficient depends on how homogeneous barcodes are. Use high value for clear barcodes. Use low values to detect barcodes that are partly damaged or not lighten evenly. Similarity coefficient must be between [0.5, 0.9] # noqa: E501

    :param similarity: The similarity of this ReaderParams. # noqa: E501
    :type: float
    """
    self._similarity = similarity
@property
def skip_diagonal_search(self):
    """Gets the skip_diagonal_search of this ReaderParams. # noqa: E501

    Allows detector to skip search for diagonal barcodes. Setting it to false will increase detection time but allow to find diagonal barcodes that can be missed otherwise. Enabling of diagonal search leads to a bigger detection time. # noqa: E501

    :return: The skip_diagonal_search of this ReaderParams. # noqa: E501
    :rtype: bool
    """
    return self._skip_diagonal_search
@skip_diagonal_search.setter
def skip_diagonal_search(self, skip_diagonal_search):
"""Sets the skip_diagonal_search of | |
# <gh_stars>0
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :mod:`pennylane.plugin.Tensornet` device.
"""
import cmath
# pylint: disable=protected-access,cell-var-from-loop
import math
import pytest
# TODO: remove the following skip when Tensornet has been ported to
# Qubit device, and the gate imports above are removed.
tensorflow = pytest.importorskip("tensorflow", minversion="2.0")
import pennylane as qml
from pennylane import numpy as np, QuantumFunctionError
from pennylane.wires import Wires
from pennylane.beta.devices.numpy_ops import (
CNOT,
CSWAP,
CZ,
SWAP,
CRot3,
CRotx,
CRoty,
CRotz,
H,
Rot3,
Rotx,
Roty,
Rotz,
Rphi,
S,
T,
X,
Y,
Z,
hermitian,
identity,
Toffoli,
spectral_decomposition,
unitary,
)
tn = pytest.importorskip("tensornetwork", minversion="0.3")
# Fixed 2x2 matrix fixture used as an arbitrary single-qubit unitary in tests.
U = np.array(
    [
        [0.83645892 - 0.40533293j, -0.20215326 + 0.30850569j],
        [-0.23889780 - 0.28101519j, -0.88031770 - 0.29832709j],
    ]
)

# Fixed 4x4 matrix fixture used as an arbitrary two-qubit unitary in tests.
U2 = np.array(
    [
        [
            -0.07843244 - 3.57825948e-01j,
            0.71447295 - 5.38069384e-02j,
            0.20949966 + 6.59100734e-05j,
            -0.50297381 + 2.35731613e-01j,
        ],
        [
            -0.26626692 + 4.53837083e-01j,
            0.27771991 - 2.40717436e-01j,
            0.41228017 - 1.30198687e-01j,
            0.01384490 - 6.33200028e-01j,
        ],
        [
            -0.69254712 - 2.56963068e-02j,
            -0.15484858 + 6.57298384e-02j,
            -0.53082141 + 7.18073414e-02j,
            -0.41060450 - 1.89462315e-01j,
        ],
        [
            -0.09686189 - 3.15085273e-01j,
            -0.53241387 - 1.99491763e-01j,
            0.56928622 + 3.97704398e-01j,
            -0.28671074 - 6.01574497e-02j,
        ],
    ]
)

# Toffoli matrix: identity with the last two basis states swapped.
U_toffoli = np.diag([1 for i in range(8)])
U_toffoli[6:8, 6:8] = np.array([[0, 1], [1, 0]])

U_swap = np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])

U_cswap = np.array(
    [
        [1, 0, 0, 0, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0, 0, 0],
        [0, 0, 1, 0, 0, 0, 0, 0],
        [0, 0, 0, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 0, 0, 0],
        [0, 0, 0, 0, 0, 0, 1, 0],
        [0, 0, 0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 0, 1],
    ]
)

# NOTE(review): this reassignment shadows the Hadamard gate ``H`` imported
# from numpy_ops above; the tests below use ``H`` as a Hermitian observable
# fixture instead. Renaming would change test references — confirm intent.
H = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])

# Parameter grids used by parametrized tests.
THETA = np.linspace(0.11, 1, 3)
PHI = np.linspace(0.32, 1, 3)
VARPHI = np.linspace(0.02, 1, 3)
def prep_par(par, op):
    """Convert ``par`` into the parameter list that ``op`` expects."""
    if op.par_domain != "A":
        return par
    return [np.diag([value, 1]) for value in par]
def nodes_and_edges_valid(dev, num_nodes, node_names, rep):
    """Asserts that nodes in a device ``dev`` are properly initialized, when there
    are ``num_nodes`` nodes expected, with names ``node_names``, using representation ``rep``."""
    if set(dev._nodes.keys()) != {"state"}:
        return False
    state_nodes = dev._nodes["state"]
    if len(state_nodes) != num_nodes:
        return False
    for idx in range(num_nodes):
        if state_nodes[idx].name != node_names[idx]:
            return False
    return edges_valid(dev, num_nodes=num_nodes, rep=rep)
def edges_valid(dev, num_nodes, rep):
    """Returns True if the edges in a device ``dev`` are properly initialized, when there
    are ``num_nodes`` nodes expected, using representation ``rep``.

    Fix: an unrecognized ``rep`` previously fell through and raised
    UnboundLocalError on ``node_edges_set``; it now raises a clear ValueError.
    """
    if rep == "exact":
        node_edges = [dev._nodes["state"][idx].edges for idx in range(num_nodes)]
        node_edges_set = set([edge for sublist in node_edges for edge in sublist])
    elif rep == "mps":
        node_edges_set = {node.edges[1] for node in dev.mps.nodes}
    else:
        raise ValueError("Unknown representation: {}".format(rep))
    return node_edges_set == set(dev._free_wire_edges)
class TestAuxiliaryFunctions:
"""Test auxiliary functions."""
def test_spectral_decomposition(self, tol):
"""Test that the correct spectral decomposition is returned."""
a, P = spectral_decomposition(H)
# verify that H = \sum_k a_k P_k
assert np.allclose(H, np.einsum("i,ijk->jk", a, P), atol=tol, rtol=0)
def test_phase_shift(self, tol):
"""Test phase shift is correct"""
# test identity for theta=0
assert np.allclose(Rphi(0), np.identity(2), atol=tol, rtol=0)
# test arbitrary phase shift
phi = 0.5432
expected = np.array([[1, 0], [0, np.exp(1j * phi)]])
assert np.allclose(Rphi(phi), expected, atol=tol, rtol=0)
def test_x_rotation(self, tol):
"""Test x rotation is correct"""
# test identity for theta=0
assert np.allclose(Rotx(0), np.identity(2), atol=tol, rtol=0)
# test identity for theta=pi/2
expected = np.array([[1, -1j], [-1j, 1]]) / np.sqrt(2)
assert np.allclose(Rotx(np.pi / 2), expected, atol=tol, rtol=0)
# test identity for theta=pi
expected = -1j * np.array([[0, 1], [1, 0]])
assert np.allclose(Rotx(np.pi), expected, atol=tol, rtol=0)
def test_y_rotation(self, tol):
"""Test y rotation is correct"""
# test identity for theta=0
assert np.allclose(Roty(0), np.identity(2), atol=tol, rtol=0)
# test identity for theta=pi/2
expected = np.array([[1, -1], [1, 1]]) / np.sqrt(2)
assert np.allclose(Roty(np.pi / 2), expected, atol=tol, rtol=0)
# test identity for theta=pi
expected = np.array([[0, -1], [1, 0]])
assert np.allclose(Roty(np.pi), expected, atol=tol, rtol=0)
def test_z_rotation(self, tol):
"""Test z rotation is correct"""
# test identity for theta=0
assert np.allclose(Rotz(0), np.identity(2), atol=tol, rtol=0)
# test identity for theta=pi/2
expected = np.diag(np.exp([-1j * np.pi / 4, 1j * np.pi / 4]))
assert np.allclose(Rotz(np.pi / 2), expected, atol=tol, rtol=0)
# test identity for theta=pi
assert np.allclose(Rotz(np.pi), -1j * Z, atol=tol, rtol=0)
def test_arbitrary_rotation(self, tol):
"""Test arbitrary single qubit rotation is correct"""
# test identity for phi,theta,omega=0
assert np.allclose(Rot3(0, 0, 0), np.identity(2), atol=tol, rtol=0)
# expected result
def arbitrary_rotation(x, y, z):
"""arbitrary single qubit rotation"""
c = np.cos(y / 2)
s = np.sin(y / 2)
return np.array(
[
[np.exp(-0.5j * (x + z)) * c, -np.exp(0.5j * (x - z)) * s],
[np.exp(-0.5j * (x - z)) * s, np.exp(0.5j * (x + z)) * c],
]
)
a, b, c = 0.432, -0.152, 0.9234
assert np.allclose(Rot3(a, b, c), arbitrary_rotation(a, b, c), atol=tol, rtol=0)
def test_C_x_rotation(self, tol):
"""Test controlled x rotation is correct"""
# test identity for theta=0
assert np.allclose(CRotx(0), np.identity(4), atol=tol, rtol=0)
# test identity for theta=pi/2
expected = np.array(
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1 / np.sqrt(2), -1j / np.sqrt(2)],
[0, 0, -1j / np.sqrt(2), 1 / np.sqrt(2)],
]
)
assert np.allclose(CRotx(np.pi / 2), expected, atol=tol, rtol=0)
# test identity for theta=pi
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, -1j], [0, 0, -1j, 0]])
assert np.allclose(CRotx(np.pi), expected, atol=tol, rtol=0)
def test_C_y_rotation(self, tol):
    """Test that the controlled-RY gate matrix is correct at key angles."""
    # theta = 0 must give the 4x4 identity
    assert np.allclose(CRoty(0), np.identity(4), atol=tol, rtol=0)
    # theta = pi/2: RY(pi/2) acts on the control-1 block only
    want = np.identity(4)
    want[2:, 2:] = np.array([[1, -1], [1, 1]]) / np.sqrt(2)
    assert np.allclose(CRoty(np.pi / 2), want, atol=tol, rtol=0)
    # theta = pi: RY(pi) = [[0, -1], [1, 0]] on the control-1 block
    want = np.identity(4)
    want[2:, 2:] = np.array([[0, -1], [1, 0]])
    assert np.allclose(CRoty(np.pi), want, atol=tol, rtol=0)
def test_C_z_rotation(self, tol):
    """Test that the controlled-RZ gate matrix is correct at key angles."""
    # theta = 0 must give the 4x4 identity
    assert np.allclose(CRotz(0), np.identity(4), atol=tol, rtol=0)
    # theta = pi/2: phases e^{-i pi/4}, e^{+i pi/4} on the control-1 block
    want = np.diag([1, 1, np.exp(-1j * np.pi / 4), np.exp(1j * np.pi / 4)])
    assert np.allclose(CRotz(np.pi / 2), want, atol=tol, rtol=0)
    # theta = pi: phases -i, +i on the control-1 block
    want = np.diag([1, 1, -1j, 1j])
    assert np.allclose(CRotz(np.pi), want, atol=tol, rtol=0)
def test_controlled_arbitrary_rotation(self, tol):
    """Test that the controlled arbitrary single-qubit rotation is correct."""
    # All three angles zero must give the 4x4 identity
    assert np.allclose(CRot3(0, 0, 0), np.identity(4), atol=tol, rtol=0)
    # All three angles pi: [[0, -1], [1, 0]] on the control-1 block
    want = np.identity(4)
    want[2:, 2:] = np.array([[0, -1], [1, 0]])
    assert np.allclose(CRot3(np.pi, np.pi, np.pi), want, atol=tol, rtol=0)

    def reference_controlled_rotation(x, y, z):
        """Reference controlled arbitrary rotation matrix, written out."""
        cos_half = np.cos(y / 2)
        sin_half = np.sin(y / 2)
        out = np.identity(4, dtype=complex)
        out[2:, 2:] = np.array(
            [[np.exp(-0.5j * (x + z)) * cos_half,
              -np.exp(0.5j * (x - z)) * sin_half],
             [np.exp(-0.5j * (x - z)) * sin_half,
              np.exp(0.5j * (x + z)) * cos_half]])
        return out

    # Compare against the explicit matrix at a generic set of angles
    phi, theta, omega = 0.432, -0.152, 0.9234
    assert np.allclose(CRot3(phi, theta, omega),
                       reference_controlled_rotation(phi, theta, omega),
                       atol=tol, rtol=0)
class TestMatrixOperations:
"""Tests for unitary and hermitian functions."""
def test_unitary(self, tol):
    """Test that the unitary function produces the correct output."""
    result = unitary(U)
    # The wrapper must hand back a plain numpy array ...
    assert isinstance(result, np.ndarray)
    # ... that is numerically identical to the input matrix
    assert np.allclose(result, U, atol=tol, rtol=0)
def test_unitary_exceptions(self):
"""Tests that the unitary function raises the proper errors."""
# test non-square matrix
with | |
"""Control functions for WFSC."""
import numpy as np
# import multiprocessing
# from astropy.io import fits
# import matplotlib.pyplot as plt
import falco
def wrapper(mp, cvar, jacStruct):
    """
    Outermost wrapper function for all the controller functions.

    Builds the normal-equation matrices (G*G and Re{G*E}) from the control
    Jacobians, dispatches to the selected controller, and applies the
    resulting delta DM commands to the DM voltage maps.

    Parameters
    ----------
    mp : ModelParameters
        Structure containing optical model parameters
    cvar : ModelParameters
        Structure containing controller variables
    jacStruct : ModelParameters
        Structure containing control Jacobians for each specified DM.

    Returns
    -------
    None
        Changes are made by reference to mp and cvar.

    Raises
    ------
    ValueError
        If mp.controller is not one of the supported controller names.
    """
    print('Using the Jacobian to make other matrices...', end='')

    # Compute matrices for linear control with regular EFC
    cvar.GstarG_wsum = np.zeros((cvar.NeleAll, cvar.NeleAll))
    cvar.RealGstarEab_wsum = np.zeros((cvar.NeleAll, 1))

    for im in range(mp.jac.Nmode):
        # Concatenate the Jacobians of all active DMs into one wide matrix.
        # (Previously done by starting from a dummy zero column and stripping
        # it afterward; collecting the blocks first avoids that hack.)
        Gblocks = []
        if(any(mp.dm_ind == 1)): Gblocks.append(np.squeeze(jacStruct.G1[:, :, im]))
        if(any(mp.dm_ind == 2)): Gblocks.append(np.squeeze(jacStruct.G2[:, :, im]))
        if(any(mp.dm_ind == 8)): Gblocks.append(np.squeeze(jacStruct.G8[:, :, im]))
        if(any(mp.dm_ind == 9)): Gblocks.append(np.squeeze(jacStruct.G9[:, :, im]))
        if Gblocks:
            Gmode = np.hstack(Gblocks)
        else:
            # Degenerate case (no DMs selected): keep the (Npix, 0) shape the
            # original zero-column construction produced.
            Gmode = np.zeros((mp.Fend.corr.Npix, 0), dtype=complex)

        # Square matrix part stays the same if no re-linearization has occurred.
        cvar.GstarG_wsum += mp.jac.weights[im]*np.real(np.conj(Gmode).T @ Gmode)

        # The G^*E part changes each iteration because the E-field changes.
        # Apply 2-D spatial weighting to E-field in dark hole pixels.
        Eweighted = mp.WspatialVec*cvar.EfieldVec[:, im]

        # Apply the Jacobian weights and add to the total.
        cvar.RealGstarEab_wsum += mp.jac.weights[im]*np.real(
            np.conj(Gmode).T @ Eweighted.reshape(mp.Fend.corr.Npix, 1))

    # Make the regularization matrix. (Define only diagonal here to save RAM.)
    cvar.EyeGstarGdiag = np.max(np.diag(cvar.GstarG_wsum))*np.ones(cvar.NeleAll)
    cvar.EyeNorm = np.max(np.diag(cvar.GstarG_wsum))
    print('done.')

    # Call the Controller Function
    print('Control beginning ...')
    # Established, conventional controllers
    if mp.controller.lower() == 'plannedefc':
        dDM = _planned_efc(mp, cvar)
    elif mp.controller.lower() == 'gridsearchefc':
        dDM = _grid_search_efc(mp, cvar)
    else:
        # BUGFIX: an unrecognized controller previously fell through to a
        # confusing NameError on dDM below; fail fast with a clear message.
        raise ValueError('Unsupported value of mp.controller: %s' % mp.controller)

    # Update the DM commands by adding the delta control signal
    if(any(mp.dm_ind == 1)): mp.dm1.V += dDM.dDM1V
    if(any(mp.dm_ind == 2)): mp.dm2.V += dDM.dDM2V
    if(any(mp.dm_ind == 8)): mp.dm8.V += dDM.dDM8V
    if(any(mp.dm_ind == 9)): mp.dm9.V += dDM.dDM9V

    # Save the delta from the previous command
    if(any(mp.dm_ind == 1)): mp.dm1.dV = dDM.dDM1V
    if(any(mp.dm_ind == 2)): mp.dm2.dV = dDM.dDM2V
    if(any(mp.dm_ind == 8)): mp.dm8.dV = dDM.dDM8V
    if(any(mp.dm_ind == 9)): mp.dm9.dV = dDM.dDM9V
def cull_actuators(mp, cvar, jacStruct):
    """
    Remove weak actuators from the controlled set.

    For each active DM, each actuator's total Jacobian intensity is computed
    (summed over pixels, averaged over modes) and normalized to the strongest
    actuator on that DM; actuators below 10**mp.logGmin are dropped from the
    controlled set and the corresponding Jacobian columns are cropped away.
    Tied actuator pairs are re-added so the tied-actuator logic stays simple.

    Parameters
    ----------
    mp : ModelParameters
        Structure containing optical model parameters
    cvar : ModelParameters
        Structure containing controller variables
    jacStruct : ModelParameters
        Structure containing control Jacobians for each specified DM.

    Returns
    -------
    None
        Changes are made by reference to mp and jacStruct.

    Raises
    ------
    TypeError
        If mp is not a falco.config.ModelParameters instance.
    """
    if type(mp) is not falco.config.ModelParameters:
        raise TypeError('Input "mp" must be of type ModelParameters')

    # Reduce the number of actuators used based on their relative strength
    # in the Jacobian
    # Culling only happens when both enabled AND the Jacobian was just
    # re-linearized; otherwise the previous actuator selection is kept.
    if(cvar.flagCullAct and cvar.flagRelin):
        print('Weeding out weak actuators from the control Jacobian...')
        if(any(mp.dm_ind == 1)):
            # Per-actuator intensity: sum over pixels of mode-averaged |G|^2
            G1intNorm = np.sum(np.mean(np.abs(jacStruct.G1)**2, axis=2), axis=0)
            G1intNorm = G1intNorm/np.max(G1intNorm)
            mp.dm1.act_ele = np.nonzero(G1intNorm >= 10**(mp.logGmin))[0]
            del G1intNorm
        if(any(mp.dm_ind == 2)):
            G2intNorm = np.sum(np.mean(np.abs(jacStruct.G2)**2, axis=2), axis=0)
            G2intNorm = G2intNorm/np.max(G2intNorm)
            mp.dm2.act_ele = np.nonzero(G2intNorm >= 10**(mp.logGmin))[0]
            del G2intNorm
        if(any(mp.dm_ind == 8)):
            G8intNorm = np.sum(np.mean(np.abs(jacStruct.G8)**2, axis=2), axis=0)
            G8intNorm = G8intNorm/np.max(G8intNorm)
            mp.dm8.act_ele = np.nonzero(G8intNorm >= 10**(mp.logGmin))[0]
            del G8intNorm
        if(any(mp.dm_ind == 9)):
            G9intNorm = np.sum(np.mean(np.abs(jacStruct.G9)**2, axis=2), axis=0)
            G9intNorm = G9intNorm/np.max(G9intNorm)
            mp.dm9.act_ele = np.nonzero(G9intNorm >= 10**(mp.logGmin))[0]
            del G9intNorm

        # Add back in all actuators that are tied (to make the tied actuator
        # logic easier)
        if(any(mp.dm_ind == 1)):
            for ti in range(mp.dm1.tied.shape[0]):
                if not (any(mp.dm1.act_ele == mp.dm1.tied[ti, 0])):
                    mp.dm1.act_ele = np.hstack([mp.dm1.act_ele, mp.dm1.tied[ti, 0]])
                if not (any(mp.dm1.act_ele == mp.dm1.tied[ti, 1])):
                    mp.dm1.act_ele = np.hstack([mp.dm1.act_ele, mp.dm1.tied[ti, 1]])
            # Need to sort for the logic in model_Jacobian.m
            mp.dm1.act_ele = np.sort(mp.dm1.act_ele)
        if(any(mp.dm_ind == 2)):
            for ti in range(mp.dm2.tied.shape[0]):
                if not any(mp.dm2.act_ele == mp.dm2.tied[ti, 0]):
                    mp.dm2.act_ele = np.hstack([mp.dm2.act_ele, mp.dm2.tied[ti, 0]])
                if not any(mp.dm2.act_ele == mp.dm2.tied[ti, 1]):
                    mp.dm2.act_ele = np.hstack([mp.dm2.act_ele, mp.dm2.tied[ti, 1]])
            # Need to sort for the logic in model_Jacobian.m
            mp.dm2.act_ele = np.sort(mp.dm2.act_ele)
        # NOTE(review): DM8/DM9 tied-actuator re-adding is still the original
        # MATLAB code, not yet ported to Python.
        # if(any(mp.dm_ind == 8))
        #     for ti=1:size(mp.dm8.tied,1)
        #         if(any(mp.dm8.act_ele==mp.dm8.tied(ti,1))==false); mp.dm8.act_ele = [mp.dm8.act_ele; mp.dm8.tied(ti,1)]; end
        #         if(any(mp.dm8.act_ele==mp.dm8.tied(ti,2))==false); mp.dm8.act_ele = [mp.dm8.act_ele; mp.dm8.tied(ti,2)]; end
        #     end
        #     mp.dm8.act_ele = sort(mp.dm8.act_ele);
        # end
        # if(any(mp.dm_ind == 9))
        #     for ti=1:size(mp.dm9.tied,1)
        #         if(any(mp.dm9.act_ele==mp.dm9.tied(ti,1))==false); mp.dm9.act_ele = [mp.dm9.act_ele; mp.dm9.tied(ti,1)]; end
        #         if(any(mp.dm9.act_ele==mp.dm9.tied(ti,2))==false); mp.dm9.act_ele = [mp.dm9.act_ele; mp.dm9.tied(ti,2)]; end
        #     end
        #     mp.dm9.act_ele = sort(mp.dm9.act_ele);
        # end

        # Update the number of elements used per DM
        if(any(mp.dm_ind == 1)): mp.dm1.Nele = mp.dm1.act_ele.size
        if(any(mp.dm_ind == 2)): mp.dm2.Nele = mp.dm2.act_ele.size
        if(any(mp.dm_ind == 8)): mp.dm8.Nele = mp.dm8.act_ele.size
        if(any(mp.dm_ind == 9)): mp.dm9.Nele = mp.dm9.act_ele.size

        if(any(mp.dm_ind == 1)): print('  DM1: %d/%d (%.2f%%) actuators kept for Jacobian' % (mp.dm1.Nele, mp.dm1.NactTotal, 100*mp.dm1.Nele/mp.dm1.NactTotal))
        if(any(mp.dm_ind == 2)): print('  DM2: %d/%d (%.2f%%) actuators kept for Jacobian' % (mp.dm2.Nele, mp.dm2.NactTotal, 100*mp.dm2.Nele/mp.dm2.NactTotal))
        if(any(mp.dm_ind == 8)): print('  DM8: %d/%d (%.2f%%) actuators kept for Jacobian' % (mp.dm8.Nele, mp.dm8.NactTotal, 100*mp.dm8.Nele/mp.dm8.NactTotal))
        if(any(mp.dm_ind == 9)): print('  DM9: %d/%d (%.2f%%) actuators kept for Jacobian' % (mp.dm9.Nele, mp.dm9.NactTotal, 100*mp.dm9.Nele/mp.dm9.NactTotal))

        # Crop out unused actuators from the control Jacobian
        if(any(mp.dm_ind == 1)): jacStruct.G1 = jacStruct.G1[:, mp.dm1.act_ele, :]
        if(any(mp.dm_ind == 2)): jacStruct.G2 = jacStruct.G2[:, mp.dm2.act_ele, :]
        if(any(mp.dm_ind == 8)): jacStruct.G8 = jacStruct.G8[:, mp.dm8.act_ele, :]
        if(any(mp.dm_ind == 9)): jacStruct.G9 = jacStruct.G9[:, mp.dm9.act_ele, :]

    return None
def _grid_search_efc(mp, cvar):
    """
    Perform a grid search over specified variables for the controller.

    Evaluates the EFC controller (via _efc) for every combination of
    log10 regularization value (mp.ctrl.log10regVec) and DM scaling factor
    (mp.ctrl.dmfacVec), then returns the delta DM commands of the
    combination with the lowest normalized intensity.

    Parameters
    ----------
    mp : ModelParameters
        Structure containing optical model parameters
    cvar : ModelParameters
        Structure containing controller variables. cvar.cMin and
        cvar.log10regUsed are set as a side effect.

    Returns
    -------
    dDM : ModelParameters
        Structure containing the delta DM commands for each DM
    """
    # Make all combinations of the values
    vals_list = [(x, y) for y in mp.ctrl.dmfacVec for x in mp.ctrl.log10regVec]
    Nvals = len(mp.ctrl.log10regVec) * len(mp.ctrl.dmfacVec)
    InormVec = np.zeros(Nvals)  # Initialize

    # Temporarily store computed DM commands so that the best one does not have
    # to be re-computed
    if(any(mp.dm_ind == 1)):
        dDM1V_store = np.zeros((mp.dm1.Nact, mp.dm1.Nact, Nvals))
    if(any(mp.dm_ind == 2)):
        dDM2V_store = np.zeros((mp.dm2.Nact, mp.dm2.Nact, Nvals))
    if(any(mp.dm_ind == 8)):
        dDM8V_store = np.zeros((mp.dm8.NactTotal, Nvals))
    if(any(mp.dm_ind == 9)):
        dDM9V_store = np.zeros((mp.dm9.NactTotal, Nvals))

    # Empirically find the regularization value giving the best contrast
    # NOTE(review): the parallel evaluation path below is disabled; all
    # combinations are currently evaluated serially.
    # if(mp.flagParallel and mp.ctrl.flagUseModel):
    #     # Run the controller in parallel
    #     pool = multiprocessing.Pool(processes=mp.Nthreads)
    #     results = [pool.apply_async(_efc, args=(ni,vals_list,mp,cvar)) for ni in np.arange(Nvals,dtype=int) ]
    #     results_ctrl = [p.get() for p in results]  # All the Jacobians in a list
    #     pool.close()
    #     pool.join()
    #
    #     # Convert from a list to arrays:
    #     for ni in range(Nvals):
    #         InormVec[ni] = results_ctrl[ni][0]
    #         if(any(mp.dm_ind == 1)): dDM1V_store[:,:,ni] = results_ctrl[ni][1].dDM1V
    #         if(any(mp.dm_ind == 2)): dDM2V_store[:,:,ni] = results_ctrl[ni][1].dDM2V
    # else:
    for ni in range(Nvals):
        [InormVec[ni], dDM_temp] = _efc(ni, vals_list, mp, cvar)
        # delta voltage commands
        if(any(mp.dm_ind == 1)): dDM1V_store[:, :, ni] = dDM_temp.dDM1V
        if(any(mp.dm_ind == 2)): dDM2V_store[:, :, ni] = dDM_temp.dDM2V
        if(any(mp.dm_ind == 8)): dDM8V_store[:, ni] = dDM_temp.dDM8V
        if(any(mp.dm_ind == 9)): dDM9V_store[:, ni] = dDM_temp.dDM9V

    # Print out results to the command line
    print('Scaling factor:\t', end='')
    for ni in range(Nvals):
        print('%.2f\t\t' % (vals_list[ni][1]), end='')

    print('\nlog10reg:  \t', end='')
    for ni in range(Nvals):
        print('%.1f\t\t' % (vals_list[ni][0]), end='')

    print('\nInorm:    \t', end='')
    for ni in range(Nvals):
        print('%.2e\t' % (InormVec[ni]), end='')
    print('\n', end='')

    # Find the best scaling factor and Lagrange multiplier pair based on the
    # best contrast.
    # [cvar.cMin,indBest] = np.min(InormVec)
    indBest = np.argmin(InormVec)
    cvar.cMin = np.min(InormVec)
    dDM = falco.config.Object()

    # delta voltage commands
    if(any(mp.dm_ind == 1)): dDM.dDM1V = np.squeeze(dDM1V_store[:, :, indBest])
    if(any(mp.dm_ind == 2)): dDM.dDM2V = np.squeeze(dDM2V_store[:, :, indBest])
    if(any(mp.dm_ind == 8)): dDM.dDM8V = np.squeeze(dDM8V_store[:, indBest])
    if(any(mp.dm_ind == 9)): dDM.dDM9V = np.squeeze(dDM9V_store[:, indBest])

    cvar.log10regUsed = vals_list[indBest][0]
    dmfacBest = vals_list[indBest][1]
    if(mp.ctrl.flagUseModel):
        print('Model-based grid search expects log10reg, = %.1f,\t dmfac = %.2f,\t %4.2e normalized intensity.'
              % (cvar.log10regUsed, dmfacBest, cvar.cMin))
    else:
        print('Empirical grid search finds log10reg, = %.1f,\t dmfac = %.2f,\t %4.2e normalized intensity.'
              % (cvar.log10regUsed, dmfacBest, cvar.cMin))

    return dDM
def _planned_efc(mp, cvar):
"""
Perform a scheduled/planned set of EFC iterations.
Parameters
----------
mp : ModelParameters
Structure containing optical model parameters
cvar : ModelParameters
Structure containing controller variables
Returns
-------
dDM : ModelParameters
Structure containing the delta DM commands for each DM
"""
# Make all combinations of the values
vals_list = [(x, y) for y in mp.ctrl.dmfacVec for x in mp.ctrl.log10regVec]
Nvals = len(mp.ctrl.log10regVec) * len(mp.ctrl.dmfacVec)
InormVec = np.zeros(Nvals) # Initialize
# Make more obvious names for conditions:
relinearizeNow = any(np.array(mp.gridSearchItrVec) == cvar.Itr)
useBestLog10Reg = np.imag(mp.ctrl.log10regSchedIn[cvar.Itr]) != 0
realLog10RegIsZero = np.real(mp.ctrl.log10regSchedIn[cvar.Itr]) == 0
# | |
# Repository: ddr95070/RMIsaac
'''
Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
NVIDIA CORPORATION and its licensors retain all intellectual property
and proprietary rights in and to this software, related documentation
and any modifications thereto. Any use, reproduction, disclosure or
distribution of this software and related documentation without an express
license agreement from NVIDIA CORPORATION is strictly prohibited.
'''
from collections import deque
import json
import select
import socket
import threading
import time
from typing import Callable, List
import uuid
from isaac import Message as im
# The maximum number of messages to store for a given channel before dropping the oldest message
MESSAGE_QUEUE_SIZE = 25
# How many seconds to wait between calls to the update callback
UPDATE_CALLBACK_PERIOD = 1
def _signed_to_unsigned(value: int, bits: int = 64) -> int:
''' Reinterprets the bits of a signed integer as an unsigned integer '''
if value < 0:
return value + 2**bits
return value
def _unsigned_to_signed(value: int, bits: int = 64) -> int:
''' Reinterprets the bits of an unsigned integer as an signed integer '''
if value >= (1 << (bits - 1)):
return value - 2**bits
return value
def _transform_field_of_type(schema, struct, transform, match_type: str = "uint64"):
    '''
    Go through a dictionary encoded capnp-proto and perform the transformation
    function on all fields that match the given type, recursing into nested
    struct fields.

    Args:
        schema (capnp schema): The capnp schema the dictionary follows
        struct (dict): The dictionary to transform (modified in place)
        transform (function): The transformation function to apply on all matching fields
        match_type (str): The capnp type of fields to be transformed
    '''
    for field in schema.node.to_dict()["struct"]["fields"]:
        field_name = field["name"]
        type_slot = field['slot']['type']
        # The slot's type dict is keyed by the capnp type name; take the
        # first (expected only) entry and its associated type data.
        capnp_type = next(iter(type_slot))
        type_info = type_slot[capnp_type]
        if capnp_type == match_type:
            struct[field_name] = transform(struct[field_name])
        elif capnp_type == "struct":
            # Recurse into the nested struct using the schema its typeId names
            sub_schema = im.CAPNP_TYPE_ID_DICT[type_info["typeId"]].schema
            _transform_field_of_type(sub_schema, struct[field_name],
                                     transform, match_type)
def _proto_to_dict(proto):
    '''
    Converts a capnp proto to a dictionary with all 64 bit integers rendered
    as strings.

    This is due to https://github.com/capnproto/capnproto/issues/617. When capnp
    deserializes json to a proto, it expects all int64/uint64 fields to be
    encoded as strings. Pycapnp, however, serializes them as integers, so this
    conversion step is necessary.
    '''
    as_dict = proto.to_dict()
    # str itself is the transform: stringify every uint64 field in place
    _transform_field_of_type(proto.schema, as_dict, str)
    return as_dict
def _dict_to_proto(proto_dict, proto_id):
    '''
    Converts a dictionary where all 64 bit integers are strings to a capnp proto

    This is due to https://github.com/capnproto/capnproto/issues/617. When capnp
    serializes a proto to json, all int64/uint64 fields are encoded as strings.
    Pycapnp, however expects them to be integers, so this conversion step is
    necessary.
    '''
    proto_class = im.CAPNP_TYPE_ID_DICT[proto_id]
    # int itself is the transform: de-stringify every uint64 field in place
    _transform_field_of_type(proto_class.schema, proto_dict, int)
    return proto_class.from_dict(proto_dict)
class JsonTcpServerConnection:
    '''
    A class representing the connection between a JsonTcpServer and a TCP Client.

    Wire format: newline-delimited JSON objects, each with a "header"
    (channel, pubtime, acqtime, proto id, uuid) and a "payload" (the
    dictionary-encoded capnp proto).
    '''

    def __init__(self, connection):
        '''
        Wraps an accepted client socket in a non-blocking message connection

        Args:
            connection (socket.socket): The accepted client socket
        '''
        self._socket = connection
        self._outgoing_text = ""  # serialized messages not yet sent
        self._incoming_text = ""  # received text not yet parsed (partial line)
        self._messages = {}       # channel name -> deque of MessageBuilder
        self._socket.setblocking(False)
        self._connected = True
        self._message_callback = None

    def fileno(self):
        '''
        Gets fileno of underlying socket, used to allow JsonTcpServerConnection as input to the
        "select" function
        '''
        return self._socket.fileno()

    @property
    def has_data_to_write(self):
        '''Returns whether or not there is data queued to be sent to the client'''
        return len(self._outgoing_text) > 0

    @property
    def address(self):
        '''Gets the address of the connected TCP Client'''
        return self._socket.getpeername()

    def send_message(self, message, channel: str):
        '''
        Sends a message to the TCP client

        Args:
            message (MessageBuilder): A Capnp message to send to the client
            channel (str): The channel to send the message on

        Raises:
            IOError: The client has disconnected
        '''
        if not self.connected:
            raise IOError("Client has disconnected")
        message_uuid = uuid.uuid4()
        serialized_message = {
            "header": {
                "channel": channel,
                # 64-bit timestamps and the proto id are sent as strings
                # (capnp JSON convention, see _proto_to_dict).
                "pubtime": str(message.pubtime),
                "acqtime": str(message.acqtime),
                "proto": str(_unsigned_to_signed(message.proto.schema.node.id)),
                "uuid": {
                    # Split the 128-bit uuid into two 64-bit halves
                    "lower": (message_uuid.int >> 0) & ((1 << 64) - 1),
                    "upper": (message_uuid.int >> 64) & ((1 << 64) - 1),
                }
            },
            "payload": _proto_to_dict(message.proto)
        }
        self._outgoing_text += json.dumps(serialized_message) + "\n"

    def set_message_callback(self, callback: Callable[[im.MessageBuilder, str], None]):
        '''Sets a callback function to be called whenever a new message is received'''
        self._message_callback = callback

    def get_channels(self) -> List:
        '''Returns a list of all channels that have received messages'''
        # BUGFIX: previously returned a dict keys view despite the List
        # annotation; return a real list so callers can index it safely.
        return list(self._messages.keys())

    def get_next_message(self, channel: str):
        '''
        Gets the next unread message on the given channel and pops it from the channel's queue.

        Args:
            channel (str): The channel to fetch the next message from

        Returns:
            A MessageBuilder containing the oldest unread message or 'None' if all messages have
            been read

        Raises:
            IOError: The client has disconnected
        '''
        if not self.connected:
            raise IOError("Client has disconnected")
        # Attempt to get the next message from the deque, otherwise return None
        try:
            return self._messages.get(channel, deque()).popleft()
        except IndexError:
            return None

    def _process_input_buffer(self):
        '''
        Parses the incoming text buffer, and if any complete messages have been received, converts
        them to capnp messages
        '''
        # Complete messages end in "\n"; the last split element is the
        # (possibly empty) partial line kept for the next read.
        split_text = self._incoming_text.split("\n")
        new_messages = split_text[:-1]
        self._incoming_text = split_text[-1]

        # For each message, build a capnp message
        for message_text in new_messages:
            # Get the header/payload from the json text
            json_message = json.loads(message_text)
            header = json_message['header']
            payload = json_message['payload']

            # Convert fields that may be stored as string to int
            header["proto"] = int(header["proto"])
            header["acqtime"] = int(header["acqtime"])
            header["pubtime"] = int(header["pubtime"])

            # The proto id is transmitted as int64_t, if its negative it needs to be
            # converted to a uint64_t
            header["proto"] = _signed_to_unsigned(header["proto"])

            # Create a capnp message from the payload
            payload_message = _dict_to_proto(payload, header["proto"])

            # Create an ISAAC style message
            builder = im.MessageBuilder()
            builder.proto = payload_message
            builder.acqtime = header["acqtime"]
            builder.pubtime = header["pubtime"]

            # Put the message into the appropriate message queue based on channel
            channel = header["channel"]
            if channel not in self._messages:
                # Bounded deque: oldest messages are dropped past the limit
                self._messages[channel] = deque([], MESSAGE_QUEUE_SIZE)
            self._messages[channel].append(builder)

            if self._message_callback is not None:
                self._message_callback(builder, channel)

    def _send(self):
        '''
        Sends bytes until there are no more to send or the socket cannot send anymore without
        blocking
        '''
        try:
            while len(self._outgoing_text) > 0:
                bytes_sent = self._socket.send(self._outgoing_text.encode())
                if bytes_sent == 0:
                    raise IOError("Socket connection down")
                self._outgoing_text = self._outgoing_text[bytes_sent:]
        except BlockingIOError:
            pass

    def _recv(self):
        '''
        Receives bytes until the socket cannot receive anymore without blocking
        '''
        try:
            while True:
                data = self._socket.recv(4096)
                if len(data) == 0:
                    raise IOError("Socket connection down")
                self._incoming_text += data.decode()
        except BlockingIOError:
            pass
        self._process_input_buffer()

    def update(self):
        '''
        Attempts to read/write any pending bytes on the nonblocking socket
        '''
        try:
            self._send()
            self._recv()
        except IOError:
            # Treat any socket failure as a disconnect
            self.close()

    @property
    def connected(self) -> bool:
        '''Whether the connection is still considered alive'''
        return self._connected

    def close(self):
        '''Closes the underlying socket and marks the connection as down'''
        self._socket.close()
        self._connected = False
class JsonTcpServer:
'''
This class listens on the specified port for connections from TCP Clients and creates a
JsonTcpServerConnection class to manage new incoming connections. It spawns a background thread
to asynchronously check for new connections and update the connections themselves
'''
def __init__(self,
             port: int = 9998,
             new_connection_handler: Callable[[JsonTcpServerConnection], None] = None,
             update_callback: Callable[[], None] = None) -> None:
    '''
    Creates the JsonTcpServer

    Args:
        port (int): The TCP port to listen for incoming connections on
        new_connection_handler (function): A callback function to be called when there is a new
            connection
        update_callback (function): A callback function to be called after the JsonTcpServer
            updates all connection objects
    '''
    # Create a TCP server for robots to connect to and receive missions
    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self._socket.bind(("", port))
    self._socket.setblocking(False)
    self._socket.listen()
    self._new_connection_handler = new_connection_handler
    self._update_callback = update_callback
    self._connections = []
    # Start a daemon thread to listen for connections and pull data from
    # those connections. The bound method is passed directly as the thread
    # target (the previous closure wrapper added nothing).
    self._running = True
    self._thread = threading.Thread(target=self._thread_function)
    self._thread.daemon = True
    self._thread.start()
@property
def connections(self) -> List[JsonTcpServerConnection]:
    '''A snapshot list of the currently tracked client connections'''
    # Hand back a copy so callers cannot mutate the server's internal list
    return list(self._connections)
def cleanup(self):
    '''Stops the background server thread and waits for it to exit'''
    self._running = False
    self._thread.join()
def _thread_function(self):
# The time when the next update should occur
next_update_callback_time = time.monotonic()
while self._running:
# First, get any pending connections
try:
while True:
connection, address = self._socket.accept()
new_connection = JsonTcpServerConnection(connection)
self._connections.append(new_connection)
if self._new_connection_handler is not None:
self._new_connection_handler(new_connection)
except BlockingIOError:
pass
# Wait until either:
# - One of the connections is ready to read/written to
# - It is time to call the update callback
timeout = max(0, next_update_callback_time - time.monotonic())
readable, writable, exception = select.select(self._connections,
filter(lambda x: x.has_data_to_write,
self._connections),
self._connections, timeout)
# Update all connections
for connection in readable + writable:
connection.update()
# Remove all connections that are closed
self._connections = list(filter(lambda c: c.connected, self._connections))
# Call the update callback function if enough time has elapsed
current_time = time.monotonic()
if current_time >= | |
#!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
import gc
import os
import sys
import shutil
import pycompss.api.parameter as parameter
from pycompss.api.exceptions import COMPSsException
from pycompss.runtime.task.commons import TaskCommons
from pycompss.runtime.commons import TRACING_HOOK_ENV_VAR
from pycompss.runtime.task.parameter import Parameter
from pycompss.runtime.task.parameter import get_compss_type
from pycompss.runtime.task.arguments import get_name_from_vararg
from pycompss.runtime.task.arguments import get_name_from_kwarg
from pycompss.runtime.task.arguments import is_vararg
from pycompss.runtime.task.arguments import is_kwarg
from pycompss.runtime.task.arguments import is_return
from pycompss.util.exceptions import PyCOMPSsException
from pycompss.util.objects.properties import create_object_by_con_type
from pycompss.util.logger.helpers import swap_logger_name
from pycompss.util.storages.persistent import is_psco
from pycompss.util.serialization.serializer import deserialize_from_file
from pycompss.util.serialization.serializer import serialize_to_file
from pycompss.util.serialization.serializer import serialize_to_file_mpienv
from pycompss.util.std.redirects import std_redirector
from pycompss.util.std.redirects import not_std_redirector
from pycompss.util.objects.util import group_iterable
from pycompss.util.tracing.helpers import emit_event
from pycompss.worker.commons.constants import EXECUTE_USER_CODE_EVENT
from pycompss.worker.commons.worker import build_task_parameter
# The cache is only available currently for piper_worker.py and python >= 3.8
# If supported in the future by another worker, add a common interface
# with these two functions and import the appropriate.
from pycompss.worker.piper.cache.tracker import retrieve_object_from_cache
from pycompss.worker.piper.cache.tracker import insert_object_into_cache_wrapper
from pycompss.worker.piper.cache.tracker import replace_object_into_cache
from pycompss.worker.piper.cache.tracker import in_cache
try:
import numpy as np
except ImportError:
np = None
if __debug__:
import logging
logger = logging.getLogger(__name__)
class TaskWorker(TaskCommons):
"""
Task code for the Worker:
Process the task decorator and prepare call the user function.
"""
__slots__ = ['cache_ids', 'cache_queue', 'cached_references']
def __init__(self,
             decorator_arguments,
             user_function,
             on_failure,
             defaults):
    """ Worker-side task handler state.

    :param decorator_arguments: Arguments given to the @task decorator.
    :param user_function: The decorated user function.
    :param on_failure: Action to take on task failure.
    :param defaults: Default values associated with on_failure.
    """
    # Initialize TaskCommons.
    # BUGFIX: was super(self.__class__, self) -- that form recurses
    # infinitely if this class is ever subclassed; name the class
    # explicitly (Python 2/3 compatible, matching the # type: comments).
    super(TaskWorker, self).__init__(decorator_arguments,
                                     user_function,
                                     on_failure,
                                     defaults)
    # These variables are initialized on call since they are only for
    # the worker
    self.cache_ids = None
    self.cache_queue = None
    # Placeholder to keep the object references alive and avoid the
    # garbage collector reclaiming cached objects mid-task.
    self.cached_references = []
def call(self, *args, **kwargs):
    # type: (tuple, dict) -> (list, list, list, list)
    """ Main task code at worker side.

    This function deals with task calls in the worker's side
    Note that the call to the user function is made by the worker,
    not by the user code.

    :param args: Task parameters (Parameter objects, possibly preceded by
                 the bound self object for instance methods).
    :param kwargs: Worker-provided context (compss_logger, compss_log_files,
                   compss_python_MPI, compss_collections_layouts,
                   compss_tracing, on_failure, defaults, cache_*).
    :return: Tuple of (new types, new values, target direction label value,
             task arguments) used to notify the runtime of updates.
    """
    global logger
    # Grab logger from kwargs (shadows outer logger since it is set by
    # the worker).
    logger = kwargs['compss_logger']  # noqa
    with swap_logger_name(logger, __name__):
        if __debug__:
            logger.debug("Starting @task decorator worker call")

        # Redirect stdout/stderr if necessary to show the prints/exceptions
        # in the job out/err files
        redirect_std = True
        if kwargs['compss_log_files']:
            # Redirect all stdout and stderr during the user code execution
            # to job out and err files.
            job_out, job_err = kwargs['compss_log_files']
        else:
            job_out, job_err = None, None
            redirect_std = False
        if __debug__:
            logger.debug("Redirecting stdout to: " + str(job_out))
            logger.debug("Redirecting stderr to: " + str(job_err))

        with std_redirector(job_out, job_err) if redirect_std else not_std_redirector():  # noqa: E501
            # Update the on_failure attribute (could be defined by @on_failure)
            if "on_failure" in self.decorator_arguments:
                self.on_failure = self.decorator_arguments["on_failure"]
                # if task defines on_failure property the decorator is ignored
                kwargs.pop("on_failure", None)
            else:
                self.on_failure = kwargs.pop("on_failure", "RETRY")
            self.defaults = kwargs.pop("defaults", {})

            # Pop cache if available
            self.cache_ids = kwargs.pop("cache_ids", None)
            self.cache_queue = kwargs.pop("cache_queue", None)

            if __debug__:
                logger.debug("Revealing objects")
            # All parameters are in the same args list. At the moment we only know
            # the type, the name and the "value" of the parameter. This value may
            # be treated to get the actual object (e.g: deserialize it, query the
            # database in case of persistent objects, etc.)
            self.reveal_objects(args,
                                kwargs["compss_python_MPI"],
                                kwargs["compss_collections_layouts"])
            if __debug__:
                logger.debug("Finished revealing objects")
                logger.debug("Building task parameters structures")

            # After this line all the objects in arg have a "content" field, now
            # we will segregate them in User positional and variadic args
            user_args, user_kwargs, ret_params = self.segregate_objects(args)
            num_returns = len(ret_params)
            if __debug__:
                logger.debug("Finished building parameters structures.")

            # Self definition (only used when defined in the task)
            # Save the self object type and value before executing the task
            # (it could be persisted inside if its a persistent object)
            self_type = None
            self_value = None
            has_self = False
            if args and not isinstance(args[0], Parameter):
                if __debug__:
                    logger.debug("Detected self parameter")
                # Then the first arg is self
                has_self = True
                self_type = get_compss_type(args[0])
                if self_type == parameter.TYPE.EXTERNAL_PSCO:
                    if __debug__:
                        logger.debug("\t - Self is a PSCO")
                    self_value = args[0].getID()
                else:
                    # Since we are checking the type of the deserialized self
                    # parameter, get_compss_type will return that its type is
                    # parameter.TYPE.OBJECT, which although it is an object, self
                    # is always a file for the runtime. So we must force its type
                    # to avoid that the return message notifies that it has a new
                    # type "object" which is not supported for python objects in
                    # the runtime.
                    self_type = parameter.TYPE.FILE
                    self_value = 'null'

            # Call the user function with all the reconstructed parameters and
            # get the return values.
            if __debug__:
                logger.debug("Invoking user code")
            # Now execute the user code
            result = self.execute_user_code(user_args,
                                            user_kwargs,
                                            kwargs['compss_tracing'])
            user_returns, compss_exception, default_values = result
            if __debug__:
                logger.debug("Finished user code")

            python_mpi = False
            if kwargs["compss_python_MPI"]:
                python_mpi = True

            # Deal with defaults if any
            if default_values:
                self.manage_defaults(args, default_values)

            # Deal with INOUTs and COL_OUTs
            self.manage_inouts(args, python_mpi)

            # Deal with COMPSsExceptions
            if compss_exception is not None:
                if __debug__:
                    logger.warning("Detected COMPSs Exception. Raising.")
                raise compss_exception

            # Deal with returns (if any)
            user_returns = self.manage_returns(num_returns, user_returns,
                                               ret_params, python_mpi)

            # Check old targetDirection
            if 'targetDirection' in self.decorator_arguments:
                target_label = 'targetDirection'
                logger.info("Detected deprecated targetDirection. Please, change it to target_direction")  # noqa: E501
            else:
                target_label = 'target_direction'

            # We must notify COMPSs when types are updated
            new_types, new_values = self.manage_new_types_values(num_returns,
                                                                 user_returns,
                                                                 args,
                                                                 has_self,
                                                                 target_label,
                                                                 self_type,
                                                                 self_value)

            # Clean cached references
            if self.cached_references:
                # Let the garbage collector act
                self.cached_references = None

            # Release memory after task execution
            self.__release_memory__()

            if __debug__ and "COMPSS_WORKER_PROFILE_PATH" in os.environ:
                self.__report_heap__()

            if __debug__:
                logger.debug("Finished @task decorator")

            return new_types, new_values, self.decorator_arguments[target_label], args  # noqa: E501
@staticmethod
def __release_memory__():  # noqa
    # type: (...) -> None
    """ Release memory after task execution explicitly.

    Runs the garbage collector and then asks glibc to hand trimmed
    heap pages back to the operating system.

    :return: None
    """
    # Collect unreachable objects first. CPython may keep the freed
    # arenas mapped, so this alone does not necessarily lower RSS.
    gc.collect()
    # Ask glibc to return the now-empty heap memory to the OS. This
    # only works where "libc.so.6" is loadable (glibc-based Linux);
    # anywhere else we just log and carry on.
    try:
        import ctypes
        ctypes.CDLL("libc.so.6").malloc_trim(0)
    except Exception:  # noqa
        if __debug__:
            logger.warning("Could NOT deallocate memory.")
@staticmethod
def __report_heap__():  # noqa
    # type: (...) -> None
    """ Prints the heap status.

    Uses the optional ``guppy`` package to dump a heap report to the
    debug log; logs a warning when guppy is not installed.

    :return: None
    """
    if __debug__:
        logger.debug("Memory heap report:")
    try:
        import guppy  # noqa
    except ImportError:
        # Optional dependency missing: warn and bail out.
        logger.warning("Could NOT import Guppy.")
        return
    if __debug__:
        logger.debug(guppy.hpy().heap())
def reveal_objects(self, args,  # noqa
                   python_mpi=False,  # noqa
                   collections_layouts=None):  # noqa
    # type: (tuple, bool, list) -> None
    """ Get the objects from the args message.

    This function takes the arguments passed from the persistent worker
    and treats them to get the proper parameters for the user function.
    When the storage backend supports pipelining, all persistent objects
    are fetched in a single batched ``getByID`` call first; afterwards,
    every non-return :class:`Parameter` has its content retrieved.

    :param args: Arguments.
    :param python_mpi: If the task is python MPI.
    :param collections_layouts: Layouts of collections params for python
                                MPI tasks.
    :return: None
    """
    if self.storage_supports_pipelining():
        if __debug__:
            logger.debug("The storage supports pipelining.")
        # Perform the pipelined getByID operation: gather all external
        # PSCO parameters and resolve their identifiers in one query.
        pscos = [x for x in args if
                 x.content_type == parameter.TYPE.EXTERNAL_PSCO]
        identifiers = [x.content for x in pscos]
        from storage.api import getByID  # noqa
        objects = getByID(*identifiers)
        # Just update the Parameter object with its content.
        # NOTE(review): "obj" iterates the retrieved objects and "value"
        # the Parameter instances, so this assigns the Parameter into the
        # retrieved object's .content — the comment above suggests the
        # reverse (value.content = obj) was intended. Confirm.
        for (obj, value) in zip(objects, pscos):
            obj.content = value
    # Deal with all the parameters that are NOT returns
    for arg in [x for x in args if
                isinstance(x, Parameter) and not is_return(x.name)]:
        self.retrieve_content(arg, "", python_mpi, collections_layouts)
@staticmethod
def storage_supports_pipelining():
# type: () -> bool
""" Check if storage supports pipelining.
Some storage implementations use pipelining
Pipelining means "accumulate the getByID queries and perform them
in a single megaquery".
If this feature is not available (storage does not support it)
| |
<gh_stars>1-10
import os
import json
import logging
import itertools
import hglib
import re
import requests
from datetime import datetime
from django.db import transaction
from django.db import models
from django.utils import timezone
from django.utils.six import with_metaclass
from django.utils import timezone
from django.dispatch import receiver
from django.db.models.signals import pre_save, pre_delete,post_save,post_delete
from django.core.exceptions import ValidationError,ObjectDoesNotExist
from django.core.validators import RegexValidator
from django.conf import settings
from tablemanager.models import Workspace,Publish
from wmsmanager.models import WmsLayer
from borg_utils.borg_config import BorgConfiguration
from borg_utils.resource_status import ResourceStatus,ResourceStatusMixin,ResourceAction
from borg_utils.transaction import TransactionMixin
from borg_utils.signals import refresh_select_choices
from borg_utils.hg_batch_push import try_set_push_owner, try_clear_push_owner, increase_committed_changes, try_push_to_repository
from borg_utils.utils import file_md5
logger = logging.getLogger(__name__)

# Slugs are restricted to lowercase letters, digits and underscores.
slug_re = re.compile(r'^[a-z0-9_]+$')
validate_slug = RegexValidator(slug_re, "Slug can only contain lowercase letters, numbers and underscores", "invalid")

# Spatial reference systems a layer group may be published in.
SRS_CHOICES = (
    ("EPSG:4326","EPSG:4326"),
    ("EPSG:3857","EPSG:3857"),
)
class LayerGroupEmpty(Exception):
    """ Raised when publishing a layer group that has no published members. """
    pass
class LayerGroup(models.Model,ResourceStatusMixin,TransactionMixin):
    """ A named group of layers published as one WMS layer group.

    Publish/unpublish works by writing JSON task files into the Borg
    state mercurial repository (see :meth:`publish`, :meth:`unpublish`,
    :meth:`empty_gwc`) and registering metadata with the CSW catalogue
    service.
    """
    # Unique slug identifying the group (lowercase/digits/underscore only).
    name = models.SlugField(max_length=128,null=False,unique=True, help_text="The name of layer group", validators=[validate_slug])
    title = models.CharField(max_length=320,null=True,blank=True)
    workspace = models.ForeignKey(Workspace, null=False)
    srs = models.CharField(max_length=320,null=False,choices=SRS_CHOICES)
    abstract = models.TextField(null=True,blank=True)
    # Raw JSON string with extra GeoServer options (e.g. create_cache_layer).
    geoserver_setting = models.TextField(blank=True,null=True,editable=False)
    status = models.CharField(max_length=32, null=False, editable=False,choices=ResourceStatus.layer_status_options)
    last_publish_time = models.DateTimeField(null=True,editable=False)
    last_unpublish_time = models.DateTimeField(null=True,editable=False)
    last_modify_time = models.DateTimeField(null=False,editable=False,default=timezone.now)

    def clean(self):
        """ Normalize text fields, reject no-op edits and derive status.

        :raises ValidationError: if name is missing or nothing changed.
        """
        self.name = self.name.strip() if self.name and self.name.strip() else None
        self.title = self.title.strip() if self.title and self.title.strip() else None
        self.abstract = self.abstract.strip() if self.abstract and self.abstract.strip() else None
        if not self.name:
            raise ValidationError("name is required.")
        try:
            o = LayerGroup.objects.get(pk=self.pk)
        except ObjectDoesNotExist:
            o = None
        if (o
            and o.name == self.name
            and o.title == self.title
            and o.srs == self.srs
            and o.workspace == self.workspace
            and o.abstract == self.abstract
            and o.geoserver_setting == self.geoserver_setting
        ):
            # not changed
            raise ValidationError("Not changed.")
        if o:
            # Editing an existing row: advance the resource status machine.
            self.status = self.next_status(ResourceAction.UPDATE)
        else:
            self.status = ResourceStatus.New
        self.last_modify_time = timezone.now()

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        """ Save, wrapped in the "layergroup_save" pseudo transaction.

        Only the outermost caller (the one that begins the pseudo
        transaction) opens a real atomic block; nested saves reuse it.
        """
        try:
            if self.try_begin_transaction("layergroup_save"):
                with transaction.atomic():
                    super(LayerGroup,self).save(force_insert,force_update,using,update_fields)
            else:
                super(LayerGroup,self).save(force_insert,force_update,using,update_fields)
        finally:
            self.try_clear_transaction("layergroup_save")

    def delete(self,using=None):
        """ Delete, wrapped in the "layergroup_delete" pseudo transaction. """
        logger.info('Delete {0}:{1}'.format(type(self),self.name))
        try:
            if self.try_begin_transaction("layergroup_delete"):
                with transaction.atomic():
                    super(LayerGroup,self).delete(using)
            else:
                super(LayerGroup,self).delete(using)
        finally:
            self.try_clear_transaction("layergroup_delete")

    def check_circular_dependency(self,editing_group_layer=None,parents=None):
        """
        check whether it has some cycle dependencies.

        Walks sub groups depth-first, carrying the chain of ancestor
        groups in ``parents``; if a sub group reappears in its own
        ancestry a ValidationError describing the cycle is raised.
        ``editing_group_layer`` (if given) replaces its stored version
        so unsaved edits are validated too.
        """
        if parents:
            parents.append(self)
        else:
            parents = [self]
        queryset = LayerGroupLayers.objects.filter(group = self)
        if editing_group_layer :
            if editing_group_layer.pk:
                queryset = queryset.exclude(pk = editing_group_layer.pk)
            queryset = itertools.chain(queryset,[editing_group_layer])
        for group_layer in queryset:
            if group_layer.sub_group:
                if group_layer.sub_group in parents:
                    # cycle dependency found
                    raise ValidationError("Found a circular dependency:{0}".format("=>".join([g.name for g in parents + [group_layer.sub_group]])))
                else:
                    group_layer.sub_group.check_circular_dependency(None,parents)

    def get_inclusions(self,editing_group_layer=None, check_multi_inclusion = True, included_publishs=None, included_layers=None, included_groups=None):
        """
        Get all included layers and sub groups.
        If in editing mode, editing_group_layer should be the editing group layer.
        If in non editing mode, editing_group_layer should be None.
        When check_multi_inclusion is True, a ValidationError is raised if
        the same publish/layer/sub group is reachable more than once.
        Return a three elements tuple:
            first element is a dictionary between included publishs and its immediately including group;
            second element is a dictionary between included layers and its immediately including group;
            third element is a dictionary between included groups and its immediately including group;
        """
        if not included_publishs:
            included_publishs = {}
        if not included_layers:
            included_layers = {}
        if not included_groups:
            included_groups = {}
        queryset = LayerGroupLayers.objects.filter(group = self)
        if editing_group_layer :
            if editing_group_layer.pk:
                queryset = queryset.exclude(pk = editing_group_layer.pk)
            queryset = itertools.chain(queryset,[editing_group_layer])
        for group_layer in queryset:
            if group_layer.publish:
                if check_multi_inclusion and group_layer.publish in included_publishs:
                    if included_publishs[group_layer.publish].group == group_layer.group:
                        raise ValidationError("Found multiple inclusion:Publish {0} is already included by {1}".format(group_layer.publish.name,included_publishs[group_layer.publish].group.name))
                    else:
                        raise ValidationError("Found multiple inclusion:Publish {0} is included by {1} and {2}".format(group_layer.publish.name,included_publishs[group_layer.publish].group.name,group_layer.group.name))
                else:
                    included_publishs[group_layer.publish] = group_layer
            elif group_layer.layer:
                if check_multi_inclusion and group_layer.layer in included_layers:
                    if included_layers[group_layer.layer].group == group_layer.group:
                        raise ValidationError("Found multiple inclusion:Layer {0} is already included by {1}".format(group_layer.layer.name,included_layers[group_layer.layer].group.name))
                    else:
                        raise ValidationError("Found multiple inclusion:Layer {0} is included by {1} and {2}".format(group_layer.layer.name,included_layers[group_layer.layer].group.name,group_layer.group.name))
                else:
                    included_layers[group_layer.layer] = group_layer
            elif group_layer.sub_group:
                if check_multi_inclusion and group_layer.sub_group in included_groups:
                    if included_groups[group_layer.sub_group].group == group_layer.group:
                        raise ValidationError("Found multiple inclusion:sub group {0} is already included by {1}".format(group_layer.sub_group.name,included_groups[group_layer.sub_group].group.name))
                    else:
                        raise ValidationError("Found multiple inclusion:sub group {0} is included by {1} and {2}".format(group_layer.sub_group.name,included_groups[group_layer.sub_group].group.name,group_layer.group.name))
                else:
                    included_groups[group_layer.sub_group] = group_layer
                    # Recurse into the sub group; the shared dicts
                    # accumulate results across the whole tree.
                    sub_inclusion = group_layer.sub_group.get_inclusions(None,check_multi_inclusion,included_publishs,included_layers,included_groups)
                    included_publishs.update(sub_inclusion[0])
                    included_layers.update(sub_inclusion[1])
                    included_groups.update(sub_inclusion[2])
        return (included_publishs,included_layers,included_groups)

    def json_filename(self,action='publish'):
        """ Relative path of the JSON task file for the given action.

        publish/unpublish share one file name; other actions (e.g.
        'empty_gwc') get the action embedded in the file name.
        """
        if action in ['publish','unpublish']:
            return os.path.join(self.workspace.publish_channel.name,"layergroups", "{}.{}.json".format(self.workspace.name, self.name))
        else:
            return os.path.join(self.workspace.publish_channel.name,"layergroups", "{}.{}.{}.json".format(self.workspace.name, self.name,action))

    def json_filename_abs(self,action='publish'):
        """ Absolute path of the JSON task file inside the state repository. """
        return os.path.join(BorgConfiguration.BORG_STATE_REPOSITORY, self.json_filename(action))

    @property
    def builtin_metadata(self):
        """ Metadata dict derived purely from this model, for the catalogue. """
        meta_data = {}
        meta_data["workspace"] = self.workspace.name
        meta_data["name"] = self.name
        meta_data["service_type"] = "WMS"
        meta_data["service_type_version"] = self.workspace.publish_channel.wms_version
        meta_data["title"] = self.title
        meta_data["abstract"] = self.abstract
        meta_data["modified"] = self.last_modify_time.astimezone(timezone.get_default_timezone()).strftime("%Y-%m-%d %H:%M:%S.%f") if self.last_modify_time else None
        meta_data["crs"] = self.srs or None

        # ows resource
        meta_data["ows_resource"] = {}
        if self.workspace.publish_channel.wms_endpoint:
            meta_data["ows_resource"]["wms"] = True
            meta_data["ows_resource"]["wms_version"] = self.workspace.publish_channel.wms_version
            meta_data["ows_resource"]["wms_endpoint"] = self.workspace.publish_channel.wms_endpoint
        geo_settings = json.loads(self.geoserver_setting) if self.geoserver_setting else {}
        if geo_settings.get("create_cache_layer",False) and self.workspace.publish_channel.gwc_endpoint:
            meta_data["ows_resource"]["gwc"] = True
            meta_data["ows_resource"]["gwc_endpoint"] = self.workspace.publish_channel.gwc_endpoint
        return meta_data

    def update_catalogue_service(self, extra_datas=None):
        """ POST this group's metadata to the CSW catalogue service.

        Returns the catalogue's response enriched with borg-specific
        keys (auth level, preview path, channel, geoserver settings).

        :raises requests.HTTPError: if the catalogue rejects the record.
        """
        meta_data = self.builtin_metadata
        if extra_datas:
            meta_data.update(extra_datas)
        res = requests.post("{}/catalogue/api/records/".format(settings.CSW_URL),json=meta_data,auth=(settings.CSW_USER,settings.CSW_PASSWORD),verify=settings.CSW_CERT_VERIFY)
        if 400 <= res.status_code < 600 and res.content:
            # Surface the response body in the raised HTTPError message.
            res.reason = "{}({})".format(res.reason,res.content)
        res.raise_for_status()
        try:
            meta_data = res.json()
        # NOTE(review): bare except silently maps any parse failure to an
        # HTTP error below; narrowing to ValueError would be safer.
        except:
            # NOTE(review): res.content is bytes on Python 3, so
            # .find("microsoft") assumes Python 2 — confirm target runtime.
            if res.content.find("microsoft") >= 0:
                res.status_code = 401
                res.reason = "Please login"
            else:
                res.status_code = 400
                res.reason = "Unknown reason"
            res.raise_for_status()

        # add extra data to meta data
        meta_data["workspace"] = self.workspace.name
        meta_data["name"] = self.name
        meta_data["native_name"] = self.name
        meta_data["auth_level"] = self.workspace.auth_level
        meta_data["preview_path"] = "{}{}".format(BorgConfiguration.MASTER_PATH_PREFIX, BorgConfiguration.PREVIEW_DIR)
        meta_data["spatial_data"] = True
        meta_data["channel"] = self.workspace.publish_channel.name
        meta_data["sync_geoserver_data"] = self.workspace.publish_channel.sync_geoserver_data

        if self.geoserver_setting:
            meta_data["geoserver_setting"] = json.loads(self.geoserver_setting)
        return meta_data

    def unpublish(self):
        """
        unpublish layer group

        Removes the record from the catalogue service, then writes a
        "remove" JSON task into the state repository and pushes it.
        """
        # remove it from catalogue service; 404 means it was never there.
        res = requests.delete("{}/catalogue/api/records/{}:{}/".format(settings.CSW_URL,self.workspace.name,self.name),auth=(settings.CSW_USER,settings.CSW_PASSWORD),verify=settings.CSW_CERT_VERIFY)
        if res.status_code != 404:
            res.raise_for_status()

        publish_file = self.json_filename_abs('publish')
        publish_json = None
        if os.path.exists(publish_file):
            with open(publish_file,"r") as f:
                publish_json = json.loads(f.read())
        else:
            publish_json = {}

        json_file = self.json_filename_abs('unpublish');
        json_out = None
        try_set_push_owner("layergroup")
        hg = None
        try:
            if publish_json.get("action","publish") != "remove":
                json_out = {}
                json_out["name"] = self.name
                json_out["workspace"] = self.workspace.name
                json_out["spatial_data"] = True
                json_out["channel"] = self.workspace.publish_channel.name
                json_out["sync_geoserver_data"] = self.workspace.publish_channel.sync_geoserver_data
                json_out['action'] = "remove"
                # retrieve meta data from the last publish task
                meta_json = publish_json
                if "meta" in publish_json and "file" in publish_json["meta"]:
                    meta_file = publish_json["meta"]["file"][len(BorgConfiguration.MASTER_PATH_PREFIX):]
                    if os.path.exists(meta_file):
                        with open(meta_file,"r") as f:
                            meta_json = json.loads(f.read())
                    else:
                        meta_json = {}
                # carry over the identifying keys from the last publish
                for key in ["name","workspace","channel","spatial_data","sync_geoserver_data"]:
                    if key in meta_json:
                        json_out[key] = meta_json[key]
            else:
                # already marked as removed: just refresh the timestamp
                json_out = publish_json

            json_out["remove_time"] = timezone.localtime(timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

            # create the dir if required
            if not os.path.exists(os.path.dirname(json_file)):
                os.makedirs(os.path.dirname(json_file))

            # NOTE(review): "wb" + json.dump assumes Python 2 (json.dump
            # writes str on Python 3) — confirm target runtime.
            with open(json_file, "wb") as output:
                json.dump(json_out, output, indent=4)

            hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)

            # remove other related json files
            json_files = [ self.json_filename_abs(action) for action in [ 'empty_gwc' ] ]
            # get all existing files.
            json_files = [ f for f in json_files if os.path.exists(f) ]
            if json_files:
                hg.remove(files=json_files)

            json_files.append(json_file)
            hg.commit(include=json_files, user="borgcollector",addremove=True, message="Unpublish layer group {}.{}".format(self.workspace.name, self.name))
            increase_committed_changes()

            try_push_to_repository("layergroup",hg)
        finally:
            if hg: hg.close()
            try_clear_push_owner("layergroup")

    def publish(self):
        """
        Only publish the member layers which is already published.

        Writes the group definition (member layers, SRS, dependent
        groups) as a JSON task into the state repository and pushes it.

        :raises LayerGroupEmpty: if no member layer is published.
        """
        json_filename = self.json_filename_abs('publish');
        try_set_push_owner("layergroup")
        hg = None
        try:
            json_out = self.update_catalogue_service(extra_datas={"publication_date": datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")})
            layers = []
            # collect members in configured order, skipping unpublished ones
            for group_layer in LayerGroupLayers.objects.filter(group=self).order_by("order"):
                if group_layer.layer and group_layer.layer.is_published:
                    layers.append({"type":"wms_layer","name":group_layer.layer.kmi_name,"store":group_layer.layer.server.name,"workspace":group_layer.layer.server.workspace.name})
                elif group_layer.publish and group_layer.publish.is_published:
                    layers.append({"type":"publish","name":group_layer.publish.name,"workspace":group_layer.publish.workspace.name})
                elif group_layer.sub_group and group_layer.sub_group.is_published:
                    layers.append({"type":"group","name":group_layer.sub_group.name,"workspace":group_layer.sub_group.workspace.name})
            if not layers:
                # layergroup is empty,remove it.
                raise LayerGroupEmpty("Layer group can't be empty.")
            json_out["layers"] = layers
            json_out["srs"] = self.srs or None
            json_out["publish_time"] = timezone.localtime(timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

            inclusions = self.get_inclusions()
            dependent_groups = []
            for group in inclusions[2].keys():
                if group.is_published:
                    dependent_groups.append({"name":group.name,"workspace":group.workspace.name})

            json_out["dependent_groups"] = dependent_groups

            # create the dir if required
            if not os.path.exists(os.path.dirname(json_filename)):
                os.makedirs(os.path.dirname(json_filename))

            # NOTE(review): "wb" + json.dump assumes Python 2 — confirm.
            with open(json_filename, "wb") as output:
                json.dump(json_out, output, indent=4)

            hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)

            # remove other related json files
            json_files = [ self.json_filename_abs(action) for action in [ 'empty_gwc' ] ]
            # get all existing files.
            json_files = [ f for f in json_files if os.path.exists(f) ]
            if json_files:
                hg.remove(files=json_files)

            json_files.append(json_filename)
            hg.commit(include=json_files, user="borgcollector",addremove=True, message="Update layer group {}.{}".format(self.workspace.name, self.name))
            increase_committed_changes()

            try_push_to_repository("layergroup",hg)
        finally:
            if hg: hg.close()
            try_clear_push_owner("layergroup")

    def empty_gwc(self):
        """
        update layer group's json for empty gwc to the repository

        :raises ValidationError: if the group was never published.
        """
        if self.publish_status.unpublished:
            # layer is not published, no need to empty gwc
            raise ValidationError("The layergroup({0}) is not published before.".format(self.name))

        json_filename = self.json_filename_abs('empty_gwc');
        try_set_push_owner("layergroup")
        hg = None
        try:
            json_out = {}
            json_out["name"] = self.name
            json_out["workspace"] = self.workspace.name
            json_out["action"] = "empty_gwc"
            json_out["publish_time"] = timezone.localtime(timezone.now()).strftime("%Y-%m-%d %H:%M:%S.%f")

            if self.geoserver_setting:
                json_out["geoserver_setting"] = json.loads(self.geoserver_setting)

            # create the dir if required
            if not os.path.exists(os.path.dirname(json_filename)):
                os.makedirs(os.path.dirname(json_filename))

            # NOTE(review): "wb" + json.dump assumes Python 2 — confirm.
            with open(json_filename, "wb") as output:
                json.dump(json_out, output, indent=4)

            hg = hglib.open(BorgConfiguration.BORG_STATE_REPOSITORY)
            hg.commit(include=[json_filename],addremove=True, user="borgcollector", message="Empty GWC of layer group {}.{}".format(self.workspace.name, self.name))
            increase_committed_changes()

            try_push_to_repository("layergroup",hg)
        finally:
            if hg: hg.close()
            try_clear_push_owner("layergroup")

    def __str__(self):
        return self.name

    class Meta:
        ordering = ["workspace","name"]
class LayerGroupLayers(models.Model,TransactionMixin):
    """ Membership row linking a LayerGroup to one of its members.

    A row can point at a WMS layer, a publish or a nested sub group,
    but publish/sub_group membership is currently disabled (the fields
    are non-editable and cleared in :meth:`clean`), so in practice only
    ``layer`` is populated.
    """
    group = models.ForeignKey(LayerGroup,null=False,blank=False,related_name="group_layer")
    layer = models.ForeignKey(WmsLayer,null=True,blank=False)
    publish = models.ForeignKey(Publish,null=True,blank=True,editable=False)
    sub_group = models.ForeignKey(LayerGroup,null=True,blank=True,related_name="subgroup_layer",editable=False)
    # position of this member within the group's draw order
    order = models.PositiveIntegerField(null=False,blank=False)

    def clean(self):
        """ Validate the membership row.

        :raises ValidationError: if no group is set or no layer is set.
        """
        if not self.group_id:
            raise ValidationError("group is required")
        # currently publish and sub_group are disabled: force them to
        # None so only direct WMS layers can be group members.
        self.publish = None
        self.sub_group = None
        # With publish/sub_group forced to None above, the original
        # combined check ("all three empty" or "all three set") reduced
        # to requiring a layer — the "all three set" branch was
        # unreachable dead code and has been removed.
        if self.layer is None:
            raise ValidationError("Layer required")
| |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bitget(Exchange):
def describe(self):
return self.deep_extend(super(bitget, self).describe(), {
'id': 'bitget',
'name': 'Bitget',
'countries': ['SG'],
'version': 'v3',
'rateLimit': 1000, # up to 3000 requests per 5 minutes ≈ 600 requests per minute ≈ 10 requests per second ≈ 100 ms
'has': {
'cancelOrder': True,
'cancelOrders': True,
'CORS': None,
'createOrder': True,
'fetchAccounts': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDeposits': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrderTrades': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchWithdrawals': True,
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'12h': '12h',
'1d': '1d',
'1w': '1w',
},
'hostname': 'bitget.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/88317935-a8a21c80-cd22-11ea-8e2b-4b9fac5975eb.jpg',
'api': {
'data': 'https://api.{hostname}',
'api': 'https://api.{hostname}',
'capi': 'https://capi.{hostname}',
'swap': 'https://capi.{hostname}',
},
'www': 'https://www.bitget.com',
'doc': [
'https://bitgetlimited.github.io/apidoc/en/swap',
'https://bitgetlimited.github.io/apidoc/en/spot',
],
'fees': 'https://www.bitget.cc/zh-CN/rate?tab=1',
'test': {
'rest': 'https://testnet.bitget.com',
},
'referral': 'https://www.bitget.com/expressly?languageType=0&channelCode=ccxt&vipCode=tg9j',
},
'api': {
'data': {
'get': [
'market/history/kline', # Kline data
'market/detail/merged', # Get aggregated ticker
'market/tickers', # Get all trading tickers
'market/allticker', # Get all trading market method 2
'market/depth', # Get Market Depth Data
'market/trade', # Get Trade Detail Data
'market/history/trade', # Get record of trading
'market/detail', # Get Market Detail 24h Volume
'common/symbols', # Query all trading pairs and accuracy supported in the station
'common/currencys', # Query all currencies supported in the station
'common/timestamp', # Query system current time
],
},
'api': {
'get': [
'account/accounts', # Get all accounts of current user(即account_id)。
'accounts/{account_id}/balance', # Get the balance of the specified account
'order/orders', # Query order, deprecated
'order/orders/openOrders',
'order/orders/history',
'order/deposit_withdraw', # Query assets history
],
'post': [
'order/orders/place', # Place order
'order/orders/{order_id}/submitcancel', # Request to cancel an order request
'order/orders/batchcancel', # Bulk order cancellation
'order/orders/{order_id}', # Query an order details
'order/orders/{order_id}/matchresults', # Query the transaction details of an order
'order/matchresults', # Query current order, order history
],
},
'capi': {
'get': [
'market/time',
'market/contracts',
'market/depth',
'market/tickers',
'market/ticker',
'market/trades',
'market/candles',
'market/index',
'market/open_count',
'market/open_interest',
'market/price_limit',
'market/funding_time',
'market/mark_price',
'market/open_count',
'market/historyFundRate',
],
},
'swap': {
'get': [
'account/accounts',
'account/account',
'account/settings',
'position/allPosition',
'position/singlePosition',
'position/holds',
'order/detail',
'order/orders',
'order/fills',
'order/current',
'order/currentPlan', # conditional
'order/history',
'order/historyPlan', # conditional
'trace/closeTrack',
'trace/currentTrack',
'trace/historyTrack',
'trace/summary',
'trace/profitSettleTokenIdGroup',
'trace/profitDateGroupList',
'trace/profitDateList',
'trace/waitProfitDateList',
],
'post': [
'account/leverage',
'account/adjustMargin',
'account/modifyAutoAppendMargin',
'order/placeOrder',
'order/batchOrders',
'order/cancel_order',
'order/cancel_batch_orders',
'order/plan_order',
'order/cancel_plan',
'position/changeHoldModel',
'trace/closeTrackOrder',
],
},
},
'fees': {
'spot': {
'taker': self.parse_number('0.002'),
'maker': self.parse_number('0.002'),
},
'swap': {
'taker': self.parse_number('0.0006'),
'maker': self.parse_number('0.0004'),
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
'exceptions': {
# http error codes
# 400 Bad Request — Invalid request format
# 401 Unauthorized — Invalid API Key
# 403 Forbidden — You do not have access to the requested resource
# 404 Not Found
# 500 Internal Server Error — We had a problem with our server
'exact': {
'1': ExchangeError, # {"code": 1, "message": "System error"}
# undocumented
'failure to get a peer from the ring-balancer': ExchangeNotAvailable, # {"message": "failure to get a peer from the ring-balancer"}
'4010': PermissionDenied, # {"code": 4010, "message": "For the security of your funds, withdrawals are not permitted within 24 hours after changing fund password / mobile number / Google Authenticator settings "}
# common
# '0': ExchangeError, # 200 successful,when the order placement / cancellation / operation is successful
'4001': ExchangeError, # no data received in 30s
'4002': ExchangeError, # Buffer full. cannot write data
# --------------------------------------------------------
'30001': AuthenticationError, # {"code": 30001, "message": 'request header "OK_ACCESS_KEY" cannot be blank'}
'30002': AuthenticationError, # {"code": 30002, "message": 'request header "OK_ACCESS_SIGN" cannot be blank'}
'30003': AuthenticationError, # {"code": 30003, "message": 'request header "OK_ACCESS_TIMESTAMP" cannot be blank'}
'30004': AuthenticationError, # {"code": 30004, "message": 'request header "OK_ACCESS_PASSPHRASE" cannot be blank'}
'30005': InvalidNonce, # {"code": 30005, "message": "invalid OK_ACCESS_TIMESTAMP"}
'30006': AuthenticationError, # {"code": 30006, "message": "invalid OK_ACCESS_KEY"}
'30007': BadRequest, # {"code": 30007, "message": 'invalid Content_Type, please use "application/json" format'}
'30008': RequestTimeout, # {"code": 30008, "message": "timestamp request expired"}
'30009': ExchangeError, # {"code": 30009, "message": "system error"}
'30010': AuthenticationError, # {"code": 30010, "message": "API validation failed"}
'30011': PermissionDenied, # {"code": 30011, "message": "invalid IP"}
'30012': AuthenticationError, # {"code": 30012, "message": "invalid authorization"}
'30013': AuthenticationError, # {"code": 30013, "message": "invalid sign"}
'30014': DDoSProtection, # {"code": 30014, "message": "request too frequent"}
'30015': AuthenticationError, # {"code": 30015, "message": 'request header "OK_ACCESS_PASSPHRASE" incorrect'}
'30016': ExchangeError, # {"code": 30015, "message": "you are using v1 apiKey, please use v1 endpoint. If you would like to use v3 endpoint, please subscribe to v3 apiKey"}
'30017': ExchangeError, # {"code": 30017, "message": "apikey's broker id does not match"}
'30018': ExchangeError, # {"code": 30018, "message": "apikey's domain does not match"}
'30019': ExchangeNotAvailable, # {"code": 30019, "message": "Api is offline or unavailable"}
'30020': BadRequest, # {"code": 30020, "message": "body cannot be blank"}
'30021': BadRequest, # {"code": 30021, "message": "Json data format error"}, {"code": 30021, "message": "json data format error"}
'30022': PermissionDenied, # {"code": 30022, "message": "Api has been frozen"}
'30023': BadRequest, # {"code": 30023, "message": "{0} parameter cannot be blank"}
'30024': BadSymbol, # {"code":30024,"message":"\"instrument_id\" is an invalid parameter"}
'30025': BadRequest, # {"code": 30025, "message": "{0} parameter category error"}
'30026': DDoSProtection, # {"code": 30026, "message": "requested too frequent"}
'30027': AuthenticationError, # {"code": 30027, "message": "login failure"}
'30028': PermissionDenied, # {"code": 30028, "message": "unauthorized execution"}
'30029': AccountSuspended, # {"code": 30029, "message": "account suspended"}
'30030': ExchangeError, # {"code": 30030, "message": "endpoint request failed. Please try again"}
'30031': BadRequest, # {"code": 30031, "message": "token does not exist"}
'30032': BadSymbol, # {"code": 30032, "message": "pair does not exist"}
'30033': BadRequest, # {"code": 30033, "message": "exchange domain does not exist"}
'30034': ExchangeError, # {"code": 30034, "message": "exchange ID does not exist"}
'30035': ExchangeError, # {"code": 30035, "message": "trading is not supported in self website"}
'30036': ExchangeError, # {"code": 30036, "message": "no relevant data"}
'30037': ExchangeNotAvailable, # {"code": 30037, "message": "endpoint is offline or unavailable"}
# '30038': AuthenticationError, # {"code": 30038, "message": "user does not exist"}
'30038': OnMaintenance, # {"client_oid":"","code":"30038","error_code":"30038","error_message":"Matching engine is being upgraded. Please try in about 1 minute.","message":"Matching engine is being upgraded. Please try in about 1 minute.","order_id":"-1","result":false}
# futures
'32001': AccountSuspended, # {"code": 32001, "message": "futures account suspended"}
'32002': PermissionDenied, # {"code": 32002, "message": "futures account does not exist"}
'32003': CancelPending, # {"code": 32003, "message": "canceling, please wait"}
'32004': ExchangeError, # {"code": 32004, "message": "you have no unfilled orders"}
'32005': InvalidOrder, # {"code": 32005, "message": "max order quantity"}
'32006': InvalidOrder, # {"code": 32006, "message": "the order price or trigger price exceeds USD 1 million"}
'32007': InvalidOrder, # {"code": 32007, "message": "leverage level must be the same for orders on the same side of the contract"}
'32008': InvalidOrder, # {"code": 32008, "message": "Max. positions to open(cross margin)"}
'32009': InvalidOrder, # {"code": 32009, "message": "Max. positions to open(fixed margin)"}
'32010': ExchangeError, # {"code": 32010, "message": "leverage cannot be changed with open positions"}
'32011': ExchangeError, # {"code": 32011, "message": "futures status error"}
'32012': ExchangeError, # {"code": 32012, "message": "futures order update error"}
'32013': ExchangeError, # {"code": 32013, "message": "token type is blank"}
'32014': ExchangeError, # {"code": 32014, "message": "your number of contracts closing is larger than the number of contracts available"}
'32015': ExchangeError, # {"code": 32015, "message": "margin ratio is lower than 100% before opening positions"}
'32016': ExchangeError, # {"code": 32016, "message": "margin ratio is lower than 100% after opening position"}
'32017': ExchangeError, # {"code": 32017, "message": "no BBO"}
'32018': ExchangeError, # {"code": 32018, "message": "the order quantity is less than 1, please | |
<filename>tensorflow/python/eager/def_function_xla_jit_test.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tests import xla_test
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class DefFunctionTest(xla_test.XLATestCase):
def testAutoclusteringWithTfFunction(self):
  """Auto-clustering picks up a compiled inner fn called from an uncompiled outer fn.

  Verifies the optimized graph contains an `_XlaRun` op exactly when the
  build has XLA enabled.
  """
  if 'tpu' in self.device.lower():
    self.skipTest('Autoclustering does not run on TPU')

  with ops.device('device:{}:0'.format(self.device)):

    @def_function.function(experimental_compile=False)
    def outer(a, b, c):
      return a * inner(b, c) + c

    @def_function.function(experimental_compile=True)
    def inner(b, c):
      return b + c * b

    i1 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
    i2 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])
    i3 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0])

    # Capture the post-optimization graphs so we can inspect emitted ops.
    with context.collect_graphs(optimized=True) as graphs:
      outer(i1, i2, i3)

    if test_util.is_xla_enabled():
      self.assertIn('_XlaRun', [n.op for n in graphs[0].node])
    else:
      self.assertNotIn('_XlaRun', [n.op for n in graphs[0].node])
def testBasic(self):
  """Compiled and uncompiled versions of the same fn produce the same result."""
  with ops.device('device:{}:0'.format(self.device)):

    def fn(x, a):
      return x + a

    func = def_function.function(fn, experimental_compile=False)
    xla_func = def_function.function(fn, experimental_compile=True)

    inputs = constant_op.constant([1, 2, 2, 3, 3])
    self.assertAllClose([2, 3, 3, 4, 4], func(inputs, 1))
    if not test.is_built_with_rocm():
      # XLA support is not yet enabled for TF ROCm
      self.assertAllClose([2, 3, 3, 4, 4], xla_func(inputs, 1))
def testBasicInt32(self):
  """Compilation handles int32 inputs, not just floats."""
  with ops.device('device:{}:0'.format(self.device)):

    @def_function.function(experimental_compile=True)
    def fn(x, a):
      return x + a

    inputs = constant_op.constant([1, 2, 2, 3, 3], dtype=dtypes.int32)
    if not test.is_built_with_rocm():
      # XLA support is not yet enabled for TF ROCm
      self.assertAllClose([2, 3, 3, 4, 4], fn(inputs, 1))
def testDerivative(self):
  """Gradients flow through a compiled fn; derivatives inherit must-compile."""
  with ops.device('device:{}:0'.format(self.device)):
    if test.is_built_with_rocm():
      # XLA support is not yet enabled for TF ROCm
      return

    def fn(x, a):
      return 2 * x + a

    xla_func = def_function.function(fn, experimental_compile=True)

    with backprop.GradientTape() as tape:
      inputs = constant_op.constant([1., 2., 2., 3., 3.])
      tape.watch(inputs)
      outputs = xla_func(inputs, 1)

    # d/dx (2x + a) == 2 for every element.
    self.assertAllClose([2, 2, 2, 2, 2], tape.gradient(outputs, inputs))

    # pylint: disable=protected-access
    (forward, backward) = xla_func.get_concrete_function(
        inputs, 1)._delayed_rewrite_functions.forward_backward()

    # Check that the must-compile attribute gets correctly propagated to the
    # created derivatives.
    self.assertTrue(backward.function_def.attr['_XlaMustCompile'])
    self.assertTrue(forward.definition.attr['_XlaMustCompile'])
# Calling function with experimental_compile=True from
# experimental_compile=False should compile the inner func.
def testNestedCall(self):
  """A compiled fn called from an uncompiled wrapper still compiles."""
  if 'tpu' in self.device.lower():
    self.skipTest('b/162800687: Inner function runs on host')

  with ops.device('device:{}:0'.format(self.device)):

    @def_function.function(experimental_compile=True)
    def fn(x, a):
      return x + a

    @def_function.function(experimental_compile=False)
    def fn2(x, a):
      return fn(x, a)

    inputs = constant_op.constant([1, 2, 2, 3, 3])
    if not test.is_built_with_rocm():
      # XLA support is not yet enabled for TF ROCm
      self.assertAllClose([2, 3, 3, 4, 4], fn2(inputs, 1))
@test_util.disable_mlir_bridge('TODO(b/162272821): MLIR bridge returns'
                               ' wrong status type')
def testNestedCallUnsupportedOps(self):
  """An XLA-incompatible op inside a compiled fn fails even via a wrapper."""
  with ops.device('device:{}:0'.format(self.device)):

    def fn(x):
      return array_ops.unique(x).y

    xla_func = def_function.function(fn, experimental_compile=True)

    def fn2(x):
      return xla_func(x)

    func = def_function.function(fn2, experimental_compile=False)
    inputs = constant_op.constant([1, 2, 2, 3, 3])
    if not test.is_built_with_rocm():
      # XLA support is not yet enabled for TF ROCm
      with self.assertRaisesRegex(errors.InvalidArgumentError,
                                  'not compilable'):
        func(inputs)
@test_util.disable_mlir_bridge('TODO(b/162272821): MLIR bridge returns'
                               ' wrong status type')
def testUnsupportedOps(self):
  """An XLA-incompatible op works uncompiled but raises when compiled."""
  with ops.device('device:{}:0'.format(self.device)):

    def fn(x):
      return array_ops.unique(x).y  # Unique is not supported by XLA

    func = def_function.function(fn, experimental_compile=False)
    xla_func = def_function.function(fn, experimental_compile=True)

    inputs = constant_op.constant([1, 2, 2, 3, 3])
    self.assertAllClose([1, 2, 3], func(inputs))
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                'not compilable'):
      xla_func(inputs)
def testFunctionGradient(self):
  """Gradients w.r.t. a captured resource variable work under compilation."""
  with ops.device('device:{}:0'.format(self.device)):
    v = resource_variable_ops.ResourceVariable(2.0)

    def fn(x):
      return v * x

    func = def_function.function(fn, experimental_compile=False)
    xla_func = def_function.function(fn, experimental_compile=True)

    def run_and_check(test_func):
      x = constant_op.constant(3.0)

      with backprop.GradientTape() as tape:
        y = test_func(x)
      dy = tape.gradient(y, v)

      # y = v * x = 2 * 3; dy/dv = x = 3.
      self.assertAllClose(6.0, y)
      self.assertAllClose(3.0, dy)

    run_and_check(func)
    if not test.is_built_with_rocm():
      # XLA support is not yet enabled for TF ROCm
      run_and_check(xla_func)
@test_util.disable_mlir_bridge('TODO(b/162521846): MLIR bridge fails'
                               ' msan, function library not found')
def testControlFlow(self):
  """while_loop + cond inside a compiled fn, and gradients through it."""
  with ops.device('device:{}:0'.format(self.device)):

    @def_function.function(experimental_compile=True)
    def f(x):
      # Compilation must place the body inside an XLA control-flow context.
      assert control_flow_util.GraphOrParentsInXlaContext(
          ops.get_default_graph())
      x = ops.convert_to_tensor(x)

      def body(i, a):
        return i + 1, control_flow_ops.cond(i > 2, lambda: a + (x**2),
                                            lambda: a + 3)

      return control_flow_ops.while_loop(
          lambda i, *_: i < 10,
          body, (constant_op.constant(0), constant_op.constant(3.)),
          maximum_iterations=10)[1]

    @def_function.function(experimental_compile=True)
    def g(x):
      x = ops.convert_to_tensor(x)
      with backprop.GradientTape() as tape:
        tape.watch(x)
        y = f(x)
      return y, tape.gradient(y, x)

    self.assertAllClose(40.0, f(2.0))
    self.assertAllClose([40.0, 28.0], g(2.0))
def testMethodCompilation(self):
  """experimental_compile works when decorating a method, not just a function."""
  if test.is_built_with_rocm():
    # XLA support is not yet enabled for TF ROCm
    return

  with ops.device('device:{}:0'.format(self.device)):

    class C(object):

      @def_function.function(experimental_compile=True)
      def f1(self, x, a):
        return x + a

    inputs = constant_op.constant([1, 2, 2, 3, 3])
    c = C()
    self.assertAllClose([2, 3, 3, 4, 4], c.f1(inputs, 1))
@test_util.disable_mlir_bridge('TODO(b/162272821): MLIR bridge returns '
                               ' wrong status type')
def testMethodCompilationUnsupportedFunc(self):
  """A compiled method containing an XLA-incompatible op raises."""
  if test.is_built_with_rocm():
    # XLA support is not yet enabled for TF ROCm
    return

  with ops.device('device:{}:0'.format(self.device)):

    class C(object):

      @def_function.function(experimental_compile=True)
      def f1(self, x):
        return array_ops.unique(x).y

    inputs = constant_op.constant([1, 2, 2, 3, 3])
    c = C()
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                'not compilable'):
      c.f1(inputs)
def testMustBeConstantPropagation(self):
  """A compiled fn's output can feed a must-be-constant arg (transpose perm)."""
  if 'tpu' in self.device.lower():
    self.skipTest('b/162799319: Cannot resolve constant on TPU')

  with ops.device('device:{}:0'.format(self.device)):
    if test.is_built_with_rocm():
      # XLA support is not yet enabled for TF ROCm
      return

    @def_function.function(experimental_compile=True)
    def f():
      return constant_op.constant([0, 2, 1], dtype=dtypes.int32)

    @def_function.function(experimental_compile=True)
    def g(a, b):
      # `b` (the permutation) must be resolvable to a compile-time constant.
      return array_ops.transpose(a, b)

    @def_function.function
    def z():
      return g(array_ops.ones([3, 4, 3], dtype=dtypes.float32), f())

    z()
@test_util.disable_mlir_bridge('TODO(b/162271237): argmax gives different'
                               ' results in MLIR-based bridge')
def testArgMinMax(self):
  """argmax/argmin compile and return index 0 for an all-ones vector."""
  with ops.device('device:{}:0'.format(self.device)):

    @def_function.function(experimental_compile=True)
    def argmax(x):
      return math_ops.argmax(x)

    @def_function.function(experimental_compile=True)
    def argmin(x):
      return math_ops.argmin(x)

    self.assertAllClose(0, argmax(array_ops.ones([10], dtype=dtypes.float32)))
    self.assertAllClose(0, argmax(array_ops.ones([10])))
    self.assertAllClose(0, argmin(array_ops.ones([10], dtype=dtypes.float32)))
    self.assertAllClose(0, argmin(array_ops.ones([10])))
@test_util.disable_mlir_bridge('TensorArray support not implemented')
def testErrorMessagePassingTensorArray(self):
  """Gradient through a TensorArray in a compiled fn raises a clear error."""
  with ops.device('device:{}:0'.format(self.device)):

    @def_function.function(experimental_compile=True)
    def f(x):
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=1, element_shape=[])
      ta = ta.write(0, 2 * x)
      y = ta.read(0)
      return y

    x = constant_op.constant(3.14)
    with backprop.GradientTape() as tape:
      tape.watch(x)
      # Both the forward call and the gradient computation live inside the
      # assertRaises scope: the error surfaces from this combined flow.
      with self.assertRaisesRegex(errors.UnimplementedError,
                                  'TensorList crossing the XLA/TF boundary'):
        y = f(x)
        tape.gradient(y, x)
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
                               ' lowering TensorListConcatV2')
def testTensorListConcatV2(self):
  """TensorArray.concat gives the same result compiled and uncompiled."""
  with ops.device('device:{}:0'.format(self.device)):

    def f(x):
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=2, element_shape=[3])
      ta = ta.write(0, 2 * x)
      ta = ta.write(1, 3 * x)
      return ta.concat()

    compiled_f = def_function.function(experimental_compile=True)(f)

    inputs = constant_op.constant([3.14, 2.68, 7.69])
    self.assertAllClose([6.28, 5.36, 15.38, 9.42, 8.04, 23.07], f(inputs))
    self.assertAllClose(compiled_f(inputs), f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
                               ' lowering TensorListConcatV2')
def testTensorListConcatV2Multidim(self):
  """TensorArray.concat with multi-dimensional [3, 2] elements."""
  with ops.device('device:{}:0'.format(self.device)):

    def f(x):
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=2, element_shape=[3, 2])
      ta = ta.write(0, 2 * x)
      ta = ta.write(1, 3 * x)
      return ta.concat()

    compiled_f = def_function.function(experimental_compile=True)(f)

    inputs = constant_op.constant([[3.14, 21.1], [2.68, 22.2], [7.69, 23.3]])
    self.assertAllClose(f(inputs), compiled_f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
                               ' lowering TensorListConcatV2')
def testTensorListConcatV2Scalars(self):
  """TensorArray.concat with single-element [1] rows."""
  with ops.device('device:{}:0'.format(self.device)):

    def f(x):
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=2, element_shape=[1])
      ta = ta.write(0, 2 * x)
      ta = ta.write(1, 3 * x)
      return ta.concat()

    compiled_f = def_function.function(experimental_compile=True)(f)

    inputs = constant_op.constant([3.14])
    self.assertAllClose(f(inputs), compiled_f(inputs))
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
                               ' lowering TensorListConcatV2')
def testTensorListConcatGrad(self):
  """Gradient of TensorArray.concat matches between eager and compiled."""
  with ops.device('device:{}:0'.format(self.device)):

    def f(x):
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=2, element_shape=[3])
      ta = ta.write(0, 2 * x)
      ta = ta.write(1, 3 * x)
      return ta.concat()

    def g():
      x = constant_op.constant([3.14, 2.68, 7.69])
      with backprop.GradientTape() as tape:
        tape.watch(x)
        y = f(x)
        # d(concat(2x, 3x))/dx sums the per-row factors: 2 + 3 == 5.
        return tape.gradient(y, x)

    compiled_g = def_function.function(experimental_compile=True)(g)

    self.assertAllClose([5.0, 5.0, 5.0], g())
    self.assertAllClose(compiled_g(), g())
@test_util.disable_mlir_bridge('TODO(b/162281863): MLIR bridge errors out'
                               ' lowering TensorListConcatV2')
def testTensorListConcatGradNestedCompile(self):
  """Gradient of TensorArray.concat with both fns compiled (nested)."""
  with ops.device('device:{}:0'.format(self.device)):

    @def_function.function(experimental_compile=True)
    def f(x):
      ta = tensor_array_ops.TensorArray(
          dtype=dtypes.float32, size=2, element_shape=[3])
      ta = ta.write(0, 2 * x)
      ta = ta.write(1, 3 * x)
      return ta.concat()

    @def_function.function(experimental_compile=True)
    def g():
      x = constant_op.constant([3.14, 2.68, 7.69])
      with backprop.GradientTape() as tape:
        tape.watch(x)
        y = f(x)
      out = tape.gradient(y, x)
      return out

    self.assertAllClose([5.0, 5.0, 5.0], g())
def testCumsum(self):
  """cumsum on float64 input compiles and matches the expected prefix sums."""
  if 'tpu' in self.device.lower():
    self.skipTest('b/162771302: 64bit rewrite of cumsum not supported')

  with ops.device('device:{}:0'.format(self.device)):

    @def_function.function(experimental_compile=True)
    def f(x):
      return math_ops.cumsum(x)

    f64_input = constant_op.constant([1.1, 2.2, 3.3], dtype=dtypes.float64)
    self.assertAllClose([1.1, 3.3, 6.6], f(f64_input))
def testNoExcessiveRetracing(self):
  """Re-wrapping the outer fn must not force the compiled inner fn to retrace."""
  with ops.device('device:{}:0'.format(self.device)):
    inner_retracings = 0

    @def_function.function(experimental_compile=True)
    def inner(a, b):
      # Python body runs only when tracing, so this counts retraces.
      nonlocal inner_retracings
      inner_retracings += 1
      return a * b + a

    def outer(a, b):
      return inner(a, b)

    func_input = random_ops.random_normal([10, 10])
    for _ in range(2):
      def_function.function(outer)(func_input, func_input)

    self.assertEqual(inner_retracings, 1)
def testUpdateVariable(self):
  """A compiled in-place variable update must not leak GPU memory."""
  with ops.device('device:{}:0'.format(self.device)):
    on_gpu = 'gpu' in self.device.lower()
    v = variables.Variable([3.1, 3.2])

    @def_function.function(experimental_compile=True)
    def update_var(a, b):
      v.assign_add(a * b)

    arg1 = random_ops.random_normal([2])
    arg2 = random_ops.random_normal([2])

    # Memory accounting is only available on GPU; use 0 elsewhere so the
    # equality check below is trivially satisfied.
    initial_usage = context.context().get_total_memory_usage(
        v.device) if on_gpu else 0
    update_var(arg1, arg2)
    final_usage = context.context().get_total_memory_usage(
        v.device) if on_gpu else 0
    self.assertEqual(initial_usage, final_usage)
@test_util.disable_mlir_bridge('TODO(b/162381930): MLIR bridge renames '
' functions')
def testUpdateVariableInClass(self):
with ops.device('device:{}:0'.format(self.device)):
class C(object):
@def_function.function(experimental_compile=True)
def update_var(self, a, b):
if not hasattr(self, | |
<reponame>hboshnak/python_toolbox
# Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
import operator
import heapq
import itertools
import numbers
import collections
import functools
import copy
from python_toolbox import misc_tools
from python_toolbox import math_tools
from python_toolbox.third_party.decorator import decorator
from .lazy_tuple import LazyTuple
from .ordered_dict import OrderedDict
from .various_ordered_sets import FrozenOrderedSet
from .various_frozen_dicts import FrozenDict, FrozenOrderedDict
from .abstract import Ordered
# Sentinel: the class object itself is used as the marker (it is never
# instantiated), letting `pop` distinguish "no default given" from
# `default=None`.
class _NO_DEFAULT(misc_tools.NonInstantiable):
    '''Stand-in value used in `_BaseBagMixin.pop` when no default is wanted.'''
class _ZeroCountAttempted(Exception):
    '''
    An attempt was made to add a value with a count of zero to a bag.

    This exception is used only internally for flow control; it is caught
    internally and the zero-count item is silently dropped.
    '''
def _count_elements_slow(mapping, iterable):
'''Put elements from `iterable` into `mapping`.'''
mapping_get = mapping.get
for element in iterable:
mapping[element] = mapping_get(element, 0) + 1
# Prefer the C-accelerated counting helper from CPython's private
# `_collections` module (the same trick `collections.Counter` uses); fall
# back to the pure-Python implementation on other interpreters.
try:
    from _collections import _count_elements
except ImportError:
    _count_elements = _count_elements_slow
def _process_count(count):
    '''
    Validate a count and normalize it to a positive `int`.

    Raises `TypeError` for non-integer or negative counts, and the internal
    `_ZeroCountAttempted` for a count of zero (callers catch it and silently
    drop the item).
    '''
    if not math_tools.is_integer(count):
        raise TypeError(
            f'You passed {repr(count)} as a count, while a `Bag` can only '
            f'handle integer counts.'
        )
    if count < 0:
        # `TypeError` (rather than `ValueError`) is kept for backward
        # compatibility with existing callers.
        raise TypeError(
            # Bug fix: the two implicitly-concatenated f-strings previously
            # rendered "doesn't supportnegative amounts." (missing space).
            f"You passed {repr(count)} as a count, while `Bag` doesn't "
            f"support negative amounts."
        )
    if count == 0:
        raise _ZeroCountAttempted
    return int(count)
class _BootstrappedCachedProperty(misc_tools.OwnNameDiscoveringDescriptor):
    '''
    A property that is calculated only once for an object, and then cached.

    This is redefined here in `bagging.py`, in addition to having it defined in
    `python_toolbox.caching`, because we can't import the canonical
    `CachedProperty` from there because of an import loop.

    Usage:

        class MyObject:

            # ... Regular definitions here

            def _get_personality(self):
                print('Calculating personality...')
                time.sleep(5) # Time consuming process that creates personality
                return 'Nice person'

            personality = _BootstrappedCachedProperty(_get_personality)

    You can also put in a value as the first argument if you'd like to have it
    returned instead of using a getter. (It can be a static value like `0`.)
    If this value happens to be a callable but you'd still like it to be
    used as a static value, use `force_value_not_getter=True`.
    '''
    def __init__(self, getter_or_value, doc=None, name=None,
                 force_value_not_getter=False):
        '''
        Construct the cached property.

        `getter_or_value` may be either a function that takes the parent object
        and returns the value of the property, or the value of the property
        itself, (as long as it's not a callable.)

        You may optionally pass in the name that this property has in the
        class; this will save a bit of processing later.
        '''
        misc_tools.OwnNameDiscoveringDescriptor.__init__(self, name=name)
        if callable(getter_or_value) and not force_value_not_getter:
            self.getter = getter_or_value
        else:
            # Wrap a static value so `__get__` can treat both cases uniformly.
            self.getter = lambda thing: getter_or_value
        self.__doc__ = doc or getattr(self.getter, '__doc__', None)

    def __get__(self, obj, our_type=None):
        if obj is None:
            # We're being accessed from the class itself, not from an object
            return self
        value = self.getter(obj)
        # Shadow this (non-data) descriptor with an instance attribute, so
        # subsequent accesses skip the getter entirely -- that's the cache.
        setattr(obj, self.get_our_name(obj, our_type=our_type), value)
        return value

    def __call__(self, method_function):
        '''
        Decorate method to use value of `CachedProperty` as a context manager.
        '''
        def inner(same_method_function, self_obj, *args, **kwargs):
            with getattr(self_obj, self.get_our_name(self_obj)):
                return method_function(self_obj, *args, **kwargs)
        return decorator(inner, method_function)

    def __repr__(self):
        return f'<{type(self).__name__}: {self.our_name or self.getter}>'
class _BaseBagMixin:
'''
Mixin for `FrozenBag` and `FrozenOrderedBag`.
Most of the bag functionality is implemented here, with a few finishing
touches in the classes that inherit from this. This mixin is used both for
ordered, unordered, frozen and mutable bags, so only the methods that are
general to all of them are implemented here.
'''
def __init__(self, iterable={}):
    # NOTE(review): the mutable default `{}` is never mutated here (only
    # iterated), so it is safe -- but a `None` default would be cleaner.
    super().__init__()
    if isinstance(iterable, collections.abc.Mapping):
        # Mapping input: values are counts and must be validated.
        for key, value, in iterable.items():
            try:
                self._dict[key] = _process_count(value)
            except _ZeroCountAttempted:
                # Zero counts are silently dropped rather than stored.
                continue
    else:
        # Plain iterable input: tally occurrences, like `collections.Counter`.
        _count_elements(self._dict, iterable)
def __getitem__(self, key):
    '''Get the count of `key`, which is 0 for keys not in the bag.'''
    return self._dict.get(key, 0)
def most_common(self, n=None):
    '''
    List the `n` most common elements and their counts, sorted.

    Results are sorted from the most common to the least. If `n is None`,
    then list all element counts.

        >>> Bag('abcdeabcdabcaba').most_common(3)
        (('a', 5), ('b', 4), ('c', 3))
    '''
    # `heapq.nlargest(n, it, key)` is documented as equivalent to
    # `sorted(it, key=key, reverse=True)[:n]`, so one sorted pass covers
    # both the `n is None` and the `n` given cases identically.
    ranked = sorted(self.items(), key=operator.itemgetter(1), reverse=True)
    if n is not None:
        ranked = ranked[:n]
    return tuple(ranked)
@property
def elements(self):
    '''
    Iterate over elements repeating each as many times as its count.

        >>> c = Bag('ABCABC')
        >>> tuple(c.elements)
        ('A', 'B', 'A', 'B', 'C', 'C')
    '''
    # Lazily expand each (element, count) pair into `count` repetitions.
    return itertools.chain.from_iterable(
        itertools.starmap(itertools.repeat, self.items())
    )
def __contains__(self, item):
    '''An item is in the bag iff its count is at least one.'''
    # Counts are validated to be non-negative ints, so truthiness of the
    # count is exactly "count >= 1".
    return bool(self[item])
# Total number of elements (the sum of all counts) -- unlike `len`, which
# would count distinct keys.
n_elements = property(
    lambda self: sum(self.values()),
    doc='''Number of total elements in the bag.'''
)
@property
def frozen_bag_bag(self):
    '''
    A `FrozenBagBag` of this bag.

    This means, a bag where `3: 4` means "The original bag has 4 different
    keys with a value of 3."

    Example:

        >>> bag = Bag('abracadabra')
        >>> bag
        Bag({'b': 2, 'r': 2, 'a': 5, 'd': 1, 'c': 1})
        >>> bag.frozen_bag_bag
        FrozenBagBag({1: 2, 2: 2, 5: 1})
    '''
    # Imported here (not at module level) to avoid a circular import.
    from .frozen_bag_bag import FrozenBagBag
    return FrozenBagBag(self.values())
def __or__(self, other):
    '''
    Make a union bag of these two bags.

    The new bag will have, for each key, the higher of the two amounts for
    that key in the two original bags.

    Example:

        >>> Bag('abbb') | Bag('bcc')
        Bag({'b': 3, 'c': 2, 'a': 1})
    '''
    if not isinstance(other, _BaseBagMixin):
        return NotImplemented
    # Key order follows the `FrozenOrderedSet` union, which keeps the
    # result deterministic for the ordered bag variants.
    return type(self)(self._dict_type(
        (key, max(self[key], other[key]))
        for key in FrozenOrderedSet(self) | FrozenOrderedSet(other))
    )
def __and__(self, other):
    '''
    Make an intersection bag of these two bags.

    The new bag will have, for each key, the lower of the two amounts for
    that key in the two original bags.

    Example:

        >>> Bag('abbb') & Bag('bcc')
        Bag({'b': 1,})
    '''
    if not isinstance(other, _BaseBagMixin):
        return NotImplemented
    # Intersecting the key sets first means `min(...)` never sees a zero,
    # so no zero-count entries can enter the result.
    return type(self)(self._dict_type(
        (key, min(self[key], other[key]))
        for key in FrozenOrderedSet(self) & FrozenOrderedSet(other))
    )
def __add__(self, other):
    '''
    Make a sum bag of these two bags.

    The new bag will have, for each key, the sum of the two amounts for
    that key in each of the two original bags.

    Example:

        >>> Bag('abbb') + Bag('bcc')
        Bag({'b': 4, 'c': 2, 'a': 1})
    '''
    if not isinstance(other, _BaseBagMixin):
        return NotImplemented
    # Missing keys read as 0 via `__getitem__`, so a plain sum over the
    # union of keys is correct.
    return type(self)(self._dict_type(
        (key, self[key] + other[key])
        for key in FrozenOrderedSet(self) | FrozenOrderedSet(other))
    )
def __sub__(self, other):
    '''
    Get the subtraction of one bag from another.

    This creates a new bag which has the items of the first bag minus the
    items of the second one. Negative counts are truncated to zero: If
    there are any items in the second bag that are more than the items in
    the first bag, the result for that key will simply be zero rather than
    a negative amount.
    '''
    if not isinstance(other, _BaseBagMixin):
        return NotImplemented
    # Only `self`'s keys can survive subtraction, so iterating `self`
    # (rather than the key union) is sufficient.
    return type(self)(self._dict_type(
        (key, max(self[key] - other[key], 0)) for key in self)
    )
def __mul__(self, other):
    '''Get a new bag that has all counts multiplied by the integer `other`.'''
    if not math_tools.is_integer(other):
        return NotImplemented
    scaled_items = self._dict_type(
        (key, count * other) for key, count in self.items()
    )
    return type(self)(scaled_items)
def __rmul__(self, other):
    '''Support `n * bag` by delegating to `bag * n`.'''
    return self * other
def __floordiv__(self, other):
    '''
    Do a floor-division `self // other`.

    `other` can be either an integer or a bag.

    If `other` is an integer, the result will be the biggest bag possible
    so that `result * other <= self`.

    If `other` is a bag, the result will be the maximum number of times you
    can put `other` inside of `self` without having it surpass `self` for
    any key. (Or in other words, the biggest integer possible so that
    `result * other <= self`.)
    '''
    # NOTE: the return type depends on the divisor -- a bag for an integer
    # divisor, a plain `int` for a bag divisor.
    if math_tools.is_integer(other):
        return (
            type(self)(self._dict_type((key, count // other) for
                                       key, count in self.items()))
        )
    elif isinstance(other, _BaseBagMixin):
        for key in other:
            if key not in self:
                # `other` demands a key we lack entirely, so it fits zero
                # times. (Bags never store zero counts, hence the assert.)
                assert other[key] >= 1
                return 0
        division_results = []
        for key in self:
            if other[key] >= 1:
                division_results.append(self[key] // other[key])
        if division_results:
            return min(division_results)
        else:
            # `other` is an empty bag -- dividing by it is undefined.
            raise ZeroDivisionError
    else:
        return NotImplemented
def __mod__(self, other):
'''
Do a modulo `self % other`.
`other` can be either an integer or a bag.
If `other` is an integer, the result will be a bag with `% other` done
on the count of every item from `self`. Or you can also think of it as
`self - (self // other)`, which happens to be the same bag.
If `other` is a bag, the result will be the bag that's left when you
subtract as many copies of `other` from this bag, until you can't
subtract without truncating some keys. Or in other words, it's `self -
| |
# Blender add-on registration metadata, read by Blender's add-on manager.
bl_info = {
    "name": "export",
    "description": "Export to N64 Display List",
    "author": "WadeMalone",
    "version": (1, 0, 1),
    "blender": (2, 70, 0),
    "warning": "",
    "location": "View3D > Tool",
    "wiki_url": "",
    "tracker_url": "",
    "support": "COMMUNITY",
    "category": "N64 Model Editing" }
import bpy
import os
import random
import string
import math
import bpy
import mathutils
import copy
from .gbicom import TEXTURE_4B_TLUT, PRE_BUILT_SETTINGS, GBI_Xport64
class Obj_Properties_Xport64(bpy.types.PropertyGroup):
    """Per-object export properties for the Xport64 exporter."""
    bl_label = "Xport64 Object Export Properties"
    # [object][vertex][u, v] -- UV values actually used in the VTX list; the
    # poly-list pass compares against this for deduplication. NOTE(review):
    # this is a class-level (shared) attribute, not per-instance -- confirm
    # that is intentional.
    totalUVValues = [[[]]] #This stores the UV values actually used in the VTXlist. It is used for comparison in the polylist
class exportTest(bpy.types.PropertyGroup):
    """Property group stub. NOTE(review): defines only a label in this chunk --
    confirm it is still needed before removing."""
    bl_label = "Export Class"
# **********************************************************************************************************************************************
# 1 ***************************************************************** EXPORT VERT ****************************************************************
# **********************************************************************************************************************************************
class VTX_Xport64(bpy.types.Operator):
bl_idname = "export.xport64"
bl_label = "Xport64 - Display List"
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: Many of the most used variables in VTX_Xport64 are defined in __init__.py
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: exportVert sorts through the vertices of the current object and tries to reduce the
# number of vertices required to render the model on the N64.
#---------------------------------------------------------------------------------------------------------
def exportVert(self, o, obj, objCounter):
print("Exporting Vertices from export.py -> exportVert(self, o, obj, objCounter):")
bitshift = 6 # Bitshift mod
loadlim = 29 # Amount of verts the system will load at a time 32 max limit
exportPolyList = True
DEBUG = obj.obj_props.debug_DL #NOTE: User can debug script and look for problem areas
SHOWTIPS = obj.obj_props.guide_DL #NOTE: User can view tips and guidance on how to utilize this tool
#TEST Exporting C files
path = 'Users\micha\Documents\Game Design'
filename = path + obj.name + '.c'
name = self.clean_name(obj.name)
self.uvValue.append([]) #Set up for each new object [object][vertex][u,v]
self.usedUVValues.append([]) ##Set up for each new object [object][vertex][u,v]
self.usedUVValues[objCounter].append([])
self.allVertexColors.append([]) #Set up for each new object [object][vertex][r,g,b]
self.materials.append([]) #object number objCounter for material checks
self.definitionsFile.append([])
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: clean_name removes invalid characters and sets name of file that will be exported
#---------------------------------------------------------------------------------------------------------
#ANIMATION Test for animations in scene...
scene = bpy.context.scene
sceneprops = scene.scene_props
frame_current = scene.frame_current
r = range(scene.frame_start, scene.frame_end + 1)
l = len(str(len(r)))
#TO_REMOVE#obs = []
# bones["upper_arm.R"].name
#print("Name of bone... %s" % obj.modifiers["Armature"].name)
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: Export any #include files at the top of the file. Currently, up to 10 can be included.
#---------------------------------------------------------------------------------------------------------
o.write("\n//>-- OBJECT %i: START ------------------------------------------ %s -------------------------------------------------\n\n" % (objCounter, name))
if sceneprops.create_header_file == True: #NOTE ----- Save these commands in a string for later use in .h file for defintions and declarations:
self.definitionsFile[self.commandCount] = ("\n//>-- OBJECT %i: START ------------------------------------------ %s -------------------------------------------------\n\n" % (objCounter, name))
self.definitionsFile.append([])
self.commandCount +=1
for f in r:
if obj.obj_props.anim_Method == "VTX DL":
scene.frame_set(f)
scene.update()
mod_obj = obj
mod_mesh = mod_obj.to_mesh(scene, True, 'RENDER') # creates duplicated mesh used for animation capture
if obj.obj_props.anim_Method == "VTX DL":
vert = mod_mesh.vertices
else:
vert = obj.data.vertices
poly = obj.data.polygons
uv = obj.data.uv_layers.active
vertexList = []
redirectPointer = 0
# Choose vertex---[number][redirect]
redirectValue = []
uvCount = 0
index = 0
redirect = 0
count = 0
allPrimColors = [[]]
primColorCount = 0
mesh = obj.data
counter = 0
#---------------------------------------------------------------------------------------------------------
#VER_NOTES: 'for face in mesh.polygons:' No longer used, this will be removed provided it causes no errors
#---------------------------------------------------------------------------------------------------------
for face in mesh.polygons:
material = face.material_index
polygon = face.index
self.materials[objCounter].append([])
self.materials[objCounter][counter].append(polygon) #polygon ID
self.materials[objCounter][counter].append(material) #material slot
#if self.debugCheck == True:
#o.write("face: %i material index: %i \n" %(self.materials[objCounter][counter][0], self.materials[objCounter][counter][1]))
counter +=1
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: Check for polygons w/ 4 or more vertices. If found, split them into tris
#---------------------------------------------------------------------------------------------------------
for p in poly:
if len(p.vertices) > 3:
o.write("//WARNING: Triangulate before exporting. \n")
break
#---------------------------------------------------------------------------------------------------------
#VER_NOTES: 'allPrimColors[0]' No longer needed, this will be removed provided it causes no errors.
# Still needs to be removed from collision exporter first.
#---------------------------------------------------------------------------------------------------------
#Set up List for storing Primative Colors and assign allPrimColors[0] as 255,255,255
allPrimColors[0].append(255.0)
allPrimColors[0].append(255.0)
allPrimColors[0].append(255.0)
allPrimColors.append([])
primColorCount += 1
if obj.obj_props.debug_DL == True: #DEBUG CHECK VALUES:---
o.write("//Test Primative Color Storage = %i, %i, %i \n" % (allPrimColors[0][0], allPrimColors[0][1], allPrimColors[0][2]))
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: 'if obj.obj_props.sort_Method == "COLLISION":'
# If object is tagged as a collision object, begin collider object structure.
#---------------------------------------------------------------------------------------------------------
if obj.obj_props.sort_Method == "COLLISION":
o.write("MeshCollider %s_MColVtx_%i [] = {\n" % (name, f))
else:
o.write("Vtx %s_VertList_%i [] = {\n" % (name, f))
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: 'sceneprops.create_header_file == True:' If user selects to export definitions / declarations to a header file:
#---------------------------------------------------------------------------------------------------------
if sceneprops.create_header_file == True: #NOTE ----- Save these commands in a string for later use in .h file for defintions and declarations:
if obj.obj_props.sort_Method == "COLLISION":
self.definitionsFile[self.commandCount] = ("extern MeshCollider %s_MColVtx_%i [];\n" % (name, f))
else:
self.definitionsFile[self.commandCount] = ("extern Vtx %s_VertList_%i [];\n" % (name, f))
self.definitionsFile.append([])
self.commandCount +=1
print("\n\n\n-------- Output start frame: %i Start Frame: %i End Frame: %i --------\n\n\n" % (f, scene.frame_start, scene.frame_end))
allVertIndex = 0
redirectPointer = 0
count = 0
for face in poly:
currentActiveVertex = [0,0,0]
currentActiveLoop = [0,0,0]
redirectValueCheck = [0,0,0] #checks verts of each polygon. If attempting to re-direct but the difference between the highest and lower polygon is too great, create a new vertex listing
vertexListCheck = [0,0,0] #set as true or false whether this is a duplicate
vertCheck = 0
redirectDifference = 0
tempList = [0,0,0]
offsetDuplicates = 0
debugCounter = 0
matTextureSize = [0,0]
objMaterials = obj.data.materials
materialCounter = 0
materialCounter = face.material_index
activeMat = objMaterials[materialCounter]
matProperties = objMaterials[materialCounter].mat_props
#ERROR CHECK ----- to see if the current active material has nodes, which is a requirement for exporting:
checkForMaterial = getattr(obj, "active_material", False)
checkForNodes = getattr(checkForMaterial, "node_tree", False)
if checkForNodes != None: # make sure object has nodes enabled
print(checkForNodes)
else:
#print("Make sure all objects contain a material.")
print("Object %s does not have a material assigned!" % obj.name)
self.report({'WARNING'}, "Xport64 WARNING ----- Material '%s' does not have Nodes enabled and could not be exported. " % checkForMaterial.name)
self.report({'WARNING'}, "Xport64 TIP ----- To enable nodes on Material '%s', open the Material tab in the properties window. Select '%s', expand 'Surface' and press the 'Use Noes' button. " % (checkForMaterial.name, checkForMaterial.name))
o.write("//Xport64 WARNING ----- Material '%s' does not have Nodes enabled and could not be exported. " % checkForMaterial.name)
return {'FINISHED'}
# checkForMaterial = getattr(obj, "active_material", False)
# if checkForMaterial != None: #Make sure object has a material assigned
# activeMat = obj.active_material
# print(checkForMaterial)
# checkForNodes = getattr(activeMat, "node_tree", False)
# if checkForNodes != None: # make sure object has nodes enabled
# print(checkForNodes)
if 'Image Texture' in activeMat.node_tree.nodes:
textureImage = activeMat.node_tree.nodes["Image Texture"].image
textureName = textureImage.name
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: The following splits the image name from an extension such as: removeExtension[0] = name (ex. debugTexture) removeExtension[1] = extension (ex .png)
#---------------------------------------------------------------------------------------------------------
#---------------------------------------------------------------------------------------------------------
#TO_DO_NOTE: Check to make sure that new materials method uses the same extension removal function.
#---------------------------------------------------------------------------------------------------------
removeExtension = os.path.splitext(textureName) #splits extension and file name
textureName = removeExtension[0] #assigns file name to textureName without the extension
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: Get texture image resolution if a texture is being used.
#---------------------------------------------------------------------------------------------------------
if matProperties.custom_tex_res == True:
matTextureSize[0] = matProperties.texture_res_w
matTextureSize[1] = matProperties.texture_res_h
else:
matTextureSize = textureImage.size
else:
matTextureSize = [1,1]
for vert, loop in zip(face.vertices, face.loop_indices):
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: Gather temporary Vertex data- For each poly, grab the coordinates, uv, and vertex color information. This will repeat 3 times per face/poly:
#---------------------------------------------------------------------------------------------------------
if vertCheck < 3:
if obj.obj_props.anim_Method == "VTX DL":
coord = mod_mesh.vertices[vert].co * obj.matrix_world
else:
coord = obj.data.vertices[vert].co * obj.matrix_world
uv = obj.data.uv_layers.active.data[loop].uv if obj.data.uv_layers.active else (0,0)
vcol = obj.data.vertex_colors.active.data[loop].color if obj.data.vertex_colors.active else (1,1,1,1)
#---------------------------------------------------------------------------------------------------------
#EXPORTER_NOTES: A second Vertex Color layer needs to be set to gather alpha information. Alpha can be denoted from Black to White.
# This needs to be set from 0-255 with 255 (white) being solid and 0(black) being transparent. Then store it as a single value by dividing the RGB total by 3.
#---------------------------------------------------------------------------------------------------------
if | |
<filename>warehouse/migrations/versions/283c68f2ab2_initial_migration.py<gh_stars>1000+
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Initial Migration
Revision ID: 283c68f2ab2
Revises: None
Create Date: 2015-02-01 14:07:10.983672
"""
import citext
import sqlalchemy as sa
from alembic import op
revision = "2<PASSWORD>"
down_revision = None
def upgrade():
op.execute("CREATE EXTENSION IF NOT EXISTS citext")
op.create_table(
"openid_discovered",
sa.Column("url", sa.Text(), nullable=False),
sa.Column("created", sa.DateTime(), nullable=True),
sa.Column("services", sa.LargeBinary(), nullable=True),
sa.Column("op_endpoint", sa.Text(), nullable=True),
sa.Column("op_local", sa.Text(), nullable=True),
sa.PrimaryKeyConstraint("url"),
)
op.create_table(
"accounts_user",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("password", sa.String(length=128), nullable=False),
sa.Column("last_login", sa.DateTime(), nullable=False),
sa.Column("is_superuser", sa.Boolean(), nullable=False),
sa.Column("username", citext.CIText(), nullable=False),
sa.Column("name", sa.String(length=100), nullable=False),
sa.Column("is_staff", sa.Boolean(), nullable=False),
sa.Column("is_active", sa.Boolean(), nullable=False),
sa.Column(
"date_joined", sa.DateTime(), server_default=sa.text("now()"), nullable=True
),
sa.CheckConstraint(
"username ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'",
name="accounts_user_valid_username",
),
sa.CheckConstraint("length(username) <= 50", name="packages_valid_name"),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("username"),
)
op.create_table(
"packages",
sa.Column("name", sa.Text(), nullable=False),
sa.Column("stable_version", sa.Text(), nullable=True),
sa.Column("normalized_name", sa.Text(), nullable=True),
sa.Column(
"autohide", sa.Boolean(), server_default=sa.text("true"), nullable=True
),
sa.Column(
"comments", sa.Boolean(), server_default=sa.text("true"), nullable=True
),
sa.Column("bugtrack_url", sa.Text(), nullable=True),
sa.Column(
"hosting_mode", sa.Text(), server_default="pypi-explicit", nullable=False
),
sa.Column(
"created", sa.DateTime(), server_default=sa.text("now()"), nullable=False
),
sa.CheckConstraint(
"name ~* '^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$'::text",
name="packages_valid_name",
),
sa.PrimaryKeyConstraint("name"),
)
op.create_table("dual", sa.Column("dummy", sa.Integer(), nullable=True))
op.create_table(
"cheesecake_main_indices",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("absolute", sa.Integer(), nullable=False),
sa.Column("relative", sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"trove_classifiers",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("classifier", sa.Text(), nullable=True),
sa.Column("l2", sa.Integer(), nullable=True),
sa.Column("l3", sa.Integer(), nullable=True),
sa.Column("l4", sa.Integer(), nullable=True),
sa.Column("l5", sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("classifier", name="trove_classifiers_classifier_key"),
)
op.create_index(
"trove_class_class_idx", "trove_classifiers", ["classifier"], unique=False
)
op.create_index("trove_class_id_idx", "trove_classifiers", ["id"], unique=False)
op.create_table(
"browse_tally",
sa.Column("trove_id", sa.Integer(), nullable=False),
sa.Column("tally", sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint("trove_id"),
)
op.create_table(
"timestamps",
sa.Column("name", sa.Text(), nullable=False),
sa.Column("value", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("name"),
)
op.create_table(
"oauth_nonce",
sa.Column("timestamp", sa.Integer(), nullable=False),
sa.Column("consumer", sa.String(length=32), nullable=False),
sa.Column("nonce", sa.String(length=32), nullable=False),
sa.Column("token", sa.String(length=32), nullable=True),
)
op.create_table(
"oid_associations",
sa.Column("server_url", sa.String(length=2047), nullable=False),
sa.Column("handle", sa.String(length=255), nullable=False),
sa.Column("secret", sa.LargeBinary(length=128), nullable=False),
sa.Column("issued", sa.Integer(), nullable=False),
sa.Column("lifetime", sa.Integer(), nullable=False),
sa.Column("assoc_type", sa.String(length=64), nullable=False),
sa.CheckConstraint("length(secret) <= 128", name="secret_length_constraint"),
sa.PrimaryKeyConstraint("server_url", "handle"),
)
op.create_table(
"oid_nonces",
sa.Column("server_url", sa.String(length=2047), nullable=False),
sa.Column("timestamp", sa.Integer(), nullable=False),
sa.Column("salt", sa.String(length=40), nullable=False),
sa.PrimaryKeyConstraint("server_url", "timestamp", "salt"),
)
op.create_table(
"openid_sessions",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("url", sa.Text(), nullable=True),
sa.Column("assoc_handle", sa.Text(), nullable=True),
sa.Column("expires", sa.DateTime(), nullable=True),
sa.Column("mac_key", sa.Text(), nullable=True),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"openid_nonces",
sa.Column("created", sa.DateTime(), nullable=True),
sa.Column("nonce", sa.Text(), nullable=True),
)
op.create_index("openid_nonces_created", "openid_nonces", ["created"], unique=False)
op.create_index("openid_nonces_nonce", "openid_nonces", ["nonce"], unique=False)
op.create_table(
"file_registry",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("filename", sa.Text(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("filename", name="file_registry_filename_key"),
)
op.create_table(
"openid_whitelist",
sa.Column("name", sa.Text(), nullable=False),
sa.Column("trust_root", sa.Text(), nullable=False),
sa.Column("created", sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint("name", "trust_root"),
)
op.create_table(
"releases",
sa.Column("name", sa.Text(), nullable=False),
sa.Column("version", sa.Text(), nullable=False),
sa.Column("author", sa.Text(), nullable=True),
sa.Column("author_email", sa.Text(), nullable=True),
sa.Column("maintainer", sa.Text(), nullable=True),
sa.Column("maintainer_email", sa.Text(), nullable=True),
sa.Column("home_page", sa.Text(), nullable=True),
sa.Column("license", sa.Text(), nullable=True),
sa.Column("summary", sa.Text(), nullable=True),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("keywords", sa.Text(), nullable=True),
sa.Column("platform", sa.Text(), nullable=True),
sa.Column("download_url", sa.Text(), nullable=True),
sa.Column("_pypi_ordering", sa.Integer(), nullable=True),
sa.Column("_pypi_hidden", sa.Boolean(), nullable=True),
sa.Column("description_html", sa.Text(), nullable=True),
sa.Column("cheesecake_installability_id", sa.Integer(), nullable=True),
sa.Column("cheesecake_documentation_id", sa.Integer(), nullable=True),
sa.Column("cheesecake_code_kwalitee_id", sa.Integer(), nullable=True),
sa.Column("requires_python", sa.Text(), nullable=True),
sa.Column("description_from_readme", sa.Boolean(), nullable=True),
sa.Column(
"created", sa.DateTime(), server_default=sa.text("now()"), nullable=False
),
sa.ForeignKeyConstraint(
["cheesecake_code_kwalitee_id"], ["cheesecake_main_indices.id"]
),
sa.ForeignKeyConstraint(
["cheesecake_documentation_id"], ["cheesecake_main_indices.id"]
),
sa.ForeignKeyConstraint(
["cheesecake_installability_id"], ["cheesecake_main_indices.id"]
),
sa.ForeignKeyConstraint(["name"], ["packages.name"], onupdate="CASCADE"),
sa.PrimaryKeyConstraint("name", "version"),
)
op.create_index(
"release_name_created_idx",
"releases",
["name", sa.text("created DESC")],
unique=False,
)
op.create_index("release_name_idx", "releases", ["name"], unique=False)
op.create_index(
"release_pypi_hidden_idx", "releases", ["_pypi_hidden"], unique=False
)
op.create_index("release_version_idx", "releases", ["version"], unique=False)
op.create_table(
"mirrors",
sa.Column("ip", sa.Text(), nullable=False),
sa.Column("user_name", citext.CIText(), nullable=True),
sa.Column("index_url", sa.Text(), nullable=True),
sa.Column("last_modified_url", sa.Text(), nullable=True),
sa.Column("local_stats_url", sa.Text(), nullable=True),
sa.Column("stats_url", sa.Text(), nullable=True),
sa.Column("mirrors_url", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(["user_name"], ["accounts_user.username"]),
sa.PrimaryKeyConstraint("ip"),
)
op.create_table(
"oauth_consumers",
sa.Column("consumer", sa.String(length=32), nullable=False),
sa.Column("secret", sa.String(length=64), nullable=False),
sa.Column("date_created", sa.Date(), nullable=False),
sa.Column("created_by", citext.CIText(), nullable=True),
sa.Column("last_modified", sa.Date(), nullable=False),
sa.Column("description", sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(
["created_by"], ["accounts_user.username"], onupdate="CASCADE"
),
sa.PrimaryKeyConstraint("consumer"),
)
op.create_table(
"accounts_email",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=False),
sa.Column("email", sa.String(length=254), nullable=False),
sa.Column("primary", sa.Boolean(), nullable=False),
sa.Column("verified", sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(
["user_id"], ["accounts_user.id"], initially="DEFERRED", deferrable=True
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("email", name="accounts_email_email_key"),
)
op.create_index(
"accounts_email_email_like", "accounts_email", ["email"], unique=False
)
op.create_index(
"accounts_email_user_id", "accounts_email", ["user_id"], unique=False
)
op.create_table(
"oauth_access_tokens",
sa.Column("token", sa.String(length=32), nullable=False),
sa.Column("secret", sa.String(length=64), nullable=False),
sa.Column("consumer", sa.String(length=32), nullable=False),
sa.Column("date_created", sa.Date(), nullable=False),
sa.Column("last_modified", sa.Date(), nullable=False),
sa.Column("user_name", citext.CIText(), nullable=True),
sa.ForeignKeyConstraint(
["user_name"],
["accounts_user.username"],
onupdate="CASCADE",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("token"),
)
op.create_table(
"csrf_tokens",
sa.Column("name", citext.CIText(), nullable=False),
sa.Column("token", sa.Text(), nullable=True),
sa.Column("end_date", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["name"], ["accounts_user.username"], onupdate="CASCADE", ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("name"),
)
op.create_table(
"oauth_request_tokens",
sa.Column("token", sa.String(length=32), nullable=False),
sa.Column("secret", sa.String(length=64), nullable=False),
sa.Column("consumer", sa.String(length=32), nullable=False),
sa.Column("callback", sa.Text(), nullable=True),
sa.Column("date_created", sa.Date(), nullable=False),
sa.Column("user_name", citext.CIText(), nullable=True),
sa.ForeignKeyConstraint(
["user_name"],
["accounts_user.username"],
onupdate="CASCADE",
ondelete="CASCADE",
),
sa.PrimaryKeyConstraint("token"),
)
op.create_table(
"cookies",
sa.Column("cookie", sa.Text(), nullable=False),
sa.Column("name", citext.CIText(), nullable=True),
sa.Column("last_seen", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["name"], ["accounts_user.username"], onupdate="CASCADE", ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("cookie"),
)
op.create_index("cookies_last_seen", "cookies", ["last_seen"], unique=False)
op.create_table(
"openids",
sa.Column("id", sa.Text(), nullable=False),
sa.Column("name", citext.CIText(), nullable=True),
sa.ForeignKeyConstraint(
["name"], ["accounts_user.username"], onupdate="CASCADE", ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("id"),
)
op.create_table(
"sshkeys",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", citext.CIText(), nullable=True),
sa.Column("key", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(
["name"], ["accounts_user.username"], onupdate="CASCADE", ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index("sshkeys_name", "sshkeys", ["name"], unique=False)
op.create_table(
"rego_otk",
sa.Column("name", citext.CIText(), nullable=True),
sa.Column("otk", sa.Text(), nullable=True),
sa.Column("date", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["name"], ["accounts_user.username"], ondelete="CASCADE"
),
sa.UniqueConstraint("otk", name="rego_otk_unique"),
)
op.create_index("rego_otk_name_idx", "rego_otk", ["name"], unique=False)
op.create_index("rego_otk_otk_idx", "rego_otk", ["otk"], unique=False)
op.create_table(
"cheesecake_subindices",
sa.Column("main_index_id", sa.Integer(), nullable=False),
sa.Column("name", sa.Text(), nullable=False),
sa.Column("value", sa.Integer(), nullable=False),
sa.Column("details", sa.Text(), nullable=False),
sa.ForeignKeyConstraint(["main_index_id"], ["cheesecake_main_indices.id"]),
sa.PrimaryKeyConstraint("main_index_id", "name"),
)
op.create_table(
"accounts_gpgkey",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("user_id", sa.Integer(), nullable=False),
sa.Column("key_id", citext.CIText(), nullable=False),
sa.Column("verified", sa.Boolean(), nullable=False),
sa.CheckConstraint(
"key_id ~* '^[A-F0-9]{8}$'::citext", name="accounts_gpgkey_valid_key_id"
),
sa.ForeignKeyConstraint(
["user_id"], ["accounts_user.id"], initially="DEFERRED", deferrable=True
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("key_id", name="accounts_gpgkey_key_id_key"),
)
op.create_index(
"accounts_gpgkey_user_id", "accounts_gpgkey", ["user_id"], unique=False
)
op.create_table(
"roles",
sa.Column("role_name", sa.Text(), nullable=True),
sa.Column("user_name", citext.CIText(), nullable=True),
sa.Column("package_name", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(
["package_name"], ["packages.name"], onupdate="CASCADE"
),
sa.ForeignKeyConstraint(
["user_name"], ["accounts_user.username"], onupdate="CASCADE"
),
)
op.create_index("roles_pack_name_idx", "roles", ["package_name"], unique=False)
op.create_index("roles_user_name_idx", "roles", ["user_name"], unique=False)
op.create_table(
"journals",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.Text(), nullable=True),
sa.Column("version", sa.Text(), nullable=True),
sa.Column("action", sa.Text(), nullable=True),
sa.Column("submitted_date", sa.DateTime(), nullable=True),
sa.Column("submitted_by", citext.CIText(), nullable=True),
sa.Column("submitted_from", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(
["submitted_by"], ["accounts_user.username"], onupdate="CASCADE"
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"journals_changelog",
"journals",
["submitted_date", "name", "version", "action"],
unique=False,
)
op.create_index("journals_id_idx", "journals", ["id"], unique=False)
op.create_index(
"journals_latest_releases",
"journals",
["submitted_date", "name", "version"],
unique=False,
postgresql_where=sa.text(
"journals.version IS NOT NULL AND journals.action = 'new release'"
),
)
op.create_index("journals_name_idx", "journals", ["name"], unique=False)
op.create_index("journals_version_idx", "journals", ["version"], unique=False)
op.create_table(
"ratings",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.Text(), nullable=False),
sa.Column("version", sa.Text(), nullable=False),
sa.Column("user_name", citext.CIText(), nullable=False),
sa.Column("date", sa.DateTime(), nullable=True),
sa.Column("rating", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["user_name"], ["accounts_user.username"], ondelete="CASCADE"
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name", "version", "user_name", name="ratings_name_key"),
)
op.create_index("rating_name_version", "ratings", ["name", "version"], unique=False)
op.create_table(
"release_classifiers",
sa.Column("name", sa.Text(), nullable=True),
sa.Column("version", sa.Text(), nullable=True),
sa.Column("trove_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
sa.ForeignKeyConstraint(["trove_id"], ["trove_classifiers.id"]),
)
op.create_index("rel_class_name_idx", "release_classifiers", ["name"], unique=False)
op.create_index(
"rel_class_name_version_idx",
"release_classifiers",
["name", "version"],
unique=False,
)
op.create_index(
"rel_class_trove_id_idx", "release_classifiers", ["trove_id"], unique=False
)
op.create_index(
"rel_class_version_id_idx", "release_classifiers", ["version"], unique=False
)
op.create_table(
"release_urls",
sa.Column("name", sa.Text(), nullable=True),
sa.Column("version", sa.Text(), nullable=True),
sa.Column("url", sa.Text(), nullable=True),
sa.Column("packagetype", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
)
op.create_index("release_urls_name_idx", "release_urls", ["name"], unique=False)
op.create_index(
"release_urls_packagetype_idx", "release_urls", ["packagetype"], unique=False
)
op.create_index(
"release_urls_version_idx", "release_urls", ["version"], unique=False
)
op.create_table(
"release_dependencies",
sa.Column("name", sa.Text(), nullable=True),
sa.Column("version", sa.Text(), nullable=True),
sa.Column("kind", sa.Integer(), nullable=True),
sa.Column("specifier", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
)
op.create_index("rel_dep_name_idx", "release_dependencies", ["name"], unique=False)
op.create_index(
"rel_dep_name_version_idx",
"release_dependencies",
["name", "version"],
unique=False,
)
op.create_index(
"rel_dep_name_version_kind_idx",
"release_dependencies",
["name", "version", "kind"],
unique=False,
)
op.create_table(
"comments_journal",
sa.Column("name", sa.Text(), nullable=True),
sa.Column("version", sa.Text(), nullable=True),
sa.Column("id", sa.Integer(), nullable=True),
sa.Column("submitted_by", citext.CIText(), nullable=True),
sa.Column("date", sa.DateTime(), nullable=True),
sa.Column("action", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
ondelete="CASCADE",
),
sa.ForeignKeyConstraint(
["submitted_by"], ["accounts_user.username"], ondelete="CASCADE"
),
)
op.create_table(
"release_files",
sa.Column("name", sa.Text(), nullable=True),
sa.Column("version", sa.Text(), nullable=True),
sa.Column("python_version", sa.Text(), nullable=True),
sa.Column("packagetype", sa.Text(), nullable=True),
sa.Column("comment_text", sa.Text(), nullable=True),
sa.Column("filename", sa.Text(), nullable=True),
sa.Column("md5_digest", sa.Text(), nullable=True),
sa.Column(
"downloads", sa.Integer(), server_default=sa.text("0"), nullable=True
),
sa.Column("upload_time", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
sa.UniqueConstraint("filename", name="release_files_filename_key"),
sa.UniqueConstraint("md5_digest", name="release_files_md5_digest_key"),
)
op.create_index("release_files_name_idx", "release_files", ["name"], unique=False)
op.create_index(
"release_files_name_version_idx",
"release_files",
["name", "version"],
unique=False,
)
op.create_index(
"release_files_packagetype_idx", "release_files", ["packagetype"], unique=False
)
op.create_index(
"release_files_version_idx", "release_files", ["version"], unique=False
)
op.create_table(
"release_requires_python",
sa.Column("name", sa.Text(), nullable=True),
sa.Column("version", sa.Text(), nullable=True),
sa.Column("specifier", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
)
op.create_index(
"rel_req_python_name_idx", "release_requires_python", ["name"], unique=False
)
op.create_index(
"rel_req_python_name_version_idx",
"release_requires_python",
["name", "version"],
unique=False,
)
op.create_index(
"rel_req_python_version_id_idx",
"release_requires_python",
["version"],
unique=False,
)
op.create_table(
"description_urls",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("name", sa.Text(), nullable=True),
sa.Column("version", sa.Text(), nullable=True),
sa.Column("url", sa.Text(), nullable=True),
sa.ForeignKeyConstraint(
["name", "version"],
["releases.name", "releases.version"],
onupdate="CASCADE",
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(
"description_urls_name_idx", "description_urls", ["name"], unique=False
)
op.create_index(
"description_urls_name_version_idx",
"description_urls",
["name", "version"],
unique=False,
)
op.create_table(
"comments",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("rating", sa.Integer(), nullable=True),
sa.Column("user_name", citext.CIText(), nullable=True),
| |
<gh_stars>0
import re
from pathlib import Path
from typing import Optional
import pytest
from eth_utils import is_checksum_address
from ethpm_types import ContractType
from hexbytes import HexBytes
from ape import Contract
from ape.api import Address, ReceiptAPI
from ape.exceptions import DecodingError
from ape.types import ContractLog
SOLIDITY_CONTRACT_ADDRESS = "0xBcF7FFFD8B256Ec51a36782a52D0c34f6474D951"
VYPER_CONTRACT_ADDRESS = "0x274b028b03A250cA03644E6c578D81f019eE1323"
MATCH_TEST_CONTRACT = re.compile(r"<TestContract((Sol)|(Vy))")
@pytest.fixture
def assert_log_values(owner, chain):
    """Fixture returning a checker that validates a NumberChange log's fields."""

    def _check(log: ContractLog, number: int, previous_number: Optional[int] = None):
        # The event's 'person' field should be the checksummed owner address.
        assert log.person == owner
        assert is_checksum_address(log.person)
        assert isinstance(log.b, HexBytes)

        # Unless told otherwise, the previous number is simply one less.
        if previous_number is None:
            expected_previous_number = number - 1
        else:
            expected_previous_number = previous_number

        assert log.prevNum == expected_previous_number, "Event param 'prevNum' has unexpected value"
        assert log.newNum == number, "Event param 'newNum' has unexpected value"

    return _check
def test_init_at_unknown_address():
    """With no cached contract type, ``Contract`` degrades to a plain ``Address``."""
    result = Contract(SOLIDITY_CONTRACT_ADDRESS)

    # No contract type is known for this address, so exactly an Address comes back.
    assert type(result) == Address
    assert result.address == SOLIDITY_CONTRACT_ADDRESS
def test_init_specify_contract_type(
    solidity_contract_instance, vyper_contract_type, owner, networks_connected_to_tester
):
    """An explicitly-supplied contract type overrides whatever is cached for the address."""
    # The Vyper contract type is very close to Solidity's; attaching it to the
    # Solidity instance's address just demonstrates external specification works.
    instance = Contract(solidity_contract_instance.address, contract_type=vyper_contract_type)
    assert instance.address == solidity_contract_instance.address
    assert instance.contract_type == vyper_contract_type

    # The externally-specified type still supports transactions and calls.
    assert instance.setNumber(2, sender=owner)
    assert instance.myNumber() == 2
def test_deploy(
    sender, contract_container, networks_connected_to_tester, project, chain, clean_contracts_cache
):
    """Deploying caches the contract type so it can be reloaded by address alone."""
    # Unknown keyword arguments to deploy() are silently ignored.
    deployed = contract_container.deploy(sender=sender, something_else="IGNORED")
    assert deployed.address in (SOLIDITY_CONTRACT_ADDRESS, VYPER_CONTRACT_ADDRESS)

    # Verify the same contract can be reloaded from the cache.
    cached = Contract(deployed.address)
    assert cached.contract_type == deployed.contract_type
    assert cached.address == deployed.address

    # Clean up for the next test.
    del chain.contracts._local_contracts[cached.address]
def test_repr(contract_instance):
    """Contract, method, and event reprs embed their addresses and signatures."""
    # The instance repr includes the contract's address.
    assert re.match(
        rf"<TestContract((Sol)|(Vy)) {contract_instance.address}>", repr(contract_instance)
    )

    # Mutable and view method reprs show their full ABI signatures.
    assert repr(contract_instance.setNumber) == "setNumber(uint256 num)"
    assert repr(contract_instance.myNumber) == "myNumber() -> uint256"

    # The event repr lists every parameter, including the indexed flag.
    expected_event_repr = (
        "NumberChange(address person, bytes32 b, uint256 prevNum, uint256 indexed newNum)"
    )
    assert repr(contract_instance.NumberChange) == expected_event_repr
def test_contract_logs_from_receipts(owner, contract_instance, assert_log_values):
    """Logs decode identically via the event type and via the receipt itself.

    Fixes: replaces redundant ``[log for log in ...]`` copies with ``list(...)``
    (ruff PERF402) and repairs a garbled comment.
    """
    event_type = contract_instance.NumberChange

    # Invoke a transaction 3 times that generates 3 logs.
    receipt_0 = contract_instance.setNumber(1, sender=owner)
    receipt_1 = contract_instance.setNumber(2, sender=owner)
    receipt_2 = contract_instance.setNumber(3, sender=owner)

    def assert_receipt_logs(receipt: ReceiptAPI, num: int):
        # Decode through the event type's ``from_receipt`` helper.
        logs = list(event_type.from_receipt(receipt))
        assert len(logs) == 1
        assert_log_values(logs[0], num)

        # Also verify we can decode the logs the other way, via the receipt.
        logs = list(receipt.decode_logs(event_type))
        assert len(logs) == 1
        assert_log_values(logs[0], num)

    assert_receipt_logs(receipt_0, 1)
    assert_receipt_logs(receipt_1, 2)
    assert_receipt_logs(receipt_2, 3)
def test_contract_logs_from_event_type(contract_instance, owner, assert_log_values):
    """Iterating the event type directly yields every historical log in order."""
    event_type = contract_instance.NumberChange

    # Emit three NumberChange events.
    for value in (1, 2, 3):
        contract_instance.setNumber(value, sender=owner)

    logs = [log for log in event_type]
    assert len(logs) == 3, "Unexpected number of logs"

    # Logs come back in emission order.
    for position, expected in enumerate((1, 2, 3)):
        assert_log_values(logs[position], expected)
def test_contract_logs_index_access(contract_instance, owner, assert_log_values):
    """The event type supports both positive and negative log indexing."""
    event_type = contract_instance.NumberChange

    # Emit three NumberChange events.
    for value in (1, 2, 3):
        contract_instance.setNumber(value, sender=owner)

    # Positive indices count forward from the first emitted log.
    for position in range(3):
        assert_log_values(event_type[position], position + 1)

    # Negative indices count back from the most recent log.
    for position in range(-3, 0):
        assert_log_values(event_type[position], position + 4)
def test_contract_logs_splicing(contract_instance, owner, assert_log_values):
    """Slicing the event type returns the expected subsets of logs."""
    event_type = contract_instance.NumberChange

    # Emit three NumberChange events.
    for value in (1, 2, 3):
        contract_instance.setNumber(value, sender=owner)

    # A slice up to index 2 yields the first two logs.
    head = event_type[:2]
    assert len(head) == 2
    assert_log_values(head[0], 1)
    assert_log_values(head[1], 2)

    # A slice from index 2 yields only the last log.
    tail = event_type[2:]
    assert len(tail) == 1
    assert_log_values(tail[0], 3)

    # Plain single-item access still works alongside slicing.
    middle = event_type[1]
    assert_log_values(middle, 2)
def test_contract_logs_range(contract_instance, owner, assert_log_values):
    """``range`` can filter logs by an indexed event parameter."""
    contract_instance.setNumber(1, sender=owner)

    # 'newNum' is indexed, so it is a valid range filter.
    logs = [
        log for log in contract_instance.NumberChange.range(100, event_parameters={"newNum": 1})
    ]
    assert len(logs) == 1, "Unexpected number of logs"
    assert_log_values(logs[0], 1)
def test_contract_logs_range_start_and_stop(contract_instance, owner, chain):
    """``range`` honors an explicit start block and stop block."""
    # Create one event before recording the start block.
    contract_instance.setNumber(1, sender=owner)
    start_block = chain.blocks.height

    # Two more events land at or after the start block.
    contract_instance.setNumber(2, sender=owner)
    contract_instance.setNumber(3, sender=owner)

    # The stop block may exceed the current chain height; that is harmless.
    stop = 30
    logs = [log for log in contract_instance.NumberChange.range(start_block, stop=stop)]
    assert len(logs) == 3, "Unexpected number of logs"
def test_contract_logs_range_only_stop(contract_instance, owner, chain):
    """``range`` with only a stop value scans from the beginning of the chain."""
    # Emit three NumberChange events.
    for value in (1, 2, 3):
        contract_instance.setNumber(value, sender=owner)

    # The stop block may exceed the current chain height; that is harmless.
    stop = 100
    logs = [log for log in contract_instance.NumberChange.range(stop)]
    assert len(logs) == 3, "Unexpected number of logs"
def test_contract_logs_range_with_paging(contract_instance, owner, chain, assert_log_values):
    """Paging one block at a time still finds every log, even across empty blocks."""
    # One log in each of the first 3 blocks.
    for i in range(3):
        contract_instance.setNumber(i + 1, sender=owner)

    # Mine a few empty blocks to ensure paging handles uneventful spans.
    for _ in range(3):
        chain.mine()

    # One final log after the gap of empty blocks.
    contract_instance.setNumber(100, sender=owner)

    logs = [log for log in contract_instance.NumberChange.range(100, block_page_size=1)]
    assert len(logs) == 4, "Unexpected number of logs"
    for position, expected in enumerate((1, 2, 3)):
        assert_log_values(logs[position], expected)
    assert_log_values(logs[3], 100, previous_number=3)
def test_contract_logs_range_over_paging(contract_instance, owner, chain):
    # One log in each of the first 3 blocks.
    for value in range(1, 4):
        contract_instance.setNumber(value, sender=owner)
    # A page size far larger than the number of logs must still work.
    events = list(contract_instance.NumberChange.range(100, block_page_size=50))
    assert len(events) == 3, "Unexpected number of logs"
def test_contract_logs_from_non_indexed_range(contract_instance, owner):
    contract_instance.setNumber(1, sender=owner)
    # Filtering on a parameter that is not indexed cannot be decoded.
    with pytest.raises(DecodingError):
        _ = list(
            contract_instance.NumberChange.range(0, event_parameters={"prevNum": 1})
        )
def test_structs(contract_instance, sender, chain):
    struct = contract_instance.getStruct()
    member_a, member_b = struct
    # ``a`` holds msg.sender; attribute, key, index and unpacking views agree.
    assert struct.a == struct["a"] == struct[0] == member_a == sender
    assert is_checksum_address(struct.a)
    # ``b`` holds block.prevhash.
    assert struct.b == struct["b"] == struct[1] == member_b == chain.blocks[-2].hash
    assert type(struct.b) == HexBytes
def test_nested_structs(contract_instance, sender, chain):
    # Both getters return a struct whose ``t`` member nests another struct;
    # ``foo`` distinguishes them (1 and 2 respectively).
    results = [
        contract_instance.getNestedStruct1(),
        contract_instance.getNestedStruct2(),
    ]
    for expected_foo, nested in zip((1, 2), results):
        inner_sender, inner_prev_block = nested.t
        # ``t.a`` holds msg.sender, reachable through every access style.
        assert nested.t.a == nested.t["a"] == nested.t[0] == inner_sender == sender
        assert is_checksum_address(nested.t.a)
        assert is_checksum_address(inner_sender)
        assert nested.foo == expected_foo
        # ``t.b`` holds block.prevhash.
        assert (
            nested.t.b
            == nested.t["b"]
            == nested.t[1]
            == inner_prev_block
            == chain.blocks[-2].hash
        )
        assert type(nested.t.b) == HexBytes
def test_nested_structs_in_tuples(contract_instance, sender, chain):
    # First getter returns (struct, int).
    struct_1, extra_1 = contract_instance.getNestedStructWithTuple1()
    assert extra_1 == 1
    assert struct_1.foo == 1
    assert struct_1.t.a == sender
    assert is_checksum_address(struct_1.t.a)
    # Second getter flips the order: (int, struct).
    extra_2, struct_2 = contract_instance.getNestedStructWithTuple2()
    assert extra_2 == 2
    assert struct_2.foo == 2
    assert struct_2.t.a == sender
    assert is_checksum_address(struct_2.t.a)
def test_vyper_structs_with_array(vyper_contract_instance, sender):
    # NOTE: Vyper struct arrays <=0.3.3 don't include struct info
    struct = vyper_contract_instance.getStructWithArray()
    assert struct.foo == 1
    assert struct.bar == 2
    assert len(struct.arr) == 2
def test_solidity_structs_with_array(solidity_contract_instance, sender):
    struct = solidity_contract_instance.getStructWithArray()
    assert struct.foo == 1
    assert struct.bar == 2
    assert len(struct.arr) == 2, "Unexpected array length"
    # Solidity keeps nested struct info, so array members are addressable.
    assert struct.arr[0].a == sender
    assert is_checksum_address(struct.arr[0].a)
def test_arrays(contract_instance, sender):
    # Each getter returns a fixed list defined by the test contract.
    expectations = [
        (contract_instance.getEmptyList, []),
        (contract_instance.getSingleItemList, [1]),
        (contract_instance.getFilledList, [1, 2, 3]),
    ]
    for getter, expected in expectations:
        assert getter() == expected
def test_address_arrays(contract_instance, sender):
    addresses = contract_instance.getAddressList()
    assert addresses == [sender, sender]
    # Every returned address must be checksummed.
    for entry in addresses:
        assert is_checksum_address(entry)
def test_contract_instance_as_address_input(contract_instance, sender):
    # The contract instance itself is usable wherever an address is expected.
    contract_instance.setAddress(contract_instance, sender=sender)
    assert contract_instance.theAddress() == contract_instance
def test_account_as_address_input(contract_instance, sender):
    # An account object is usable wherever an address is expected.
    contract_instance.setAddress(sender, sender=sender)
    assert contract_instance.theAddress() == sender
def test_vyper_struct_arrays(vyper_contract_instance, sender):
    # NOTE: Vyper struct arrays <=0.3.3 don't include struct info
    dynamic_items = vyper_contract_instance.getDynamicStructList()
    assert len(dynamic_items) == 2
    for position, item in enumerate(dynamic_items):
        # Layout: (inner struct holding the sender, counter).
        assert item[0][0] == sender
        assert is_checksum_address(item[0][0])
        assert item[1] == position + 1

    static_items = vyper_contract_instance.getStaticStructList()
    assert len(static_items) == 2
    for position, item in enumerate(static_items):
        # Layout: (counter, inner struct holding the sender).
        assert item[0] == position + 1
        assert item[1][0] == sender
        assert is_checksum_address(item[1][0])
def test_solidity_dynamic_struct_arrays(solidity_contract_instance, sender):
    # Call twice: repeated calls must keep returning the same result.
    for _ in range(2):
        items = solidity_contract_instance.getDynamicStructList()
        assert len(items) == 2
        for position, item in enumerate(items):
            assert item.foo == position + 1
            assert item.t.a == sender
            assert is_checksum_address(item.t.a)
def test_solidity_static_struct_arrays(solidity_contract_instance, sender):
    # Call twice: repeated calls must keep returning the same result.
    for _ in range(2):
        items = solidity_contract_instance.getStaticStructList()
        assert len(items) == 2
        for position, item in enumerate(items):
            assert item.foo == position + 1
            assert item.t.a == sender
            assert is_checksum_address(item.t.a)
def test_solidity_named_tuple(solidity_contract_instance):
    # A single named return decays to a bare value.
    single = solidity_contract_instance.getNamedSingleItem()
    assert single == 123
    # A fully named tuple supports both positional and attribute access.
    both_named = solidity_contract_instance.getTupleAllNamed()
    assert both_named == (123, 321)
    assert both_named.foo == 123
    assert both_named.bar == 321
    # A partially named tuple still compares equal positionally.
    partial = solidity_contract_instance.getPartiallyNamedTuple()
    assert partial == (123, 321)
def test_vyper_named_tuple(vyper_contract_instance):
    # Multiple return values surface as a tuple.
    values = vyper_contract_instance.getMultipleValues()
    assert values == (123, 321)
def test_call_transaction(contract_instance, owner, chain):
    # Using ``.call`` must not submit a transaction, so no block is mined.
    block_before = chain.blocks[-1]
    contract_instance.setNumber.call(1, sender=owner)
    assert chain.blocks[-1] == block_before
def test_contract_two_events_with_same_name(owner, networks_connected_to_tester):
provider = networks_connected_to_tester
base_path = Path(__file__).parent / "data" / "contracts" / "ethereum" / "local"
interface_path = base_path / "Interface.json"
impl_path = base_path / "InterfaceImplementation.json"
interface_contract_type = ContractType.parse_raw(interface_path.read_text())
impl_contract_type = ContractType.parse_raw(impl_path.read_text())
event_name = "FooEvent"
# Ensure test is setup correctly in case scenario-data changed on accident
assert len([e for e in impl_contract_type.events if e.name == event_name]) == 2
assert len([e for e in interface_contract_type.events if e.name == event_name]) == 1
impl_container = provider.create_contract_container(impl_contract_type)
impl_instance = owner.deploy(impl_container)
with pytest.raises(AttributeError) as err:
_ = impl_instance.FooEvent
expected_err_prefix = f"Multiple events named '{event_name}'"
assert expected_err_prefix in str(err.value)
expected_sig_from_impl | |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
from psychopy import locale_setup, sound, gui, visual, core, data, event, logging
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
# Ensure that relative paths start from the same directory as this script
# NOTE(review): ``str.decode`` only exists on Python 2 — this Builder-generated
# script targets python2 (see shebang); it will not run unmodified on 3.x.
_thisDir = os.path.dirname(os.path.abspath(__file__)).decode(sys.getfilesystemencoding())
os.chdir(_thisDir)

# Store info about the experiment session
expName = 'stroop'  # from the Builder filename that created this script
expInfo = {u'session': u'001', u'participant': u''}
# Prompt the experimenter for the session info before anything starts.
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False:
    core.quit()  # user pressed cancel
expInfo['date'] = data.getDateStr()  # add a simple timestamp
expInfo['expName'] = expName

# Data file name stem = absolute path + name; later add .psyexp, .csv, .log, etc
filename = _thisDir + os.sep + u'data/%s_%s_%s' % (expInfo['participant'], expName, expInfo['date'])

# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    originPath=u'/home/moji/Downloads/stroopB/groupB.psyexp',
    savePickle=True, saveWideText=True,
    dataFileName=filename)
# save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING)  # this outputs to the screen, not a file

endExpNow = False  # flag for 'escape' or other condition => quit the exp

# Start Code - component code to be run before the window creation

# Setup the Window
win = visual.Window(
    size=(1920, 1080), fullscr=True, screen=0,
    allowGUI=False, allowStencil=False,
    monitor='testMonitor', color=[0,0,0], colorSpace='rgb',
    blendMode='avg', useFBO=True)
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] != None:
    frameDur = 1.0 / round(expInfo['frameRate'])
else:
    frameDur = 1.0 / 60.0  # could not measure, so guess
# Initialize components for Routine "instructions"
instructionsClock = core.Clock()
instrText = visual.TextStim(win=win, name='instrText',
    text='Welcome to Stroop Test\nRemember, choose the Color of the text, ignoring the word.\n\nInstruction:\nr = red\nb = blue\ng = green\ny = yellow\n\nPress any key to continue ...',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0);

# Initialize components for Routine "trial"
trialClock = core.Clock()
# The text and colour of ``targe`` are replaced per trial from the
# conditions file (see the trial loop below).
targe = visual.TextStim(win=win, name='targe',
    text='default text',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color=1.0, colorSpace='rgb', opacity=1,
    depth=0.0);

# Initialize components for Routine "thanks"
thanksClock = core.Clock()
text = visual.TextStim(win=win, name='text',
    text='Thank you for participat this experminet\n\n(you have been in Group B)\n\nFall 2017 ES-Aarhus\n\nPress Enter',
    font='Arial',
    pos=(0, 0), height=0.1, wrapWidth=None, ori=0,
    color='white', colorSpace='rgb', opacity=1,
    depth=0.0);

# Create some handy timers
globalClock = core.Clock()  # to track the time since experiment started
routineTimer = core.CountdownTimer()  # to track time remaining of each (non-slip) routine
# ------Prepare to start Routine "instructions"-------
# Shows the welcome/instructions text until any key is pressed.
t = 0
instructionsClock.reset()  # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
instraction = event.BuilderKeyResponse()
# keep track of which components have finished
instructionsComponents = [instrText, instraction]
for thisComponent in instructionsComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED

# -------Start Routine "instructions"-------
while continueRoutine:
    # get current time
    t = instructionsClock.getTime()
    frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
    # update/draw components on each frame

    # *instrText* updates
    if t >= 0.0 and instrText.status == NOT_STARTED:
        # keep track of start time/frame for later
        instrText.tStart = t
        instrText.frameNStart = frameN  # exact frame index
        instrText.setAutoDraw(True)

    # *instraction* updates
    if t >= 0.0 and instraction.status == NOT_STARTED:
        # keep track of start time/frame for later
        instraction.tStart = t
        instraction.frameNStart = frameN  # exact frame index
        instraction.status = STARTED
        # keyboard checking is just starting
    if instraction.status == STARTED:
        theseKeys = event.getKeys()

        # check for quit:
        if "escape" in theseKeys:
            endExpNow = True
        if len(theseKeys) > 0:  # at least one key was pressed
            # a response ends the routine
            continueRoutine = False

    # check if all components have finished
    if not continueRoutine:  # a component has requested a forced-end of Routine
        break
    continueRoutine = False  # will revert to True if at least one component still running
    for thisComponent in instructionsComponents:
        if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
            continueRoutine = True
            break  # at least one component has not yet finished

    # check for quit (the Esc key)
    if endExpNow or event.getKeys(keyList=["escape"]):
        core.quit()

    # refresh the screen
    if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
        win.flip()

# -------Ending Routine "instructions"-------
for thisComponent in instructionsComponents:
    if hasattr(thisComponent, "setAutoDraw"):
        thisComponent.setAutoDraw(False)
# the Routine "instructions" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
# set up handler to look after randomisation of conditions etc
trials = data.TrialHandler(nReps=1, method='sequential',
    extraInfo=expInfo, originPath=-1,
    trialList=data.importConditions('groupBIF.xlsx'),
    seed=None, name='trials')
thisExp.addLoop(trials)  # add the loop to the experiment
thisTrial = trials.trialList[0]  # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
# NOTE(review): Builder-generated ``exec`` injects spreadsheet columns
# (presumably ``colour``, ``word``, ``corrAns`` used below) as variables —
# do not feed this an untrusted conditions file.
if thisTrial != None:
    for paramName in thisTrial.keys():
        exec(paramName + '= thisTrial.' + paramName)

for thisTrial in trials:
    currentLoop = trials
    # abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
    if thisTrial != None:
        for paramName in thisTrial.keys():
            exec(paramName + '= thisTrial.' + paramName)

    # ------Prepare to start Routine "trial"-------
    t = 0
    trialClock.reset()  # clock
    frameN = -1
    continueRoutine = True
    # update component parameters for each repeat
    targe.setColor(colour, colorSpace='rgb')
    targe.setText(word)
    response = event.BuilderKeyResponse()
    # keep track of which components have finished
    trialComponents = [targe, response]
    for thisComponent in trialComponents:
        if hasattr(thisComponent, 'status'):
            thisComponent.status = NOT_STARTED

    # -------Start Routine "trial"-------
    while continueRoutine:
        # get current time
        t = trialClock.getTime()
        frameN = frameN + 1  # number of completed frames (so 0 is the first frame)
        # update/draw components on each frame

        # *targe* updates: stimulus appears at t=0.5s and lasts 5s.
        if t >= 0.5 and targe.status == NOT_STARTED:
            # keep track of start time/frame for later
            targe.tStart = t
            targe.frameNStart = frameN  # exact frame index
            targe.setAutoDraw(True)
        frameRemains = 0.5 + 5- win.monitorFramePeriod * 0.75  # most of one frame period left
        if targe.status == STARTED and t >= frameRemains:
            targe.setAutoDraw(False)

        # *response* updates: keyboard opens at t=0.5s, with RT clock reset
        # on the next flip so reaction times are screen-locked.
        if t >= 0.5 and response.status == NOT_STARTED:
            # keep track of start time/frame for later
            response.tStart = t
            response.frameNStart = frameN  # exact frame index
            response.status = STARTED
            # keyboard checking is just starting
            win.callOnFlip(response.clock.reset)  # t=0 on next screen flip
            event.clearEvents(eventType='keyboard')
        if response.status == STARTED:
            theseKeys = event.getKeys(keyList=['r', 'b', 'g', 'y'])

            # check for quit:
            if "escape" in theseKeys:
                endExpNow = True
            if len(theseKeys) > 0:  # at least one key was pressed
                response.keys = theseKeys[-1]  # just the last key pressed
                response.rt = response.clock.getTime()
                # was this 'correct'?
                if (response.keys == str(corrAns)) or (response.keys == corrAns):
                    response.corr = 1
                else:
                    response.corr = 0
                # a response ends the routine
                continueRoutine = False

        # check if all components have finished
        if not continueRoutine:  # a component has requested a forced-end of Routine
            break
        continueRoutine = False  # will revert to True if at least one component still running
        for thisComponent in trialComponents:
            if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
                continueRoutine = True
                break  # at least one component has not yet finished

        # check for quit (the Esc key)
        if endExpNow or event.getKeys(keyList=["escape"]):
            core.quit()

        # refresh the screen
        if continueRoutine:  # don't flip if this routine is over or we'll get a blank screen
            win.flip()

    # -------Ending Routine "trial"-------
    for thisComponent in trialComponents:
        if hasattr(thisComponent, "setAutoDraw"):
            thisComponent.setAutoDraw(False)
    # check responses
    if response.keys in ['', [], None]:  # No response was made
        response.keys=None
        # was no response the correct answer?!
        if str(corrAns).lower() == 'none':
            response.corr = 1  # correct non-response
        else:
            response.corr = 0  # failed to respond (incorrectly)
    # store data for trials (TrialHandler)
    trials.addData('response.keys',response.keys)
    trials.addData('response.corr', response.corr)
    if response.keys != None:  # we had a response
        trials.addData('response.rt', response.rt)
    # the Routine "trial" was not non-slip safe, so reset the non-slip timer
    routineTimer.reset()
    thisExp.nextEntry()

# completed 1 repeats of 'trials'
# ------Prepare to start Routine "thanks"-------
# Final screen: thank-you text shown until a key press (loop continues below).
t = 0
thanksClock.reset()  # clock
frameN = -1
continueRoutine = True
# update component parameters for each repeat
thank = event.BuilderKeyResponse()
# keep track of which components have finished
thanksComponents = [text, thank]
for thisComponent in thanksComponents:
    if hasattr(thisComponent, 'status'):
        thisComponent.status = NOT_STARTED

# -------Start Routine "thanks"-------
while continueRoutine:
# get current time
t = thanksClock.getTime()
frameN = frameN + 1 # number of | |
only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
:vartype type: str
:ivar location: Resource location.
:vartype location: str
:param status: The status of the database transparent data encryption.
Possible values include: 'Enabled', 'Disabled'
:type status: str or
~azure.mgmt.synapse.models.TransparentDataEncryptionStatus
"""
    # Fields marked ``readonly`` are populated by the service and may not be
    # set when sending a request.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'readonly': True},
    }

    # Serialization metadata: attribute name -> JSON key path and wire type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'TransparentDataEncryptionStatus'},
    }

    def __init__(self, *, status=None, **kwargs) -> None:
        super(TransparentDataEncryption, self).__init__(**kwargs)
        # ``location`` is read-only (server-populated), so it starts as None.
        self.location = None
        self.status = status
class UpdateIntegrationRuntimeNodeRequest(Model):
    """Update integration runtime node request.

    :param concurrent_jobs_limit: The number of concurrent jobs permitted to
     run on the integration runtime node. Values between 1 and
     maxConcurrentJobs(inclusive) are allowed.
    :type concurrent_jobs_limit: int
    """

    # Validation rules applied on serialization: the limit must be >= 1
    # when provided.
    _validation = {
        'concurrent_jobs_limit': {'minimum': 1},
    }

    # Serialization metadata: attribute name -> JSON key and wire type.
    _attribute_map = {
        'concurrent_jobs_limit': {'key': 'concurrentJobsLimit', 'type': 'int'},
    }

    def __init__(self, *, concurrent_jobs_limit: int=None, **kwargs) -> None:
        super(UpdateIntegrationRuntimeNodeRequest, self).__init__(**kwargs)
        self.concurrent_jobs_limit = concurrent_jobs_limit
class UpdateIntegrationRuntimeRequest(Model):
    """Update integration runtime request.

    :param auto_update: Enables or disables the auto-update feature of the
     self-hosted integration runtime. See
     https://go.microsoft.com/fwlink/?linkid=854189. Possible values include:
     'On', 'Off'
    :type auto_update: str or
     ~azure.mgmt.synapse.models.IntegrationRuntimeAutoUpdate
    :param update_delay_offset: The time offset (in hours) in the day, e.g.,
     PT03H is 3 hours. The integration runtime auto update will happen on that
     time.
    :type update_delay_offset: str
    """

    # Serialization metadata: attribute name -> JSON key and wire type.
    _attribute_map = {
        'auto_update': {'key': 'autoUpdate', 'type': 'str'},
        'update_delay_offset': {'key': 'updateDelayOffset', 'type': 'str'},
    }

    def __init__(self, *, auto_update=None, update_delay_offset: str=None, **kwargs) -> None:
        super(UpdateIntegrationRuntimeRequest, self).__init__(**kwargs)
        self.auto_update = auto_update
        self.update_delay_offset = update_delay_offset
class VirtualNetworkProfile(Model):
    """Virtual Network Profile.

    :param compute_subnet_id: Subnet ID used for computes in workspace
    :type compute_subnet_id: str
    """

    # Serialization metadata: attribute name -> JSON key and wire type.
    _attribute_map = {
        'compute_subnet_id': {'key': 'computeSubnetId', 'type': 'str'},
    }

    def __init__(self, *, compute_subnet_id: str=None, **kwargs) -> None:
        super(VirtualNetworkProfile, self).__init__(**kwargs)
        self.compute_subnet_id = compute_subnet_id
class VulnerabilityAssessmentRecurringScansProperties(Model):
    """Properties of a Vulnerability Assessment recurring scans.

    :param is_enabled: Recurring scans state.
    :type is_enabled: bool
    :param email_subscription_admins: Specifies that the schedule scan
     notification will be sent to the subscription administrators. Default
     value: True .
    :type email_subscription_admins: bool
    :param emails: Specifies an array of e-mail addresses to which the scan
     notification is sent.
    :type emails: list[str]
    """

    # Serialization metadata: attribute name -> JSON key and wire type.
    _attribute_map = {
        'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
        'email_subscription_admins': {'key': 'emailSubscriptionAdmins', 'type': 'bool'},
        'emails': {'key': 'emails', 'type': '[str]'},
    }

    def __init__(self, *, is_enabled: bool=None, email_subscription_admins: bool=True, emails=None, **kwargs) -> None:
        super(VulnerabilityAssessmentRecurringScansProperties, self).__init__(**kwargs)
        self.is_enabled = is_enabled
        self.email_subscription_admins = email_subscription_admins
        self.emails = emails
class VulnerabilityAssessmentScanError(Model):
    """Properties of a vulnerability assessment scan error.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar code: The error code.
    :vartype code: str
    :ivar message: The error message.
    :vartype message: str
    """

    # Both fields are server-populated and rejected on input.
    _validation = {
        'code': {'readonly': True},
        'message': {'readonly': True},
    }

    # Serialization metadata: attribute name -> JSON key and wire type.
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, **kwargs) -> None:
        super(VulnerabilityAssessmentScanError, self).__init__(**kwargs)
        # Read-only fields: deserialization fills them in from the response.
        self.code = None
        self.message = None
class VulnerabilityAssessmentScanRecord(ProxyResource):
    """A vulnerability assessment scan record.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified resource Id for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
    :vartype id: str
    :ivar name: The name of the resource
    :vartype name: str
    :ivar type: The type of the resource. Ex-
     Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
    :vartype type: str
    :ivar scan_id: The scan ID.
    :vartype scan_id: str
    :ivar trigger_type: The scan trigger type. Possible values include:
     'OnDemand', 'Recurring'
    :vartype trigger_type: str or
     ~azure.mgmt.synapse.models.VulnerabilityAssessmentScanTriggerType
    :ivar state: The scan status. Possible values include: 'Passed', 'Failed',
     'FailedToRun', 'InProgress'
    :vartype state: str or
     ~azure.mgmt.synapse.models.VulnerabilityAssessmentScanState
    :ivar start_time: The scan start time (UTC).
    :vartype start_time: datetime
    :ivar end_time: The scan end time (UTC).
    :vartype end_time: datetime
    :ivar errors: The scan errors.
    :vartype errors:
     list[~azure.mgmt.synapse.models.VulnerabilityAssessmentScanError]
    :ivar storage_container_path: The scan results storage container path.
    :vartype storage_container_path: str
    :ivar number_of_failed_security_checks: The number of failed security
     checks.
    :vartype number_of_failed_security_checks: int
    """

    # Every field of this record is server-populated (read-only).
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'scan_id': {'readonly': True},
        'trigger_type': {'readonly': True},
        'state': {'readonly': True},
        'start_time': {'readonly': True},
        'end_time': {'readonly': True},
        'errors': {'readonly': True},
        'storage_container_path': {'readonly': True},
        'number_of_failed_security_checks': {'readonly': True},
    }

    # Serialization metadata: attribute name -> JSON key path and wire type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'scan_id': {'key': 'properties.scanId', 'type': 'str'},
        'trigger_type': {'key': 'properties.triggerType', 'type': 'str'},
        'state': {'key': 'properties.state', 'type': 'str'},
        'start_time': {'key': 'properties.startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'properties.endTime', 'type': 'iso-8601'},
        'errors': {'key': 'properties.errors', 'type': '[VulnerabilityAssessmentScanError]'},
        'storage_container_path': {'key': 'properties.storageContainerPath', 'type': 'str'},
        'number_of_failed_security_checks': {'key': 'properties.numberOfFailedSecurityChecks', 'type': 'int'},
    }

    def __init__(self, **kwargs) -> None:
        super(VulnerabilityAssessmentScanRecord, self).__init__(**kwargs)
        # Read-only fields: deserialization fills them in from the response.
        self.scan_id = None
        self.trigger_type = None
        self.state = None
        self.start_time = None
        self.end_time = None
        self.errors = None
        self.storage_container_path = None
        self.number_of_failed_security_checks = None
class Workspace(TrackedResource):
    """A workspace.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource Id for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
    :vartype id: str
    :ivar name: The name of the resource
    :vartype name: str
    :ivar type: The type of the resource. Ex-
     Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
    :vartype type: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :param location: Required. The geo-location where the resource lives
    :type location: str
    :param default_data_lake_storage: Workspace default data lake storage
     account details
    :type default_data_lake_storage:
     ~azure.mgmt.synapse.models.DataLakeStorageAccountDetails
    :param sql_administrator_login_password: SQL administrator login password
    :type sql_administrator_login_password: str
    :ivar managed_resource_group_name: Workspace managed resource group
    :vartype managed_resource_group_name: str
    :ivar provisioning_state: Resource provisioning state
    :vartype provisioning_state: str
    :param sql_administrator_login: Login for workspace SQL active directory
     administrator
    :type sql_administrator_login: str
    :param virtual_network_profile: Virtual Network profile
    :type virtual_network_profile:
     ~azure.mgmt.synapse.models.VirtualNetworkProfile
    :param connectivity_endpoints: Connectivity endpoints
    :type connectivity_endpoints: dict[str, str]
    :param managed_virtual_network: Setting this to 'default' will ensure that
     all compute for this workspace is in a virtual network managed on behalf
     of the user.
    :type managed_virtual_network: str
    :param private_endpoint_connections: Private endpoint connections to the
     workspace
    :type private_endpoint_connections:
     list[~azure.mgmt.synapse.models.PrivateEndpointConnection]
    :param identity: Identity of the workspace
    :type identity: ~azure.mgmt.synapse.models.ManagedIdentity
    """

    # ``location`` is the only required input; the rest of the readonly
    # fields are server-populated.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'managed_resource_group_name': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Serialization metadata: attribute name -> JSON key path and wire type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'location': {'key': 'location', 'type': 'str'},
        'default_data_lake_storage': {'key': 'properties.defaultDataLakeStorage', 'type': 'DataLakeStorageAccountDetails'},
        'sql_administrator_login_password': {'key': 'properties.sqlAdministratorLoginPassword', 'type': 'str'},
        'managed_resource_group_name': {'key': 'properties.managedResourceGroupName', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'sql_administrator_login': {'key': 'properties.sqlAdministratorLogin', 'type': 'str'},
        'virtual_network_profile': {'key': 'properties.virtualNetworkProfile', 'type': 'VirtualNetworkProfile'},
        'connectivity_endpoints': {'key': 'properties.connectivityEndpoints', 'type': '{str}'},
        'managed_virtual_network': {'key': 'properties.managedVirtualNetwork', 'type': 'str'},
        'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
        'identity': {'key': 'identity', 'type': 'ManagedIdentity'},
    }

    def __init__(self, *, location: str, tags=None, default_data_lake_storage=None, sql_administrator_login_password: str=None, sql_administrator_login: str=None, virtual_network_profile=None, connectivity_endpoints=None, managed_virtual_network: str=None, private_endpoint_connections=None, identity=None, **kwargs) -> None:
        super(Workspace, self).__init__(tags=tags, location=location, **kwargs)
        self.default_data_lake_storage = default_data_lake_storage
        # FIX: the original line read ``= <PASSWORD>`` (a redaction
        # placeholder), which is a syntax error; assign the parameter.
        self.sql_administrator_login_password = sql_administrator_login_password
        # Read-only fields: server-populated on responses.
        self.managed_resource_group_name = None
        self.provisioning_state = None
        self.sql_administrator_login = sql_administrator_login
        self.virtual_network_profile = virtual_network_profile
        self.connectivity_endpoints = connectivity_endpoints
        self.managed_virtual_network = managed_virtual_network
        self.private_endpoint_connections = private_endpoint_connections
        self.identity = identity
class WorkspaceAadAdminInfo(ProxyResource):
"""Workspace active directory administrator.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Fully qualified resource Id for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
:vartype id: str
:ivar name: The name of the resource
:vartype name: str
:ivar type: The type of the resource. Ex-
Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
:vartype type: str
:param tenant_id: Tenant ID of the workspace active directory
administrator
:type tenant_id: str
:param login: Login of the workspace active directory administrator
:type login: str
:param administrator_type: Workspace active directory administrator type
:type administrator_type: str
:param sid: Object ID of the workspace active directory administrator
:type sid: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
| |
# -*- coding:utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
from itertools import chain, product
from logging import getLogger
from limpyd.contrib.collection import ExtendedCollectionManager
from limpyd.exceptions import ImplementationError
from limpyd.fields import SingleValueField, HashField, MultiValuesField
from limpyd.indexes import BaseIndex, NumberRangeIndex, TextRangeIndex, EqualIndex, _MultiFieldsIndexMixin
from limpyd.utils import cached_property, unique_key
# Module-level logger, namespaced to this module.
logger = getLogger(__name__)
class MultiIndexes(BaseIndex):
    """An index that is a proxy to many ones

    This must not be used directly as a class, but a new index class must be
    created by using the ``compose`` class method

    Attributes
    ----------
    index_classes: list
        The index classes composing this multi-indexes class
    key: str
        A key to avoid collision with another index/multi-index
        that will be passed to a field.

    Examples
    --------
    >>> multi_index = MultiIndexes.compose([MyIndex, MyOtherIndex])
    >>> class MyModel(RedisModel):
    ...     field = StringField(indexes=[multi_index])
    """
    index_classes = []
    @classmethod
    def compose(cls, index_classes, key=None, transform=None, name=None):
        """Create a new class with the given index classes

        Parameters
        -----------
        index_classes: list
            The list of index classes to be used in the multi-index class to create
        name: str
            The name of the new multi-index class. If not set, it will be the same
            as the current class
        key: str
            A key to augment the default key of each index, to avoid collision.
        transform: callable
            None by default, can be set to a function that will transform the value to be indexed.
            This callable can accept one (`value`) or two (`self`, `value`) arguments
        """
        attrs = {}
        if index_classes:
            attrs['index_classes'] = index_classes
        klass = type(str(name or cls.__name__), (cls, ), attrs)
        # let the ``configure`` method manage some fields
        configure_attrs = {}
        if key is not None:
            configure_attrs['key'] = key
        if transform is not None:
            configure_attrs['transform'] = transform
        if configure_attrs:
            klass = klass.configure(**configure_attrs)
        return klass
    @cached_property
    def _indexes(self):
        """Instantiate the indexes only when asked

        Returns
        -------
        list
            A list of all indexes, tied to the field.
        """
        return [index_class(field=self.field) for index_class in self.index_classes]
    def can_handle_suffix(self, suffix):
        """Tell if one of the managed indexes can be used for the given filter suffix

        For parameters, see BaseIndex.can_handle_suffix
        """
        for index in self._indexes:
            if index.can_handle_suffix(suffix):
                return True
        return False
    def _reset_rollback_cache(self, pk):
        """Reset attributes used to potentially rollback the indexes

        For the parameters, see BaseIndex._reset_rollback_cache
        """
        for index in self._indexes:
            index._reset_rollback_cache(pk)
    def _rollback(self, pk):
        """Restore the indexes in their previous state

        For the parameters, see BaseIndex._rollback
        """
        for index in self._indexes:
            index._rollback(pk)
    def get_unique_index(self):
        """Return the first index handling uniqueness

        Returns
        -------
        BaseIndex
            The first index capable of handling uniqueness

        Raises
        ------
        IndexError
            If no index is capable of handling uniqueness
        """
        return [index for index in self._indexes if index.handle_uniqueness][0]
    @property
    def handle_uniqueness(self):
        """Tell if at least one of the indexes can handle uniqueness

        Returns
        -------
        bool
            ``True`` if this multi-index can handle uniqueness.
        """
        try:
            self.get_unique_index()
        except IndexError:
            return False
        else:
            return True
    def prepare_args(self, args, transform=True):
        """Prepare args to be used by a sub-index

        Parameters
        ----------
        args: list
            The whole list of arguments passed to add, check_uniqueness, get_filtered_keys...
        transform: bool
            If ``True``, the last entry in `args`, ie the value, will be transformed.
            Else it will be kept as is.
        """
        updated_args = list(args)
        if transform:
            updated_args[-1] = self.transform_value(updated_args[-1])
        if self.key:
            # insert the multi-index key just before the value, to namespace storage keys
            updated_args.insert(-1, self.key)
        return updated_args
    def check_uniqueness(self, pk, *args):
        """For a unique index, check if the given args are not used twice

        For the parameters, see BaseIndex.check_uniqueness
        """
        self.get_unique_index().check_uniqueness(pk, *self.prepare_args(args, transform=False))
    def add(self, pk, *args, **kwargs):
        """Add the instance tied to the field to all the indexes

        For the parameters, see BaseIndex.add
        """
        check_uniqueness = kwargs.pop('check_uniqueness', False)
        args = self.prepare_args(args)
        for index in self._indexes:
            index.add(pk, *args, check_uniqueness=check_uniqueness and index.handle_uniqueness, **kwargs)
            # uniqueness only needs to be validated once, by the first capable index
            if check_uniqueness and index.handle_uniqueness:
                check_uniqueness = False
    def remove(self, pk, *args, **kwargs):
        """Remove the instance tied to the field from all the indexes

        For the parameters, see BaseIndex.remove
        """
        args = self.prepare_args(args)
        for index in self._indexes:
            index.remove(pk, *args, **kwargs)
    def get_filtered_keys(self, suffix, *args, **kwargs):
        """Return the index keys to be used by the collection for the given args

        For the parameters, see BaseIndex.get_filtered_keys
        """
        args = self.prepare_args(args, transform=False)
        # delegate to the first sub-index able to handle this filter suffix
        for index in self._indexes:
            if index.can_handle_suffix(suffix):
                return index.get_filtered_keys(suffix, *args, **kwargs)
    def get_all_storage_keys(self):
        """Return the keys to be removed by `clear` in aggressive mode

        For the parameters, see BaseIndex.get_all_storage_keys
        """
        keys = set()
        for index in self._indexes:
            keys.update(index.get_all_storage_keys())
        return keys
# This is a multi-indexes managing the different parts of a date in the format YYYY-MM-DD
DateIndexParts = MultiIndexes.compose([
    NumberRangeIndex.configure(prefix='year', transform=lambda value: value[:4], handle_uniqueness=False, name='YearIndex'),
    NumberRangeIndex.configure(prefix='month', transform=lambda value: value[5:7], handle_uniqueness=False, name='MonthIndex'),
    NumberRangeIndex.configure(prefix='day', transform=lambda value: value[8:10], handle_uniqueness=False, name='DayIndex'),
], name='DateIndexParts')
# A simple TextRangeIndex to filter on a date in the format YYYY-MM-DD
DateRangeIndex = TextRangeIndex.configure(key='date', transform=lambda value: value[:10], name='DateRangeIndex')
# A full usable index for fields holding dates (without time)
DateIndex = MultiIndexes.compose([DateRangeIndex, DateIndexParts], name='DateIndex')
# This is a multi-indexes managing the different parts of a time in the format HH:MM:SS
TimeIndexParts = MultiIndexes.compose([
    NumberRangeIndex.configure(prefix='hour', transform=lambda value: value[0:2], handle_uniqueness=False, name='HourIndex'),
    NumberRangeIndex.configure(prefix='minute', transform=lambda value: value[3:5], handle_uniqueness=False, name='MinuteIndex'),
    NumberRangeIndex.configure(prefix='second', transform=lambda value: value[6:8], handle_uniqueness=False, name='SecondIndex'),
], name='TimeIndexParts')
# A simple TextRangeIndex to filter on a time in the format HH:MM:SS
TimeRangeIndex = TextRangeIndex.configure(key='time', transform=lambda value: value[:8], name='TimeRangeIndex')
# A full usable index for fields holding times (without date)
TimeIndex = MultiIndexes.compose([TimeRangeIndex, TimeIndexParts], name='TimeIndex')
# A full usable index for fields holding dates+times, without filtering on hour/min/sec
# but only full field, full date and full time, and year, month, day
DateSimpleTimeIndex = MultiIndexes.compose([
    TextRangeIndex.configure(key='full', name='FullDateTimeRangeIndex'),
    DateRangeIndex.configure(prefix='date'),
    DateIndexParts,
    TimeRangeIndex.configure(prefix='time', transform=lambda value: value[11:])  # pass only time
], name='DateSimpleTimeIndex', transform=lambda value: value[:19])
# A full usable index for fields holding dates+times, with full filtering capabilities
DateTimeIndex = MultiIndexes.compose([
    DateSimpleTimeIndex,
    TimeIndexParts.configure(transform=lambda value: value[11:]),
], name='DateTimeIndex')
# And a simple datetime index without parts
SimpleDateTimeIndex = MultiIndexes.compose([
    TextRangeIndex.configure(key='full', name='FullDateTimeRangeIndex'),
    DateRangeIndex.configure(prefix='date'),
    TimeRangeIndex.configure(prefix='time', transform=lambda value: value[11:])  # pass only time
], name='SimpleDateTimeIndex', transform=lambda value: value[:19])
class _BaseRelatedIndex(BaseIndex):
    """Index attached to another index on another field

    This index does not handle data on its own for its field: when data is added/removed, it will
    ask the tied index to update its data.

    Configurable attributes
    -----------------------
    These are class attributes that can be changed via ``configure``:

    related_field : RedisField
        The field on the model that define the related index
    related_index_class : Type[BaseIndex]
        The index class defined for the indexed field
    """
    related_field = None
    related_index_class = None
    # expose the two extra attributes to the ``configure`` machinery
    configurable_attrs = BaseIndex.configurable_attrs | {'related_field', 'related_index_class'}
    def __init__(self, field):
        """Tie ``related_field`` from the class to the right instance."""
        super(_BaseRelatedIndex, self).__init__(field)
        # resolve the class-level related field into this model's own field instance
        self.related_field = field._model.get_field(self.related_field.name)
    @cached_property
    def related_index(self):
        """Get (and cache) the related index to use

        Returns
        -------
        BaseIndex
            The index instance (of ``related_index_class``) tied to the related field.
        """
        return self.related_field.get_index(
            index_class=self.related_index_class,
            key=self.related_index_class.key,
            prefix=self.related_index_class.prefix,
        )
    @classmethod
    def handle_configurable_attrs(cls, related_index_class, related_field, **kwargs):
        """Handle attributes that can be passed to ``configure``.

        This method handles the ``related_field`` and ``related_index_class`` attributes added
        in this index class.

        Parameters
        ----------
        related_field : RedisField
            The field on the model that define the related index
        related_index_class : Type[BaseIndex]
            The related index defined for the indexed field

        For the other parameters, see ``BaseIndex.handle_configurable_attrs``.
        """
        name, attrs, kwargs = super(_BaseRelatedIndex, cls).handle_configurable_attrs(**kwargs)
        attrs['related_field'] = related_field
        attrs['related_index_class'] = related_index_class
        return name, attrs, kwargs
class _ScoredEqualIndex_RelatedIndex(_BaseRelatedIndex):
    """Index attached to the "score field" of ``ScoredEqualIndex``

    This index stores no data of its own for its field: every add/remove is
    forwarded to the tied ``ScoredEqualIndex`` so it can update the score of
    the indexed value of the related field.
    """
    def add(self, pk, *args, **kwargs):
        """Store nothing; forward the new score to the related index."""
        raw_score = args[-1]
        score = None if raw_score is None else float(raw_score)
        self.related_index.score_updated(pk, score)
        self._get_rollback_cache(pk)['indexed_values'].add(tuple(args))
    def remove(self, pk, *args, **kwargs):
        """Store nothing; tell the related index to deindex its saved value."""
        self.related_index.score_updated(pk, None)
        self._get_rollback_cache(pk)['deindexed_values'].add(tuple(args))
class ScoredEqualIndex(EqualIndex):
"""Index acting like an EqualIndex but indexing values with a score from another field
It allows filtering on a value and getting results automatically sorted by the related field
in a single redis call.
Notes
-----
- The scored field must be a subclass of ``SingleValueField``
- The model must use ``ExtendedCollectionManager``
- If the related field has no value, the instance will not be present in the index.
| |
<reponame>tvorogme/dataops
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import status_params
import utils
import ambari_simplejson as json
import pwd
from resource_management.libraries.functions import format
from resource_management.libraries.functions.default import default
from resource_management.libraries.functions.expect import expect
from resource_management.libraries.functions import get_klist_path
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.format_jvm_option import format_jvm_option
from resource_management.libraries.functions.hdfs_utils import is_https_enabled_in_hdfs
from resource_management.libraries.functions import is_empty
from resource_management.libraries.functions import namenode_ha_utils
from resource_management.libraries.functions.namenode_ha_utils import get_properties_for_all_nameservices, \
namenode_federation_enabled
import sys, os
script_path = os.path.realpath(__file__).split(
'/services')[0] + '/../../../stack-hooks/before-INSTALL/scripts/ranger'
sys.path.append(script_path)
from setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config
# Ambari-provided execution context for this HDFS params script (Python 2 file:
# see the 0770 octal literal and dict.iteritems() used further down).
config = Script.get_config()
tmp_dir = Script.get_tmp_dir()
stack_root = Script.get_stack_root()
import os
import multiprocessing
# Host sizing: heap sizes are derived from physical memory instead of the
# (commented-out) hadoop-env configuration values.
cpu_count = multiprocessing.cpu_count()
mem_bytes = os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')
mem_gib = int(mem_bytes / (1024 ** 3))
men_mib = int(mem_bytes / (1024 ** 2))  # NOTE(review): name looks like a typo for "mem_mib"; kept since other modules may read it
# hadoop_heapsize = config['configurations']['hadoop-env']['hadoop_heapsize']
# namenode_heapsize = config['configurations']['hadoop-env']['namenode_heapsize']
# dtnode_heapsize = config['configurations']['hadoop-env']['dtnode_heapsize']
# Heap sizes in MB: a fraction of physical RAM with a floor (20 GB datanode,
# 200 GB namenode / hadoop).
dtnode_heapsize = max(20480, int(abs(men_mib * 0.2)))
namenode_heapsize = max(204800, int(abs(men_mib * 0.8)))
hadoop_heapsize = max(204800, int(abs(men_mib * 0.4)))
# Build one '<mount>/hdfs/namenode' directory per local /dev-backed mount,
# skipping /boot, /var/log and the root filesystem.
with open('/proc/mounts', 'r') as f:
    namenode_mounts = [
        line.split()[1] + '/hdfs/namenode' for line in f.readlines()
        if line.split()[0].startswith('/dev')
        and line.split()[1] not in ['/boot', '/var/log', '/']
    ]
# Same mount selection, used for secondary-namenode checkpoint storage.
with open('/proc/mounts', 'r') as f:
    checkpoint_mounts = [
        line.split()[1] + '/hdfs/checkpoint' for line in f.readlines()
        if line.split()[0].startswith('/dev')
        and line.split()[1] not in ['/boot', '/var/log', '/']
    ]
def detect_ssd(dev):
    """Return the HDFS storage-type tag for a block device: '[SSD]' or '[DISK]'.

    Parameters
    ----------
    dev : str
        Device path as found in /proc/mounts, e.g. '/dev/sda1'. The trailing
        partition digit is stripped to get the parent disk name.
        (NOTE(review): this single-character strip assumes 'sdXN'-style names;
        NVMe names like 'nvme0n1p1' would not resolve — confirm target hosts.)

    Returns
    -------
    str
        '[SSD]' if the kernel reports the parent disk as non-rotational,
        '[DISK]' otherwise (including when the sysfs entry does not exist).
    """
    result = '[DISK]'
    # '/dev/sda1' -> 'sda': drop the partition number to find the parent disk.
    rotational_file = '/sys/block/' + dev.split('/')[2][0:-1] + '/queue/rotational'
    if os.path.exists(rotational_file):
        with open(rotational_file) as f:
            # BUG FIX: f.read() returns a string, so the previous comparison
            # `f.read() == 0` was always False and SSDs were never detected.
            # The sysfs file contains '0\n' for SSDs and '1\n' for rotational disks.
            if f.read().strip() == '0':
                result = '[SSD]'
    return result
# Datanode data dirs: same mount selection, each prefixed with its storage
# tier tag ('[SSD]' or '[DISK]') as understood by HDFS heterogeneous storage.
with open('/proc/mounts', 'r') as f:
    datanode_mounts = [
        detect_ssd(line.split()[0]) + line.split()[1] + '/hdfs/data' for line in f.readlines()
        if line.split()[0].startswith('/dev')
        and line.split()[1] not in ['/boot', '/var/log', '/']
    ]
# Comma-joined directory lists in the form hdfs-site expects.
dfs_name_dir = ','.join(namenode_mounts)
dfs_checkpoint_dirs = ','.join(checkpoint_mounts)
fs_checkpoint_dirs = checkpoint_mounts
ramdisk = '/dev/shm'
# A RAM_DISK-tagged tmpfs dir is always appended for lazy-persist writes.
dfs_data_dirs = ','.join(datanode_mounts) + ',[RAM_DISK]' + ramdisk + '/hdfs/data'
jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']
datanode_max_locked_memory = config['configurations']['hdfs-site']['dfs.datanode.max.locked.memory']
# Hadoop install layout is derived from the download tarball name.
install_dir = stack_root + '/hadoop'
download_url = config['configurations']['hadoop-env']['download_url']
filename = download_url.split('/')[-1]
version_dir = filename.replace('.tar.gz', '').replace('.tgz', '')
stack_name = status_params.stack_name
stack_root = Script.get_stack_root()
# Upgrade / restart related command parameters.
upgrade_direction = default("/commandParams/upgrade_direction", None)
rolling_restart = default("/commandParams/rolling_restart", False)
rolling_restart_safemode_exit_timeout = default(
    "/configurations/cluster-env/namenode_rolling_restart_safemode_exit_timeout",
    None)
agent_stack_retry_on_unavailability = config['ambariLevelParams'][
    'agent_stack_retry_on_unavailability']
agent_stack_retry_count = expect("/ambariLevelParams/agent_stack_retry_count",
                                 int)
# there is a stack upgrade which has not yet been finalized; it's currently suspended
upgrade_suspended = default("roleParams/upgrade_suspended", False)
# New Cluster Stack Version that is defined during the RESTART of a Stack Upgrade
version = default("/commandParams/version", None)
# The desired role is only available during a Non-Rolling Upgrade in HA.
# The server calculates which of the two NameNodes will be the active, and the other the standby since they
# are started using different commands.
desired_namenode_role = default("/commandParams/desired_namenode_role", None)
# Capabilities hard-coded for this stack (no version checks performed here).
stack_supports_ranger_kerberos = True
stack_supports_ranger_audit_db = False
stack_supports_zk_security = True
security_enabled = config['configurations']['cluster-env']['security_enabled']
hdfs_user = status_params.hdfs_user
root_user = "root"
current_user = pwd.getpwuid(os.getuid()).pw_name
hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
namenode_pid_file = status_params.namenode_pid_file
zkfc_pid_file = status_params.zkfc_pid_file
# Some datanode settings
dfs_dn_addr = default('/configurations/hdfs-site/dfs.datanode.address', None)
dfs_dn_http_addr = default(
    '/configurations/hdfs-site/dfs.datanode.http.address', None)
dfs_dn_https_addr = default(
    '/configurations/hdfs-site/dfs.datanode.https.address', None)
dfs_http_policy = default('/configurations/hdfs-site/dfs.http.policy', None)
dfs_dn_ipc_address = config['configurations']['hdfs-site'][
    'dfs.datanode.ipc.address']
secure_dn_ports_are_in_use = False
hdfs_tmp_dir = default("/configurations/hadoop-env/hdfs_tmp_dir", "/tmp")
namenode_backup_dir = default("/configurations/hadoop-env/namenode_backup_dir",
                              "/tmp/upgrades")
# hadoop default parameters
mapreduce_libs_path = install_dir + "/share/hadoop/mapreduce/*"
hadoop_libexec_dir = install_dir + "/libexec/"
hadoop_bin = install_dir + "/sbin/"
hadoop_bin_dir = install_dir + "/bin/"
hadoop_home = install_dir
hadoop_secure_dn_user = hdfs_user
hadoop_conf_dir = '/etc/hadoop'
hadoop_conf_secure_dir = os.path.join(hadoop_conf_dir, "secure")
hadoop_lib_home = install_dir + '/lib/'
# hadoop parameters for stacks that support rolling_upgrade
if not security_enabled:
    hadoop_secure_dn_user = '""'
else:
    dfs_dn_port = utils.get_port(dfs_dn_addr)
    dfs_dn_http_port = utils.get_port(dfs_dn_http_addr)
    dfs_dn_https_port = utils.get_port(dfs_dn_https_addr)
    # We try to avoid inability to start datanode as a plain user due to usage of root-owned ports
    if dfs_http_policy == "HTTPS_ONLY":
        secure_dn_ports_are_in_use = utils.is_secure_port(
            dfs_dn_port) or utils.is_secure_port(dfs_dn_https_port)
    elif dfs_http_policy == "HTTP_AND_HTTPS":
        secure_dn_ports_are_in_use = utils.is_secure_port(
            dfs_dn_port) or utils.is_secure_port(
                dfs_dn_http_port) or utils.is_secure_port(dfs_dn_https_port)
    else:  # params.dfs_http_policy == "HTTP_ONLY" or not defined:
        secure_dn_ports_are_in_use = utils.is_secure_port(
            dfs_dn_port) or utils.is_secure_port(dfs_dn_http_port)
    # A privileged (root-started, jsvc) datanode is only needed for root-owned ports.
    if secure_dn_ports_are_in_use:
        hadoop_secure_dn_user = hdfs_user
    else:
        hadoop_secure_dn_user = '""'
hadoop_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
datanode_secure_pid_file = format(
    "{hadoop_pid_dir}/hadoop-{hdfs_user}-{root_user}-datanode.pid")
datanode_unsecure_pid_file = format(
    "{hadoop_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
possible_datanode_pid_files = [
    datanode_secure_pid_file, datanode_unsecure_pid_file
]
# Pick the pid file matching how the datanode is actually started.
if secure_dn_ports_are_in_use:
    datanode_pid_file = datanode_secure_pid_file
else:
    datanode_pid_file = datanode_unsecure_pid_file
ambari_libs_dir = "/var/lib/ambari-agent/lib"
limits_conf_dir = "/etc/security/limits.d"
hdfs_user_nofile_limit = default(
    "/configurations/hadoop-env/hdfs_user_nofile_limit", "1048576")
hdfs_user_nproc_limit = default(
    "/configurations/hadoop-env/hdfs_user_nproc_limit", "65536")
jsvc_path = '/usr/bin/jsvc'
execute_path = os.environ['PATH'] + os.pathsep + hadoop_bin_dir
# Raise the max-locked-memory ulimit before launching daemons (needed for
# RAM_DISK / max.locked.memory above).
ulimit_cmd = "ulimit -l 8589934592000 ;"
# security params
smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab']
hdfs_user_keytab = config['configurations']['hadoop-env']['hdfs_user_keytab']
falcon_user = config['configurations']['falcon-env']['falcon_user']
# exclude file
if 'all_decommissioned_hosts' in config['commandParams']:
    hdfs_exclude_file = config['commandParams'][
        'all_decommissioned_hosts'].split(",")
else:
    hdfs_exclude_file = []
exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
slave_hosts = default("/clusterHostInfo/datanode_hosts", [])
include_file_path = default("/configurations/hdfs-site/dfs.hosts", None)
hdfs_include_file = None
manage_include_files = default(
    "/configurations/hdfs-site/manage.include.files", False)
if include_file_path and manage_include_files:
    hdfs_include_file = slave_hosts
update_files_only = default("/commandParams/update_files_only", False)
command_phase = default("/commandParams/phase", "")
# Kerberos client tool paths (resolved from the configured search paths).
klist_path_local = get_klist_path(
    default('/configurations/kerberos-env/executable_search_paths', None))
kinit_path_local = get_kinit_path(
    default('/configurations/kerberos-env/executable_search_paths', None))
# hosts
hostname = config['agentLevelParams']['hostname']
rm_host = default("/clusterHostInfo/resourcemanager_hosts", [])
slave_hosts = default("/clusterHostInfo/datanode_hosts", [])
oozie_servers = default("/clusterHostInfo/oozie_server_hosts", [])
hcat_server_hosts = default("/clusterHostInfo/webhcat_server_hosts", [])
hive_server_host = default("/clusterHostInfo/hive_server_hosts", [])
hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
hs_host = default("/clusterHostInfo/historyserver_hosts", [])
jtnode_host = default("/clusterHostInfo/jtnode_hosts", [])
namenode_host = default("/clusterHostInfo/namenode_hosts", [])
nm_host = default("/clusterHostInfo/nodemanager_hosts", [])
ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", [])
journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
# Presence flags for each service role in the cluster topology.
has_ganglia_server = not len(ganglia_server_hosts) == 0
has_namenodes = not len(namenode_host) == 0
has_jobtracker = not len(jtnode_host) == 0
has_resourcemanager = not len(rm_host) == 0
has_histroryserver = not len(hs_host) == 0
has_hbase_masters = not len(hbase_master_hosts) == 0
has_slaves = not len(slave_hosts) == 0
has_oozie_server = not len(oozie_servers) == 0
has_hcat_server_host = not len(hcat_server_hosts) == 0
has_hive_server_host = not len(hive_server_host) == 0
has_journalnode_hosts = not len(journalnode_hosts) == 0
has_zkfc_hosts = not len(zkfc_hosts) == 0
has_falcon_host = not len(falcon_host) == 0
# Role flags for the current host.
is_namenode_master = hostname in namenode_host
is_jtnode_master = hostname in jtnode_host
is_rmnode_master = hostname in rm_host
is_hsnode_master = hostname in hs_host
is_hbase_master = hostname in hbase_master_hosts
is_slave = hostname in slave_hosts
if has_ganglia_server:
    ganglia_server_host = ganglia_server_hosts[0]
# users and groups
yarn_user = config['configurations']['yarn-env']['yarn_user']
hbase_user = config['configurations']['hbase-env']['hbase_user']
oozie_user = config['configurations']['oozie-env']['oozie_user']
webhcat_user = config['configurations']['hive-env']['webhcat_user']
hive_user = config['configurations']['hive-env']['hive_user']
smoke_user = config['configurations']['cluster-env']['smokeuser']
smokeuser_principal = config['configurations']['cluster-env'][
    'smokeuser_principal_name']
mapred_user = config['configurations']['mapred-env']['mapred_user']
hdfs_principal_name = default('/configurations/hadoop-env/hdfs_principal_name',
                              None)
user_group = config['configurations']['cluster-env']['user_group']
root_group = "root"
proxyuser_group = config['configurations']['hadoop-env']['proxyuser_group']
# hadoop params
hdfs_log_dir_prefix = config['configurations']['hadoop-env'][
    'hdfs_log_dir_prefix']
hadoop_root_logger = config['configurations']['hadoop-env'][
    'hadoop_root_logger']
nfs_file_dump_dir = config['configurations']['hdfs-site']['nfs.file.dump.dir']
dfs_domain_socket_path = config['configurations']['hdfs-site'][
    'dfs.domain.socket.path']
dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
hdfs_site = config['configurations']['hdfs-site']
# With federation, each nameservice may declare its own journalnode edits dir.
if namenode_federation_enabled(hdfs_site):
    jn_edits_dirs = get_properties_for_all_nameservices(
        hdfs_site, 'dfs.journalnode.edits.dir').values()
else:
    jn_edits_dirs = [jn_edits_dir]
hdfs_log_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
namenode_dirs_created_stub_dir = hdfs_log_dir
namenode_dirs_stub_filename = "namenode_dirs_created"
smoke_hdfs_user_dir = format("/user/{smoke_user}")
smoke_hdfs_user_mode = 0770  # Python 2 octal literal (would be 0o770 in py3)
hdfs_service_check_test_file = format('{tmp_dir}/hdfs-service-check')
hdfs_namenode_format_disabled = default(
    "/configurations/cluster-env/hdfs_namenode_format_disabled", False)
# Marker directories used to remember that a namenode was formatted/bootstrapped.
hdfs_namenode_formatted_mark_suffix = "/namenode-formatted/"
hdfs_namenode_bootstrapped_mark_suffix = "/namenode-bootstrapped/"
namenode_formatted_old_mark_dirs = [
    "/var/run/hadoop/hdfs/namenode-formatted",
    format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted"),
    "/var/lib/hdfs/namenode/formatted"
]
dfs_name_dirs = dfs_name_dir.split(",")
namenode_formatted_mark_dirs = []
namenode_bootstrapped_mark_dirs = []
for dn_dir in dfs_name_dirs:
    tmp_format_mark_dir = format(
        "{dn_dir}{hdfs_namenode_formatted_mark_suffix}")
    tmp_bootstrap_mark_dir = format(
        "{dn_dir}{hdfs_namenode_bootstrapped_mark_suffix}")
    namenode_formatted_mark_dirs.append(tmp_format_mark_dir)
    namenode_bootstrapped_mark_dirs.append(tmp_bootstrap_mark_dir)
# Use the namenode RPC address if configured, otherwise, fallback to the default file system
namenode_address = None
if 'dfs.namenode.rpc-address' in config['configurations']['hdfs-site']:
    namenode_rpcaddress = config['configurations']['hdfs-site'][
        'dfs.namenode.rpc-address']
    namenode_address = format("hdfs://{namenode_rpcaddress}")
else:
    namenode_address = config['configurations']['core-site']['fs.defaultFS']
data_dir_mount_file = "/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist"
# HDFS High Availability properties
dfs_ha_enabled = False
dfs_ha_nameservices = default(
    '/configurations/hdfs-site/dfs.internal.nameservices', None)
if dfs_ha_nameservices is None:
    dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.nameservices',
                                  None)
dfs_ha_namenode_ids_all_ns = get_properties_for_all_nameservices(
    hdfs_site, 'dfs.ha.namenodes')
dfs_ha_automatic_failover_enabled = default(
    "/configurations/hdfs-site/dfs.ha.automatic-failover.enabled", False)
# hostnames of the active HDFS HA Namenodes (only used when HA is enabled)
if command_phase == "INITIAL_START":
    dfs_ha_namenode_active = namenode_ha_utils.get_initial_active_namenodes(
        default("/configurations/cluster-env", {}))
    dfs_ha_initial_cluster_id = default(
        '/configurations/cluster-env/dfs_ha_initial_cluster_id', None)
    # temporary backward compatibility for CI
    if not dfs_ha_namenode_active:
        dfs_ha_namenode_active = namenode_ha_utils.get_initial_active_namenodes(
            default("/configurations/hadoop-env", {}))
        dfs_ha_initial_cluster_id = default(
            '/configurations/hadoop-env/dfs_ha_initial_cluster_id', None)
else:
    dfs_ha_namenode_active = frozenset()
    dfs_ha_initial_cluster_id = 'xiaomatech'
# Optional ViewFS mount table to be written next to the hadoop config.
mount_table_xml_inclusion_file_full_path = None
mount_table_content = None
if 'viewfs-mount-table' in config['configurations']:
    xml_inclusion_file_name = 'viewfs-mount-table.xml'
    mount_table = config['configurations']['viewfs-mount-table']
    if 'content' in mount_table and mount_table['content'].strip():
        mount_table_xml_inclusion_file_full_path = os.path.join(
            hadoop_conf_dir, xml_inclusion_file_name)
        mount_table_content = mount_table['content']
ha_zookeeper_quorum = config['configurations']['core-site'][
    'ha.zookeeper.quorum']
jaas_file = os.path.join(hadoop_conf_secure_dir, 'hdfs_jaas.conf')
zk_namespace = default('/configurations/hdfs-site/ha.zookeeper.parent-znode',
                       '/hadoop-ha')
# Values for the current Host
namenode_id = None
namenode_rpc = None
dfs_ha_namemodes_ids_list = []
other_namenode_id = None
# Find which nameservice (if any) this host serves as a namenode.
# NOTE: dict.iteritems() confirms this module targets Python 2.
for ns, dfs_ha_namenode_ids in dfs_ha_namenode_ids_all_ns.iteritems():
    found = False
    if not is_empty(dfs_ha_namenode_ids):
        dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
        dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
        if dfs_ha_namenode_ids_array_len > 1:
            dfs_ha_enabled = True
    if dfs_ha_enabled:
        for nn_id in dfs_ha_namemodes_ids_list:
            nn_host = config['configurations']['hdfs-site'][format(
                'dfs.namenode.rpc-address.{ns}.{nn_id}')]
            if hostname in nn_host:
                namenode_id = nn_id
                namenode_rpc = nn_host
                found = True
        # With HA enabled namenode_address is recomputed
        namenode_address = format('hdfs://{ns}')
        # Calculate the namenode id of the other namenode. This is needed during RU to initiate an HA failover using ZKFC.
        if namenode_id is not None and len(dfs_ha_namemodes_ids_list) == 2:
            other_namenode_id = list(
                set(dfs_ha_namemodes_ids_list) - set([namenode_id]))[0]
    if found:
        break
# Journalnode endpoint: https or http address depending on the HTTP policy.
if dfs_http_policy is not None and dfs_http_policy.upper() == "HTTPS_ONLY":
    https_only = True
    journalnode_address = default(
        '/configurations/hdfs-site/dfs.journalnode.https-address', None)
else:
    https_only = False
    journalnode_address = default(
        '/configurations/hdfs-site/dfs.journalnode.http-address', None)
if journalnode_address:
    journalnode_port = journalnode_address.split(":")[1]
if security_enabled:
dn_principal_name = config['configurations']['hdfs-site'][
'dfs.datanode.kerberos.principal']
dn_keytab = config['configurations']['hdfs-site'][
'dfs.datanode.keytab.file']
dn_principal_name = dn_principal_name.replace('_HOST', hostname.lower())
dn_kinit_cmd = format(
"{kinit_path_local} -kt {dn_keytab} {dn_principal_name};")
nn_principal_name = config['configurations']['hdfs-site'][
'dfs.namenode.kerberos.principal']
nn_keytab = config['configurations']['hdfs-site'][
'dfs.namenode.keytab.file']
nn_principal_name = nn_principal_name.replace('_HOST', hostname.lower())
nn_kinit_cmd = format(
"{kinit_path_local} -kt {nn_keytab} {nn_principal_name};")
jn_principal_name = default(
"/configurations/hdfs-site/dfs.journalnode.kerberos.principal", None)
if jn_principal_name:
jn_principal_name = jn_principal_name.replace('_HOST',
hostname.lower())
jn_keytab = default(
"/configurations/hdfs-site/dfs.journalnode.keytab.file", None)
hdfs_kinit_cmd = format(
"{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name};")
else:
dn_kinit_cmd = ""
nn_kinit_cmd | |
<filename>tf_verify/optimizer.py
'''
@author: <NAME>
'''
from deepzono_nodes import *
from deeppoly_nodes import *
from functools import reduce
import numpy as np
# Operation types whose outputs are counted as "neurons" by Optimizer.get_neuron_count.
operations_for_neuron_count = ["Relu", "Sigmoid", "Tanh", "MaxPool"]
class Optimizer:
    def __init__(self, operations, resources):
        """
        Arguments
        ---------
        operations : list
            list of str, each one being a type of operation (like "MatMul", "Conv2D", "Add" ...)
        resources : list
            list of dicts, each dict contains a mapping from a domain (like deepzono, refinezono or deeppoly) to a tuple with resources (like matrices, biases ...)
        """
        # NOTE: the original docstring had the two parameter descriptions swapped;
        # usage below (`op in operations_for_neuron_count`, `res['deepzono']`)
        # shows operations are strings and resources are dicts.
        self.operations = operations
        self.resources = resources
def get_neuron_count(self):
total_neurons = 0
for op, res in zip(self.operations, self.resources):
if op in operations_for_neuron_count:
total_neurons += np.prod(res['deepzono'][-1])
return total_neurons
def get_deepzono(self, nn, specLB, specUB = None):
    """
    This function will go through self.operations and self.resources and creates a list of Deepzono-Nodes which then can be run by an Analyzer object.
    It is assumed that self.resources[i]['deepzono'] holds the resources for the operation of type self.operations[i]
    Arguments
    ---------
    nn : object
        side-effect container; weights, biases, layertypes, shapes etc. of
        the parsed network are appended to it (exact type not visible from
        here -- presumably a 'layers' record, confirm at caller)
    specLB : numpy.ndarray
        1D array with the lower bound of the input spec
    specUB : numpy.ndarray
        1D array with the upper bound of the input spec; when None the
        input is treated as a zonotope encoded in specLB
    Return
    ------
    execute_list : list
        list of Deepzono-Nodes that can be run by an Analyzer object
    output_info : list
        (output_name, output_shape) pairs, one per parsed operation
    """
    execute_list = []
    output_info = []
    domain = 'deepzono'
    nbr_op = len(self.operations)
    i = 0
    while i < nbr_op:
        if self.operations[i] == "Placeholder":
            input_names, output_name, output_shape = self.resources[i][domain]
            if specUB is None:
                # no upper bound given: specLB encodes a zonotope input
                execute_list.append(DeepzonoInputZonotope(specLB, input_names, output_name, output_shape))
            else:
                execute_list.append(DeepzonoInput(specLB, specUB, input_names, output_name, output_shape))
            i += 1
        elif self.operations[i] == "MatMul":
            if i != nbr_op-1 and self.operations[i+1] in ["Add", "BiasAdd"]:
                # fuse MatMul with the following bias add into one affine node
                matrix, m_input_names, _, _ = self.resources[i][domain]
                bias, _, output_name, b_output_shape = self.resources[i+1][domain]
                nn.weights.append(matrix)
                nn.biases.append(bias)
                nn.layertypes.append('Affine')
                nn.numlayer+= 1
                execute_list.append(DeepzonoAffine(matrix, bias, m_input_names, output_name, b_output_shape))
                i += 2
            else:
                #self.resources[i][domain].append(refine)
                execute_list.append(DeepzonoMatmul(*self.resources[i][domain]))
                i += 1
        elif self.operations[i] == "Gemm":
            # ONNX Gemm already carries both the matrix and the bias
            matrix, bias, m_input_names, b_output_name, b_output_shape = self.resources[i][domain]
            nn.weights.append(matrix)
            nn.biases.append(bias)
            nn.layertypes.append('Affine')
            nn.numlayer+= 1
            execute_list.append(DeepzonoAffine(matrix, bias, m_input_names, b_output_name, b_output_shape))
            i += 1
        elif self.operations[i] == "Conv2D":
            if i != nbr_op-1 and self.operations[i+1] == "BiasAdd":
                # fuse Conv2D with the following BiasAdd
                filters, image_shape, strides, pad_top, pad_left, c_input_names, _, _ = self.resources[i][domain]
                bias, _, b_output_name, b_output_shape = self.resources[i+1][domain]
                nn.numfilters.append(filters.shape[3])
                nn.filter_size.append([filters.shape[0], filters.shape[1]])
                nn.input_shape.append([image_shape[0],image_shape[1],image_shape[2]])
                nn.strides.append([strides[0],strides[1]])
                nn.padding.append([pad_top, pad_left])
                nn.out_shapes.append(b_output_shape)
                nn.filters.append(filters)
                nn.biases.append(bias)
                # NOTE(review): unlike the no-bias branch below, no layertype
                # is appended in this branch -- confirm this is intended
                execute_list.append(DeepzonoConvbias(image_shape, filters, bias, strides, pad_top, pad_left, c_input_names, b_output_name, b_output_shape))
                i += 2
            else:
                filters, image_shape, strides, pad_top, pad_left, input_names, output_name, output_shape = self.resources[i][domain]
                nn.numfilters.append(filters.shape[3])
                nn.filter_size.append([filters.shape[0], filters.shape[1]])
                nn.input_shape.append([image_shape[0],image_shape[1],image_shape[2]])
                nn.strides.append([strides[0],strides[1]])
                nn.padding.append([pad_top, pad_left])
                nn.out_shapes.append(output_shape)
                nn.filters.append(filters)
                execute_list.append(DeepzonoConv(image_shape, filters, strides, pad_top, pad_left, input_names, output_name, output_shape))
                i += 1
                # 'NoReLU' marker is stripped later if a Relu follows
                nn.layertypes.append('Conv2DNoReLU')
                nn.numlayer+=1
        elif self.operations[i] == "Conv":
            filters, bias, image_shape, strides, pad_top, pad_left, c_input_names, output_name, b_output_shape = self.resources[i][domain]
            nn.numfilters.append(filters.shape[3])
            nn.filter_size.append([filters.shape[0], filters.shape[1]])
            nn.input_shape.append([image_shape[0],image_shape[1],image_shape[2]])
            nn.strides.append([strides[0],strides[1]])
            nn.out_shapes.append(b_output_shape)
            nn.padding.append([pad_top, pad_left])
            nn.filters.append(filters)
            nn.biases.append(bias)
            # 'NoReLU' marker is stripped later if a Relu follows
            nn.layertypes.append('Conv2DNoReLU')
            nn.numlayer+=1
            execute_list.append(DeepzonoConvbias(image_shape, filters, bias, strides, pad_top, pad_left, c_input_names, output_name, b_output_shape))
            i += 1
        elif self.operations[i] == "Add":
            #self.resources[i][domain].append(refine)
            execute_list.append(DeepzonoAdd(*self.resources[i][domain]))
            nn.layertypes.append('Add')
            nn.numlayer += 1
            i += 1
        elif self.operations[i] == "Sub":
            #self.resources[i][domain].append(refine)
            execute_list.append(DeepzonoSub(*self.resources[i][domain]))
            nn.layertypes.append('Sub')
            nn.numlayer += 1
            i += 1
        elif self.operations[i] == "Mul":
            #self.resources[i][domain].append(refine)
            execute_list.append(DeepzonoMul(*self.resources[i][domain]))
            nn.layertypes.append('Mul')
            nn.numlayer += 1
            i += 1
        elif self.operations[i] == "MaxPool" or self.operations[i] == "AveragePool" or self.operations[i] == "AvgPool":
            image_shape, window_size, strides, pad_top, pad_left, input_names, output_name, output_shape = self.resources[i][domain]
            nn.pool_size.append(window_size)
            nn.input_shape.append([image_shape[0],image_shape[1],image_shape[2]])
            nn.strides.append([strides[0],strides[1]])
            nn.out_shapes.append(output_shape)
            nn.padding.append([pad_top, pad_left])
            # NOTE(review): average pooling is also recorded as 'MaxPooling2D'
            # here -- confirm downstream relies only on the is_maxpool flag
            nn.layertypes.append('MaxPooling2D')
            nn.numlayer+=1
            is_maxpool = (self.operations[i]=="MaxPool")
            execute_list.append(DeepzonoPool(image_shape, window_size, strides, pad_top, pad_left, input_names, output_name, output_shape, is_maxpool))
            i += 1
        elif self.operations[i] == "Resadd":
            #self.resources[i][domain].append(refine)
            execute_list.append(DeepzonoResadd(*self.resources[i][domain]))
            nn.layertypes.append('Resaddnorelu')
            nn.numlayer += 1
            i += 1
        elif self.operations[i] == "Relu":
            #self.resources[i][domain].append(refine)
            # fold the activation into the preceding layer's type:
            # 'Affine' -> 'ReLU'; a trailing 'NoReLU'/'norelu' is stripped
            if nn.layertypes[-1]=='Affine':
                nn.layertypes[-1] = 'ReLU'
            if nn.layertypes[-1][-6:].lower() == 'norelu':
                nn.layertypes[-1] = nn.layertypes[-1][:-6]
            execute_list.append(DeepzonoRelu(*self.resources[i][domain]))
            i += 1
        elif self.operations[i] == "Sigmoid":
            execute_list.append(DeepzonoSigmoid(*self.resources[i][domain]))
            nn.layertypes.append('Sigmoid')
            nn.numlayer += 1
            i += 1
        elif self.operations[i] == "Tanh":
            execute_list.append(DeepzonoTanh(*self.resources[i][domain]))
            nn.layertypes.append('Tanh')
            nn.numlayer += 1
            i += 1
        elif self.operations[i] == "Gather":
            image_shape, indexes, axis, input_names, output_name, output_shape = self.resources[i][domain]
            calculated_indexes = self.get_gather_indexes(image_shape, indexes, axis)
            execute_list.append(DeepzonoGather(calculated_indexes, input_names, output_name, output_shape))
            nn.layertypes.append('Gather')
            nn.numlayer += 1
            i += 1
        elif self.operations[i] == "Reshape":
            # Reshape is realized with the Gather node (pure index mapping)
            execute_list.append(DeepzonoGather(*self.resources[i][domain]))
            nn.layertypes.append('Gather')
            nn.numlayer += 1
            i += 1
        else:
            assert 0, "the optimizer for Deepzono doesn't know of the operation type " + self.operations[i]
        # for testing, getting the corresponding layer in the tensorflow or onnx model
        output_info.append(self.resources[i-1][domain][-2:])
    use_dict = self.deepzono_get_dict(execute_list)
    self.set_predecessors(nn, execute_list)
    execute_list = self.deepzono_forward_pass(execute_list, use_dict)
    return execute_list, output_info
def deepzono_get_dict(self, ir_list):
    """Count how often each node output is consumed downstream.

    Returns a dict mapping every node's output_name to the number of
    times that name appears as an input of a later node (0 when unused).
    Assumes ir_list is topologically ordered: every input name must have
    been registered as an earlier node's output, otherwise a KeyError is
    raised.
    """
    use_count = {}
    for node in ir_list:
        for name in node.input_names:
            # bump the consumer count of the producing node's output
            use_count[name] = use_count[name] + 1
        # register this node's own output with zero consumers so far
        use_count[node.output_name] = 0
    return use_count
def deepzono_forward_pass(self, ir_list, use_dict):
    """
    This function plans which Deepzono-Node-output occupies which section of an abstract element. If a DeepzonoDuplicate-Node should be needed, then this function will add it.
    This is needed when we have a ResNet or later RNNs.
    Arguments
    ---------
    ir_list : list
        list of Nodes, where each node has the fields output_length, input_names, and output_name (see DeepzonoNodes.py for examples)
    use_dict : dict
        maps the output_name of each node in ir_list to the number of times the node's output will be used
    Return
    ------
    ir_list : list
        the ir_list with updated and potentially added nodes
    """
    def get_index(active_abstracts, in_name, index_store):
        # find the first occurrence of in_name in active_abstracts that has
        # not already been claimed by an earlier input of the same node
        index = 0
        while True:
            index = index + active_abstracts[index:].index(in_name)
            if not index in index_store:
                break
            index += 1
        return index
    # active_abstracts: names of the outputs currently resident, in section
    # order; abstract_length: the matching section lengths
    active_abstracts = []
    abstract_length = []
    i = 0
    while i < len(ir_list):
        node = ir_list[i]
        index_store = []
        # abstract_information is [offset, length] per consumed input
        node.abstract_information = []
        for in_name in node.input_names:
            index = get_index(active_abstracts, in_name, index_store)
            length = abstract_length[index]
            # offset = sum of the lengths of all preceding sections
            offset = reduce(lambda x,y: x+y, abstract_length[:index], 0)
            node.abstract_information += [offset, length]
            index_store.append(index)
        if len(index_store) != 0:
            # the node's output replaces its first input's section ...
            active_abstracts[index_store[0]] = node.output_name
            abstract_length[index_store[0]] = node.output_length
            # ... and all further consumed sections are removed.
            # NOTE(review): deleting by the raw stored indices assumes the
            # earlier deletions do not shift later ones -- confirm the
            # inputs keep this safe
            for j in range(1,len(index_store)):
                index = index_store[j]
                del active_abstracts[index]
                del abstract_length[index]
        else:
            # source node (no inputs): append a fresh section at the end
            active_abstracts.append(node.output_name)
            abstract_length.append(node.output_length)
            node.abstract_information = [0, node.output_length]
        i += 1
        if use_dict[node.output_name] > 1:
            # output consumed multiple times: insert duplicate nodes so each
            # consumer gets its own copy of the section
            for j in range(1, use_dict[node.output_name]):
                ir_list.insert(i, DeepzonoDuplicate(node.abstract_information[0], node.output_length))
                active_abstracts.append(node.output_name)
                abstract_length.append(node.output_length)
                i += 1
    return ir_list
def get_deeppoly(self, nn, specLB, specUB, lexpr_weights, lexpr_cst, lexpr_dim, uexpr_weights, uexpr_cst, uexpr_dim, expr_size):
"""
This function will go through self.operations and self.resources and create a list of Deeppoly-Nodes which then can be run by an Analyzer object.
It is assumed that self.resources[i]['deeppoly'] holds the resources for an operation of type self.operations[i].
self.operations should only contain a combination of the following 4 basic sequences:
- Placholder (only at the beginning)
- MatMul -> Add -> Relu
- Conv2D -> Add -> Relu (not as last layer)
- MaxPool/AveragePool (only as intermediate layer)
Arguments
---------
specLB : numpy.ndarray
1D array with the lower bound of the input spec
specUB : numpy.ndarray
1D array with the upper bound of the input spec
Return
------
execute_list : list
list of Deeppoly-Nodes that can be run by an Analyzer object
"""
execute_list = []
output_info = []
domain = 'deeppoly'
i = 0
while i < len(self.operations):
#print(self.operations[i])
if self.operations[i] == "Placeholder":
input_names, output_name, output_shape = self.resources[i][domain]
execute_list.append(DeeppolyInput(specLB, specUB, input_names, output_name, output_shape,
lexpr_weights, lexpr_cst, lexpr_dim, uexpr_weights, uexpr_cst, uexpr_dim, expr_size))
i += 1
# Tensorflow operation
elif self.operations[i] == "MatMul" and self.operations[i+1] in ["Add", "BiasAdd"]:
matrix, input_names, _,_ = self.resources[i][domain]
bias,_, output_name, output_shape = self.resources[i+1][domain]
if i != len(self.operations) - 2 and self.operations[i + 2] in ["Relu", "Sigmoid", "Tanh"]:
_,output_name, output_shape = self.resources[i+2][domain]
nn.weights.append(matrix)
nn.biases.append(bias)
nn.layertypes.append('Affine')
nn.numlayer+= 1
if i == 1:
if (self.operations[i + 2] == "Relu"):
nn.layertypes[-1] = 'ReLU'
execute_list.append(DeeppolyReluNodeFirst(matrix, bias, input_names, output_name, output_shape))
elif (self.operations[i + 2] == "Sigmoid"):
execute_list.append(DeeppolySigmoidNodeFirst(matrix, bias, input_names, output_name, output_shape))
elif (self.operations[i + 2] == "Tanh"):
execute_list.append(DeeppolyTanhNodeFirst(matrix, bias, input_names, output_name, output_shape))
else:
assert 0
i += 3
elif i == len(self.operations) - 2:
execute_list.append(DeeppolyReluNodeLast(matrix, bias, False, input_names, output_name, output_shape))
i += 2
elif i == len(self.operations) - 3:
if(self.operations[i+2] == "Relu"):
nn.layertypes[-1] = 'ReLU'
execute_list.append(DeeppolyReluNodeLast(matrix, bias, True, input_names, output_name, output_shape))
elif(self.operations[i+2] == "Sigmoid"):
execute_list.append(DeeppolySigmoidNodeLast(matrix, bias, True, input_names, output_name, output_shape))
elif(self.operations[i+2] == | |
""" Renames the instance.
Args:
new_name: the new name of the instance
"""
if not self.writability:
raise FSError("current file is not writable, check the permission")
if os.path.exists(os.path.join(os.path.dirname(self._path), new_name)):
raise FSError("file exists cannot use same name.")
new_name = os.path.join(os.path.dirname(self._path), new_name)
os.rename(self._path, new_name)
self._path = new_name
class phile(_BaseFileSystem):
    """phile class inherits from _BaseFileSystem, please check with
    BaseFileSystem class for more information about the methods and properties
    that can be used.
    This class defines the base file object, can be inherited and expanded by
    other file based classes as well.
    """
    def __init__(self, in_path = None):
        super(phile, self).__init__(in_path)
        # chunk size used when streaming the file for md5 hashing
        self.__size_md5 = 4096
    def is_lnk(self):
        """Checks if the current file is a .lnk file.

        Returns:
            True when the file name ends with '.lnk' (case-insensitive).
        """
        # was an empty stub that always returned None
        return self._path.lower().endswith('.lnk')
    def copy(self, target_file):
        """Copy the current file object to the target.
        Args:
            target_file: the target full path of the file.
        Raises:
            FSError: when the target already exists.
        """
        if os.path.exists(target_file):
            raise FSError('%s exists.' % target_file)
        shutil.copy2(self._path, target_file)
    @property
    def md5(self):
        """ Return file md5.
        The file is streamed in chunks so large files do not have to fit
        in memory. The below code was downloaded from the internet, some
        parts have been modified to make it work with filesystem.py.
        For the original info, please check with the url below.
        http://www.cnblogs.com/mmix2009/p/3229679.html
        """
        myhash = hashlib.md5()
        # 'with open' replaces the Python-2-only file() builtin and
        # guarantees the handle is closed even if read() raises
        with open(self._path, 'rb') as f:
            while True:
                b = f.read(self.__size_md5)
                if not b:
                    break
                myhash.update(b)
        return myhash.hexdigest()
    def move(self):
        # TODO: not implemented yet
        pass
    def owner(self):
        """TODO: not implemented yet."""
class _Diff(object):
""" _Diff stores the diff info between two directories.
"""
def __init__(self):
self.dirA = None
self.dirB = None
self.addition = []
self.removal = []
self.change = []
class directory(_BaseFileSystem):
    """directory class inherits from _BaseFileSystem, please check with
    BaseFileSystem class for more information about the methods and properties
    that can be used.
    This class defines the directory object, can be inherited and expanded by
    other directory based classes as well.
    """
    # shared across ALL instances: (path, exception) pairs collected when
    # constructing a child object fails during iteration (see _get_obj)
    _walk_err_collection = []
def __init__(self, in_path, do_walk = False):
    """Initialize the directory object.

    Args:
        in_path: the full path of the directory object.
        do_walk: when True, iteration covers the whole directory tree;
            otherwise only the immediate children are iterated.
    """
    super(directory, self).__init__(in_path)
    self._do_walk = do_walk
    # snapshot of the directory entries (trailing os.sep kept as-is)
    self._content = os.listdir(self._path + os.sep)
def __iter__(self):
    """Yield ($path, $object) pairs for the directory contents.

    Instances are enumerable with the `for` keyword. When the instance
    was created with do_walk=True the whole tree is produced (similar to
    os.walk() but with objects); otherwise only the entries of this
    folder are produced (similar to os.listdir() but with objects).

    Yields: a tuple ($path, $object)
        $path : full path of a file or a directory
        $object: a phile or directory instance, or None when object
            creation hit an unexpected exception.

    Raises:
        FSError: when the directory held no entries at creation time.
    """
    if not self._content:
        raise FSError("No file has been found, please check the directory")
    producer = self._walk() if self._do_walk else self._no_walk()
    for pair in producer:
        yield pair
def _walk(self):
    """Walk the entire directory tree, yielding ($path, $object) pairs.

    For each visited folder the files are produced first, then the
    subdirectories.
    """
    for dirpath, subdirs, files in os.walk(self._path):
        for name in files:
            full = os.path.join(dirpath, name)
            yield (full, self._get_obj(full, phile))
        for name in subdirs:
            full = os.path.join(dirpath, name)
            yield (full, self._get_obj(full, directory))
def _no_walk(self):
    """Yield ($path, $object) for each immediate child of this folder."""
    for name in self._content:
        full = self._path + os.sep + name
        # pick the wrapper class by what the entry actually is on disk
        if os.path.isfile(full):
            kind = phile
        elif os.path.isdir(full):
            kind = directory
        else:
            kind = None
        yield (full, self._get_obj(full, kind))
def _get_obj(self, full, cls):
    """Instantiate cls(full), swallowing construction failures.

    Returns the new object, or None when instantiation raised; the
    (path, exception) pair is then recorded in the class-level
    directory._walk_err_collection for later inspection.
    """
    try:
        return cls(full)
    # 'except X as e' replaces the Python-2-only 'except X, e' form,
    # which is a SyntaxError on Python 3
    except Exception as e:
        directory._walk_err_collection.append((full, e))
        return None
def __add__(self, other):
    """Copy other file(s) into the current directory.

    self._content is refreshed after the copies are made.

    Args:
        other: a single file name in string (or phile object) or a list
            of them.
    Returns:
        a list of phile objects for the files actually copied; inputs
        whose absolute path already equals the destination are skipped.
    Raises:
        FSError: when any input fails validation.
    """
    ok, paths = self._validate_other(other)
    copied = []
    if not ok:
        raise FSError("Please check your input file - %s" % str(other))
    for src in paths:
        dest = self._path + os.sep + os.path.basename(src)
        if os.path.abspath(dest) == os.path.abspath(src):
            # same file already lives here -- ignore it
            continue
        shutil.copy2(src, dest)
        copied.append(phile(dest))
    self._update()
    return copied
def add(self, other):
    """Named alias for the + operator; see __add__."""
    return self + other
def __sub__(self, other):
    """ Remove other file(s) from the current directory.

    self._content is refreshed after the removals.

    Args:
        other: a single file name (or phile object) or a list of file
            names (or phile objects).
    Raises:
        FSError: when validation fails or a file is not in this folder.
    """
    ok, paths = self._validate_other(other)
    if not ok:
        raise FSError("Please check your input file - %s" % str(other))
    # verify the whole batch first so nothing is deleted on a bad input
    for p in paths:
        if os.path.basename(p) not in self._content:
            raise FSError("%s cannot be found in %s" % (p, self._path))
    for p in paths:
        os.remove(p)
    self._update()
def sub(self, other):
    """Named alias for the - operator; see __sub__. Returns None."""
    self - other
def _validate_other(self, other):
    """Validate the operand of the +/- operations.

    Args:
        other: file path(s) or phile object(s).
    Returns:
        a tuple ($check_result, $a_list_of_file_path); when the check
        fails for a list input the path part is None.
    """
    if type(other) != list:
        # single input: delegate and wrap the path in a list
        ok, path = self._validate_single_file(other)
        return ok, [path]
    if not other:
        return False, None  # empty list: nothing to validate
    paths = []
    for ok, path in self._validate_file_list(other):
        if not ok:
            return False, None
        paths.append(path)
    return True, paths
def _validate_single_file(self, other):
    """Check if a single input is a valid file.

    Args:
        other: input when + with other object.
    Returns:
        (True, path) when other names an existing file, else
        (False, path-or-None).
    """
    # isinstance replaces the exact type(other) == str check; it also
    # accepts str subclasses, which is backward compatible here
    if isinstance(other, str):
        return os.path.isfile(other), other
    elif isinstance(other, phile):
        return os.path.isfile(other.path), other.path
    else:
        return False, None
def _validate_file_list(self, other):
    """Yield one (check_result, path) tuple per entry of other."""
    for entry in other:
        yield self._validate_single_file(entry)
def _update(self):
    """Re-read the directory listing into self._content."""
    refreshed = os.listdir(self._path)
    self._content = refreshed
def _parent(self):
    """Return the parent directory as a string: '<path><sep>..'."""
    return os.sep.join([self._path, os.pardir])
# uses _search()
def count(self, kw = None, do_walk = False):
    """Counts the contents in the directory filtered by kw.

    Args:
        kw : result will be filtered by keyword (exact name match).
        do_walk : count the entire tree if set to True, otherwise only
            the current path is considered.
    Returns:
        The number of matches.
    """
    if not kw and not do_walk:
        return len(self._content)
    elif kw and not do_walk:
        return len([ch for ch in self._content if kw == ch])
    elif kw and do_walk:
        return len(self._search(kw, True, self._walk))
    else:
        # kw is falsy and do_walk is True: count every entry in the
        # whole tree (this case previously fell through, returning None)
        return sum(1 for _ in self._walk())
def _search(self, kw, is_match_full_name, generator):
    """ Search the content produced by a generator for matching names.

    Args:
        kw: the keyword.
        is_match_full_name: match mode; when True the result name must
            equal kw exactly, otherwise containment of kw is enough.
        generator: the bound _walk or _no_walk method of this class.
    Returns:
        a list of matched objects (entries whose object is falsy are
        skipped).
    """
    matches = []
    for _, obj in generator():
        if not obj:
            continue
        hit = (obj.name == kw) if is_match_full_name else (kw in obj.name)
        if hit:
            matches.append(obj)
    return matches
# uses _search()
#TODO: add filter for sep file and dir.
def search(self, kw, do_walk = False):
    """Search files or directories whose name contains the keyword.

    Example:
        >>> # search 'a' from a dir, returns objects with 'a' in the name
        >>> my_dir = directory('/test')
        >>> my_dir.search('a', )
        [phile('/test/abc.txt'), directory('/test/a')]

    Args:
        kw: the keyword.
        do_walk: when True the entire tree is searched.
    Returns:
        a list of objects that match the condition.
    """
    walker = self._walk if do_walk else self._no_walk
    return self._search(kw, False, walker)
# uses _search()
def has(self, kw):
""" Find file or folder in the current directory. It's same to call
search(kw = keyword, do_walk | |
# -*- coding: utf-8 -*-
"""
Functions for transforming between color spaces.
Change log:
2015/10/09 -- compand, inverse_compand added; <EMAIL>
"""
import copy
import numpy
import imageutils
import whitepoint
#
# module constants
#
# CIE epsilon/kappa thresholds used by the Lab/Luv companding functions.
# These are the commonly used truncated decimal values; the exact
# rational forms are 216/24389 and 24389/27.
_lab_epsilon = 0.008856
_lab_kappa = 903.3
#
# module utilities
#
def _safe_divide(num, denom, replace=0):
    """Elementwise num / denom with zero denominators handled.

    Positions whose denominator is zero receive ``replace`` (default 0)
    in the result instead of a division error.
    """
    # TODO: consider moving to a utility module (tests in colorunittests.py)
    num = _to_npa(num)
    denom = _to_npa(denom)
    assert(num.shape == denom.shape)
    bad = denom == 0.0
    if not bad.any():
        return num / denom
    # divide against a patched copy, then overwrite the bad positions
    patched = copy.copy(denom)
    patched[bad] = 1.0
    out = num / patched
    out[bad] = replace
    return out
def _to_npa(value):
"""Converts a scalar or list to a numpy array."""
#consider moving to a utility module; copy over tests from colorunittests.py
if isinstance(value, numpy.ndarray):
return value
elif isinstance(value, (list, tuple)):
#note: any /ordered/ iterable should be allowed to work, as long as it
#is numeric
#TODO: include checks for other numeric iterables
return numpy.array(value)
elif isinstance(value, (int, float)):
return numpy.array([value])
else:
raise TypeError
def _bilevel_func(x, func_a, func_b, threshold, thresh_var=None):
    """Apply func_a above a threshold and func_b at-or-below it.

    func_a is treated as the default; entries whose threshold variable is
    <= threshold are replaced by func_b's output. The threshold test runs
    on thresh_var when given, otherwise on x itself.
    """
    x_npa = _to_npa(x)
    gate = x_npa if thresh_var is None else _to_npa(thresh_var)
    # _to_npa may hand back the caller's own array, and func_a/func_b
    # could modify their argument in place -- work on a copy so the
    # caller's data is never changed unexpectedly
    res = func_a(x_npa.copy())
    low = gate <= threshold
    if low.any():
        res[low] = func_b(x_npa[low])
    return res
#
# transforms!
#
def compand(rgb_linear):
    """Compand from linear RGB to sRGB.

    The input is linear RGB in [0..1]; the output is a uint8 sRGB image.
    """
    assert(isinstance(rgb_linear, numpy.ndarray))
    gamma = lambda x: 1.055 * x ** (1/2.4) - 0.055
    linear = lambda x: x * 12.92
    companded = _bilevel_func(rgb_linear, gamma, linear, 0.0031308)
    return imageutils.float2uint8(companded)
def inverse_compand(img):
    """Convert sRGB to linear RGB by inverting the sRGB companding.

    The input image must be uint8; the output is linear, in [0..1].
    """
    assert(isinstance(img, numpy.ndarray))
    assert(img.dtype == numpy.uint8)  # TODO: throw a TypeError instead
    normalized = img / 255.  # to normalized float
    gamma = lambda x: ((x + 0.055) / 1.055) ** 2.4
    linear = lambda x: x / 12.92
    return _bilevel_func(normalized, gamma, linear, 0.0405)
def xyz2xy(vec):
    """Calculate the (x,y) chromaticity coordinates from an XYZ triplet."""
    assert(isinstance(vec, numpy.ndarray))
    assert(len(vec) == 3)
    total = float(numpy.sum(vec))
    return vec[:2] / total
def xyz2xyy(vector):
    """Convert from XYZ to xyY.
    Note: xyY is sometimes called xyL."""
    assert(isinstance(vector, numpy.ndarray))
    assert(len(vector) == 3)
    # TODO: be careful about the stacking if vector is vert or horiz
    total = float(numpy.sum(vector))
    chroma = vector[:2] / total
    return numpy.hstack((chroma, vector[1]))
def xyy2xyz(vector):
    """Convert from xyY to XYZ.
    Note: xyY is sometimes called xyL."""
    assert(isinstance(vector, numpy.ndarray))
    assert(len(vector) == 3)
    x, y, Y = vector[0], vector[1], vector[2]
    X = x * Y / y
    Z = (1.0 - x - y) * Y / y
    return numpy.array([X, Y, Z])
def xy2xyz(vector):
    """Convert (x,y) coordinates to an XYZ triplet, assuming Y=1."""
    assert(isinstance(vector, numpy.ndarray))
    assert(len(vector) == 2)
    return xyy2xyz(numpy.append(vector, 1))
def lab_inverse_compand(v):
    """Inverse companding used when converting XYZ to Lab and Luv.

    This is the cube-root companding producing the f_xyz values that are
    then used directly to compute L*a*b*. The input is the X, Y, or Z
    value normalized against the whitepoint of the XYZ colorspace.
    """
    # constants from the CIE standard; the truncated values are known to
    # introduce a small discontinuity at the threshold
    cube_root = lambda x: x ** (1 / 3.)
    linear = lambda x: (_lab_kappa * x + 16.) / 116.
    return _bilevel_func(v, cube_root, linear, _lab_epsilon)
def xyz2lab(xyz_img, white_ref=whitepoint.D50):
    """Converts from XYZ to CIELAB (aka, L*a*b*).

    white_ref is the whitepoint of the XYZ color space (default D50; any
    other whitepoint.WhitePoint object may be used). The whitepoint must
    be on the same scale as the XYZ values: if XYZ ranges over [0..1],
    the whitepoint values should be close to 1.
    """
    assert(isinstance(xyz_img, numpy.ndarray))
    X, Y, Z = imageutils.split3(xyz_img)
    # normalize each channel against the reference white; ratios above 1
    # are legal, not an error
    fx = lab_inverse_compand(X / white_ref.X)
    fy = lab_inverse_compand(Y / white_ref.Y)
    fz = lab_inverse_compand(Z / white_ref.Z)
    L = 116.0 * fy - 16.0
    a = 500.0 * (fx - fy)
    b = 200.0 * (fy - fz)
    return imageutils.cat3(L, a, b)
def _lab_finv(V):
    """Invert the Lab companding for the f_x / f_z channels."""
    above = lambda f: f ** 3.0
    below = lambda f: (116. * f - 16) / _lab_kappa
    return _bilevel_func(V, above, below, _lab_epsilon ** (1 / 3.))
def _lab_yinv(L):
    """Recover the normalized y ratio from L* when inverting Lab."""
    above = lambda x: ((x + 16.) / 116.) ** 3.
    below = lambda x: x / _lab_kappa
    return _bilevel_func(L, above, below, _lab_epsilon * _lab_kappa)
def lab2xyz(lab_img, white_ref=whitepoint.D50):
    """Converts CIELAB's L*a*b* to XYZ.

    white_ref is the whitepoint of the target XYZ color space; any
    whitepoint.WhitePoint object may be passed. The default is D50.
    """
    assert(isinstance(lab_img, numpy.ndarray))
    L, a, b = imageutils.split3(lab_img)
    fy = (L + 16.) / 116.
    fx = fy + a / 500.
    fz = fy - b / 200.
    X = _lab_finv(fx) * white_ref.X
    Y = _lab_yinv(L) * white_ref.Y
    Z = _lab_finv(fz) * white_ref.Z
    return imageutils.cat3(X, Y, Z)
def _uprime(X, Y, Z):
    """Calculates the u' chromaticity term used in XYZ<->Luv."""
    denom = X + 15. * Y + 3. * Z
    return _safe_divide(4. * X, denom)
def _vprime(X, Y, Z):
    """Calculates the v' chromaticity term used in XYZ<->Luv."""
    denom = X + 15. * Y + 3. * Z
    return _safe_divide(9. * Y, denom)
def xyz2luv(xyz_img, white_ref=whitepoint.D50):
    """Converts XYZ to CIELUV (aka, L*u*v*).

    A D50 whitepoint is assumed for the XYZ values by default; any other
    whitepoint.WhitePoint object can be passed and must be on the same
    scale as the XYZ data.
    """
    assert(isinstance(xyz_img, numpy.ndarray))
    X, Y, Z = imageutils.split3(xyz_img)
    above = lambda y: 116. * y ** (1 / 3.) - 16.
    below = lambda y: y * _lab_kappa
    L = _bilevel_func(Y / white_ref.Y, above, below, _lab_epsilon)
    ref_u = _uprime(*white_ref.XYZ)
    ref_v = _vprime(*white_ref.XYZ)
    u = 13.0 * L * (_uprime(X, Y, Z) - ref_u)
    v = 13.0 * L * (_vprime(X, Y, Z) - ref_v)
    return imageutils.cat3(L, u, v)
def luv2xyz(luv_img, white_ref=whitepoint.D50):
    """Converts CIELUV to XYZ.

    white_ref is the whitepoint of the target XYZ colorspace (default
    D50); any other whitepoint.WhitePoint object may be used.
    Equations follow the standard CIELUV inverse formulation.
    """
    assert(isinstance(luv_img, numpy.ndarray))
    L, u, v = imageutils.split3(luv_img)
    above = lambda x: ((x + 16.) / 116.) ** 3.
    below = lambda x: x / _lab_kappa
    Y = white_ref.Y * _bilevel_func(L, above, below, _lab_kappa * _lab_epsilon)
    # recover the u'/v' chromaticities, guarding the L == 0 positions
    uprime = _safe_divide(u, 13. * L) + _uprime(*white_ref.XYZ)
    vprime = _safe_divide(v, 13. * L) + _vprime(*white_ref.XYZ)
    X = Y * _safe_divide(9. * uprime, 4. * vprime)
    Z = Y * _safe_divide(12. - 3. * uprime - 20. * vprime, 4. * vprime)
    return imageutils.cat3(X, Y, Z)
def uv2xy(vector):
    """Convert (u', v') coordinates to (x, y) chromaticity coordinates."""
    assert(len(vector) == 2)
    u, v = vector[0], vector[1]
    denom = 6. * u - 16. * v + 12.
    x = _safe_divide(9. * u, denom)
    y = _safe_divide(4. * v, denom)
    return numpy.array([x, y]).flatten()
def xy2uv(vector):
assert(len(vector) == 2)
x = vector[0]
y = vector[1]
denom = -2. * x + 12. * y + | |
permutation refer.
Examples
========
>>> from sympy.combinatorics import Permutation
>>> Permutation.print_cyclic = True
>>> from sympy.combinatorics.permutations import Cycle
>>> from sympy.combinatorics.polyhedron import Polyhedron
>>> from sympy.combinatorics.perm_groups import PermutationGroup
The permutations corresponding to motion of the front, right and
bottom face of a 2x2 Rubik's cube are defined:
>>> F = Permutation(2, 19, 21, 8)(3, 17, 20, 10)(4, 6, 7, 5)
>>> R = Permutation(1, 5, 21, 14)(3, 7, 23, 12)(8, 10, 11, 9)
>>> D = Permutation(6, 18, 14, 10)(7, 19, 15, 11)(20, 22, 23, 21)
These are passed as permutations to PermutationGroup:
>>> G = PermutationGroup(F, R, D)
>>> G.order()
3674160
The group can be supplied to a Polyhedron in order to track the
objects being moved. An example involving the 2x2 Rubik's cube is
given there, but here is a simple demonstration:
>>> a = Permutation(2, 1)
>>> b = Permutation(1, 0)
>>> G = PermutationGroup(a, b)
>>> P = Polyhedron(list('ABC'), pgroup=G)
>>> P.corners
(A, B, C)
>>> P.rotate(0) # apply permutation 0
>>> P.corners
(A, C, B)
>>> P.reset()
>>> P.corners
(A, B, C)
Or one can make a permutation as a product of selected permutations
and apply them to an iterable directly:
>>> P10 = G.make_perm([0, 1])
>>> P10('ABC')
['C', 'A', 'B']
See Also
========
sympy.combinatorics.polyhedron.Polyhedron,
sympy.combinatorics.permutations.Permutation
References
==========
[1] Holt, D., Eick, B., O'Brien, E.
"Handbook of Computational Group Theory"
[2] Seress, A.
"Permutation Group Algorithms"
[3] http://en.wikipedia.org/wiki/Schreier_vector
[4] http://en.wikipedia.org/wiki/Nielsen_transformation
#Product_replacement_algorithm
[5] Frank Celler, Charles R. Leedham-Green, Scott H. Murray,
Alice C. Niemeyer, and E.A.O'Brien. "Generating Random
Elements of a Finite Group"
[6] http://en.wikipedia.org/wiki/Block_%28permutation_group_theory%29
[7] http://www.algorithmist.com/index.php/Union_Find
[8] http://en.wikipedia.org/wiki/Multiply_transitive_group#Multiply_transitive_groups
[9] http://en.wikipedia.org/wiki/Center_%28group_theory%29
[10] http://en.wikipedia.org/wiki/Centralizer_and_normalizer
[11] http://groupprops.subwiki.org/wiki/Derived_subgroup
[12] http://en.wikipedia.org/wiki/Nilpotent_group
[13] http://www.math.colostate.edu/~hulpke/CGT/cgtnotes.pdf
"""
def __new__(cls, *args, **kwargs):
    """The default constructor. Accepts Cycle and Permutation forms.
    Removes duplicates unless ``dups`` keyword is False.
    """
    # a single sequence argument is unpacked into the generator list
    args = list(args[0] if is_sequence(args[0]) else args)
    if not args:
        raise ValueError('must supply one or more permutations '
            'to define the group')
    if any(isinstance(a, Cycle) for a in args):
        args = [Permutation(a) for a in args]
    if has_variety(a.size for a in args):
        # generators act on different numbers of points: resize them all
        # to a common degree (the max size, unless ``degree`` is given)
        degree = kwargs.pop('degree', None)
        if degree is None:
            degree = max(a.size for a in args)
        for i in range(len(args)):
            if args[i].size != degree:
                args[i] = Permutation(args[i], size=degree)
    if kwargs.pop('dups', True):
        args = uniq([Permutation._af_new(list(a)) for a in args])
    obj = Basic.__new__(cls, *args, **kwargs)
    obj._generators = args
    # lazily-computed properties, None/[] until first requested
    obj._order = None
    obj._center = []
    obj._is_abelian = None
    obj._is_transitive = None
    obj._is_sym = None
    obj._is_alt = None
    obj._is_primitive = None
    obj._is_nilpotent = None
    obj._is_solvable = None
    obj._is_trivial = None
    obj._transitivity_degree = None
    obj._max_div = None
    obj._r = len(obj._generators)
    obj._degree = obj._generators[0].size
    # these attributes are assigned after running schreier_sims
    obj._base = []
    obj._stabilizer_cosets = []
    obj._stabilizer_cosets_n = []
    obj._stabilizer_gens = []
    obj._strong_gens = []
    obj._basic_orbits = []
    obj._transversals = []
    # these attributes are assigned after running _random_pr_init
    obj._random_gens = []
    return obj
def __getitem__(self, i):
    """Return the ``i``-th generator of the group."""
    return self._generators[i]
def __len__(self):
    """Return the number of generators of the group."""
    return len(self._generators)
def __eq__(self, other):
    """Return True if ``self`` and ``other`` have the same set of
    generators; ordering and duplication of generators are ignored.
    Examples
    ========
    >>> from sympy.combinatorics import Permutation
    >>> from sympy.combinatorics.perm_groups import PermutationGroup
    >>> p = Permutation(0, 1, 2, 3, 4, 5)
    >>> G = PermutationGroup([p, p**2])
    >>> H = PermutationGroup([p**2, p])
    >>> G.generators == H.generators
    False
    >>> G == H
    True
    """
    return (isinstance(other, PermutationGroup) and
            set(self.generators) == set(other.generators))
def __hash__(self):
    # Defining __eq__ above suppresses the inherited __hash__, so
    # delegate explicitly to the superclass implementation.
    return super(PermutationGroup, self).__hash__()
def __mul__(self, other):
    """Return the direct product of two permutation groups as a permutation
    group.
    This implementation realizes the direct product by shifting
    the index set for the generators of the second group: so if we have
    G acting on n1 points and H acting on n2 points, G*H acts on n1 + n2
    points.
    Examples
    ========
    >>> from sympy.combinatorics.perm_groups import PermutationGroup
    >>> from sympy.combinatorics.named_groups import CyclicGroup
    >>> G = CyclicGroup(5)
    >>> H = G*G
    >>> H
    PermutationGroup([
    Permutation(9)(0, 1, 2, 3, 4),
    Permutation(5, 6, 7, 8, 9)])
    >>> H.order()
    25
    """
    gens1 = [perm.array_form for perm in self.generators]
    gens2 = [perm.array_form for perm in other.generators]
    n1 = self.degree
    n2 = other.degree
    # Materialize the ranges as lists: on Python 3 ``range`` is lazy and
    # cannot be concatenated to the list-valued array forms below.
    start = list(range(n1))
    end = list(range(n1, n1 + n2))
    # Shift the second group's generators to act on points n1..n1+n2-1.
    for i in range(len(gens2)):
        gens2[i] = [x + n1 for x in gens2[i]]
    # Extend each generator with the identity action on the other block.
    gens2 = [start + gen for gen in gens2]
    gens1 = [gen + end for gen in gens1]
    together = gens1 + gens2
    gens = [_af_new(x) for x in together]
    return PermutationGroup(gens)
def _random_pr_init(self, r, n, _random_prec_n=None):
    r"""Initialize random generators for the product replacement algorithm.
    The implementation uses a modification of the original product
    replacement algorithm due to Leedham-Green, as described in [1],
    pp. 69-71; also, see [2], pp. 27-29 for a detailed theoretical
    analysis of the original product replacement algorithm, and [4].
    The product replacement algorithm is used for producing random,
    uniformly distributed elements of a group ``G`` with a set of generators
    ``S``. For the initialization ``_random_pr_init``, a list ``R`` of
    ``\max\{r, |S|\}`` group generators is created as the attribute
    ``G._random_gens``, repeating elements of ``S`` if necessary, and the
    identity element of ``G`` is appended to ``R`` - we shall refer to this
    last element as the accumulator. Then the function ``random_pr()``
    is called ``n`` times, randomizing the list ``R`` while preserving
    the generation of ``G`` by ``R``. The function ``random_pr()`` itself
    takes two random elements ``g, h`` among all elements of ``R`` but
    the accumulator and replaces ``g`` with a randomly chosen element
    from ``\{gh, g(~h), hg, (~h)g\}``. Then the accumulator is multiplied
    by whatever ``g`` was replaced by. The new value of the accumulator is
    then returned by ``random_pr()``.
    The elements returned will eventually (for ``n`` large enough) become
    uniformly distributed across ``G`` ([5]). For practical purposes however,
    the values ``n = 50, r = 11`` are suggested in [1].
    Notes
    =====
    THIS FUNCTION HAS SIDE EFFECTS: it changes the attribute
    self._random_gens
    See Also
    ========
    random_pr
    """
    deg = self.degree
    random_gens = self.generators[:]
    k = len(random_gens)
    # Repeat generators cyclically until at least r of them are present.
    if k < r:
        for i in range(k, r):
            random_gens.append(random_gens[i - k])
    # The accumulator: the identity permutation. Listify the range so
    # that _af_new receives a concrete array form on Python 3 as well.
    acc = _af_new(list(range(deg)))
    random_gens.append(acc)
    self._random_gens = random_gens
    # handle randomized input for testing purposes
    if _random_prec_n is None:
        for i in range(n):
            self.random_pr()
    else:
        for i in range(n):
            self.random_pr(_random_prec=_random_prec_n[i])
def _union_find_merge(self, first, second, ranks, parents, not_rep):
    """Merge the union-find classes containing ``first`` and ``second``.
    Implements union by rank for Atkinson's minimal-block algorithm.
    Returns 1 when a merge happened, 0 when both elements were already
    in the same class, and -1 when the merged class would exceed
    ``self.max_div``.
    Notes
    =====
    THIS FUNCTION HAS SIDE EFFECTS: ``parents``, ``ranks`` and
    ``not_rep`` are updated in place when a merge occurs.
    See Also
    ========
    minimal_block, _union_find_rep
    References
    ==========
    [1] Holt, D., Eick, B., O'Brien, E.
    "Handbook of computational group theory"
    [7] http://www.algorithmist.com/index.php/Union_Find
    """
    root_a = self._union_find_rep(first, parents)
    root_b = self._union_find_rep(second, parents)
    if root_a == root_b:
        # Already in the same class; nothing to do.
        return 0
    # Union by rank: make root_a the root of the larger (or tied) tree.
    if ranks[root_a] < ranks[root_b]:
        root_a, root_b = root_b, root_a
    merged_rank = ranks[root_a] + ranks[root_b]
    if merged_rank > self.max_div:
        # Merging would create a class bigger than any proper block.
        return -1
    parents[root_b] = root_a
    ranks[root_a] = merged_rank
    not_rep.append(root_b)
    return 1
def _union_find_rep(self, num, parents):
    """Return the class representative of ``num`` in a union-find
    structure, compressing the traversed path as a side effect.
    Notes
    =====
    THIS FUNCTION HAS SIDE EFFECTS: entries of ``parents`` along the
    path from ``num`` to its root are rewired to point at the root.
    See Also
    ========
    minimal_block, _union_find_merge
    References
    ==========
    [1] Holt, D., Eick, B., O'Brien, E.
    "Handbook of computational group theory"
    [7] http://www.algorithmist.com/index.php/Union_Find
    """
    # Climb to the root (the element that is its own parent).
    root = num
    while parents[root] != root:
        root = parents[root]
    # Path compression: point every visited node directly at the root.
    node = num
    while parents[node] != root:
        nxt = parents[node]
        parents[node] = root
        node = nxt
    return root
@property
def base(self):
"""Return a base from the Schreier-Sims algorithm.
For a permutation group ``G``, a base is a sequence | |
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
ACTION_NAMES.objc_compile,
ACTION_NAMES.objcpp_compile,
],
flag_groups = [flag_group(flags = ["-Os", "-DNDEBUG"])],
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
ACTION_NAMES.objc_compile,
ACTION_NAMES.objcpp_compile,
],
flag_groups = [flag_group(flags = ["-g", "-DDEBUG"])],
with_features = [with_feature_set(features = ["dbg"])],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [flag_group(flags = ["-std=gnu++11", "-stdlib=libc++"])],
),
],
)
elif (ctx.attr.cpu == "tvos_arm64" or
ctx.attr.cpu == "tvos_x86_64"):
default_compile_flags_feature = feature(
name = "default_compile_flags",
enabled = True,
flag_sets = [
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
ACTION_NAMES.objc_compile,
ACTION_NAMES.objcpp_compile,
],
flag_groups = [
flag_group(
flags = [
"-DCOMPILER_GCC3",
"-DCOMPILER_GCC4",
"-Dunix",
"-DOS_TVOS",
"-DU_HAVE_NL_LANGINFO_CODESET=0",
"-DU_HAVE_STD_STRING",
"-D__STDC_FORMAT_MACROS",
"-fcolor-diagnostics",
],
),
],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
ACTION_NAMES.objc_compile,
ACTION_NAMES.objcpp_compile,
],
flag_groups = [flag_group(flags = ["-O0", "-DDEBUG"])],
with_features = [with_feature_set(features = ["fastbuild"])],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
ACTION_NAMES.objc_compile,
ACTION_NAMES.objcpp_compile,
],
flag_groups = [
flag_group(
flags = ["-Os", "-DNDEBUG", "-DNS_BLOCK_ASSERTIONS=1"],
),
],
with_features = [with_feature_set(features = ["opt"])],
),
flag_set(
actions = [
ACTION_NAMES.assemble,
ACTION_NAMES.preprocess_assemble,
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.c_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
ACTION_NAMES.objc_compile,
ACTION_NAMES.objcpp_compile,
],
flag_groups = [flag_group(flags = ["-g", "-DDEBUG"])],
with_features = [with_feature_set(features = ["dbg"])],
),
flag_set(
actions = [
ACTION_NAMES.linkstamp_compile,
ACTION_NAMES.cpp_compile,
ACTION_NAMES.cpp_header_parsing,
ACTION_NAMES.cpp_module_compile,
ACTION_NAMES.cpp_module_codegen,
ACTION_NAMES.lto_backend,
ACTION_NAMES.clif_match,
],
flag_groups = [flag_group(flags = ["-std=gnu++11", "-stdlib=libc++"])],
),
],
)
else:
default_compile_flags_feature = None
# Asks the Apple linker to emit a link map for (obj)c executables; the
# output path is substituted from the %{linkmap_exec_path} build variable.
generate_linkmap_feature = feature(
    name = "generate_linkmap",
    flag_sets = [
        flag_set(
            actions = ["objc-executable", "objc++-executable"],
            flag_groups = [
                flag_group(
                    flags = ["-Xlinker -map", "-Xlinker %{linkmap_exec_path}"],
                ),
            ],
        ),
    ],
)
# Expands linker inputs (search paths, libopts, whole-archive members and
# the structured libraries_to_link variable) into link/archive command lines.
input_param_flags_feature = feature(
    name = "input_param_flags",
    flag_sets = [
        flag_set(
            actions = _NON_OBJC_LINK_ACTIONS +
                      [ACTION_NAMES.cpp_link_static_library],
            flag_groups = [
                flag_group(
                    flags = ["-L%{library_search_directories}"],
                    iterate_over = "library_search_directories",
                    expand_if_available = "library_search_directories",
                ),
            ],
        ),
        flag_set(
            actions = _NON_OBJC_LINK_ACTIONS +
                      [ACTION_NAMES.cpp_link_static_library],
            flag_groups = [
                flag_group(
                    flags = ["%{libopts}"],
                    iterate_over = "libopts",
                    expand_if_available = "libopts",
                ),
            ],
        ),
        flag_set(
            actions = _NON_OBJC_LINK_ACTIONS +
                      [ACTION_NAMES.cpp_link_static_library],
            flag_groups = [
                flag_group(
                    flags = ["-Wl,-force_load,%{whole_archive_linker_params}"],
                    iterate_over = "whole_archive_linker_params",
                    expand_if_available = "whole_archive_linker_params",
                ),
            ],
        ),
        flag_set(
            actions = _NON_OBJC_LINK_ACTIONS +
                      [ACTION_NAMES.cpp_link_static_library],
            flag_groups = [
                flag_group(
                    flags = ["%{linker_input_params}"],
                    iterate_over = "linker_input_params",
                    expand_if_available = "linker_input_params",
                ),
            ],
        ),
        flag_set(
            actions = _NON_OBJC_LINK_ACTIONS +
                      [ACTION_NAMES.cpp_link_static_library],
            flag_groups = [
                flag_group(
                    iterate_over = "libraries_to_link",
                    flag_groups = [
                        # object_file_group members are bracketed with
                        # --start-lib/--end-lib; each type below chooses
                        # plain vs -force_load emission based on the
                        # is_whole_archive sub-variable.
                        flag_group(
                            flags = ["-Wl,--start-lib"],
                            expand_if_equal = variable_with_value(
                                name = "libraries_to_link.type",
                                value = "object_file_group",
                            ),
                        ),
                        flag_group(
                            iterate_over = "libraries_to_link.object_files",
                            flag_groups = [
                                flag_group(
                                    flags = ["%{libraries_to_link.object_files}"],
                                    expand_if_false = "libraries_to_link.is_whole_archive",
                                ),
                                flag_group(
                                    flags = ["-Wl,-force_load,%{libraries_to_link.object_files}"],
                                    expand_if_true = "libraries_to_link.is_whole_archive",
                                ),
                            ],
                            expand_if_equal = variable_with_value(
                                name = "libraries_to_link.type",
                                value = "object_file_group",
                            ),
                        ),
                        flag_group(
                            flags = ["-Wl,--end-lib"],
                            expand_if_equal = variable_with_value(
                                name = "libraries_to_link.type",
                                value = "object_file_group",
                            ),
                        ),
                        flag_group(
                            flag_groups = [
                                flag_group(
                                    flags = ["%{libraries_to_link.name}"],
                                    expand_if_false = "libraries_to_link.is_whole_archive",
                                ),
                                flag_group(
                                    flags = ["-Wl,-force_load,%{libraries_to_link.name}"],
                                    expand_if_true = "libraries_to_link.is_whole_archive",
                                ),
                            ],
                            expand_if_equal = variable_with_value(
                                name = "libraries_to_link.type",
                                value = "object_file",
                            ),
                        ),
                        flag_group(
                            flag_groups = [
                                flag_group(
                                    flags = ["%{libraries_to_link.name}"],
                                    expand_if_false = "libraries_to_link.is_whole_archive",
                                ),
                                flag_group(
                                    flags = ["-Wl,-force_load,%{libraries_to_link.name}"],
                                    expand_if_true = "libraries_to_link.is_whole_archive",
                                ),
                            ],
                            expand_if_equal = variable_with_value(
                                name = "libraries_to_link.type",
                                value = "interface_library",
                            ),
                        ),
                        flag_group(
                            flag_groups = [
                                flag_group(
                                    flags = ["%{libraries_to_link.name}"],
                                    expand_if_false = "libraries_to_link.is_whole_archive",
                                ),
                                flag_group(
                                    flags = ["-Wl,-force_load,%{libraries_to_link.name}"],
                                    expand_if_true = "libraries_to_link.is_whole_archive",
                                ),
                            ],
                            expand_if_equal = variable_with_value(
                                name = "libraries_to_link.type",
                                value = "static_library",
                            ),
                        ),
                        flag_group(
                            flag_groups = [
                                flag_group(
                                    flags = ["-l%{libraries_to_link.name}"],
                                    expand_if_false = "libraries_to_link.is_whole_archive",
                                ),
                                flag_group(
                                    flags = ["-Wl,-force_load,-l%{libraries_to_link.name}"],
                                    expand_if_true = "libraries_to_link.is_whole_archive",
                                ),
                            ],
                            expand_if_equal = variable_with_value(
                                name = "libraries_to_link.type",
                                value = "dynamic_library",
                            ),
                        ),
                        flag_group(
                            flag_groups = [
                                flag_group(
                                    flags = ["-l:%{libraries_to_link.name}"],
                                    expand_if_false = "libraries_to_link.is_whole_archive",
                                ),
                                flag_group(
                                    flags = ["-Wl,-force_load,-l:%{libraries_to_link.name}"],
                                    expand_if_true = "libraries_to_link.is_whole_archive",
                                ),
                            ],
                            expand_if_equal = variable_with_value(
                                name = "libraries_to_link.type",
                                value = "versioned_dynamic_library",
                            ),
                        ),
                    ],
                    expand_if_available = "libraries_to_link",
                ),
            ],
        ),
    ],
)
# Test-only feature keyed to Xcode 5.0: injects a marker define so tests can
# verify which Xcode-version feature was activated.
xcode_5_0_feature = feature(
    name = "xcode_5.0",
    flag_sets = [
        flag_set(
            actions = [
                ACTION_NAMES.preprocess_assemble,
                ACTION_NAMES.c_compile,
                ACTION_NAMES.cpp_compile,
                ACTION_NAMES.cpp_header_parsing,
                "c++-header-preprocessing",
                ACTION_NAMES.objc_compile,
                ACTION_NAMES.objcpp_compile,
            ],
            flag_groups = [
                flag_group(
                    flags = ["-DXCODE_FEATURE_FOR_TESTING=xcode_5.0"],
                ),
            ],
        ),
    ],
)
# Appends user-supplied linkopts to every link action.
user_link_flags_feature = feature(
    name = "user_link_flags",
    enabled = True,
    flag_sets = [
        flag_set(
            actions = _ALL_LINK_ACTIONS,
            flag_groups = [
                flag_group(
                    flags = ["%{user_link_flags}"],
                    iterate_over = "user_link_flags",
                    expand_if_available = "user_link_flags",
                ),
            ],
        ),
    ],
)
# Position-independent code: -fPIC is added only when the 'pic' build
# variable is set for the compile action.
pic_feature = feature(
    name = "pic",
    enabled = True,
    flag_sets = [
        flag_set(
            actions = [
                ACTION_NAMES.c_compile,
                ACTION_NAMES.cpp_compile,
                ACTION_NAMES.cpp_module_codegen,
                ACTION_NAMES.cpp_module_compile,
                ACTION_NAMES.preprocess_assemble,
            ],
            flag_groups = [
                flag_group(flags = ["-fPIC"], expand_if_available = "pic"),
            ],
        ),
    ],
)
# Enables dSYM debug-symbol generation: compiles carry -g plus a marker
# define, and executables emit DSYM_HINT_* lines consumed by the wrapper.
generate_dsym_file_feature = feature(
    name = "generate_dsym_file",
    flag_sets = [
        flag_set(
            actions = [
                ACTION_NAMES.c_compile,
                ACTION_NAMES.cpp_compile,
                ACTION_NAMES.objc_compile,
                ACTION_NAMES.objcpp_compile,
                "objc-executable",
                "objc++-executable",
            ],
            flag_groups = [flag_group(flags = ["-g", "-DDUMMY_GENERATE_DSYM_FILE"])],
        ),
        flag_set(
            actions = ["objc-executable", "objc++-executable"],
            flag_groups = [
                flag_group(
                    flags = [
                        "DSYM_HINT_LINKED_BINARY=%{linked_binary}",
                        "DSYM_HINT_DSYM_PATH=%{dsym_path}",
                    ],
                ),
            ],
        ),
    ],
)
# Passes -fmodule-maps (module maps without full modules) for objc compiles;
# only valid when use_objc_modules is also requested.
no_enable_modules_feature = feature(
    name = "no_enable_modules",
    flag_sets = [
        flag_set(
            actions = [ACTION_NAMES.objc_compile, ACTION_NAMES.objcpp_compile],
            flag_groups = [flag_group(flags = ["-fmodule-maps"])],
        ),
    ],
    requires = [feature_set(features = ["use_objc_modules"])],
)
# AutoFDO: feeds a sampled profile into the compiler when the
# fdo_profile_path variable is available; mutually exclusive with other
# profile-providing features via provides = ["profile"].
autofdo_feature = feature(
    name = "autofdo",
    flag_sets = [
        flag_set(
            actions = [ACTION_NAMES.c_compile, ACTION_NAMES.cpp_compile],
            flag_groups = [
                flag_group(
                    flags = [
                        "-fauto-profile=%{fdo_profile_path}",
                        "-fprofile-correction",
                    ],
                    expand_if_available = "fdo_profile_path",
                ),
            ],
        ),
    ],
    provides = ["profile"],
)
# Cocoa can only be linked on macOS; the other Apple/Windows CPUs get an
# empty placeholder feature so the name stays defined, and unknown CPUs
# get None.
if (ctx.attr.cpu == "darwin_x86_64"):
    link_cocoa_feature = feature(
        name = "link_cocoa",
        flag_sets = [
            flag_set(
                actions = _OBJC_LINK_ACTIONS,
                flag_groups = [flag_group(flags = ["-framework Cocoa"])],
            ),
        ],
    )
elif (ctx.attr.cpu == "ios_arm64" or
      ctx.attr.cpu == "ios_armv7" or
      ctx.attr.cpu == "ios_i386" or
      ctx.attr.cpu == "ios_x86_64" or
      ctx.attr.cpu == "tvos_arm64" or
      ctx.attr.cpu == "tvos_x86_64" or
      ctx.attr.cpu == "watchos_armv7k" or
      ctx.attr.cpu == "watchos_i386" or
      ctx.attr.cpu == "x64_windows"):
    link_cocoa_feature = feature(name = "link_cocoa")
else:
    link_cocoa_feature = None
# Umbrella feature that activates the full set of objc/c++ compile and link
# action configs.
objc_actions_feature = feature(
    name = "objc_actions",
    implies = [
        "objc-compile",
        "objc++-compile",
        "objc-fully-link",
        "objc-archive",
        "objc-executable",
        "objc++-executable",
        "assemble",
        "preprocess-assemble",
        "c-compile",
        "c++-compile",
        "c++-header-parsing",
        "c++-link-static-library",
        "c++-link-dynamic-library",
        "c++-link-nodeps-dynamic-library",
        "c++-link-executable",
    ],
)
# Enables Objective-C Automatic Reference Counting when the 'objc_arc'
# build variable is set.
objc_arc_feature = feature(
    name = "objc_arc",
    flag_sets = [
        flag_set(
            actions = [
                ACTION_NAMES.c_compile,
                ACTION_NAMES.cpp_compile,
                ACTION_NAMES.cpp_module_compile,
                ACTION_NAMES.cpp_header_parsing,
                ACTION_NAMES.assemble,
                ACTION_NAMES.preprocess_assemble,
                ACTION_NAMES.objc_compile,
                ACTION_NAMES.objcpp_compile,
            ],
            flag_groups = [
                flag_group(
                    flags = ["-fobjc-arc"],
                    expand_if_available = "objc_arc",
                ),
            ],
        ),
    ],
)
# Exports the Xcode/SDK selection environment variables to every compile,
# link and archive action so the wrapper scripts pick the right toolchain.
apple_env_feature = feature(
    name = "apple_env",
    env_sets = [
        env_set(
            actions = [
                ACTION_NAMES.c_compile,
                ACTION_NAMES.cpp_compile,
                ACTION_NAMES.cpp_module_compile,
                ACTION_NAMES.cpp_header_parsing,
                ACTION_NAMES.assemble,
                ACTION_NAMES.preprocess_assemble,
                ACTION_NAMES.objc_compile,
                ACTION_NAMES.objcpp_compile,
                "objc-archive",
                "objc-fully-link",
                ACTION_NAMES.cpp_link_executable,
                ACTION_NAMES.cpp_link_dynamic_library,
                ACTION_NAMES.cpp_link_nodeps_dynamic_library,
                ACTION_NAMES.cpp_link_static_library,
                "objc-executable",
                "objc++-executable",
            ],
            env_entries = [
                env_entry(
                    key = "XCODE_VERSION_OVERRIDE",
                    value = "%{xcode_version_override_value}",
                ),
                env_entry(
                    key = "APPLE_SDK_VERSION_OVERRIDE",
                    value = "%{apple_sdk_version_override_value}",
                ),
                env_entry(
                    key = "APPLE_SDK_PLATFORM",
                    value = "%{apple_sdk_platform_value}",
                ),
            ],
        ),
    ],
)
# Appends user-supplied copts to every compile-like action.
user_compile_flags_feature = feature(
    name = "user_compile_flags",
    flag_sets = [
        flag_set(
            actions = [
                ACTION_NAMES.assemble,
                ACTION_NAMES.preprocess_assemble,
                ACTION_NAMES.c_compile,
                ACTION_NAMES.cpp_compile,
                ACTION_NAMES.cpp_header_parsing,
                ACTION_NAMES.cpp_module_compile,
                ACTION_NAMES.cpp_module_codegen,
                ACTION_NAMES.objc_compile,
                ACTION_NAMES.objcpp_compile,
            ],
            flag_groups = [
                flag_group(
                    flags = ["%{user_compile_flags}"],
                    iterate_over = "user_compile_flags",
                    expand_if_available = "user_compile_flags",
                ),
            ],
        ),
    ],
)
if (ctx.attr.cpu == "tvos_arm64"):
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = _ALL_LINK_ACTIONS,
flag_groups = [
flag_group(
flags = ["-lc++", "-target", "arm64-apple-tvos"],
),
],
),
],
)
elif (ctx.attr.cpu == "watchos_armv7k"):
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = _ALL_LINK_ACTIONS,
flag_groups = [
flag_group(
flags = ["-lc++", "-target", "armv7-apple-watchos"],
),
],
),
],
)
elif (ctx.attr.cpu == "ios_x86_64"):
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = _ALL_LINK_ACTIONS,
flag_groups = [
flag_group(
flags = ["-lc++", "-target", "x86_64-apple-ios"],
),
],
),
],
)
elif (ctx.attr.cpu == "tvos_x86_64"):
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = _ALL_LINK_ACTIONS,
flag_groups = [
flag_group(
flags = ["-lc++", "-target", "x86_64-apple-tvos"],
),
],
),
],
)
elif (ctx.attr.cpu == "ios_arm64"):
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = _ALL_LINK_ACTIONS,
flag_groups = [
flag_group(flags = ["-lc++", "-target", "arm64-apple-ios"]),
],
),
],
)
elif (ctx.attr.cpu == "ios_armv7"):
default_link_flags_feature = feature(
name = "default_link_flags",
enabled = True,
flag_sets = [
flag_set(
actions = _ALL_LINK_ACTIONS,
flag_groups = [
flag_group(flags = ["-lc++", "-target", "armv7-apple-ios"]),
],
),
],
)
elif (ctx.attr.cpu == "ios_i386" | |
import sys
import os
from .constants import VERSION, DEFAULT_PROCESSES, CONFIG_FILE_NAMES, PROJECT_BOUNDARIES
from .backports import Backports
from .features import Features
from .config import Config
from .printing import nprint
from . import formats
class Arguments:
def __init__(self, args):
    # Raw command line arguments (without the program name), consumed by
    # parse().
    self.__args = args
@staticmethod
def print_usage(full=False):
    """Print usage text to stdout.

    full -- when True, also print the configuration notes and the complete
    option reference; otherwise only the short usage summary is shown.
    """
    print("Vermin {}".format(VERSION))
    print("Usage: {} [options] <python source files and folders..>".format(sys.argv[0]))
    print("\nConcurrently detect the minimum Python versions needed to run code.")
    if not full:
        print("\nFor full help and options, use `-h` or `--help`.")
    print("\nHeuristics are employed to determine which files to analyze:\n"
          " - 'py', 'py3', 'pyw', 'pyj', 'pyi' are always scanned\n"
          " - 'pyc', 'pyd', 'pxd', 'pyx', 'pyo' are ignored (including various other files)\n"
          " - Magic lines with 'python' are accepted, like: #!/usr/bin/env python\n"
          " - Files that cannot be opened for reading as text devices are ignored")
    print("\nHowever, files directly specified are always attempted parsing, even without\n"
          "accepted extensions or heuristics.")
    print("\nResults interpretation:")
    print(" ~2 No known reason it won't work with py2.")
    print(" !2 It is known that it won't work with py2.")
    print(" 2.5, !3 Works with 2.5+ but it is known it won't work with py3.")
    print(" ~2, 3.4 No known reason it won't work with py2, works with 3.4+")
    print("\nIncompatible versions notices mean that several files were detected incompatible\n"
          "with py2 and py3 simultaneously. In such cases the results might be inconclusive.")
    print("\nA config file is automatically tried detected from the current working directory\n"
          "where Vermin is run, following parent folders until either the root or project\n"
          "boundary files/folders are reached. However, if --config-file is specified, no config\n"
          "is auto-detected and loaded.")
    if full:
        print("\nConfig file names being looked for: {}\n"
              "Project boundary files/folders: {}".
              format(", ".join(["'{}'".format(fn) for fn in CONFIG_FILE_NAMES]),
                     ", ".join(["'{}'".format(pb) for pb in PROJECT_BOUNDARIES])))
        print("\nOptions:")
        print(" --quiet | -q\n"
              " Quiet mode. If used together with --violations, quiet mode is preserved\n"
              " while showing only violations: no descriptive text, tips, or verdicts.\n")
        print(" --no-quiet (default)\n"
              " Disable quiet mode.\n")
        print(" -v.. Verbosity level 1 to 4. -v, -vv, -vvv, and -vvvv shows increasingly more\n"
              " information.\n"
              " -v will show the individual versions required per file.\n"
              " -vv will also show which modules, functions etc. that constitutes\n"
              " the requirements.\n"
              " -vvv will also show line/col numbers.\n"
              " -vvvv will also show user-defined symbols being ignored.\n")
        print(" --target=V | -t=V\n"
              " Target version that files must abide by. Can be specified once or twice.\n"
              " A '-' can be appended to match target version or smaller, like '-t=3.5-'.\n"
              " If not met Vermin will exit with code 1. Note that the amount of target\n"
              " versions must match the amount of minimum required versions detected.\n"
              " However, if used in conjunction with --violations, and no rules are\n"
              " triggered, it will exit with code 0.\n")
        print(" --no-target (default)\n"
              " Don't expect certain target version(s).\n")
        print(" --processes=N | -p=N\n"
              " Use N concurrent processes to detect and analyze files. Defaults to all\n"
              " cores ({}).\n".format(DEFAULT_PROCESSES))
        print(" --ignore | -i\n"
              " Ignore incompatible versions and warnings. However, if no compatible\n"
              " versions are found then incompatible versions will be shown in the end to\n"
              " not have an absence of results.\n")
        print(" --no-ignore (default)\n"
              " Don't ignore incompatible versions and warnings.\n")
        print(" --dump | -d\n"
              " Dump AST node visits.\n")
        print(" --no-dump (default)\n"
              " Don't dump AST node visits.")
        # Typo fix: this previously read "and exists." instead of "and exits."
        print("\n --help | -h\n"
              " Shows this information and exits.")
        print("\n --version | -V\n"
              " Shows version number and exits.")
        print("\n --config-file <path> | -c <path>\n"
              " Loads config file unless --no-config-file is specified. Any additional\n"
              " arguments supplied are applied on top of that config. See configuration\n"
              " section above for more information.")
        print("\n --no-config-file\n"
              " No automatic config file detection and --config-file argument is disallowed.")
        print("\n --hidden\n"
              " Analyze 'hidden' files and folders starting with '.'.")
        print("\n --no-hidden (default)\n"
              " Don't analyze hidden files and folders unless specified directly.")
        print("\n --versions\n"
              " In the end, print all unique versions required by the analysed code.")
        print("\n --show-tips (default)\n"
              " Show helpful tips at the end, like those relating to backports or usage of\n"
              " unevaluated generic/literal annotations.")
        print("\n --no-tips\n"
              " Don't show tips.")
        print("\n --violations | --lint\n"
              " Show only results that violate versions described by --target arguments,\n"
              " which are required to be specified. Verbosity mode is automatically set to\n"
              " at least 2 in order to show violations in output text, but can be increased\n"
              " if necessary.\n\n"
              " If no rules are triggered while used in conjunction with --target, an exit\n"
              " code 0 will still be yielded due to inconclusivity.\n\n"
              " Can be used together with --quiet such that only the violations are shown:\n"
              " no descriptive text, tips, or verdicts.")
        print("\n --no-violations | --no-lint (default)\n"
              " Show regular results.")
        print("\n --pessimistic\n"
              " Pessimistic mode: syntax errors are interpreted as the major Python version\n"
              " in use being incompatible.")
        print("\n --no-pessimistic (default)\n"
              " Disable pessimistic mode.")
        print("\n --eval-annotations\n"
              " Instructs parser that annotations will be manually evaluated in code, which\n"
              " changes minimum versions in certain cases. Otherwise, function and variable\n"
              " annotations are not evaluated at definition time. Apply this argument if\n"
              " code uses `typing.get_type_hints` or `eval(obj.__annotations__)` or\n"
              " otherwise forces evaluation of annotations.")
        print("\n --no-eval-annotations (default)\n"
              " Disable annotations evaluation.")
        print("\n --parse-comments (default)\n"
              " Parse for comments to influence exclusion of code for analysis via\n"
              " \"# novm\" and \"# novermin\".")
        print("\n --no-parse-comments\n"
              " Don't parse for comments. Not parsing comments can sometimes yield a speedup\n"
              " of 30-40%+.")
        print("\n --scan-symlink-folders\n"
              " Scan symlinks to folders to include in analysis.")
        print("\n --no-symlink-folders (default)\n"
              " Don't scan symlinks to folders to include in analysis. Symlinks\n"
              " to non-folders or top-level folders will always be scanned.")
        print("\n --format <name> | -f <name>\n"
              " Format to show results and output in.\n"
              " Supported formats:\n{}".format(formats.help_str(10)))
        print("\n [--exclude <name>] ...\n"
              " Exclude full names, like 'email.parser.FeedParser', from analysis. Useful to\n"
              " ignore conditional logic that can trigger incompatible results.\n\n"
              " Examples:\n"
              " Exclude 'foo.bar.baz' module/member: --exclude 'foo.bar.baz'\n"
              " Exclude 'foo' kwarg: --exclude 'somemodule.func(foo)'\n"
              " Exclude 'bar' codecs error handler: --exclude 'ceh=bar'\n"
              " Exclude 'baz' codecs encoding: --exclude 'ce=baz'")
        print("\n [--exclude-file <file name>] ...\n"
              " Exclude full names like --exclude but from a specified file instead. Each\n"
              " line constitutes an exclusion with the same format as with --exclude.")
        print("\n --no-exclude (default)\n"
              " Use no excludes. Clears any excludes specified before this.")
        print("\n [--backport <name>] ...\n"
              " Some features are sometimes backported into packages, in repositories such\n"
              " as PyPi, that are widely used but aren't in the standard language. If such a\n"
              " backport is specified as being used, the results will reflect that instead."
              "\n\n"
              " Supported backports:\n{}".format(Backports.str(10)))
        print("\n --no-backport (default)\n"
              " Use no backports. Clears any backports specified before this.")
        print("\n [--feature <name>] ...\n"
              " Some features are disabled by default due to being unstable:\n{}".
              format(Features.str(10)))
        print("\n --no-feature (default)\n"
              " Use no features. Clears any features specified before this.")
def parse(self, config, detect_folder=None):
assert(config is not None)
if len(self.__args) == 0:
return {"code": 1, "usage": True, "full": False}
path_pos = 0
versions = False
fmt = None
detected_config = Config.detect_config_file(detect_folder)
argument_config = None
no_config_file = False
# Preparsing step. Help and version arguments quit immediately and config file parsing must be
# done first such that other arguments can override its settings.
for i in range(len(self.__args)):
arg = self.__args[i]
if arg in ("--help", "-h"):
return {"code": 0, "usage": True, "full": True}
if arg in ("--version", "-V"):
print(VERSION)
sys.exit(0)
if arg == "--no-config-file":
no_config_file = True
detected_config = None
if arg in ("--config-file", "-c"):
if (i + 1) >= len(self.__args):
print("Requires config file path! Example: --config-file /path/to/vermin.ini")
return {"code": 1}
argument_config = os.path.abspath(self.__args[i + 1])
if no_config_file and argument_config:
print("--config-file cannot be used together with --no-config-file!")
return {"code": 1}
# Load potential config file if detected | |
self.strategy.fetch_for_product(line.product)
if info.availability.is_available_to_buy:
raise
pass
return total
# ==========
# Properties
# ==========
@property
def is_empty(self):
    """
    Whether the basket has no lines (or has never been saved).
    """
    if self.id is None:
        return True
    return self.num_lines == 0
@property
def is_tax_known(self):
    """
    Whether tax values are known for every line of this basket.
    """
    return all(line.is_tax_known for line in self.all_lines())
@property
def total_excl_tax(self):
    """
    Return total line price excluding tax (discounts applied).
    """
    return self._get_total('line_price_excl_tax_incl_discounts')
@property
def total_tax(self):
    # Docstring fixed: this sums tax over all lines, not a single line.
    """Return total tax for the whole basket (sum of line taxes)."""
    return self._get_total('line_tax')
@property
def total_incl_tax(self):
    """
    Return total price inclusive of tax and discounts
    """
    return self._get_total('line_price_incl_tax_incl_discounts')
@property
def total_incl_tax_excl_discounts(self):
    """
    Return total price inclusive of tax but exclusive of discounts
    """
    return self._get_total('line_price_incl_tax')
@property
def total_discount(self):
    """Return the total discount value across all lines."""
    return self._get_total('discount_value')
@property
def offer_discounts(self):
    """
    Return basket discounts from non-voucher sources. Does not include
    shipping discounts.
    """
    return self.offer_applications.offer_discounts
@property
def voucher_discounts(self):
    """
    Return discounts from vouchers
    """
    return self.offer_applications.voucher_discounts
@property
def has_shipping_discounts(self):
    """Whether any shipping discount applies to this basket."""
    return bool(self.shipping_discounts)
@property
def shipping_discounts(self):
    # Docstring fixed: this returns shipping discounts, not voucher ones
    # (the old text was copy-pasted from voucher_discounts).
    """
    Return discounts that apply to shipping.
    """
    return self.offer_applications.shipping_discounts
@property
def post_order_actions(self):
    # Docstring fixed: this returns post-order actions, not voucher
    # discounts (the old text was copy-pasted from voucher_discounts).
    """
    Return the post-order actions collected from applied offers.
    """
    return self.offer_applications.post_order_actions
@property
def grouped_voucher_discounts(self):
    """
    Return discounts from vouchers but grouped so that a voucher which
    links to multiple offers is aggregated into one object.
    """
    return self.offer_applications.grouped_voucher_discounts
@property
def total_excl_tax_excl_discounts(self):
    """
    Return total price excluding tax and discounts
    """
    return self._get_total('line_price_excl_tax')
@property
def num_lines(self):
    """Return number of lines in the basket."""
    return self.all_lines().count()
@property
def num_items(self):
    """Return number of items (total quantity across all lines)."""
    # NOTE(review): iterates self.lines.all() while sibling properties use
    # self.all_lines() -- confirm whether that difference is intentional.
    return sum(line.quantity for line in self.lines.all())
@property
def num_items_without_discount(self):
    """Return the total quantity of items not covered by a discount."""
    return sum(
        line.quantity_without_discount for line in self.all_lines())
@property
def num_items_with_discount(self):
num = 0
for line in self.all_lines():
num += line.quantity_with_discount
return num
    @property
    def time_before_submit(self):
        """Return the timedelta from creation to submission, or None if the
        basket has not been submitted."""
        if not self.date_submitted:
            return None
        return self.date_submitted - self.date_created
    @property
    def time_since_creation(self, test_datetime=None):
        """Return the timedelta from basket creation until now."""
        # NOTE(review): accessed as a property, this can never receive
        # test_datetime -- the parameter is dead code; confirm before removal.
        if not test_datetime:
            test_datetime = now()
        return test_datetime - self.date_created
    @property
    def contains_a_voucher(self):
        """Test whether any voucher is attached to this basket"""
        # An unsaved basket cannot have M2M voucher rows yet.
        if not self.id:
            return False
        return self.vouchers.exists()
    @property
    def is_submitted(self):
        """Test whether the basket status is SUBMITTED"""
        return self.status == self.SUBMITTED
    @property
    def can_be_edited(self):
        """
        Test if a basket can be edited (status is one of editable_statuses)
        """
        return self.status in self.editable_statuses
    @property
    def currency(self):
        """Return the currency of the basket's lines.

        Since all lines should have the same currency, this returns the
        currency of the first one found; implicitly returns None for an
        empty basket.
        """
        for line in self.all_lines():
            return line.price_currency
# =============
# Query methods
# =============
    def contains_voucher(self, code):
        """
        Test whether the basket contains a voucher with a given code
        """
        if self.id is None:
            # Unsaved basket: no voucher rows can exist yet.
            return False
        try:
            self.vouchers.get(code=code)
        except ObjectDoesNotExist:
            return False
        else:
            return True
    def product_quantity(self, product):
        """
        Return the quantity of a product in the basket
        The basket can contain multiple lines with the same product, but
        different options and stockrecords. Those quantities are summed up.
        """
        matching_lines = self.lines.filter(product=product)
        quantity = matching_lines.aggregate(Sum('quantity'))['quantity__sum']
        # aggregate() yields None when no rows match; normalise to 0.
        return quantity or 0
    def line_quantity(self, product, stockrecord, options=None):
        """
        Return the current quantity of a specific product and options,
        or 0 when no matching line exists.
        """
        ref = self._create_line_reference(product, stockrecord, options)
        try:
            return self.lines.get(line_reference=ref).quantity
        except ObjectDoesNotExist:
            return 0
@python_2_unicode_compatible
class AbstractLine(models.Model):
"""A line of a basket (product and a quantity)
Common approaches on ordering basket lines:
a) First added at top. That's the history-like approach; new items are
added to the bottom of the list. Changing quantities doesn't impact
position.
Oscar does this by default. It just sorts by Line.pk, which is
guaranteed to increment after each creation.
b) Last modified at top. That means items move to the top when you add
another one, and new items are added to the top as well. Amazon
mostly does this, but doesn't change the position when you update
the quantity in the basket view.
To get this behaviour, add a date_updated field, change
Meta.ordering and optionally do something similar on wishlist lines.
Order lines should already be created in the order of the basket
lines, and are sorted by their primary key, so no changes should be
necessary there.
"""
basket = models.ForeignKey(
'basket.Basket',
on_delete=models.CASCADE,
related_name='lines',
verbose_name=_("Basket"))
# This is to determine which products belong to the same line
# We can't just use product.id as you can have customised products
# which should be treated as separate lines. Set as a
# SlugField as it is included in the path for certain views.
line_reference = SlugField(
_("Line Reference"), max_length=128, db_index=True)
product = models.ForeignKey(
'catalogue.Product',
on_delete=models.CASCADE,
related_name='basket_lines',
verbose_name=_("Product"))
# We store the stockrecord that should be used to fulfil this line.
stockrecord = models.ForeignKey(
'partner.StockRecord',
on_delete=models.CASCADE,
related_name='basket_lines')
quantity = models.PositiveIntegerField(_('Quantity'), default=1)
# We store the unit price incl tax of the product when it is first added to
# the basket. This allows us to tell if a product has changed price since
# a person first added it to their basket.
price_currency = models.CharField(
_("Currency"), max_length=12, default=get_default_currency)
price_excl_tax = models.DecimalField(
_('Price excl. Tax'), decimal_places=2, max_digits=12,
null=True)
price_incl_tax = models.DecimalField(
_('Price incl. Tax'), decimal_places=2, max_digits=12, null=True)
# Track date of first addition
date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
    def __init__(self, *args, **kwargs):
        super(AbstractLine, self).__init__(*args, **kwargs)
        # Instance variables used to persist discount information.  Only one
        # of the two totals may be non-zero at a time (see discount()).
        self._discount_excl_tax = D('0.00')
        self._discount_incl_tax = D('0.00')
        # Tracks which quantities have been consumed by which offers.
        self.consumer = LineOfferConsumer(self)
    class Meta:
        abstract = True
        app_label = 'basket'
        # Enforce sorting by order of creation.
        ordering = ['date_created', 'pk']
        # A given line_reference may appear at most once per basket.
        unique_together = ("basket", "line_reference")
        verbose_name = _('Basket line')
        verbose_name_plural = _('Basket lines')
def __str__(self):
return _(
u"Basket #%(basket_id)d, Product #%(product_id)d, quantity"
u" %(quantity)d") % {'basket_id': self.basket.pk,
'product_id': self.product.pk,
'quantity': self.quantity}
    def save(self, *args, **kwargs):
        """Persist the line, refusing modification of non-editable baskets."""
        if not self.basket.can_be_edited:
            raise PermissionDenied(
                _("You cannot modify a %s basket") % (
                    self.basket.status.lower(),))
        return super(AbstractLine, self).save(*args, **kwargs)
# =============
# Offer methods
# =============
    def clear_discount(self):
        """
        Remove any discounts from this line.
        """
        self._discount_excl_tax = D('0.00')
        self._discount_incl_tax = D('0.00')
        # Also reset the offer-consumption tracking (mirrors __init__).
        self.consumer = LineOfferConsumer(self)
    def discount(self, discount_value, affected_quantity, incl_tax=True,
                 offer=None):
        """
        Apply a discount to this line.

        ``discount_value`` is the monetary amount to subtract;
        ``affected_quantity`` is how many units the discount consumes;
        ``incl_tax`` selects whether the amount applies to the tax-inclusive
        or tax-exclusive price.  Mixing the two kinds on one line is invalid
        and raises RuntimeError.
        """
        if incl_tax:
            if self._discount_excl_tax > 0:
                raise RuntimeError(
                    "Attempting to discount the tax-inclusive price of a line "
                    "when tax-exclusive discounts are already applied")
            self._discount_incl_tax += discount_value
        else:
            if self._discount_incl_tax > 0:
                raise RuntimeError(
                    "Attempting to discount the tax-exclusive price of a line "
                    "when tax-inclusive discounts are already applied")
            self._discount_excl_tax += discount_value
        self.consume(affected_quantity, offer=offer)
    def consume(self, quantity, offer=None):
        """
        Mark all or part of the line as 'consumed'.
        Consumed items are no longer available to be used in offers.
        """
        self.consumer.consume(quantity, offer=offer)
    def get_price_breakdown(self):
        """
        Return a breakdown of line prices after discounts have been applied.
        Returns a list of (unit_price_incl_tax, unit_price_excl_tax, quantity)
        tuples.
        """
        if not self.is_tax_known:
            raise RuntimeError("A price breakdown can only be determined "
                               "when taxes are known")
        prices = []
        if not self.discount_value:
            # No discount: a single tuple covers the whole quantity.
            prices.append((self.unit_price_incl_tax, self.unit_price_excl_tax,
                           self.quantity))
        else:
            # Need to split the discount among the affected quantity
            # of products.  The discount is assumed tax-inclusive here;
            # _tax_ratio converts the per-unit amount to its excl-tax value.
            item_incl_tax_discount = (
                self.discount_value / int(self.consumer.consumed()))
            item_excl_tax_discount = item_incl_tax_discount * self._tax_ratio
            # Round to currency precision (2 decimal places).
            item_excl_tax_discount = item_excl_tax_discount.quantize(D('0.01'))
            prices.append((self.unit_price_incl_tax - item_incl_tax_discount,
                           self.unit_price_excl_tax - item_excl_tax_discount,
                           self.consumer.consumed()))
            if self.quantity_without_discount:
                # Remaining units are sold at the undiscounted unit prices.
                prices.append((self.unit_price_incl_tax,
                               self.unit_price_excl_tax,
                               self.quantity_without_discount))
        return prices
# =======
# Helpers
# =======
    @property
    def _tax_ratio(self):
        # Ratio of net (excl-tax) to gross (incl-tax) unit price; used to
        # convert a tax-inclusive discount into its tax-exclusive equivalent.
        if not self.unit_price_incl_tax:
            # Guard against None/zero gross price (avoids ZeroDivisionError).
            return 0
        return self.unit_price_excl_tax / self.unit_price_incl_tax
# ===============
# Offer Discounts
# ===============
    def has_offer_discount(self, offer):
        """Test whether this line has been discounted by the given offer."""
        return self.consumer.consumed(offer) > 0
    def quantity_with_offer_discount(self, offer):
        """Return the quantity of this line consumed by the given offer."""
        return self.consumer.consumed(offer)
    def quantity_without_offer_discount(self, offer):
        """Return the quantity still available to the given offer."""
        return self.consumer.available(offer)
    def is_available_for_offer_discount(self, offer):
        """Test whether the given offer can still consume from this line."""
        return self.consumer.available(offer) > 0
# ==========
# Properties
# ==========
    @property
    def has_discount(self):
        """Test whether any quantity of this line has been discounted."""
        return bool(self.consumer.consumed())
    @property
    def quantity_with_discount(self):
        """Return the quantity consumed by offers (i.e. discounted)."""
        return self.consumer.consumed()
    @property
    def quantity_without_discount(self):
        """Return the quantity not yet consumed by any offer."""
        return self.consumer.available()
    @property
    def is_available_for_discount(self):
        """Deprecated -- prefer quantity_without_discount / the offer-aware
        helpers above."""
        # deprecated
        return self.consumer.available() > 0
    @property
    def discount_value(self):
        """Return the discount applied to this line (whichever kind is set)."""
        # Only one of the incl- and excl- discounts should be non-zero
        return max(self._discount_incl_tax, self._discount_excl_tax)
    @property
    def purchase_info(self):
        """
        Return the stock/price info, as fetched from the basket's strategy.
        """
        if not hasattr(self, '_info'):
            # Cache the PurchaseInfo instance for the object's lifetime.
            self._info = self.basket.strategy.fetch_for_line(
                self, self.stockrecord)
        return self._info
    @property
    def is_tax_known(self):
        """Test whether tax is known for this line's price."""
        return self.purchase_info.price.is_tax_known
    @property
    def unit_effective_price(self):
        """
        The price to use for offer calculations
        """
        return self.purchase_info.price.effective_price
    @property
    def unit_price_excl_tax(self):
        """Return the unit price excluding tax."""
        return self.purchase_info.price.excl_tax
    @property
    def unit_price_incl_tax(self):
        """Return the unit price including tax."""
        return self.purchase_info.price.incl_tax
    @property
    def unit_tax(self):
        """Return the tax amount per unit."""
        return self.purchase_info.price.tax
    @property
    def line_price_excl_tax(self):
        """Return quantity * unit price excl tax (None when price unknown)."""
        if self.unit_price_excl_tax is not None:
            return self.quantity * self.unit_price_excl_tax
@property
def line_price_excl_tax_incl_discounts(self):
if self._discount_excl_tax and self.line_price_excl_tax is not None:
return self.line_price_excl_tax - self._discount_excl_tax
if self._discount_incl_tax and self.line_price_incl_tax is not None:
# This is a tricky situation. We | |
# Code for initialization of NMF, copied with little modification from scikit-learn
# Original source: https://github.com/scikit-learn/scikit-learn/blob/7e1e6d09bcc2eaeba98f7e737aac2ac782f0e5f1/sklearn/decomposition/_nmf.py#L229
import numpy as np
from scipy import linalg
import warnings
from math import sqrt
import numbers
def check_random_state(seed):
    """Coerce *seed* into a ``np.random.RandomState`` instance.

    Parameters
    ----------
    seed : None, int or instance of RandomState
        If seed is None, return the RandomState singleton used by np.random.
        If seed is an int, return a new RandomState instance seeded with seed.
        If seed is already a RandomState instance, return it.
        Otherwise raise ValueError.
    """
    if seed is None or seed is np.random:
        # Fall back to numpy's shared global generator.
        return np.random.mtrand._rand
    if isinstance(seed, np.random.RandomState):
        return seed
    if isinstance(seed, numbers.Integral):
        return np.random.RandomState(seed)
    raise ValueError(
        "%r cannot be used to seed a numpy.random.RandomState instance" % seed
    )
def squared_norm(x):
    """Squared Euclidean or Frobenius norm of x.

    Faster than norm(x) ** 2.

    Parameters
    ----------
    x : array-like

    Returns
    -------
    float
        The squared Euclidean norm when x is a vector, the squared Frobenius
        norm when x is a matrix (2-d array).
    """
    flat = np.ravel(x, order="K")
    if np.issubdtype(flat.dtype, np.integer):
        # Integer dot products can silently wrap around.
        warnings.warn(
            "Array type is integer, np.dot may overflow. "
            "Data should be float type to avoid this issue",
            UserWarning,
        )
    return np.dot(flat, flat)
def norm(x):
    """Dot product-based Euclidean norm implementation.

    Delegates to :func:`squared_norm` and takes the square root.
    See: http://fseoane.net/blog/2011/computing-the-vector-norm/

    Parameters
    ----------
    x : array-like
        Vector for which to compute the norm.
    """
    return sqrt(squared_norm(x))
def svd_flip(u, v, u_based_decision=True):
    """Sign correction to ensure deterministic output from SVD.

    Adjusts the columns of u and the rows of v (in place) such that the
    loadings that are largest in absolute value are always positive, so
    repeated decompositions of the same matrix agree on signs.

    Parameters
    ----------
    u : ndarray
        Left singular vectors, as returned by `linalg.svd` or
        :func:`randomized_svd`, with inner dimensions matching v so one can
        compute `np.dot(u * s, v)`.
    v : ndarray
        Right singular vectors (really ``vt`` in scipy's convention).
    u_based_decision : bool, default=True
        If True, base the sign choice on the columns of u; otherwise on the
        rows of v.

    Returns
    -------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
    """
    if u_based_decision:
        # For each column of u, find the row holding the largest magnitude.
        pivot_rows = np.argmax(np.abs(u), axis=0)
        signs = np.sign(u[pivot_rows, range(u.shape[1])])
    else:
        # For each row of v, find the column holding the largest magnitude.
        pivot_cols = np.argmax(np.abs(v), axis=1)
        signs = np.sign(v[range(v.shape[0]), pivot_cols])
    # Apply the paired scaling in place; u @ diag(s) @ v is unchanged.
    u *= signs
    v *= signs[:, np.newaxis]
    return u, v
def randomized_range_finder(
    A, *, size, n_iter, power_iteration_normalizer="auto", random_state=None
):
    """Compute an orthonormal matrix whose range approximates the range of A.

    Parameters
    ----------
    A : 2D array
        The input data matrix.
    size : int
        Number of columns of the returned basis.
    n_iter : int
        Number of power iterations used to stabilize the result.
    power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
        Whether the power iterations are normalized with step-by-step
        QR factorization (the slowest but most accurate), 'none'
        (the fastest but numerically unstable when `n_iter` is large, e.g.
        typically 5 or larger), or 'LU' factorization (numerically stable
        but can lose slightly in accuracy). The 'auto' mode applies no
        normalization if `n_iter` <= 2 and switches to LU otherwise.

        .. versionadded:: 0.18
    random_state : int, RandomState instance or None, default=None
        The seed of the pseudo random number generator to use when shuffling
        the data, i.e. getting the random vectors to initialize the algorithm.
        Pass an int for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    Q : ndarray
        An (A.shape[0] x size) matrix with orthonormal columns, the range of
        which approximates well the range of the input matrix A.

    Notes
    -----
    Follows Algorithm 4.3 of
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:0909.4061) https://arxiv.org/pdf/0909.4061.pdf
    An implementation of a randomized algorithm for principal component
    analysis
    Szlam et al. 2014
    """
    random_state = check_random_state(random_state)
    # Generating normal random vectors with shape: (A.shape[1], size)
    Q = random_state.normal(size=(A.shape[1], size))
    if A.dtype.kind == "f":
        # Ensure f32 is preserved as f32
        Q = Q.astype(A.dtype, copy=False)
    # Deal with "auto" mode
    if power_iteration_normalizer == "auto":
        if n_iter <= 2:
            power_iteration_normalizer = "none"
        else:
            power_iteration_normalizer = "LU"
    # Perform power iterations with Q to further 'imprint' the top
    # singular vectors of A in Q.  Each pass applies A then A.T, optionally
    # re-orthogonalizing to keep the columns numerically independent.
    for i in range(n_iter):
        if power_iteration_normalizer == "none":
            Q = np.dot(A, Q)
            Q = np.dot(A.T, Q)
        elif power_iteration_normalizer == "LU":
            Q, _ = linalg.lu(np.dot(A, Q), permute_l=True)
            Q, _ = linalg.lu(np.dot(A.T, Q), permute_l=True)
        elif power_iteration_normalizer == "QR":
            Q, _ = linalg.qr(np.dot(A, Q), mode="economic")
            Q, _ = linalg.qr(np.dot(A.T, Q), mode="economic")
    # Sample the range of A by linear projection of Q and
    # extract an orthonormal basis via economic QR.
    Q, _ = linalg.qr(np.dot(A, Q), mode="economic")
    return Q
def randomized_svd(
M,
n_components,
*,
n_oversamples=10,
n_iter="auto",
power_iteration_normalizer="auto",
transpose="auto",
flip_sign=True,
random_state="warn",
):
"""Computes a truncated randomized SVD.
This method solves the fixed-rank approximation problem described in the
Halko et al paper (problem (1.5), p5).
Parameters
----------
M : {ndarray, sparse matrix}
Matrix to decompose.
n_components : int
Number of singular values and vectors to extract.
n_oversamples : int, default=10
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values. Users might wish
to increase this parameter up to `2*k - n_components` where k is the
effective rank, for large matrices, noisy problems, matrices with
slowly decaying spectrums, or to increase precision accuracy. See Halko
et al (pages 5, 23 and 26).
n_iter : int or 'auto', default='auto'
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
(< .1 * min(X.shape)) in which case `n_iter` is set to 7.
This improves precision with few components. Note that in general
users should rather increase `n_oversamples` before increasing `n_iter`
as the principle of the randomized method is to avoid usage of these
more costly power iterations steps. When `n_components` is equal
or greater to the effective matrix rank and the spectrum does not
present a slow decay, `n_iter=0` or `1` should even work fine in theory
(see Halko et al paper, page 9).
.. versionchanged:: 0.18
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter` <= 2 and switches to LU otherwise.
.. versionadded:: 0.18
transpose : bool or 'auto', default='auto'
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case.
.. versionchanged:: 0.18
flip_sign : bool, default=True
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state : int, RandomState instance or None, default='warn'
The seed of the pseudo random number generator to use when
shuffling the data, i.e. getting the random vectors to initialize
the algorithm. Pass an int for reproducible results across multiple
function calls. See :term:`Glossary <random_state>`.
.. versionchanged:: 1.2
The previous behavior (`random_state=0`) is deprecated, and
from v1.2 the default value will be `random_state=None`. Set
the value of `random_state` | |
<gh_stars>1-10
"""
=============
python-timely
=============
Operations on time vectors with variable calendar definitions.
Notes
-----
The terminology and structure is inspired by the shapely package and was
motivated by the need to deal with various time representation in the
climate modelling community.
The notion of years is assumed to be uniform across all cases. How the years
are divided into cycles (usually months) and each cycle into days can vary.
There is always 24 hours in a day, 60 minutes in an hour and 60 seconds in a
minute.
The basic unit is a 6-element time vector. However, the precision with which a
specific time or period is given is left to the user and decimals are allowed
in the last specified time element. For example, each of the following time
vectors can be used as a date:
[2007] # The beginning of 2007
[2007.25] # 1/4 of the way into year 2007
[1980, 11] # The beginning of November 1980
[1943, 5, 3, 0, 5.5] # Equivalent to [1943, 5, 3, 0, 5, 30]
[2002, 2, 28, 12, 30, 0.0] # Fully explicit time
In order to be able to select a period (e.g. a given year) across various
calendars, we let the boundaries of periods be inclusive or exclusive.
For instance, the period [2001,2002[ is unambiguous while the period
[2001-1-1,2001-12-31] would cause problem in a context where one calendar
does not have a 31st of December in it.
The following aspects of time are not supported:
- Time zones
- Leap seconds
Overview on creation of timely objects (and useful shortcuts):
deltat = DeltaT([yyyy,mm,dd,hh,mn,ss])
e.g. DeltaT([0,0,0,12]) = 12 hours
DeltaT([0,0,0,0,0.5]) = 30 seconds
date = Date([yyyy,mm,dd,hh,mn,ss])
e.g. Date([2010,1,10]) = January 10th, 2010, 0:00:00
e.g. Date([2010]) = January 1st 2010, 0:00:00
multidate = MultiDate([[yyyy,mm,dd,hh,mn,ss],[yyyy,mm,dd,hh,mn,ss]])
period = Period([[yyyy,mm,dd,hh,mn,ss],[yyyy,mm,dd,hh,mn,ss]])
period = explicit_period(date1,date2)
period = implicit_period([yyyy,mm])
e.g. implicit_period([2010]) = The year 2010
e.g. implicit_period([2010,2]) = The month of February 2010
timeseries = Timeseries([[yyyy,mm,dd,hh,mn,ss],[yyyy,mm,dd,hh,mn,ss]])
timeseries = period.regular_sample(deltat)
"""
import warnings
import numpy as np
import numpy.ma as ma
# Default threshold for rounding seconds in time representation.
threshold = 0.001
# Integer dtype for dates without decimals: probe a numpy array so the
# platform's default integer type is used rather than a hard-coded one.
dummy = np.array([0])
myint = dummy.dtype
# Float dtype for dates that convert to decimals.
myfloat = 'float64'
class TimelyError(Exception):
    """Raised for invalid time vectors, months, years or calendar input."""
    pass
# Vectorize functions on lists
def _index(some_list, value):
    """Return the position of *value* in *some_list* (list.index wrapper)."""
    return some_list.index(value)
# Vectorised variant of ``_index`` (applies element-wise over arrays).
_Vindex = np.vectorize(_index)
#
def _get_item(some_list, item):
    """Return ``some_list[item]`` (subscript wrapper for np.vectorize)."""
    return some_list[item]
# Vectorised variant of ``_get_item`` (element-wise indexing over arrays).
_Vget_item = np.vectorize(_get_item)
#
#
# Built-in functions for calendar definition
#
# Cycles in year
#
def months_of_gregorian_calendar(year=0):
    """Months of the Gregorian calendar.

    Parameters
    ----------
    year : int, optional
        (dummy value).

    Returns
    -------
    out : dict
        integers as keys, months of the Gregorian calendar as values.

    Notes
    -----
    Appropriate for use as 'year_cycles' function in :class:`Calendar`.
    This module has a built-in calendar with months only:
    :data:`CalMonthsOnly`.
    """
    month_names = ('January', 'February', 'March', 'April', 'May', 'June',
                   'July', 'August', 'September', 'October', 'November',
                   'December')
    return dict(enumerate(month_names, start=1))
#
def temperate_seasons(year=0):
    """Temperate seasons.

    Parameters
    ----------
    year : int, optional
        (dummy value).

    Returns
    -------
    out : dict
        integers as keys, temperate seasons as values.

    Notes
    -----
    Appropriate for use as 'year_cycles' function in :class:`Calendar`.
    This module has a built-in calendar with seasons only:
    :data:`CalSeasons`.
    """
    return dict(enumerate(('Spring', 'Summer', 'Autumn', 'Winter'), start=1))
#
def year_cycle(year=0):
    """Year cycle: the whole year as a single, undivided cycle.

    Parameters
    ----------
    year : int, optional
        (dummy value).

    Returns
    -------
    out : dict
        integer (1) as key, 'Year' as value.

    Notes
    -----
    Appropriate for use as 'year_cycles' function in :class:`Calendar`,
    this allows to essentially have a direct division of the years in
    days, without months, weeks or other subdivisions.
    For example, see built-in calendar :data:`Cal365NoMonths`.
    """
    return {1: 'Year'}
#
# Days in cycle
#
def days_in_month_360(month=0, year=0):
    """Days of the month (360 days calendar: every month has 30 days).

    Parameters
    ----------
    month : int, optional
        (dummy value).
    year : int, optional
        (dummy value).

    Returns
    -------
    out : range of int
        days of the month (1 to 30).

    Notes
    -----
    Appropriate for use as 'days_in_cycle' function in :class:`Calendar`.
    This module has a built-in 360 days calendar with months:
    :data:`Cal360`.
    """
    return range(1, 31)
#
def days_in_month_365(month, year=0):
    """Days of the month (365 days calendar, i.e. never a leap year).

    Parameters
    ----------
    month : int
        numerical value of the month (1 to 12).
    year : int, optional
        (dummy value).

    Returns
    -------
    out : range of int
        days of the month.

    Notes
    -----
    Appropriate for use as 'days_in_cycle' function in :class:`Calendar`.
    This module has a built-in 365 days calendar with months:
    :data:`Cal365`.
    """
    try:
        as_float = float(month)
        as_int = int(month)
    except (TypeError, ValueError):
        raise TimelyError("Month value is not numerical.")
    if as_int != as_float:
        raise TimelyError("Month value is not an integer.")
    if not 1 <= as_int <= 12:
        raise TimelyError("Month value has to be between 1 and 12.")
    month_lengths = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    return range(1, month_lengths[as_int - 1] + 1)
#
def days_in_month_366(month, year=0):
    """Days of the month (366 days calendar, i.e. always a leap year).

    Parameters
    ----------
    month : int
        numerical value of the month (1 to 12).
    year : int, optional
        (dummy value).

    Returns
    -------
    out : range of int
        days of the month.

    Notes
    -----
    Appropriate for use as 'days_in_cycle' function in :class:`Calendar`.
    This module has a built-in 366 days calendar with months:
    :data:`Cal366`.
    """
    try:
        as_float = float(month)
        as_int = int(month)
    except (TypeError, ValueError):
        raise TimelyError("Month value is not numerical.")
    if as_int != as_float:
        raise TimelyError("Month value is not an integer.")
    if not 1 <= as_int <= 12:
        raise TimelyError("Month value has to be between 1 and 12.")
    month_lengths = (31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    return range(1, month_lengths[as_int - 1] + 1)
#
def days_in_month_julian(month, year):
    """Days of the month (Julian calendar).

    Parameters
    ----------
    month : int
        numerical value of the month (1 to 12).
    year : int

    Returns
    -------
    out : range of int
        days of the month.

    Notes
    -----
    Leap year every 4 years, with no century exception.
    Appropriate for use as 'days_in_cycle' function in :class:`Calendar`.
    This module has a built-in julian calendar: :data:`CalJulian`.
    """
    try:
        year_as_float = float(year)
        year_as_int = int(year)
    except (TypeError, ValueError):
        raise TimelyError("Year value is not numerical.")
    if year_as_int != year_as_float:
        raise TimelyError("Year value is not an integer.")
    # Dispatch to the fixed-length calendars depending on leap status.
    month_fn = days_in_month_366 if year_as_int % 4 == 0 else days_in_month_365
    return month_fn(month, year_as_int)
#
def days_in_month_proleptic_gregorian(month, year):
    """Days of the month (Proleptic gregorian calendar).

    Parameters
    ----------
    month : int
        numerical value of the month (1 to 12).
    year : int

    Returns
    -------
    out : range of int
        days of the month.

    Notes
    -----
    Leap year every 4 years, except every 100 years,
    but still every 400 years.
    Appropriate for use as 'days_in_cycle' function in :class:`Calendar`.
    This module has a built-in proleptic gregorian calendar:
    :data:`CalProleptic`.
    """
    try:
        year_as_float = float(year)
        year_as_int = int(year)
    except (TypeError, ValueError):
        raise TimelyError("Year value is not numerical.")
    if year_as_int != year_as_float:
        raise TimelyError("Year value is not an integer.")
    # Century years not divisible by 400 lose their Julian leap day.
    is_skipped_leap = (year_as_int % 100 == 0) and (year_as_int % 400 != 0)
    if is_skipped_leap:
        return days_in_month_365(month, year_as_int)
    return days_in_month_julian(month, year_as_int)
#
def days_in_month_gregorian(month, year):
    """Days of the month (Gregorian calendar).

    Parameters
    ----------
    month : int
        numerical value of the month (1 to 12).
    year : int

    Returns
    -------
    out : sequence of int
        days of the month.

    Notes
    -----
    Leap year every 4 years, except every 100 years,
    but still every 400 years.
    Transition to Julian calendar before 1582.
    October 5 to October 14 of 1582 do not exist.
    Appropriate for use as 'days_in_cycle' function in :class:`Calendar`.
    This module has a built-in gregorian calendar: :data:`CalGregorian`.
    """
    try:
        y_float = float(year)
        y_int = int(year)
    except (TypeError, ValueError):
        raise TimelyError("Year value is not numerical.")
    if y_int != y_float:
        raise TimelyError("Year value is not an integer.")
    try:
        m_float = float(month)
        m_int = int(month)
    except (TypeError, ValueError):
        raise TimelyError("Month value is not numerical.")
    if m_int != m_float:
        raise TimelyError("Month value is not an integer.")
    after_reform = (y_int > 1582) or (y_int == 1582 and m_int > 10)
    if after_reform:
        return days_in_month_proleptic_gregorian(m_int, y_int)
    if (y_int, m_int) == (1582, 10):
        # October 1582: days 5-14 were skipped in the Julian->Gregorian switch.
        return [1, 2, 3, 4, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
                26, 27, 28, 29, 30, 31]
    return days_in_month_julian(m_int, y_int)
#
def days_in_year_365(cycle=1, year=0):
"""Days of the year | |
#!/usr/bin/env python3
#Copyright 2021 <NAME>
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#This is a blackjack game called BlackPack. It has a few cool features, including using multiple decks, autoshuffling, and betting.
#This is my ONLINE blackjack game.
#It's written in python3 and depends on random and socket, this is the linux version, it should however run on windows or macos when opened in a python interpreter.
#It can be installed on linux by placing the file in /usr/bin or any prefered bin folder, and can be ran by calling BlackPackO in bash.
from random import randint
import xml.etree.ElementTree as ET
import socket
HOST = '127.0.0.1'
SERV = 65533
LIST = 65532
#[Noah Panepinto (Dec.16 2021 {01:39})]
#Here I define several characters that will be appended to strings to change their colours when printed.
class colours:
    """ANSI SGR escape sequences for styling terminal output.

    Concatenate a code before a string and ``ENDC`` after it to reset.
    Foreground colours use SGR 30-37 (dark) and 90-97 (bright); background
    colours use 40-47 (dark) and 100-107 (bright).
    """
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDER = '\033[4m'
    NO_UNDER = '\033[24m'   # cancels UNDER
    REVERSE = '\033[7m'
    FOREWARD = '\033[27m'   # cancels REVERSE
    FORE_DARK_BLACK = '\033[30m'
    FORE_DARK_RED = '\033[31m'
    FORE_DARK_GREEN = '\033[32m'
    FORE_DARK_ORANGE = '\033[33m'
    FORE_DARK_BLUE = '\033[34m'
    FORE_DARK_MAGENTA = '\033[35m'
    FORE_DARK_CYAN = '\033[36m'
    FORE_DARK_WHITE = '\033[37m'
    FORE_BRIGHT_BLACK = '\033[90m'
    FORE_BRIGHT_RED = '\033[91m'
    FORE_BRIGHT_GREEN = '\033[92m'
    FORE_BRIGHT_ORANGE = '\033[93m'
    FORE_BRIGHT_BLUE = '\033[94m'
    FORE_BRIGHT_MAGENTA = '\033[95m'
    FORE_BRIGHT_CYAN = '\033[96m'
    FORE_BRIGHT_WHITE = '\033[97m'
    BACK_ENDC = '\033[0m'
    BACK_DARK_BLACK = '\033[40m'
    BACK_DARK_RED = '\033[41m'
    BACK_DARK_GREEN = '\033[42m'
    BACK_DARK_ORANGE = '\033[43m'
    BACK_DARK_BLUE = '\033[44m'
    BACK_DARK_MAGENTA = '\033[45m'
    BACK_DARK_CYAN = '\033[46m'
    BACK_DARK_WHITE = '\033[47m'
    # Bug fix: was '\033[1000m', which is not a valid SGR code; the bright
    # black background is 100 (consistent with 101-107 below).
    BACK_BRIGHT_BLACK = '\033[100m'
    BACK_BRIGHT_RED = '\033[101m'
    BACK_BRIGHT_GREEN = '\033[102m'
    BACK_BRIGHT_ORANGE = '\033[103m'
    BACK_BRIGHT_BLUE = '\033[104m'
    BACK_BRIGHT_MAGENTA = '\033[105m'
    BACK_BRIGHT_CYAN = '\033[106m'
    BACK_BRIGHT_WHITE = '\033[107m'
#[<NAME> (Oct.3 2021 {01:39})]
#Here I define the classes that will represent Standard Cards (the BaseCard class) and Ace Cards (the AceCard class)
#The AceCard class is derived from the BaseCard class and as a result contains the same five non-static (instance) values:
#self.value; An integer value representing the total added to your hand under normal circumstances:
#Used for comparing whether a hand of two cards can be split or not.
#self.suite; A string value representing the suite of a card, "Spades", "Clubs", "Hearts", or "Diamonds".
#self.card; A string value representing the number or face of a card, "Two", "Eight", "Ace", "King" etc.
#self.name; A string value representing the name of a card:
#It is equal to "X of Y" where X is self.card and Y is self.suite.
#self.face; An array of string values which is used to visually represent a card when printed.
#Both the BaseCard class and AceCard class contain one function:
#GetValue(); A function which returns the value a card will add to a hand:
#Takes in the current value of your hand as input value score.
#Returns self.value.
#The AceCard class differs from the BaseCard class in three ways:
#self.value is not supplied upon instantiation and is always equal to 11.
#self.card is not supplied upon instantiation and is always equal to "Ace".
#GetValue() will return 1 if score is greater than 10, and 11 if score is less than or equal to 10.
class BaseCard:
    """A standard (non-ace) playing card.

    Attributes:
        value: points this card adds to a hand.
        name: human-readable description, e.g. "Five of Hearts".
        face: list of strings forming the card's ASCII-art face.
    """

    def __init__(self, v, n, f):
        self.value = v
        self.name = n
        self.face = f

    def GetValue(self, score):
        """Return the points the card contributes; *score* is ignored here."""
        return self.value

    def __str__(self):
        # Render the ASCII face, one row per line.
        return '\n'.join(self.face)
class AceCard(BaseCard):
    """An ace: worth 11 normally, but only 1 once the hand total exceeds 10.

    NOTE: deliberately does not call BaseCard.__init__, so self.value is
    never set; the value is decided dynamically by GetValue().
    """

    def __init__(self, n, f):
        self.name = n
        self.face = f

    def GetValue(self, score):
        # Choose the ace's value against the current hand total.
        return 1 if score > 10 else 11
#[<NAME> (Dec.16 2021 {01:30})]
#Here I define a static class containing all of the data for a full deck of cards, including the string representation of the back of a card.
#The Cards class contains two static (class) values:
#Cards.fd; an array containing BaseCard and AceCard Class Objects representing a full standard deck of playing cards.
#Cards.boc; an array of strings representing the back of a standard playing card.
#The Cards class contains one function, Load() which reads an XML File and populates Cards.fd and Cards.boc with the data within the XML File.
class Cards:
    """Static container for the full deck of card objects and card-back art.

    Attributes (class-level):
        fd:  full deck — BaseCard/AceCard instances, populated by Load().
        boc: "back of card" ASCII-art rows, populated by Load().
    """
    fd = []
    boc = []

    def Load(xmlFile):
        """Populate Cards.fd and Cards.boc from the given XML file.

        Each <card> element is dispatched on its <ID> text: 'back' rows go
        to boc, 'ace'/'base' entries become card objects; any other ID is
        counted as invalid and aborts the program.
        """
        deck = []
        back_rows = []
        root = ET.parse(xmlFile).getroot()
        invalid = 0
        for item in root.findall('card'):
            kind = item.find('ID').text
            if kind == 'back':
                for line in item.find('face').findall('line'):
                    back_rows.append(line.text)
            elif kind == 'ace':
                art = [line.text for line in item.find('face').findall('line')]
                deck.append(AceCard(item.find('description').text, art))
            elif kind == 'base':
                points = int(item.find('value').text)
                art = [line.text for line in item.find('face').findall('line')]
                deck.append(BaseCard(points, item.find('description').text, art))
            else:
                invalid += 1
        if invalid > 0:
            print('invalid Cards.xml')
            exit(0)
        Cards.fd = deck
        Cards.boc = back_rows
#[<NAME> (Oct.3 2021 {01:39})]
#Here I define the class which will represent a deck of cards (the Deck class) which contains two static (class) values:
#inPile; An Array of AceCard and BaseCard classes which represents the pile in which cards are shuffled and waiting to be dealt to players.
#outPile; An array of AceCard and BaseCard classes which represents the pile in which cards that have been discarded and are waiting to be reshuffled into inPile.
#The Deck Class contains no non static (instance) values.
#The Deck Class contains one function:
#shuffle(); A function which takes all cards in outPile and moves them into inPile.
class Deck:
    """The draw pile and discard pile for the game.

    BUGFIX: inPile/outPile were previously class-level lists, so every Deck
    instance shared the same piles and repeated construction kept appending
    more copies of the full deck. They are now per-instance lists.

    Attributes:
        inPile:  cards waiting to be dealt.
        outPile: discarded cards awaiting a reshuffle.
    """

    def __init__(self, decks):
        # decks: how many copies of the full deck (Cards.fd) to start with.
        self.inPile = []
        self.outPile = []
        for _ in range(decks):
            self.inPile.extend(Cards.fd)

    def shuffle(self):
        """Move every discarded card back into the draw pile.

        (Randomization happens at deal time — Hand.hit pops a random index —
        so no ordering work is needed here.)
        """
        print("\nShufflin' the deck!")
        while len(self.outPile) > 0:
            self.inPile.append(self.outPile.pop(0))
#[<NAME> (Oct.3 2021 {01:39})]
#Here I define the class which will represent a hand of cards held by a player (the Hand class) which contains three non static (instance) values:
#self.score; The integer value which represents the point value of the hand, this is what you're trying to get to 21.
#self.cards; An Array of BaseCards and AceCards that the hand contains.
#self.doubled; A boolean value that checks whether a hand has been double downed, and can resultingly no longer hit.
#The Hand class also contains six functions:
#print(); A function which prints the visual representations of the cards that the Hand class contains,
#the data printed is contained in BaseCard.face, and AceCard.face.
#printHalf(); A function which prints the visual representation of the first card in the hand and then boc.
#evalAceLast(); A function which evaluates and populates self.score,
#it does this by running the GetValue() function on all BaseCard classes within self.cards followed by doing the same for all AceCard classes within self.cards.
#The order of operations is important to make sure that the AceCard classes return the correct value.
#Returns self.score.
#hit(); A function which takes a random card from the inPile value of a Deck class object and adds it to self.cards.
#Takes in a Deck class object from which to take a card.
#deal(); A function which calls the hit() function twice.
#Takes in a Deck class object which is needed for passing to the hit() function.
#clear(); A function which takes all cards in self.cards and moves them to the outPile value of a Deck class object.
#Takes in a Deck class object to which it gives cards.
class Hand:
    """A player's hand of cards.

    Attributes:
        score:   point total, populated by evalAceLast().
        cards:   BaseCard/AceCard objects currently held.
        doubled: True once the hand has doubled down (and may no longer hit).
    """

    def __init__(self):
        self.score = 0
        self.cards = []
        self.doubled = False

    def print(self):
        """Print all cards side by side (card art is 8 rows tall)."""
        for row in range(8):
            print("".join(card.face[row] + " " for card in self.cards))

    def printHalf(self):
        """Print the first card face-up next to the card back (dealer view)."""
        for row in range(8):
            print(self.cards[0].face[row] + " " + Cards.boc[row] + " ")

    def evalAceLast(self):
        """Recompute and return self.score.

        Non-ace cards are totalled first so that each ace (detected by
        GetValue(0) == 11) can choose 1 vs 11 against the running total.
        """
        total = 0
        for card in self.cards:
            if card.GetValue(0) != 11:
                total += card.GetValue(total)
        for card in self.cards:
            if card.GetValue(0) == 11:
                total += card.GetValue(total)
        self.score = total
        return self.score

    def hit(self, deck):
        """Draw one random card from deck.inPile (reshuffling if empty)."""
        if len(deck.inPile) == 0:
            deck.shuffle()
        pick = randint(0, len(deck.inPile) - 1)
        self.cards.append(deck.inPile.pop(pick))

    def deal(self, deck):
        """Draw the two opening cards."""
        for _ in range(2):
            self.hit(deck)

    def clear(self, deck):
        """Discard every held card into deck.outPile and reset doubled."""
        for _ in range(len(self.cards)):
            deck.outPile.append(self.cards.pop(0))
        self.doubled = False
#[<NAME> (Oct.3 2021 {01:39})]
#Here I define a class which represents the result of a finished hand (the HandResult class), which is used to determine whether the player won or | |
channel_matcher:
# skip url in canonical str if channel already included
continue
value = text_type(self._match_components[key])
if any(s in value for s in ', ='):
brackets.append("%s='%s'" % (key, value))
else:
brackets.append("%s=%s" % (key, value))
if brackets:
builder.append('[%s]' % ','.join(brackets))
return ''.join(builder)
def __json__(self):
return self.__str__()
def conda_build_form(self):
builder = []
name = self.get_exact_value('name')
assert name
builder.append(name)
build = self.get_raw_value('build')
version = self.get_raw_value('version')
if build:
assert version
builder += [version, build]
elif version:
builder.append(version)
return ' '.join(builder)
def __eq__(self, other):
if isinstance(other, MatchSpec):
return self._hash_key == other._hash_key
else:
return False
    def __hash__(self):
        # Must stay consistent with __eq__, which compares _hash_key.
        return hash(self._hash_key)
    @memoizedproperty
    def _hash_key(self):
        # Cached identity tuple backing __eq__ and __hash__.
        return self._match_components, self.optional, self.target
    def __contains__(self, field):
        # True when this spec constrains the given field name.
        return field in self._match_components
def _build_components(self, **kwargs):
not_fields = set(kwargs) - MatchSpec.FIELD_NAMES_SET
if not_fields:
raise InvalidMatchSpec(self._original_spec_str,
'Cannot match on field(s): %s' % not_fields)
_make_component = MatchSpec._make_component
return frozendict(_make_component(key, value) for key, value in iteritems(kwargs))
@staticmethod
def _make_component(field_name, value):
if hasattr(value, 'match'):
matcher = value
return field_name, matcher
_MATCHER_CACHE = MatchSpec._MATCHER_CACHE
cache_key = (field_name, value)
cached_matcher = _MATCHER_CACHE.get(cache_key)
if cached_matcher:
return field_name, cached_matcher
if field_name in _implementors:
matcher = _implementors[field_name](value)
else:
matcher = ExactStrMatch(text_type(value))
_MATCHER_CACHE[(field_name, value)] = matcher
return field_name, matcher
    @property
    def name(self):
        # Package name this spec matches; '*' when the name is unconstrained.
        return self.get_exact_value('name') or '*'
#
# Remaining methods are for back compatibility with conda-build. Do not remove
# without coordination with the conda-build team.
#
    @property
    def strictness(self):
        # With the old MatchSpec, strictness==3 if name, version, and
        # build were all specified.
        s = sum(f in self._match_components for f in ('name', 'version', 'build'))
        if s < len(self._match_components):
            # spec constrains fields beyond name/version/build
            return 3
        elif not self.get_exact_value('name') or 'build' in self._match_components:
            # wildcard/inexact name, or an explicit build constraint
            return 3
        elif 'version' in self._match_components:
            return 2
        else:
            return 1
    @property
    def spec(self):
        # Back-compat alias for conda-build's "name version build" string.
        return self.conda_build_form()
    @property
    def version(self):
        # in the old MatchSpec object, version was a VersionSpec, not a str
        # so we'll keep that API here
        # Returns the version matcher component, or None if unconstrained.
        return self._match_components.get('version')
    @property
    def fn(self):
        # Filename: prefer an explicit 'fn' component, else derive it from
        # the 'url' component; asserts that one of the two is present.
        val = self.get_raw_value('fn') or self.get_raw_value('url')
        if val:
            val = basename(val)
        assert val
        return val
    @classmethod
    def merge(cls, match_specs, union=False):
        """Combine specs that target the same package name into single specs.

        Specs without a concrete name ('*' or None) pass through unmerged.
        Within a name, specs are further grouped by `optional` before
        merging; conflicting non-None targets raise ValueError.
        """
        match_specs = sorted(tuple(cls(s) for s in match_specs if s), key=str)
        name_groups = groupby(attrgetter('name'), match_specs)
        unmergeable = name_groups.pop('*', []) + name_groups.pop(None, [])
        merged_specs = []
        mergeable_groups = tuple(concat(
            itervalues(groupby(lambda s: s.optional, group))
            for group in itervalues(name_groups)
        ))
        for group in mergeable_groups:
            target_groups = groupby(attrgetter('target'), group)
            target_groups.pop(None, None)
            if len(target_groups) > 1:
                raise ValueError("Incompatible MatchSpec merge:%s" % dashlist(group))
            merged_specs.append(
                reduce(lambda x, y: x._merge(y, union), group) if len(group) > 1 else group[0]
            )
        return tuple(concatv(merged_specs, unmergeable))
    @classmethod
    def union(cls, match_specs):
        """Like merge(), but combines same-name specs with a union of
        their constraints instead of an intersection."""
        return cls.merge(match_specs, union=True)
def _merge(self, other, union=False):
if self.optional != other.optional or self.target != other.target:
raise ValueError("Incompatible MatchSpec merge: - %s\n - %s" % (self, other))
final_components = {}
component_names = set(self._match_components) | set(other._match_components)
for component_name in component_names:
this_component = self._match_components.get(component_name)
that_component = other._match_components.get(component_name)
if this_component is None and that_component is None:
continue
elif this_component is None:
final_components[component_name] = that_component
elif that_component is None:
final_components[component_name] = this_component
else:
if union:
try:
final = this_component.union(that_component)
except (AttributeError, ValueError):
final = '%s|%s' % (this_component, that_component)
else:
final = this_component.merge(that_component)
final_components[component_name] = final
return self.__class__(optional=self.optional, target=self.target, **final_components)
def _parse_version_plus_build(v_plus_b):
    """This should reliably pull the build string out of a version + build string combo.
    Examples:
        >>> _parse_version_plus_build("=1.2.3 0")
        ('=1.2.3', '0')
        >>> _parse_version_plus_build("1.2.3=0")
        ('1.2.3', '0')
        >>> _parse_version_plus_build(">=1.0 , < 2.0 py34_0")
        ('>=1.0,<2.0', 'py34_0')
        >>> _parse_version_plus_build(">=1.0 , < 2.0 =py34_0")
        ('>=1.0,<2.0', 'py34_0')
        >>> _parse_version_plus_build("=1.2.3 ")
        ('=1.2.3', None)
        >>> _parse_version_plus_build(">1.8,<2|==1.7")
        ('>1.8,<2|==1.7', None)
        >>> _parse_version_plus_build("* openblas_0")
        ('*', 'openblas_0')
        >>> _parse_version_plus_build("* *")
        ('*', '*')
    """
    match = re.search(r'((?:.+?)[^><!,|]?)(?:(?<![=!|,<>~])(?:[ =])([^-=,|<>~]+?))?$', v_plus_b)
    if match is None:
        # no split point found: the whole string is the version
        version, build = v_plus_b, None
    else:
        version, build = match.groups()
        if build:
            build = build.strip()
    # strip internal spaces from the version (e.g. ">=1.0 , < 2.0")
    return version and version.replace(' ', ''), build
def _parse_legacy_dist(dist_str):
    """
    Split a legacy dist string (optionally with a package extension)
    into (name, version, build).

    Examples:
        >>> _parse_legacy_dist("_license-1.1-py27_1.tar.bz2")
        ('_license', '1.1', 'py27_1')
        >>> _parse_legacy_dist("_license-1.1-py27_1")
        ('_license', '1.1', 'py27_1')
    """
    dist_str, _ = strip_pkg_extension(dist_str)
    # rsplit: package names may themselves contain '-'
    name, version, build = dist_str.rsplit('-', 2)
    return name, version, build
def _parse_channel(channel_val):
    """Resolve a channel string to (canonical_channel_name, subdir).

    Returns (None, None) for an empty/None input.
    """
    if not channel_val:
        return None, None
    chn = Channel(channel_val)
    # fall back to the base URL when the channel has no short name
    channel_name = chn.name or chn.base_url
    return channel_name, chn.subdir
# memoization cache for _parse_spec_str, keyed on the original spec string
_PARSE_CACHE = {}


def _parse_spec_str(spec_str):
    """Parse a MatchSpec string into a dict of match components.

    Handles, in order: the legacy trailing-'@' track-features form, '#'
    comments, ' if ' conditionals (ignored), direct package-file URLs,
    '[...]' bracket options, '(...)' paren options, 'channel::' prefixes,
    and finally the name + version + build tail. Results are memoized in
    _PARSE_CACHE.
    """
    cached_result = _PARSE_CACHE.get(spec_str)
    if cached_result:
        return cached_result
    original_spec_str = spec_str
    # pre-step for ugly backward compat
    if spec_str.endswith('@'):
        feature_name = spec_str[:-1]
        return {
            'name': '*',
            'track_features': (feature_name,),
        }
    # Step 1. strip '#' comment
    if '#' in spec_str:
        ndx = spec_str.index('#')
        spec_str, _ = spec_str[:ndx], spec_str[ndx:]
        # BUGFIX: previously `spec_str.strip()` was called without assigning
        # the result, leaving trailing whitespace in place (a no-op).
        spec_str = spec_str.strip()
    # Step 1.b strip ' if ' anticipating future compatibility issues
    spec_split = spec_str.split(' if ', 1)
    if len(spec_split) > 1:
        log.debug("Ignoring conditional in spec %s", spec_str)
    spec_str = spec_split[0]
    # Step 2. done if spec_str is a tarball
    if is_package_file(spec_str):
        # treat as a normal url
        if not is_url(spec_str):
            spec_str = unquote(path_to_url(expand(spec_str)))
        channel = Channel(spec_str)
        if channel.subdir:
            name, version, build = _parse_legacy_dist(channel.package_filename)
            result = {
                'channel': channel.canonical_name,
                'subdir': channel.subdir,
                'name': name,
                'version': version,
                'build': build,
                'fn': channel.package_filename,
                'url': spec_str,
            }
        else:
            # url is not a channel
            if spec_str.startswith('file://'):
                # We must undo percent-encoding when generating fn.
                path_or_url = url_to_path(spec_str)
            else:
                path_or_url = spec_str
            return {
                'name': '*',
                'fn': basename(path_or_url),
                'url': spec_str,
            }
        return result
    # Step 3. strip off brackets portion
    brackets = {}
    m3 = re.match(r'.*(?:(\[.*\]))', spec_str)
    if m3:
        brackets_str = m3.groups()[0]
        spec_str = spec_str.replace(brackets_str, '')
        brackets_str = brackets_str[1:-1]
        m3b = re.finditer(r'([a-zA-Z0-9_-]+?)=(["\']?)([^\'"]*?)(\2)(?:[, ]|$)', brackets_str)
        for match in m3b:
            key, _, value, _ = match.groups()
            if not key or not value:
                raise InvalidMatchSpec(original_spec_str, "key-value mismatch in brackets")
            brackets[key] = value
    # Step 4. strip off parens portion
    m4 = re.match(r'.*(?:(\(.*\)))', spec_str)
    parens = {}
    if m4:
        parens_str = m4.groups()[0]
        spec_str = spec_str.replace(parens_str, '')
        parens_str = parens_str[1:-1]
        m4b = re.finditer(r'([a-zA-Z0-9_-]+?)=(["\']?)([^\'"]*?)(\2)(?:[, ]|$)', parens_str)
        for match in m4b:
            key, _, value, _ = match.groups()
            parens[key] = value
        if 'optional' in parens_str:
            parens['optional'] = True
    # Step 5. strip off '::' channel and namespace
    m5 = spec_str.rsplit(':', 2)
    m5_len = len(m5)
    if m5_len == 3:
        channel_str, namespace, spec_str = m5
    elif m5_len == 2:
        namespace, spec_str = m5
        channel_str = None
    elif m5_len:
        spec_str = m5[0]
        channel_str, namespace = None, None
    else:
        raise NotImplementedError()
    channel, subdir = _parse_channel(channel_str)
    # bracket values for channel/subdir override the '::' prefix
    if 'channel' in brackets:
        b_channel, b_subdir = _parse_channel(brackets.pop('channel'))
        if b_channel:
            channel = b_channel
        if b_subdir:
            subdir = b_subdir
    if 'subdir' in brackets:
        subdir = brackets.pop('subdir')
    # Step 6. strip off package name from remaining version + build
    m3 = re.match(r'([^ =<>!~]+)?([><!=~ ].+)?', spec_str)
    if m3:
        name, spec_str = m3.groups()
        if name is None:
            raise InvalidMatchSpec(original_spec_str, "no package name found in '%s'" % spec_str)
    else:
        raise InvalidMatchSpec(original_spec_str, "no package name found")
    # Step 7. otherwise sort out version + build
    spec_str = spec_str and spec_str.strip()
    # This was an attempt to make MatchSpec('numpy-1.11.0-py27_0') work like we'd want. It's
    # not possible though because plenty of packages have names with more than one '-'.
    # if spec_str is None and name.count('-') >= 2:
    #     name, version, build = _parse_legacy_dist(name)
    if spec_str:
        if '[' in spec_str:
            raise InvalidMatchSpec(original_spec_str, "multiple brackets sections not allowed")
        version, build = _parse_version_plus_build(spec_str)
        # translate version '=1.2.3' to '1.2.3*'
        # is it a simple version starting with '='? i.e. '=1.2.3'
        if version[0] == '=':
            test_str = version[1:]
            if version[:2] == '==' and build is None:
                version = version[2:]
            elif not any(c in test_str for c in "=,|"):
                if build is None and test_str[-1] != '*':
                    version = test_str + '*'
                else:
                    version = test_str
    else:
        version, build = None, None
    # Step 8. now compile components together
    components = {}
    components['name'] = name if name else '*'
    if channel is not None:
        components['channel'] = channel
    if subdir is not None:
        components['subdir'] = subdir
    if namespace is not None:
        # namespaces are parsed but not yet acted upon
        # components['namespace'] = namespace
        pass
    if version is not None:
        components['version'] = version
    if build is not None:
        components['build'] = build
    # anything in brackets will now strictly override key as set in other area of spec str
    components.update(brackets)
    components['_original_spec_str'] = original_spec_str
    _PARSE_CACHE[original_spec_str] = components
    return components
@with_metaclass(ABCMeta)
class MatchInterface(object):
def __init__(self, value):
self._raw_value = value
@abstractmethod
def match(self, other):
raise NotImplementedError()
def matches(self, value):
return self.match(value)
@property
def raw_value(self):
return self._raw_value
@abstractproperty
def exact_value(self):
"""If the match value is an exact specification, returns | |
int
Updated list of indices of flagged antennas
"""
# get nsamples and check for small auto ants
if correct_van_vleck:
self.history += " Applied Van Vleck correction."
# calculate number of samples going into real or imaginary part
# factor of two comes from variables being circularly-symmetric
nsamples = self.channel_width[0] * self.integration_time[0] * 2
# look for small auto data and flag
flagged_ant_inds = self._flag_small_auto_ants(
nsamples, flag_small_auto_ants, ant_1_inds, ant_2_inds, flagged_ant_inds
)
else:
nsamples = None
# get digital gains
if remove_dig_gains:
self.history += " Divided out digital gains."
# get gains for included coarse channels
# During commissioning a shift in the bit selection in the digital
# receiver was implemented which changed the data scaling by
# a factor of 64. To be compatible with the earlier scaling scheme,
# the digital gains are divided by a factor of 64 here.
# For a more detailed explanation, see PR #908.
dig_gains = dig_gains[:, spw_inds] / 64
else:
dig_gains = None
# get pfb response shape
if remove_coarse_band:
self.history += " Divided out pfb coarse channel bandpass."
cb_array = self._get_pfb_shape(avg_factor)
else:
cb_array = None
# apply corrections to each coarse band
for i in range(len(spw_inds)):
self._correct_coarse_band(
i,
ant_1_inds,
ant_2_inds,
cb_array,
dig_gains,
nsamples,
num_fine_chans,
correct_van_vleck,
remove_coarse_band,
remove_dig_gains,
)
return flagged_ant_inds
def read_mwa_corr_fits(
self,
filelist,
use_aoflagger_flags=None,
remove_dig_gains=True,
remove_coarse_band=True,
correct_cable_len=False,
correct_van_vleck=False,
cheby_approx=True,
flag_small_auto_ants=True,
phase_to_pointing_center=False,
propagate_coarse_flags=True,
flag_init=True,
edge_width=80e3,
start_flag="goodtime",
end_flag=0.0,
flag_dc_offset=True,
remove_flagged_ants=True,
background_lsts=True,
read_data=True,
data_array_dtype=np.complex64,
nsample_array_dtype=np.float32,
run_check=True,
check_extra=True,
run_check_acceptability=True,
strict_uvw_antpos_check=False,
):
"""
Read in MWA correlator gpu box files.
The default settings remove some of the instrumental effects in the bandpass
by dividing out the digital gains and the coarse band shape.
If the desired output is raw correlator data, set remove_dig_gains=False,
remove_coarse_band=False, correct_cable_len=False, and
phase_to_pointing_center=False.
Parameters
----------
filelist : list of str
The list of MWA correlator files to read from. Must include at
least one fits file and only one metafits file per data set.
Can also be a list of lists to read multiple data sets.
axis : str
Axis to concatenate files along. This enables fast concatenation
along the specified axis without the normal checking that all other
metadata agrees. This method does not guarantee correct resulting
objects. Please see the docstring for fast_concat for details.
Allowed values are: 'blt', 'freq', 'polarization'. Only used if
multiple files are passed.
use_aoflagger_flags : bool
Option to use aoflagger mwaf flag files. Defaults to true if aoflagger
flag files are submitted.
remove_dig_gains : bool
Option to divide out digital gains.
remove_coarse_band : bool
Option to divide out coarse band shape.
correct_cable_len : bool
Option to apply a cable delay correction.
correct_van_vleck : bool
Option to apply a van vleck correction.
cheby_approx : bool
Only used if correct_van_vleck is True. Option to implement the van
vleck correction with a chebyshev polynomial approximation.
flag_small_auto_ants : bool
Only used if correct_van_vleck is True. Option to completely flag any
antenna for which the autocorrelation falls below a threshold found by
the Van Vleck correction to indicate bad data. Specifically, the
threshold used is 0.5 * integration_time * channel_width. If set to False,
only the times and frequencies at which the auto is below the
threshold will be flagged for the antenna.
phase_to_pointing_center : bool
Option to phase to the observation pointing center.
propagate_coarse_flags : bool
Option to propagate flags for missing coarse channel integrations
across frequency.
flag_init: bool
Set to True in order to do routine flagging of coarse channel edges,
start or end integrations, or the center fine channel of each coarse
channel. See associated keywords.
edge_width: float
Only used if flag_init is True. The width to flag on the edge of
each coarse channel, in hz. Errors if not equal to integer multiple
of channel_width. Set to 0 for no edge flagging.
start_flag: float or str
Only used if flag_init is True. The number of seconds to flag at the
beginning of the observation. Set to 0 for no flagging. Default is
'goodtime', which uses information in the metafits file to determine
the length of time that should be flagged. Errors if input is not a
float or 'goodtime'. Errors if float input is not equal to an
integer multiple of the integration time.
end_flag: floats
Only used if flag_init is True. The number of seconds to flag at the
end of the observation. Set to 0 for no flagging. Errors if not
equal to an integer multiple of the integration time.
flag_dc_offset: bool
Only used if flag_init is True. Set to True to flag the center fine
channel of each coarse channel.
remove_flagged_ants : bool
Option to perform a select to remove antennas flagged in the metafits
file. If correct_van_vleck and flag_small_auto_ants are both True then
antennas flagged by the Van Vleck correction are also removed.
background_lsts : bool
When set to True, the lst_array is calculated in a background thread.
read_data : bool
Read in the visibility, nsample and flag data. If set to False, only
the metadata will be read in. Setting read_data to False results in
a metadata only object.
data_array_dtype : numpy dtype
Datatype to store the output data_array as. Must be either
np.complex64 (single-precision real and imaginary) or np.complex128
(double-precision real and imaginary).
nsample_array_dtype : numpy dtype
Datatype to store the output nsample_array as. Must be either
np.float64 (double-precision), np.float32 (single-precision), or
np.float16 (half-precision). Half-precision is only recommended for
cases where no sampling or averaging of baselines will occur,
because round-off errors can be quite large (~1e-3).
run_check : bool
Option to check for the existence and proper shapes of parameters
after after reading in the file (the default is True,
meaning the check will be run).
check_extra : bool
Option to check optional parameters as well as required ones (the
default is True, meaning the optional parameters will be checked).
run_check_acceptability : bool
Option to check acceptable range of the values of parameters after
reading in the file (the default is True, meaning the acceptable
range check will be done).
strict_uvw_antpos_check : bool
Option to raise an error rather than a warning if the check that
uvws match antenna positions does not pass.
Raises
------
ValueError
If required files are missing or multiple files metafits files are
included in filelist.
If files from different observations are included in filelist.
If files in fileslist have different fine channel widths
If file types other than fits, metafits, and mwaf files are included
in filelist.
"""
metafits_file = None
ppds_file = None
obs_id = None
file_dict = {}
start_time = 0.0
end_time = 0.0
included_file_nums = []
included_flag_nums = []
aoflagger_warning = False
num_fine_chans = 0
# do datatype checks
if data_array_dtype not in (np.complex64, np.complex128):
raise ValueError("data_array_dtype must be np.complex64 or np.complex128")
if nsample_array_dtype not in (np.float64, np.float32, np.float16):
raise ValueError(
"nsample_array_dtype must be one of: np.float64, np.float32, np.float16"
)
# do start_flag check
if not isinstance(start_flag, (int, float)):
if start_flag != "goodtime":
raise ValueError("start_flag must be int or float or 'goodtime'")
# set future array shapes
self._set_future_array_shapes()
# iterate through files and organize
# create a list of included file numbers
# find the first and last times that have data
for filename in filelist:
# update filename attribute
basename = os.path.basename(filename)
self.filename = uvutils._combine_filenames(self.filename, [basename])
self._filename.form = (len(self.filename),)
if filename.lower().endswith(".metafits"):
# force only one metafits file
if metafits_file is not None:
raise ValueError("multiple metafits files in filelist")
metafits_file = filename
elif filename.lower().endswith(".fits"):
# check if ppds file
try:
fits.getheader(filename, extname="ppds")
ppds_file = filename
except Exception:
# check obsid
head0 = fits.getheader(filename, 0)
if obs_id is None:
obs_id = head0["OBSID"]
else:
if head0["OBSID"] != obs_id:
raise ValueError(
"files from different observations submitted "
"in same list"
)
# check headers for first and last times containing data
headstart = fits.getheader(filename, 1)
headfin = fits.getheader(filename, -1)
first_time = headstart["TIME"] + | |
counting of the number of annotated FOVs from dragonfly-automation datasets
# (the 'dad' appendix stands for dragonfly-automation datasets)
dad_pmls = ['PML%04d' % ind for ind in range(196, 999)]
fov_counts_query = fov_counts_query.filter(models.MicroscopyFOV.pml_id == sa.any_(dad_pmls))
fov_counts_dad = pd.DataFrame([row._asdict() for row in fov_counts_query.all()])
fov_counts_dad.rename(
columns={column: '%s_dad' % column for column in fov_counts_dad.columns},
inplace=True
)
if fov_counts_dad.shape[0]:
fov_counts = pd.merge(
fov_counts, fov_counts_dad, left_on='id', right_on='id_dad', how='left'
)
# the list of pulldown_ids with saved cytoscape networks
pulldowns_with_saved_networks = [
row[0] for row in Session.query(models.MassSpecPulldownNetwork.pulldown_id).all()
]
cell_line_payloads = []
for line in lines:
payload = payloads.generate_cell_line_payload(line, included_fields)
# append the FOV counts (for the internal version of the frontend)
fov_count = fov_counts.loc[fov_counts.id == line.id].iloc[0]
if fov_count.shape[0]:
payload['fov_counts'] = json.loads(fov_count.to_json())
# append a flag for the existence of a saved pulldown network
pulldown_id = payload['best_pulldown']['id']
if pulldown_id is not None:
payload['best_pulldown']['has_saved_network'] = (
pulldown_id in pulldowns_with_saved_networks
)
cell_line_payloads.append(payload)
return flask.jsonify(cell_line_payloads)
class CellLineResource(Resource):
    # Shared base for endpoints keyed on a cell_line_id.
    def parse_listlike_arg(self, name, allowed_values, sep=','):
        '''
        Parse and validate a list-like URL parameter

        Returns a (values, error) tuple. NOTE(review): flask.abort raises an
        HTTPException immediately, so `error` can never actually be assigned;
        the (values, error) plumbing (and callers' `if error` checks) appears
        to be dead code — confirm the intended behavior.
        '''
        error = None
        arg = flask.request.args.get(name)
        values = arg.split(sep) if arg else []
        if not set(values).issubset(allowed_values):
            error = flask.abort(404, 'Invalid value passed to the %s parameter' % name)
        return values, error
    @staticmethod
    def get_cell_line(cell_line_id):
        # Return the CellLine row for the id, or None if it does not exist.
        return (
            flask.current_app.Session.query(models.CellLine)
            .filter(models.CellLine.id == cell_line_id)
            .one_or_none()
        )
class CellLine(CellLineResource):
    '''
    The cell line metadata for a single cell line
    '''
    @cache.cached(key_prefix=cache_key)
    def get(self, cell_line_id):
        cell_line = self.get_cell_line(cell_line_id)
        # 'fields' selects optional payload sections; only 'best-fov' is allowed
        fields, error = self.parse_listlike_arg('fields', allowed_values=['best-fov'])
        if error:
            return error
        return flask.jsonify(payloads.generate_cell_line_payload(cell_line, fields))
class InteractorResource(Resource):
    # Shared base for endpoints keyed on an ENSG (Ensembl gene) ID.
    @classmethod
    def get_protein_groups(cls, ensg_id):
        '''
        Get all of the significant protein groups associated with the ENSG ID
        '''
        protein_groups = (
            flask.current_app.Session.query(models.MassSpecProteinGroup)
            .join(models.ProteinGroupEnsemblAssociation)
            .filter(models.ProteinGroupEnsemblAssociation.ensg_id == ensg_id)
            .options(sa.orm.joinedload(models.MassSpecProteinGroup.hgnc_metadata))
            .all()
        )
        return protein_groups
    @classmethod
    def construct_metadata(cls, ensg_id):
        '''
        Generates the metadata object for an ENSG ID,
        following the schema of the cell line metadata (see payloads.generate_cell_line_payload)

        NOTE(review): assumes the ENSG ID exists in HGNCMetadata (one_or_none
        may return None) and has at least one uniprotkb_metadata row; a missing
        row raises AttributeError/IndexError here rather than returning a 404 —
        confirm whether that is intended.
        '''
        payload = {}
        hgnc_metadata = (
            flask.current_app.Session.query(models.HGNCMetadata)
            .options(sa.orm.joinedload(models.HGNCMetadata.uniprotkb_metadata, innerjoin=True))
            .options(sa.orm.joinedload(models.HGNCMetadata.abundance_measurements))
            .filter(models.HGNCMetadata.ensg_id == ensg_id)
            .one_or_none()
        )
        protein_groups = cls.get_protein_groups(ensg_id)
        abundance_payload = payloads.generate_abundance_measurement_payload(
            hgnc_metadata.abundance_measurements
        )
        payload['abundance_data'] = abundance_payload
        # TODO: a better way to pick the best uniprot_id from which to construct the metadata
        # (that is, from which to take the functional annotation)
        uniprotkb_metadata = hgnc_metadata.uniprotkb_metadata[0]
        # HACK: this is copied from the cell_line payload
        payload['uniprot_metadata'] = {
            'uniprot_id': uniprotkb_metadata.primary_uniprot_id,
            'protein_name': uniprot_utils.prettify_hgnc_protein_name(hgnc_metadata.name),
            'annotation': uniprot_utils.prettify_uniprot_annotation(
                uniprotkb_metadata.function_comment
            ),
        }
        # generic ensg-level metadata (mimics the cell_line 'metadata' field)
        payload['metadata'] = {
            'ensg_id': ensg_id,
            'target_name': hgnc_metadata.symbol,
            'has_interactors': protein_groups is not None and len(protein_groups) > 0,
            'is_expressed': (
                abundance_payload is not None and abundance_payload['rna_abundance'] > 0
            ),
        }
        return payload
class InteractorMetadata(InteractorResource):
    '''
    The metadata for an 'interactor'
    (note that the 'interactor' nomenclature is misleading;
    this is any gene in the genome, identified by an ensg_id)
    '''
    @cache.cached(key_prefix=cache_key)
    def get(self, ensg_id):
        # delegate payload construction to the shared base-class helper
        return flask.jsonify(self.construct_metadata(ensg_id))
class InteractorNetwork(InteractorResource):
    '''
    The cytoscape interaction network for an interactor (identified by an ensg_id)
    '''
    @cache.cached(key_prefix=cache_key)
    def get(self, ensg_id):
        protein_groups = self.get_protein_groups(ensg_id)
        if not protein_groups:
            return flask.abort(404, 'There are no protein groups for ENSG ID %s' % ensg_id)
        # collect every pulldown in which any of the protein groups appears
        interacting_pulldowns = []
        for protein_group in protein_groups:
            interacting_pulldowns.extend(protein_group.get_pulldowns())
        # TODO: refactor construct_network so we do not have to pass a single primary protein group
        primary_protein_group = protein_groups[0]
        nodes, edges = cytoscape_payload.construct_network(
            interacting_pulldowns=interacting_pulldowns,
            origin_protein_group=primary_protein_group,
        )
        # create compound nodes to represent superclusters and subclusters
        nodes, parent_nodes = cytoscape_payload.construct_compound_nodes(
            nodes,
            clustering_analysis_type=flask.request.args.get('clustering_analysis_type'),
            subcluster_type=flask.request.args.get('subcluster_type'),
            engine=flask.current_app.Session.get_bind()
        )
        # cytoscape.js element format: each element wrapped in {'data': ...}
        payload = {
            'parent_nodes': [{'data': node} for node in parent_nodes],
            'nodes': [{'data': node} for node in nodes],
            'edges': [{'data': edge} for edge in edges],
            'metadata': self.construct_metadata(ensg_id)
        }
        return flask.jsonify(payload)
class FACSDataset(CellLineResource):
    '''
    The FACS dataset for a single cell line (404 when none exists)
    '''
    @cache.cached(key_prefix=cache_key)
    def get(self, cell_line_id):
        cell_line = self.get_cell_line(cell_line_id)
        dataset = cell_line.facs_dataset
        if not dataset:
            return flask.abort(404)
        return flask.jsonify(payloads.generate_facs_payload(dataset))
class MicroscopyFOVMetadata(CellLineResource):
    '''
    Metadata for all of the FOVs associated with a cell line
    '''
    @cache.cached(key_prefix=cache_key)
    def get(self, cell_line_id):
        # 'annotatedonly=true' restricts results to FOVs with an annotation
        only_annotated = flask.request.args.get('annotatedonly') == 'true'
        # optional payload sections: 'rois' and/or 'thumbnails'
        included_fields, error = self.parse_listlike_arg(
            name='fields', allowed_values=['rois', 'thumbnails']
        )
        if error:
            return error
        line = self.get_cell_line(cell_line_id)
        query = (
            flask.current_app.Session.query(models.MicroscopyFOV)
            .options(
                sa.orm.joinedload(models.MicroscopyFOV.dataset, innerjoin=True),
                sa.orm.joinedload(models.MicroscopyFOV.results, innerjoin=True),
                sa.orm.joinedload(models.MicroscopyFOV.annotation)
            )
            .filter(models.MicroscopyFOV.cell_line_id == line.id)
        )
        if only_annotated:
            query = query.filter(models.MicroscopyFOV.annotation != None) # noqa
        # eager-load the optional relationships only when requested
        if 'rois' in included_fields:
            query = query.options(
                sa.orm.joinedload(models.MicroscopyFOV.rois, innerjoin=True)
            )
        if 'thumbnails' in included_fields:
            query = query.options(
                sa.orm.joinedload(models.MicroscopyFOV.thumbnails, innerjoin=False)
            )
        fovs = query.all()
        if not fovs:
            return flask.abort(404, 'There are no FOVs associated with the cell line')
        payload = [
            payloads.generate_fov_payload(
                fov,
                include_rois=('rois' in included_fields),
                include_thumbnails=('thumbnails' in included_fields)
            )
            for fov in fovs
        ]
        # sort by FOV score (unscored FOVs last)
        payload = sorted(payload, key=lambda row: row['metadata'].get('score') or -2)[::-1]
        return flask.jsonify(payload)
class PulldownResource(CellLineResource):
    """Base resource for endpoints addressed by a mass-spec pulldown id."""

    @staticmethod
    def get_pulldown(pulldown_id):
        session = flask.current_app.Session
        pulldown = (
            session.query(models.MassSpecPulldown)
            .filter(models.MassSpecPulldown.id == pulldown_id)
            .one_or_none()
        )
        # one_or_none() yields None when the id is unknown
        if pulldown is None:
            return flask.abort(404, 'Pulldown %d does not exist' % pulldown_id)
        return pulldown
class PulldownHits(PulldownResource):
    '''
    The metadata and hits for a pulldown
    '''
    @cache.cached(key_prefix=cache_key)
    def get(self, pulldown_id):
        session = flask.current_app.Session
        pulldown = self.get_pulldown(pulldown_id)
        if not pulldown.hits:
            return flask.abort(404, 'Pulldown %s does not have any hits' % pulldown_id)

        significant_hits = pulldown.get_significant_hits()

        # we need only the pval and enrichment for the non-significant hits
        nonsignificant_hits = (
            session.query(models.MassSpecHit.pval, models.MassSpecHit.enrichment)
            .filter(models.MassSpecHit.pulldown_id == pulldown.id)
            .filter(models.MassSpecHit.is_minor_hit == False)  # noqa
            .filter(models.MassSpecHit.is_significant_hit == False)  # noqa
            .all()
        )

        # construct the JSON payload from the pulldown and hit instances
        return flask.jsonify(
            payloads.generate_pulldown_hits_payload(
                pulldown, significant_hits, nonsignificant_hits
            )
        )
class PulldownNetwork(PulldownResource):
    '''
    The cytoscape interaction network for a pulldown
    (see comments in cytoscape_payload.construct_network for details)
    '''
    @cache.cached(key_prefix=cache_key)
    def get(self, pulldown_id):
        pulldown = self.get_pulldown(pulldown_id)
        # determine the primary protein group to represent the target;
        # if the target appears in its own pulldown, this is easy
        origin_protein_group = None
        bait_hit = pulldown.get_bait_hit(only_one=True)
        if bait_hit:
            origin_protein_group = bait_hit.protein_group
        # if the target does not appear in its own pulldown, we must use the protein group
        # for the ENSG ID associated with the target's crispr design
        # TODO: refactor so we can keep the full list rather than picking one protein_group
        else:
            protein_groups = InteractorResource.get_protein_groups(
                pulldown.cell_line.crispr_design.ensg_id
            )
            if protein_groups:
                origin_protein_group = protein_groups[0]
        # edge case: there is no significant protein group assoc with the target at all
        # TODO: figure out how to handle this gracefully;
        # either query for *any* PG assoc w the ensg_id, or mock a protein_group instance?
        if origin_protein_group is None:
            crispr_design = pulldown.cell_line.crispr_design
            return flask.abort(
                404,
                'There is no protein group associated with target %s (%s)'
                % (crispr_design.target_name, crispr_design.ensg_id)
            )
        # create nodes to represent direct hits and/or interacting pulldowns,
        # and the edges between them
        nodes, edges = cytoscape_payload.construct_network(
            target_pulldown=pulldown, origin_protein_group=origin_protein_group
        )
        # create compound nodes to represent superclusters and subclusters
        nodes, parent_nodes = cytoscape_payload.construct_compound_nodes(
            nodes,
            clustering_analysis_type=flask.request.args.get('clustering_analysis_type'),
            subcluster_type=flask.request.args.get('subcluster_type'),
            engine=flask.current_app.Session.get_bind()
        )
        payload = {
            'parent_nodes': [{'data': node} for node in parent_nodes],
            'nodes': [{'data': node} for node in nodes],
            'edges': [{'data': edge} for edge in edges],
            'metadata': pulldown.as_dict(),
        }
        # coerce NaNs and Infs in stoichiometries to None
        # (round-tripping through pandas.to_json turns non-finite floats into
        # JSON null, which flask.jsonify alone would not do)
        payload['nodes'] = json.loads(
            pd.DataFrame(data=payload['nodes']).to_json(orient='records')
        )
        return flask.jsonify(payload)
class PulldownClusters(PulldownResource):
'''
The cluster heatmap(s) in which a cell line's pulldown appears
The cluster heatmap represents a
'''
def get(self, pulldown_id):
Session = flask.current_app.Session
pulldown = self.get_pulldown(pulldown_id)
# get the cluster_ids of all clusters in which the pulldown appears
rows = (
Session.query(sa.distinct(models.MassSpecClusterHeatmap.cluster_id))
.join(models.MassSpecClusterHeatmap.hit)
.join(models.MassSpecHit.pulldown)
.filter(models.MassSpecPulldown.id == pulldown.id)
.all()
)
cluster_ids = [row[0] for row in rows]
if not cluster_ids:
return flask.abort(404, 'Pulldown %s does not appear in any clusters' % pulldown_id)
# for now, if there are multiple clusters, pick the first one
cluster_id = cluster_ids[0]
# get the cluster heatmap tiles
# (one row of the ClusterHeatmap table corresponds to one tile)
rows = (
Session.query(
models.MassSpecClusterHeatmap.hit_id,
models.MassSpecClusterHeatmap.row_index,
models.MassSpecClusterHeatmap.col_index,
models.MassSpecHit.pval,
models.MassSpecHit.enrichment,
models.MassSpecHit.interaction_stoich,
models.MassSpecHit.abundance_stoich,
)
.join(models.MassSpecClusterHeatmap.hit)
.filter(models.MassSpecClusterHeatmap.cluster_id == cluster_id)
.all()
)
heatmap_tiles = pd.DataFrame(data=rows)
# pick an arbitrary hit_id from each column and each row of the heatmap
# we will use these hit_ids to retrieve the pulldown and protein group metadata
# for the columns and rows, respectively, since we know/assume that all of the hits
# in each column correspond to the same pulldown, and all of the hits in each row
# correspond to the same protein group
heatmap_row_metadata = heatmap_tiles.groupby('row_index').first().reset_index()
heatmap_column_metadata = | |
import datetime
import traceback
from SmartAPI.common.RESOURCE import RESOURCE
from SmartAPI.common.PROPERTY import PROPERTY
from SmartAPI.common.Tools import Tools
from SmartAPI.rdf.Resource import Resource
from SmartAPI.rdf.Statement import Statement
from SmartAPI.common.SERIALIZATION import SERIALIZATION
from SmartAPI.common.SmartAPICrypto import SmartAPICrypto
from Crypto.PublicKey import RSA
from rdflib.term import Literal, BNode
from rdflib import URIRef, XSD
import uuid
import hashlib
import base64
from isodate.duration import Duration
import isodate
class Obj(object):
    """Base class for SMARTAPI model objects.

    An Obj is an RDF-serializable entity that can also be emitted as an
    encrypted and/or signed multipart "reference" (see serializeToReference).
    """

    # URI prefix used when minting identifier URIs for reference objects
    referenceUriPrefix = "http://smarteg.org/reference/1.0/seas#"
def __init__(self, seasIdentifierUri = None):
    """Initialize an empty SMARTAPI object.

    seasIdentifierUri -- optional URI identifying this object; when None the
    object serializes as an anonymous (blank) RDF node.
    """
    self.seasIdentifierUri = seasIdentifierUri
    self.sameAs = None
    self.name = None
    self.generatedBy = None
    self.generatedAt = None
    self.description = None
    # rdf:type entries (Obj instances or plain URI strings)
    self.types = []
    self.provenances = []
    self.targets = []
    # map for properties (name, propertyobject pairs)
    self.properties = {}
    self.offerings = []
    # Reference object related properties
    # the following are all Strings
    self.signature = None
    self.sessionKey = None
    self.hashCode = None
    # the following are all type Obj
    self.encryptionKeyType = None
    self.notary = None
    # serialization flags: emit this Obj as a multipart reference, and
    # whether that reference should be encrypted and/or signed
    self.serializeAsReference = False
    self.encrypted = False
    self.signed = False
    # keys
    self.encryptKey = None  # it is also public key
    self.privateKey = None  # i.e., signKey in Obj.java
    # if the Obj is a reference, then encryptedStringRepresentation will have a String value after parse()
    self.encryptedStringRepresentation = None
    # only needed for NOTARIZEDSESSIONKEY type
    self.senderId = ""
def _convert(self, obj, model):
    """Map a Python value onto its RDF representation in the given model.

    Obj instances recurse into serialize(); URIRefs pass through untouched;
    durations become xsd:duration typed literals; everything else becomes a
    plain literal.
    """
    if isinstance(obj, Obj):
        return obj.serialize(model)
    if isinstance(obj, URIRef):
        return obj
    if isinstance(obj, (Duration, datetime.timedelta)):
        return model.createTypedLiteral(isodate.duration_isoformat(obj), XSD.duration)
    return Literal(obj)
def serialize(self, model):
    """Serialize this object into an RDF resource in the given model.

    Already-serialized objects are memoized in Tools.serializedObjs so that
    shared and cyclic references resolve to one resource. When the object is
    flagged as a reference, serialization is delegated to
    serializeToReference() instead.

    Fixes: replaced the Python-2-only dict.has_key()/iteritems() calls with
    `in` and items() (valid on both Python 2 and 3), narrowed the bare
    except, and stopped shadowing the builtin `type` in the type loop.
    """
    if self.serializeAsReference:
        return self.serializeToReference(model)
    # reuse the memoized resource (also terminates cyclic object graphs)
    if self in Tools.serializedObjs:
        return Tools.serializedObjs.get(self)
    # create a named resource when we have an identifier URI, else a blank node
    if self.hasIdentifierUri():
        resource = model.createResource(self.seasIdentifierUri)
    else:
        resource = model.createResource()
    # register before recursing so nested serialization sees this resource
    Tools.serializedObjs[self] = resource
    # sameas
    if self.hasSameAs():
        owlSameAs = model.createProperty(PROPERTY.SAMEAS)
        resource.addProperty(owlSameAs, self.sameAs.serialize(model))
    # generatedby
    if self.hasGeneratedBy():
        resource.addProperty(model.createProperty(PROPERTY.GENERATEDBY), self.generatedBy.serialize(model))
    # generatedat
    if self.hasGeneratedAt():
        resource.addProperty(model.createProperty(PROPERTY.GENERATEDAT), model.createLiteral(self.getGeneratedAt()))
    # rdf:type entries; bare string types fall back to a plain URIRef
    typeProp = model.createProperty(PROPERTY.RDF_TYPE)
    for rdf_type in self.types:
        try:
            serialized = rdf_type.serialize(model)
        except Exception:
            serialized = URIRef(rdf_type)
        resource.addProperty(typeProp, serialized)
    # targets
    for target in self.targets:
        resource.addProperty(model.createProperty(PROPERTY.TARGET), model.createResource(target))
    # provenances
    provenanceProp = model.createProperty(PROPERTY.PROVENANCE)
    for provenance in self.provenances:
        resource.addProperty(provenanceProp, provenance.serialize(model))
    # set offerings
    for offering in self.offerings:
        resource.addProperty(model.createProperty(PROPERTY.OFFERS), offering.serialize(model))
    # name
    if self.hasName():
        rdfsLabel = model.createProperty(PROPERTY.RDFS_LABEL)
        resource.addProperty(rdfsLabel, model.createTypedLiteral(self.name, XSD.string))
    # comment
    if self.hasDescription():
        rdfsComment = model.createProperty(PROPERTY.COMMENT)
        resource.addProperty(rdfsComment, model.createTypedLiteral(self.description, XSD.string))
    # Usually the following five properties should have None value. But they can have value if this
    # Obj is recovered from a reference.
    # sessionKey
    if self.hasSessionKey():
        resource.addProperty(model.createProperty(PROPERTY.SESSIONKEY), model.createTypedLiteral(self.sessionKey, XSD.string))
    # signature
    if self.hasSignature():
        resource.addProperty(model.createProperty(PROPERTY.SIGNATURE), model.createTypedLiteral(self.signature, XSD.string))
    # hashcode
    if self.hasHashCode():
        resource.addProperty(model.createProperty(PROPERTY.HASHCODE), model.createTypedLiteral(self.hashCode, XSD.string))
    # encryptionKeyType
    if self.hasEncryptionKeyType():
        resource.addProperty(model.createProperty(PROPERTY.ENCRYPTIONKEYTYPE), model.createResource(self.getEncryptionKeyType()))
    # notary
    if self.hasNotary():
        resource.addProperty(model.createProperty(PROPERTY.NOTARY), model.createResource(self.getNotary()))
    # add object properties; list-valued entries fan out to one triple each
    for key, entry in self.properties.items():
        if isinstance(entry, list):
            for element in entry:
                self._add_property(resource, key, element, model)
        else:
            self._add_property(resource, key, entry, model)
    return resource
def serializeToReference(self, model):
    '''
    serialize to an encrypted version of this Object. Its actual content is saved in Tools.messageParts
    @return: a rdf Resource
    '''
    # imported here to avoid a circular import at module load time
    from SmartAPI.agents.TransactionAgent import TransactionAgent
    # generate a SeasIdentifierUri for this object if not yet exist. Anonymous is not allowed here.
    if (self.hasIdentifierUri() == False):
        self.setIdentifierUri(self.referenceUriPrefix + str(uuid.uuid4()))
    # convert current Object to string for later encryption
    # (clear the flag first so Tools().toString() serializes the full body
    # instead of recursing back into this method)
    self.serializeAsReference = False
    partString = Tools().toString(self, SERIALIZATION.TURTLE)
    # TODO: formalize this to a proper encoding method
    # normalize line endings to CRLF without doubling pre-existing CRLFs
    partString = partString.replace("\n", "\r\n")
    partString = partString.replace("\r\n\r\n", "\r\n")
    # using the string to generate encrypted string
    crypto = SmartAPICrypto()
    if self.encrypted:
        # encrypting the normally serialized Object
        # symmetric with notary: encrypt with a fresh session key, then park
        # that key (encrypted for the recipient) at the notary
        if self.getEncryptionKeyType() == RESOURCE.NOTARIZEDSESSIONKEY:
            sessionkey = crypto.generateSymmetricKey()
            partString = crypto.symmetricEncrypt(sessionkey, partString)
            # encrypt session key with recipient's public key
            encryptedKey = crypto.encryptAndEncodeKey(self.encryptKey, sessionkey)
            # calculate hashcode
            hash = crypto.createEncodedMessageDigest(partString)
            self.setHashCode(hash)
            if self.signed:
                signature = crypto.sign(self.privateKey, partString)
                self.setSignature(signature)
            # send encryptedKey to notary for storing
            if not TransactionAgent.sendKeyToNotary(self.senderId, self.getIdentifierUri(),
                    hash, encryptedKey, self.getSignature(), self.getNotary()):
                raise Exception("Sending notarized session key to notary failed.")
        # with public key
        if self.getEncryptionKeyType() == RESOURCE.PUBLICKEY:
            # encrypt data with encrypted session key attached
            partString = crypto.asymmetricEncrypt(self.encryptKey, partString)[0]
        # with only session key. *insecure*
        if self.getEncryptionKeyType() == RESOURCE.SESSIONKEY:
            # encrypt data
            partString = crypto.symmetricEncrypt(self.encryptKey, partString)
            # save key to reference
            sessionkeyAsString = base64.b64encode(self.encryptKey)
            self.setSessionKey(sessionkeyAsString)
    # hash/sign the (possibly still unencrypted) payload unless the notarized
    # branch above already did so
    if not self.hasHashCode():
        hash = crypto.createEncodedMessageDigest(partString)
        self.setHashCode(hash)
    if self.signed and (not self.hasSignature()):
        signature = crypto.sign(self.privateKey, partString)
        self.setSignature(signature)
    # save encrypted or unencrypted partString and its uri to multipart as body part
    Tools.saveForMessageParts(self.getIdentifierUri(), partString)
    # add all these to the new Resource
    # Note: MULTIPARTREFERENCE type is not included in encrypted string representation.
    self.addType(RESOURCE.MULTIPARTREFERENCE)
    resource = model.createResource(self.getIdentifierUri())
    # type
    typeProp = model.createProperty(PROPERTY.RDF_TYPE)
    for type in self.types:
        try:
            serialized = type.serialize(model)
        except:
            serialized = URIRef(type)
        resource.addProperty(typeProp, serialized)
    # sessionKey
    if self.hasSessionKey():
        resource.addProperty(model.createProperty(PROPERTY.SESSIONKEY), model.createTypedLiteral(self.sessionKey, XSD.string))
    # signature
    if self.hasSignature():
        resource.addProperty(model.createProperty(PROPERTY.SIGNATURE), model.createTypedLiteral(self.signature, XSD.string))
    # hashcode
    if self.hasHashCode():
        resource.addProperty(model.createProperty(PROPERTY.HASHCODE), model.createTypedLiteral(self.hashCode, XSD.string))
    # encryptionKeyType
    if self.hasEncryptionKeyType():
        resource.addProperty(model.createProperty(PROPERTY.ENCRYPTIONKEYTYPE), model.createResource(self.getEncryptionKeyType()))
    # notary
    if self.hasNotary():
        resource.addProperty(model.createProperty(PROPERTY.NOTARY), model.createResource(self.getNotary()))
    return resource
def getStringRepresentation(self):
    """Return the encrypted string form of this Obj (set by parse() for
    encrypted references), or None if this Obj is not such a reference."""
    return self.encryptedStringRepresentation
def _add_property(self, resource, property, entry, model):
    """Attach one (property, value) pair to the resource.

    Non-URIRef property names are wrapped in a Parameter object and emitted
    under the generic PARAMETER predicate.
    """
    from SmartAPI.model.Parameter import Parameter
    if isinstance(property, URIRef):
        value = entry
    else:
        value = Parameter(key=property, value=entry)
        property = PROPERTY.PARAMETER
    resource.addProperty(model.createProperty(property), self._convert(value, model))
@classmethod
def parse(cls, element):
    '''
    factory method and class method. It takes in Resource as parameter, create a Seas Obj or
    its subClass object.
    @return: this newly created object, which could be either a real Obj or its subclass Object
    or a Reference, whose actual content is in multipart message's body part
    '''
    # memoized? return the previously parsed object for this resource
    if isinstance(element, Resource) and Tools.parsedObjs.has_key(element.toString()):
        return Tools.parsedObjs.get(element.toString())
    elif isinstance(element, Resource) and not Tools.parsedObjs.has_key(element.toString()):
        # named resources keep their URI; anonymous ones become blank objects
        if not element.isAnon():
            obj = cls(element.toString())
        else:
            obj = cls()
        # register before recursing so cyclic graphs terminate
        Tools.parsedObjs[element.toString()] = obj
        for i in element.findProperties():
            obj.parseStatement(i)
        # check whether this Obj is a reference
        if obj.isOfType(RESOURCE.MULTIPARTREFERENCE):
            if obj.hasIdentifierUri():
                if (len(Tools.messagePartsForParse) > 0) and (obj.getIdentifierUri() in Tools.messagePartsForParse):
                    stringRepresentation = Tools.messagePartsForParse[obj.getIdentifierUri()]
                    # if this Obj is EncryptedReference, it will be decrypted later by decrypt() call.
                    if obj.isOfType(RESOURCE.ENCRYPTEDREFERENCE):
                        obj.encryptedStringRepresentation = stringRepresentation
                    else:
                        # if not encrypted, replace this reference Obj with a new Object,
                        # which is deserialzed from the stringRepresentation, containing actual content.
                        # This happens in the case of digital signature
                        model = Tools().fromString(stringRepresentation, SERIALIZATION.TURTLE)
                        # find the root Resource
                        rootRes = Tools().getTopNode(model)[0]
                        # figure out its SMARTAPI Class
                        seasCls = Tools().getResourceClass(rootRes)
                        # recover it to Obj or its subclass Object
                        recoveredObj = seasCls.parse(rootRes)
                        # copy signature, hashcode from reference Obj to recovered Obj
                        recoveredObj.setSignature(obj.getSignature())
                        recoveredObj.setHashCode(obj.getHashCode())
                        recoveredObj.types.remove(URIRef(RESOURCE.REFERENCE))
                        obj = recoveredObj
                    # consume the body part so it is not parsed twice
                    # NOTE(review): after the else-branch above, obj is the
                    # recovered object -- this assumes its identifier URI
                    # matches the reference's; confirm for payloads whose
                    # root URI differs
                    del Tools.messagePartsForParse[obj.getIdentifierUri()]
                else:
                    raise Exception('******* ERROR: can not find the encrypted string representation of {}'.format(obj.getIdentifierUri()))
            else:
                raise Exception('***** ERROR: reference Obj has to be named!!!*******')
        return obj
    return None
def parseStatement(self, statement):
'''
It takes in statement as input, add property to existing model Class object.
Return None
'''
from SmartAPI.model.Parameter import Parameter
from SmartAPI.model.Provenance import Provenance
from SmartAPI.model.Activity import Activity
from SmartAPI.model.Offering import Offering
# get predicate and object
predicate = str(statement.getPredicate())
objectNode = statement.getObject()
# type
if predicate == PROPERTY.RDF_TYPE:
self.addType(URIRef(statement.getResource().toString()))
return
# sameas
if predicate == PROPERTY.SAMEAS:
self.setSameAs(statement.getResource().toString())
return
# generatedby
if predicate == PROPERTY.GENERATEDBY:
self.setGeneratedBy(Activity.parse(statement.getResource()))
return
# generatedat
if predicate == PROPERTY.GENERATEDAT:
self.setGeneratedAt(statement.getObject().toPython())
return
# provenance
if predicate == PROPERTY.PROVENANCE:
self.addProvenance(Provenance.parse(statement.getResource()))
return
# offerings
if predicate == PROPERTY.OFFERS:
self.addOffering(Offering.parse(statement.getResource()))
return
# target
if predicate == PROPERTY.TARGET:
self.addTarget(statement.getString())
return
# label
if predicate == PROPERTY.RDFS_LABEL:
self.setName(statement.getString())
return
# comment
if predicate == PROPERTY.COMMENT:
self.setDescription(statement.getString())
return
# sessionKey
if predicate == | |
# tests/test_kubernetes_tools.py
import unittest
from typing import Sequence
import mock
import pytest
from kubernetes.client import V1AWSElasticBlockStoreVolumeSource
from kubernetes.client import V1Container
from kubernetes.client import V1ContainerPort
from kubernetes.client import V1Deployment
from kubernetes.client import V1DeploymentSpec
from kubernetes.client import V1DeploymentStrategy
from kubernetes.client import V1EnvVar
from kubernetes.client import V1ExecAction
from kubernetes.client import V1Handler
from kubernetes.client import V1HostPathVolumeSource
from kubernetes.client import V1HTTPGetAction
from kubernetes.client import V1LabelSelector
from kubernetes.client import V1Lifecycle
from kubernetes.client import V1ObjectMeta
from kubernetes.client import V1PersistentVolumeClaim
from kubernetes.client import V1PersistentVolumeClaimSpec
from kubernetes.client import V1PodSpec
from kubernetes.client import V1PodTemplateSpec
from kubernetes.client import V1Probe
from kubernetes.client import V1ResourceRequirements
from kubernetes.client import V1RollingUpdateDeployment
from kubernetes.client import V1StatefulSet
from kubernetes.client import V1StatefulSetSpec
from kubernetes.client import V1TCPSocketAction
from kubernetes.client import V1Volume
from kubernetes.client import V1VolumeMount
from kubernetes.client.rest import ApiException
from paasta_tools.kubernetes_tools import create_deployment
from paasta_tools.kubernetes_tools import create_stateful_set
from paasta_tools.kubernetes_tools import ensure_paasta_namespace
from paasta_tools.kubernetes_tools import get_active_shas_for_service
from paasta_tools.kubernetes_tools import get_kubernetes_app_by_name
from paasta_tools.kubernetes_tools import get_kubernetes_app_deploy_status
from paasta_tools.kubernetes_tools import get_kubernetes_services_running_here
from paasta_tools.kubernetes_tools import get_kubernetes_services_running_here_for_nerve
from paasta_tools.kubernetes_tools import InvalidKubernetesConfig
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.kubernetes_tools import KubeDeployment
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfigDict
from paasta_tools.kubernetes_tools import KubernetesDeployStatus
from paasta_tools.kubernetes_tools import KubeService
from paasta_tools.kubernetes_tools import list_all_deployments
from paasta_tools.kubernetes_tools import load_kubernetes_service_config
from paasta_tools.kubernetes_tools import load_kubernetes_service_config_no_cache
from paasta_tools.kubernetes_tools import pods_for_service_instance
from paasta_tools.kubernetes_tools import read_all_registrations_for_service_instance
from paasta_tools.kubernetes_tools import update_deployment
from paasta_tools.kubernetes_tools import update_stateful_set
from paasta_tools.utils import AwsEbsVolume
from paasta_tools.utils import DockerVolume
from paasta_tools.utils import InvalidJobNameError
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import PaastaNotConfiguredError
def test_load_kubernetes_service_config_no_cache():
    """Exercise the uncached config loader: invalid inputs raise, and the
    load_deployments flag controls whether the v2 deployments json is read."""
    with mock.patch(
        'service_configuration_lib.read_service_configuration', autospec=True,
    ) as mock_read_service_configuration, mock.patch(
        'service_configuration_lib.read_extra_service_information', autospec=True,
    ) as mock_read_extra_service_information, mock.patch(
        'paasta_tools.kubernetes_tools.load_v2_deployments_json', autospec=True,
    ) as mock_load_v2_deployments_json, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig', autospec=True,
    ) as mock_kube_deploy_config:
        # no configuration exists for the instance yet -> loader raises
        with pytest.raises(NoConfigurationForServiceError):
            load_kubernetes_service_config_no_cache(
                service='kurupt',
                instance='fm',
                cluster='brentford',
                load_deployments=False,
            )
        # a leading underscore marks an invalid instance name
        with pytest.raises(InvalidJobNameError):
            load_kubernetes_service_config_no_cache(
                service='kurupt',
                instance='_fm',
                cluster='brentford',
                load_deployments=False,
            )
        mock_config = {'freq': '108.9'}
        mock_read_extra_service_information.return_value = {'fm': mock_config}
        mock_read_service_configuration.return_value = {}
        # load_deployments=False: deployments json must not be consulted and
        # branch_dict stays None
        ret = load_kubernetes_service_config_no_cache(
            service='kurupt',
            instance='fm',
            cluster='brentford',
            load_deployments=False,
            soa_dir='/nail/blah',
        )
        mock_kube_deploy_config.assert_called_with(
            service='kurupt',
            instance='fm',
            cluster='brentford',
            config_dict={'freq': '108.9'},
            branch_dict=None,
            soa_dir='/nail/blah',
        )
        assert not mock_load_v2_deployments_json.called
        assert ret == mock_kube_deploy_config.return_value
        mock_kube_deploy_config.reset_mock()
        # load_deployments=True: branch_dict comes from the deployments json
        ret = load_kubernetes_service_config_no_cache(
            service='kurupt',
            instance='fm',
            cluster='brentford',
            load_deployments=True,
            soa_dir='/nail/blah',
        )
        mock_load_v2_deployments_json.assert_called_with(
            service='kurupt',
            soa_dir='/nail/blah',
        )
        mock_kube_deploy_config.assert_called_with(
            service='kurupt',
            instance='fm',
            cluster='brentford',
            config_dict={'freq': '108.9'},
            branch_dict=mock_load_v2_deployments_json.return_value.get_branch_dict(),
            soa_dir='/nail/blah',
        )
        assert ret == mock_kube_deploy_config.return_value
def test_load_kubernetes_service_config():
    """The cached loader should simply delegate to the no-cache variant."""
    with mock.patch(
        'paasta_tools.kubernetes_tools.load_kubernetes_service_config_no_cache', autospec=True,
    ) as mock_no_cache_loader:
        result = load_kubernetes_service_config(
            service='kurupt',
            instance='fm',
            cluster='brentford',
            load_deployments=True,
            soa_dir='/nail/blah',
        )
        assert result == mock_no_cache_loader.return_value
class TestKubernetesDeploymentConfig(unittest.TestCase):
def setUp(self):
    """Build a minimal KubernetesDeploymentConfig shared by every test."""
    self.deployment = KubernetesDeploymentConfig(
        service='kurupt',
        instance='fm',
        cluster='brentford',
        config_dict=KubernetesDeploymentConfigDict(
            bounce_method='crossover',
            instances=3,
        ),
        branch_dict=None,
        soa_dir='/nail/blah',
    )
def test_copy(self):
    """copy() must return an equal but distinct instance."""
    duplicate = self.deployment.copy()
    assert duplicate == self.deployment
    another = self.deployment.copy()
    assert another is not self.deployment
def test_get_bounce_method(self):
    """bounce_method maps to a k8s strategy; EBS volumes are rejected."""
    with mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_aws_ebs_volumes', autospec=True,
    ) as mock_ebs_volumes:
        mock_ebs_volumes.return_value = []
        # default 'crossover' config maps to RollingUpdate
        assert self.deployment.get_bounce_method() == 'RollingUpdate'
        self.deployment.config_dict['bounce_method'] = 'downthenup'
        assert self.deployment.get_bounce_method() == 'Recreate'
        self.deployment.config_dict['bounce_method'] = 'crossover'
        # if ebs we must downthenup for now as we need to free up the EBS for the new instance
        mock_ebs_volumes.return_value = ['some-ebs']
        with pytest.raises(Exception):
            self.deployment.get_bounce_method()
def test_get_deployment_strategy(self):
    """RollingUpdate gets surge parameters; Recreate gets a bare strategy."""
    with mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_bounce_method', autospec=True,
        return_value='RollingUpdate',
    ) as mock_bounce_method:
        rolling_strategy = V1DeploymentStrategy(
            type='RollingUpdate',
            rolling_update=V1RollingUpdateDeployment(
                max_surge='100%',
                max_unavailable='0%',
            ),
        )
        assert self.deployment.get_deployment_strategy_config() == rolling_strategy
        mock_bounce_method.return_value = 'Recreate'
        assert self.deployment.get_deployment_strategy_config() == V1DeploymentStrategy(
            type='Recreate',
        )
def test_get_sanitised_volume_name(self):
    """get_sanitised_volume_name replaces '/' separators and ignores a
    trailing slash.

    Bug fix: the comparisons were bare expressions (flake8 B015), so this
    test could never fail; they are now real assertions.
    """
    assert self.deployment.get_sanitised_volume_name('/var/tmp') == 'slash-varslash-tmp'
    assert self.deployment.get_sanitised_volume_name('/var/tmp/') == 'slash-varslash-tmp'
def test_get_sidecar_containers(self):
    """The hacheck sidecar is built with a hadown pre-stop hook; the nerve
    readiness probe is attached only when enabled in the system config."""
    with mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_registrations', autospec=True,
        return_value=['universal.credit'],
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_kubernetes_environment', autospec=True,
        return_value={},
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_volume_name', autospec=True,
        return_value='sane-name',
    ):
        # readiness check disabled -> no readiness_probe on the sidecar
        mock_system_config = mock.Mock(
            get_enable_nerve_readiness_check=mock.Mock(return_value=False),
            get_nerve_readiness_check_script=mock.Mock(return_value='/nail/blah.sh'),
            get_hacheck_sidecar_image_url=mock.Mock(return_value='some-docker-image'),
        )
        ret = self.deployment.get_sidecar_containers(mock_system_config)
        expected = [
            V1Container(
                env={},
                image='some-docker-image',
                lifecycle=V1Lifecycle(
                    pre_stop=V1Handler(
                        _exec=V1ExecAction(
                            command=[
                                '/bin/sh',
                                '-c',
                                '/usr/bin/hadown '
                                'universal.credit; sleep '
                                '31',
                            ],
                        ),
                    ),
                ),
                name='hacheck',
                ports=[V1ContainerPort(container_port=6666)],
            ),
        ]
        assert ret == expected
        # readiness check enabled -> sidecar gains an exec readiness_probe
        mock_system_config = mock.Mock(
            get_enable_nerve_readiness_check=mock.Mock(return_value=True),
            get_nerve_readiness_check_script=mock.Mock(return_value='/nail/blah.sh'),
            get_hacheck_sidecar_image_url=mock.Mock(return_value='some-docker-image'),
        )
        ret = self.deployment.get_sidecar_containers(mock_system_config)
        expected = [
            V1Container(
                env={},
                image='some-docker-image',
                lifecycle=V1Lifecycle(
                    pre_stop=V1Handler(
                        _exec=V1ExecAction(
                            command=[
                                '/bin/sh',
                                '-c',
                                '/usr/bin/hadown '
                                'universal.credit; sleep '
                                '31',
                            ],
                        ),
                    ),
                ),
                name='hacheck',
                ports=[V1ContainerPort(container_port=6666)],
                readiness_probe=V1Probe(
                    _exec=V1ExecAction(
                        command=['/nail/blah.sh', 'universal.credit'],
                    ),
                    initial_delay_seconds=10,
                    period_seconds=10,
                ),
            ),
        ]
        assert ret == expected
def test_get_container_env(self):
    """User env vars come first, followed by the injected kubernetes ones."""
    with mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_env', autospec=True,
        return_value={'mc': 'grindah', 'dj': 'beats'},
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_kubernetes_environment', autospec=True,
        return_value=[V1EnvVar(name='manager', value='chabuddy')],
    ):
        assert self.deployment.get_container_env() == [
            V1EnvVar(name='mc', value='grindah'),
            V1EnvVar(name='dj', value='beats'),
            V1EnvVar(name='manager', value='chabuddy'),
        ]
def test_get_kubernetes_environment(self):
    """The injected k8s environment must expose the pod IP variable."""
    env_names = [env.name for env in self.deployment.get_kubernetes_environment()]
    assert 'PAASTA_POD_IP' in env_names
def test_get_resource_requirements(self):
    """CPU limit = requested cpus + burst allowance; memory is mirrored."""
    with mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_cpus', autospec=True,
        return_value=0.3,
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_cpu_burst_add', autospec=True,
        return_value=1,
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_mem', autospec=True,
        return_value=2048,
    ):
        expected = V1ResourceRequirements(
            limits={'cpu': 1.3, 'memory': '2048Mi'},
            requests={'cpu': 0.3, 'memory': '2048Mi'},
        )
        assert self.deployment.get_resource_requirements() == expected
def test_get_kubernetes_containers(self):
    """The main container is assembled from the mocked config getters and is
    followed by whatever get_sidecar_containers returns."""
    with mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_docker_url', autospec=True,
    ) as mock_get_docker_url, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_cmd', autospec=True,
    ) as mock_get_cmd, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_args', autospec=True,
    ) as mock_get_args, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_resource_requirements', autospec=True,
    ) as mock_get_resource_requirements, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_container_env', autospec=True,
    ) as mock_get_container_env, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_service_name', autospec=True,
        return_value='kurupt',
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_instance_name', autospec=True,
        return_value='fm',
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_volume_mounts', autospec=True,
    ) as mock_get_volume_mounts, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sidecar_containers', autospec=True,
        return_value=['mock_sidecar'],
    ):
        mock_system_config = mock.Mock()
        mock_docker_volumes: Sequence[DockerVolume] = []
        mock_aws_ebs_volumes: Sequence[AwsEbsVolume] = []
        expected = [
            V1Container(
                args=mock_get_args.return_value,
                command=mock_get_cmd.return_value,
                env=mock_get_container_env.return_value,
                resources=mock_get_resource_requirements.return_value,
                image=mock_get_docker_url.return_value,
                # pre-stop sleep gives the mesh time to drain connections
                lifecycle=V1Lifecycle(
                    pre_stop=V1Handler(
                        _exec=V1ExecAction(
                            command=[
                                '/bin/sh',
                                '-c',
                                'sleep 30',
                            ],
                        ),
                    ),
                ),
                # liveness probe is derived from the http healthcheck config
                liveness_probe=V1Probe(
                    failure_threshold=30,
                    http_get=V1HTTPGetAction(
                        path='/status',
                        port=8888,
                        scheme='HTTP',
                    ),
                    initial_delay_seconds=60,
                    period_seconds=10,
                    timeout_seconds=10,
                ),
                name='kurupt-fm',
                ports=[V1ContainerPort(container_port=8888)],
                volume_mounts=mock_get_volume_mounts.return_value,
            ), 'mock_sidecar',
        ]
        service_namespace_config = mock.Mock()
        service_namespace_config.get_mode.return_value = 'http'
        service_namespace_config.get_healthcheck_uri.return_value = '/status'
        assert self.deployment.get_kubernetes_containers(
            docker_volumes=mock_docker_volumes,
            system_paasta_config=mock_system_config,
            aws_ebs_volumes=mock_aws_ebs_volumes,
            service_namespace_config=service_namespace_config,
        ) == expected
def test_get_liveness_probe(self):
liveness_probe = V1Probe(
failure_threshold=30,
http_get=V1HTTPGetAction(
path='/status',
port=8888,
scheme='HTTP',
),
initial_delay_seconds=60,
period_seconds=10,
timeout_seconds=10,
)
service_namespace_config = mock.Mock()
service_namespace_config.get_mode.return_value = 'http'
service_namespace_config.get_healthcheck_uri.return_value = '/status'
assert self.deployment.get_liveness_probe(service_namespace_config) == liveness_probe
def test_get_liveness_probe_non_smartstack(self):
service_namespace_config = mock.Mock()
service_namespace_config.get_mode.return_value = None
assert self.deployment.get_liveness_probe(service_namespace_config) is None
def test_get_liveness_probe_numbers(self):
liveness_probe = V1Probe(
failure_threshold=1,
http_get=V1HTTPGetAction(
path='/status',
port=8888,
scheme='HTTP',
),
initial_delay_seconds=2,
period_seconds=3,
timeout_seconds=4,
)
service_namespace_config = mock.Mock()
service_namespace_config.get_mode.return_value = 'http'
service_namespace_config.get_healthcheck_uri.return_value = '/status'
self.deployment.config_dict['healthcheck_max_consecutive_failures'] = 1
self.deployment.config_dict['healthcheck_grace_period_seconds'] = 2
self.deployment.config_dict['healthcheck_interval_seconds'] = 3
self.deployment.config_dict['healthcheck_timeout_seconds'] = 4
assert self.deployment.get_liveness_probe(service_namespace_config) == liveness_probe
def test_get_liveness_probe_tcp_socket(self):
liveness_probe = V1Probe(
failure_threshold=30,
tcp_socket=V1TCPSocketAction(
port=8888,
),
initial_delay_seconds=60,
period_seconds=10,
timeout_seconds=10,
)
service_namespace_config = mock.Mock()
service_namespace_config.get_mode.return_value = 'tcp'
assert self.deployment.get_liveness_probe(service_namespace_config) == liveness_probe
def test_get_liveness_probe_cmd(self):
liveness_probe = V1Probe(
failure_threshold=30,
_exec=V1ExecAction(
command='/bin/true',
),
initial_delay_seconds=60,
period_seconds=10,
timeout_seconds=10,
)
service_namespace_config = mock.Mock()
service_namespace_config.get_mode.return_value = 'cmd'
self.deployment.config_dict['healthcheck_cmd'] = '/bin/true'
assert self.deployment.get_liveness_probe(service_namespace_config) == liveness_probe
def test_get_pod_volumes(self):
mock_docker_volumes = [
{'hostPath': '/nail/blah', 'containerPath': '/nail/foo'},
{'hostPath': '/nail/thing', 'containerPath': '/nail/bar'},
]
mock_aws_ebs_volumes = [
{'volume_id': 'vol-ZZZZZZZZZZZZZZZZZ', 'fs_type': 'ext4', 'container_path': '/nail/qux'},
]
expected_volumes = [
V1Volume(
host_path=V1HostPathVolumeSource(
path='/nail/blah',
),
name='host--slash-nailslash-blah',
),
V1Volume(
host_path=V1HostPathVolumeSource(
path='/nail/thing',
),
name='host--slash-nailslash-thing',
),
V1Volume(
aws_elastic_block_store=V1AWSElasticBlockStoreVolumeSource(
volume_id='vol-ZZZZZZZZZZZZZZZZZ',
fs_type='ext4',
read_only=False,
),
name='aws-ebs--vol-ZZZZZZZZZZZZZZZZZ',
),
]
assert self.deployment.get_pod_volumes(
docker_volumes=mock_docker_volumes,
aws_ebs_volumes=mock_aws_ebs_volumes,
) == expected_volumes
def test_get_volume_mounts(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_volume_name', autospec=True,
return_value='some-volume',
):
mock_docker_volumes = [
{'hostPath': '/nail/blah', 'containerPath': '/nail/foo'},
{'hostPath': '/nail/thing', 'containerPath': '/nail/bar', 'mode': 'RW'},
]
mock_aws_ebs_volumes = [
{'volume_id': 'vol-ZZZZZZZZZZZZZZZZZ', 'fs_type': 'ext4', 'container_path': '/nail/qux'},
]
mock_persistent_volumes = [
{'container_path': '/blah', 'mode': 'RW'},
]
expected_volumes = [
V1VolumeMount(
mount_path='/nail/foo',
name='some-volume',
read_only=True,
),
V1VolumeMount(
mount_path='/nail/bar',
name='some-volume',
read_only=False,
),
V1VolumeMount(
mount_path='/nail/qux',
name='some-volume',
read_only=True,
),
V1VolumeMount(
mount_path='/blah',
name='some-volume',
read_only=False,
),
]
assert self.deployment.get_volume_mounts(
docker_volumes=mock_docker_volumes,
aws_ebs_volumes=mock_aws_ebs_volumes,
persistent_volumes=mock_persistent_volumes,
) == expected_volumes
def test_get_sanitised_service_name(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_service', autospec=True,
return_value='my_service',
):
assert self.deployment.get_sanitised_service_name() == 'my--service'
def test_get_sanitised_instance_name(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_instance', autospec=True,
return_value='my_instance',
):
assert self.deployment.get_sanitised_instance_name() == 'my--instance'
def test_get_desired_instances(self):
with mock.patch(
'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_aws_ebs_volumes', autospec=True,
) as mock_get_aws_ebs_volumes:
mock_get_aws_ebs_volumes.return_value = []
assert self.deployment.get_desired_instances() == 3
mock_get_aws_ebs_volumes.return_value = ['some-ebs-vol']
with pytest.raises(Exception):
self.deployment.get_desired_instances()
    def test_format_kubernetes_app_dict(self):
        """format_kubernetes_app builds a V1Deployment for stateless services
        and a V1StatefulSet when persistent volumes are configured; a failing
        deployment-strategy lookup surfaces as InvalidKubernetesConfig, and
        both object kinds get a 'config_sha' label stamped on them.
        """
        with mock.patch(
            'paasta_tools.kubernetes_tools.load_system_paasta_config', autospec=True,
        ) as mock_load_system_config, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_docker_url', autospec=True,
        ) as mock_get_docker_url, mock.patch(
            'paasta_tools.kubernetes_tools.get_code_sha_from_dockerurl', autospec=True,
        ), mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_service_name', autospec=True,
            return_value='kurupt',
        ), mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_instance_name', autospec=True,
            return_value='fm',
        ), mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_service', autospec=True,
        ) as mock_get_service, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_instance', autospec=True,
        ) as mock_get_instance, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_desired_instances', autospec=True,
        ) as mock_get_instances, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_deployment_strategy_config', autospec=True,
        ) as mock_get_deployment_strategy_config, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_volume_name', autospec=True,
        ), mock.patch(
            'paasta_tools.kubernetes_tools.get_config_hash', autospec=True,
        ) as mock_get_config_hash, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_force_bounce', autospec=True,
        ) as mock_get_force_bounce, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.sanitize_for_config_hash', autospec=True,
        ) as mock_sanitize_for_config_hash, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_persistent_volumes', autospec=True,
        ) as mock_get_persistent_volumes, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_volume_claim_templates', autospec=True,
        ) as mock_get_volumes_claim_templates, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_pod_template_spec', autospec=True,
        ) as mock_get_pod_template_spec, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_kubernetes_metadata', autospec=True,
        ) as mock_get_kubernetes_metadata:
            # Case 1: no persistent volumes -> plain Deployment.
            mock_get_persistent_volumes.return_value = []
            ret = self.deployment.format_kubernetes_app()
            assert mock_load_system_config.called
            assert mock_get_docker_url.called
            # The config hash must be computed over the sanitised config,
            # keyed on the force_bounce value.
            mock_get_config_hash.assert_called_with(
                mock_sanitize_for_config_hash.return_value,
                force_bounce=mock_get_force_bounce.return_value,
            )
            expected = V1Deployment(
                api_version='apps/v1',
                kind='Deployment',
                metadata=mock_get_kubernetes_metadata.return_value,
                spec=V1DeploymentSpec(
                    replicas=mock_get_instances.return_value,
                    selector=V1LabelSelector(
                        match_labels={
                            'instance': mock_get_instance.return_value,
                            'service': mock_get_service.return_value,
                        },
                    ),
                    strategy=mock_get_deployment_strategy_config.return_value,
                    template=mock_get_pod_template_spec.return_value,
                ),
            )
            assert ret == expected
            # The config_sha label is set on both the object and its pod template.
            ret.metadata.labels.__setitem__.assert_called_with('config_sha', mock_get_config_hash.return_value)
            ret.spec.template.metadata.labels.__setitem__.assert_called_with(
                'config_sha',
                mock_get_config_hash.return_value,
            )
            # Case 2: a bad bounce method is wrapped in InvalidKubernetesConfig.
            mock_get_deployment_strategy_config.side_effect = Exception("Bad bounce method")
            with pytest.raises(InvalidKubernetesConfig):
                self.deployment.format_kubernetes_app()
            # Case 3: persistent volumes -> StatefulSet with claim templates.
            mock_get_persistent_volumes.return_value = [mock.Mock()]
            ret = self.deployment.format_kubernetes_app()
            expected = V1StatefulSet(
                api_version='apps/v1',
                kind='StatefulSet',
                metadata=mock_get_kubernetes_metadata.return_value,
                spec=V1StatefulSetSpec(
                    service_name='kurupt-fm',
                    replicas=mock_get_instances.return_value,
                    selector=V1LabelSelector(
                        match_labels={
                            'instance': mock_get_instance.return_value,
                            'service': mock_get_service.return_value,
                        },
                    ),
                    template=mock_get_pod_template_spec.return_value,
                    volume_claim_templates=mock_get_volumes_claim_templates.return_value,
                ),
            )
            assert ret == expected
            ret.metadata.labels.__setitem__.assert_called_with('config_sha', mock_get_config_hash.return_value)
            ret.spec.template.metadata.labels.__setitem__.assert_called_with(
                'config_sha',
                mock_get_config_hash.return_value,
            )
    def test_get_pod_template_spec(self):
        """get_pod_template_spec labels the pod with git_sha/service/instance
        and wires containers and volumes from the mocked helpers.
        """
        with mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_volumes', autospec=True,
        ) as mock_get_volumes, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_service', autospec=True,
        ) as mock_get_service, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_instance', autospec=True,
        ) as mock_get_instance, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_kubernetes_containers', autospec=True,
        ) as mock_get_kubernetes_containers, mock.patch(
            'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_pod_volumes', autospec=True,
            return_value=[],
        ) as mock_get_pod_volumes:
            ret = self.deployment.get_pod_template_spec(code_sha='aaaa123', system_paasta_config=mock.Mock())
            assert mock_get_pod_volumes.called
            assert mock_get_volumes.called
            assert ret == V1PodTemplateSpec(
                metadata=V1ObjectMeta(
                    labels={
                        'git_sha': 'aaaa123',
                        'instance': mock_get_instance.return_value,
                        'service': mock_get_service.return_value,
                    },
                ),
                spec=V1PodSpec(
                    containers=mock_get_kubernetes_containers.return_value,
                    restart_policy='Always',
                    volumes=[],
                ),
            )
def test_get_kubernetes_metadata(self):
| |
command=self._restraint_by_mvz)
self.mvz_box.config(width=40)
# self.mvz_sap_label = tk.Label(row1_cf, text='SAPmvz', padx=10)
# self.mvz_sap = tk.Label(row1_cf, padx=10, bg='lightgray', width=11)
self.menubutton = tk.Menubutton(row1_cf, text="Выбрать адреса договора",
indicatoron=True, borderwidth=1,
relief="raised")
self.menu_choice_mvz = tk.Menu(self.menubutton, tearoff=False)
self.menubutton.configure(menu=self.menu_choice_mvz)
self.choices = {}
for choice in self.mvz.keys():
self.choices[choice] = tk.IntVar(value=0)
self.menu_choice_mvz.add_checkbutton(label=choice,
variable=self.choices[choice],
onvalue=1, offvalue=0,
command=self._mvz_choice_list)
self.square = StringSumVar()
self.square.set('0,00')
self.square_label = tk.Label(row1_cf, text='Площадь, м²')
vcmd = (self.register(self._validate_sum))
self.square_entry = tk.Entry(row1_cf, name='square_entry', width=18,
textvariable=self.square, validate='all',
validatecommand=(vcmd, '%P')
)
self.square_entry.bind("<FocusIn>", self._on_focus_in_format_sum)
self.square_entry.bind("<FocusOut>", self._on_focus_out_format_sum)
self._row1_pack()
# Second Fill Frame
row2_cf = tk.Frame(self, name='row2_cf', padx=15)
self.type_business_label = tk.Label(row2_cf, text='Тип бизнеса', padx=7)
self.type_business_box = ttk.Combobox(row2_cf, width=20,
state='readonly')
self.type_business_box['values'] = self.type_business
self.type_business_box.configure(state="normal")
self.date_main_label_start = tk.Label(row2_cf, text='Договор с:',
padx=12)
self.date_main_start = tk.StringVar()
self.date_main_contract_start = DateEntry(row2_cf, width=16,
state='readonly',
textvariable=self.date_main_start,
font=('Arial', 9),
selectmode='day',
borderwidth=2,
locale='ru_RU')
self.square_cost = StringSumVar()
self.square_cost.set('0,00')
self.square_cost_label = tk.Label(row2_cf, text='Цена за 1м², грн')
vcmd = (self.register(self._validate_sum))
self.square_cost_entry = tk.Entry(row2_cf, name='square_cost_entry',
width=18,
textvariable=self.square_cost,
validate='all',
validatecommand=(vcmd, '%P')
)
self.square_cost_entry.bind("<FocusIn>", self._on_focus_in_format_sum)
# Change format price to max decimal format
self.square_cost_entry.bind("<FocusOut>",
self._on_focus_out_format_sum_decimal)
self._row2_pack()
# Third Fill Frame
row3_cf = tk.Frame(self, name='row3_cf', padx=15)
self.num_main_contract = tk.Label(row3_cf, text='№ договора', padx=0)
self.num_main_contract_entry = tk.Entry(row3_cf, width=23)
self.date_main_label_end = tk.Label(row3_cf, text='Договор по:', padx=7)
self.date_main_end = tk.StringVar()
self.date_main_contract_end = DateEntry(row3_cf, width=16,
state='readonly',
textvariable=self.date_main_end,
font=('Arial', 9),
selectmode='day', borderwidth=2,
locale='ru_RU')
self.sum_extra_label = tk.Label(row3_cf,
text='Сумма экспл. без НДС, грн',
padx=3)
self.sum_extra_total = StringSumVar()
self.sum_extra_total.set('0,00')
vcmd = (self.register(self._validate_sum))
self.sum_extra_entry = tk.Entry(row3_cf, name='sum_extra_entry',
width=18,
textvariable=self.sum_extra_total,
validate='all',
validatecommand=(vcmd, '%P')
)
self.sum_extra_entry.bind("<FocusIn>", self._on_focus_in_format_sum)
self.sum_extra_entry.bind("<FocusOut>", self._on_focus_out_format_sum)
self._row3_pack()
# Fourth Fill Frame
row4_cf = tk.Frame(self, name='row4_cf', padx=15)
self.num_add_contract = tk.Label(row4_cf, text='№ доп.дог. ', padx=0)
self.num_add_contract_entry = tk.Entry(row4_cf, width=23)
self.date_add_label = tk.Label(row4_cf, text='Дата доп.', padx=7)
self.date_add = tk.StringVar()
self.date_add_contract = DateEntry(row4_cf, width=16, state='readonly',
textvariable=self.date_add,
font=('Arial', 9),
selectmode='day', borderwidth=2,
locale='ru_RU')
self.sum_label = tk.Label(row4_cf, text='Сумма всего без НДС, грн',
padx=3)
self.sumtotal = tk.StringVar(row4_cf, value='0.00')
vcmd = (self.register(self._validate_sum))
# self.sum_entry = tk.Entry(row4_cf, textvariable=self.sumtotal, width=18)
self.sum_entry = tk.Entry(row4_cf, name='sum_entry',
width=18,
textvariable=self.sumtotal,
validate='all',
validatecommand=(vcmd, '%P')
)
self.sum_entry.config(background='lightgrey')
# self.sum_entry.config(state='disabled')
self.sum_entry.bind("<FocusIn>", self._on_focus_in_format_sum)
self.sum_entry.bind("<FocusOut>", self._on_focus_out_format_sum)
self._row4_pack()
# Fifth Fill Frame
row5_cf = tk.Frame(self, name='row5_cf', padx=15)
self.contragent_label = tk.Label(row5_cf, text='Арендодатель')
self.contragent_entry = tk.Entry(row5_cf, width=23)
self.date_start_label = tk.Label(row5_cf, text='Период с:', padx=17)
self.date_start = tk.StringVar()
self.date_start_entry = DateEntry(row5_cf, width=16, state='readonly',
textvariable=self.date_start,
font=('Arial', 9),
selectmode='day', borderwidth=2,
locale='ru_RU')
self.nds_label = tk.Label(row5_cf, text='Ставка НДС', padx=0)
self.nds = tk.IntVar()
self.nds.set(20)
self.nds20 = ttk.Radiobutton(row5_cf, text="20 %", variable=self.nds,
value=20)
self.nds0 = ttk.Radiobutton(row5_cf, text="0 %", variable=self.nds,
value=0)
self._row5_pack()
# Six Fill Frame
row6_cf = tk.Frame(self, name='row6_cf', padx=15)
self.okpo_label = tk.Label(row6_cf, text='ЕГРПОУ ')
self.okpo_entry = tk.Entry(row6_cf, width=23)
self.date_finish_label = tk.Label(row6_cf, text='Период по:', padx=16)
self.date_finish = tk.StringVar()
self.date_finish_entry = DateEntry(row6_cf, width=16, state='readonly',
textvariable=self.date_finish,
font=('Arial', 9),
selectmode='day', borderwidth=2,
locale='ru_RU')
self.file_label = tk.Label(row6_cf, text='Файл не выбран')
bt_upload = ttk.Button(row6_cf, text="Выбрать файл", width=20,
command=self._file_opener,
style='ButtonGreen.TButton')
bt_upload.pack(side=tk.RIGHT, padx=15, pady=0)
# Text Frame
text_cf = ttk.LabelFrame(self, text=' Комментарий к договору ',
name='text_cf')
self.customFont = tkFont.Font(family="Arial", size=10)
self.desc_text = tk.Text(text_cf,
font=self.customFont) # input and output box
self.desc_text.configure(width=115)
self.desc_text.pack(in_=text_cf, expand=True)
self._row6_pack()
# Bottom Frame with buttons
bottom_cf = tk.Frame(self, name='bottom_cf')
bt3 = ttk.Button(bottom_cf, text="Назад", width=10,
# command=self._deselect_checked_mvz)
# command=self.button_back(controller))
command=lambda: controller._show_frame('PreviewForm'))
bt3.pack(side=tk.RIGHT, padx=15, pady=10)
bt1 = ttk.Button(bottom_cf, text="Обновить", width=10,
command=self._update_request,
style='ButtonGreen.TButton')
bt1.pack(side=tk.RIGHT, padx=15, pady=10)
# Pack frames
top.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
bottom_cf.pack(side=tk.BOTTOM, fill=tk.X)
row1_cf.pack(side=tk.TOP, fill=tk.X, pady=5)
row2_cf.pack(side=tk.TOP, fill=tk.X, pady=5)
row3_cf.pack(side=tk.TOP, fill=tk.X, pady=5)
row4_cf.pack(side=tk.TOP, fill=tk.X, pady=5)
row5_cf.pack(side=tk.TOP, fill=tk.X, pady=5)
row6_cf.pack(side=tk.TOP, fill=tk.X, pady=5)
text_cf.pack(side=tk.TOP, fill=tk.X, expand=True, padx=15, pady=15)
def _mvz_choice_list(self):
self.mvz_choice_list = []
for name, var in self.choices.items():
if var.get() == 1:
self.mvz_choice_list.append(self.get_mvzID(name))
# Deselect checked row in menu (destroy and create menubutton again)
def _deselect_checked_mvz(self):
self.mvz_choice_list.clear()
self.menu_choice_mvz.destroy()
self.menu_choice_mvz = tk.Menu(self.menubutton, tearoff=False)
self.menubutton.configure(menu=self.menu_choice_mvz)
for choice in self.mvz.keys():
self.choices[choice] = tk.IntVar(value=0)
self.menu_choice_mvz.add_checkbutton(label=choice,
variable=self.choices[choice],
onvalue=1, offvalue=0,
command=self._mvz_choice_list)
def _file_opener(self):
filename = fd.askopenfilename()
if filename:
copy2(filename, UPLOAD_PATH)
path = Path(filename)
self.upload_filename = path.name
self.file_label.config(text='Файл добавлен')
def _remove_upload_file(self):
os.remove(UPLOAD_PATH + '\\' + self.upload_filename)
self.file_label.config(text='Файл не выбран')
def _multiply_cost_square(self):
square_get = float(self.square.get_float_form()
if self.square_entry.get() else 0)
square_cost_get = float(self.square_cost.get_float_form()
if self.square_cost_entry.get() else 0)
total_square_cost = square_get * square_cost_get
if total_square_cost:
self.sum_entry.delete(0, tk.END)
self.sum_entry.insert(0, total_square_cost)
    def _clear(self):
        """Reset every widget on the form to its pristine state."""
        # Re-enable widgets first so the delete/insert calls below work even
        # if they were previously disabled.
        self.type_business_box.configure(state="readonly")
        self.num_main_contract_entry.configure(state="normal")
        self.date_main_contract_start.configure(state="normal")
        self.date_main_contract_end.configure(state="normal")
        self.contragent_entry.configure(state="normal")
        self.num_main_contract_entry.delete(0, tk.END)
        self.contragent_entry.delete(0, tk.END)
        self.square.set('0,00')
        # square_cost is reset through the entry widget rather than its
        # StringSumVar, unlike square above.
        self.square_cost_entry.delete(0, tk.END)
        self.square_cost_entry.insert(0, '0,00')
        self.num_main_contract_entry.delete(0, tk.END)
        self.num_add_contract_entry.delete(0, tk.END)
        self.okpo_entry.delete(0, tk.END)
        self.sum_extra_total.set('0,00')
        self.sumtotal.set('0,00')
        self.nds.set(20)
        self.desc_text.delete("1.0", tk.END)
        # All date pickers snap back to today.
        self.date_start_entry.set_date(datetime.now())
        self.date_finish_entry.set_date(datetime.now())
        self.date_add_contract.set_date(datetime.now())
        self.date_main_contract_start.set_date(datetime.now())
        self.date_main_contract_end.set_date(datetime.now())
        self.file_label.config(text='Файл не выбран')
        self._deselect_checked_mvz()
    def _fill_from_UpdateForm(self, mvz, id, num_main_contract,
                              date_main_contract_start,
                              date_main_contract_end, date_add_contract,
                              date_add_contract_end,
                              add_contract_num, date_add_contract_start, square,
                              price1m2, cost_extra, cost, contragent,
                              business, okpo, description, filename):
        """Populate the form with the request chosen in PreviewForm.

        Called when the "Редактировать" (edit) button in PreviewForm is
        activated; widget contents are overwritten with the stored values.
        """
        self.contract_id = id
        self.mvz_current.set(mvz)
        self.type_business_box.set(business)
        self.num_main_contract_entry.delete(0, tk.END)
        self.num_main_contract_entry.insert(0, num_main_contract)
        self.num_add_contract_entry.insert(0, add_contract_num)
        # Dates arrive as 'YYYY-MM-DD' strings from the DB; convert before
        # handing them to the DateEntry widgets.
        self.date_add_contract.set_date(
            self._convert_str_date(date_add_contract))
        self.date_start_entry.set_date(
            self._convert_str_date(date_add_contract_start))
        self.date_finish_entry.set_date(
            self._convert_str_date(date_add_contract_end))
        self.date_main_contract_start.set_date(
            self._convert_str_date(date_main_contract_start))
        self.date_main_contract_end.set_date(
            self._convert_str_date(date_main_contract_end))
        self.mvz_sap = self.get_mvzSAP(self.mvz_current.get())
        self.contragent_entry.delete(0, tk.END)
        self.contragent_entry.insert(0, contragent)
        self.okpo_entry.insert(0, okpo)
        self.square_cost.set(price1m2)
        self.square.set(square)
        # NOTE(review): 'cost' is inserted into the extra-sum entry and
        # 'cost_extra' into the total-sum entry — the names look swapped;
        # confirm against the caller in PreviewForm before changing.
        self.sum_extra_entry.insert(0, cost)
        self.sum_entry.insert(0, cost_extra)
        self.desc_text.insert("1.0", description)
        self.fill_filename = filename
        if filename:
            self.file_label.config(text='Файл добавлен')
        # Keep the displayed total consistent with area * price.
        self._multiply_cost_square()
def _convert_date(self, date, output=None):
""" Take date and convert it into output format.
If output is None datetime object is returned.
date: str in format '%d[./]%m[./]%y' or '%d[./]%m[./]%Y'.
output: str or None, output format.
"""
date = date.replace('/', '.')
try:
dat = datetime.strptime(date, '%d.%m.%y')
except ValueError:
dat = datetime.strptime(date, '%d.%m.%Y')
if output:
return dat.strftime(output)
return dat
    def _update_request(self):
        """Collect the form values, validate them and persist the update.

        On success the form is cleared and PreviewForm is shown; on failure
        an error dialog is displayed and the form is left as-is.
        """
        messagetitle = 'Обновление договора'
        sumtotal = self.sum_entry.get()
        # Sums are typed with comma decimal separators; StringSumVar
        # converts them to float-parsable form. Empty entries count as 0.
        sum_extra_total = float(self.sum_extra_total.get_float_form()
                                if self.sum_extra_entry.get() else 0)
        square = float(self.square.get_float_form()
                       if self.square_entry.get() else 0)
        price_meter = float(self.square_cost.get_float_form()
                            if self.square_cost_entry.get() else 0)
        is_validated = self._validate_request_creation(messagetitle, sumtotal)
        if not is_validated:
            return
        update_request = {'id': self.contract_id,
                          'mvz': self.mvz_sap,
                          # self.mvz_sap.cget('text') or None,
                          'start_date': self._convert_date(
                              self.date_start_entry.get()),
                          'finish_date': self._convert_date(
                              self.date_finish_entry.get()),
                          'sum_extra_total': sum_extra_total,
                          'sumtotal': sumtotal,
                          'nds': self.nds.get(),
                          'square': square,
                          'contragent': self.contragent_entry.get().strip().replace(
                              '\n', '') or None,
                          'okpo': self.okpo_entry.get(),
                          'num_main_contract': self.num_main_contract_entry.get(),
                          'num_add_contract': self.num_add_contract_entry.get(),
                          'date_main_contract_start': self._convert_date(
                              self.date_main_contract_start.get()),
                          'date_add_contract': self._convert_date(
                              self.date_add_contract.get()),
                          'text': self.desc_text.get("1.0", tk.END).strip(),
                          'filename': self.fill_filename if self.fill_filename else self.upload_filename,
                          'date_main_contract_end': self._convert_date(
                              self.date_main_contract_end.get()),
                          'price_meter': price_meter,
                          'type_business': self.type_business_box.get(),
                          'mvz_choice_list': ','.join(
                              map(str, self.mvz_choice_list))
                          }
        update_success = self.conn.update_request(userID=self.userID,
                                                  **update_request)
        # conn.update_request returns 1 on success.
        if update_success == 1:
            messagebox.showinfo(
                messagetitle, 'Договор обновлен'
            )
            self._clear()
            self.controller._show_frame('PreviewForm')
        else:
            # self._remove_upload_file()
            messagebox.showerror(
                messagetitle, 'Произошла ошибка при обновлении договора'
            )
    # MVZ, Contract, Lessor, EGRPOU (company id), Description
def _convert_str_date(self, date):
""" Take str and convert it into date format.
date: str in format '%d[./]%m[./]%y' or '%d[./]%m[./]%Y'.
"""
date_time_str = date
date_time_obj = dt.datetime.strptime(date_time_str, '%Y-%m-%d')
return date_time_obj.date()
    def _restraint_by_mvz(self, event):
        """ Shows mvz_sap that corresponds to chosen MVZ and restraint offices.
        If 1 office is available, choose it, otherwise make box active.

        event: tk callback argument; unused in the body.
        """
        # tcl language has no notion of None or a null value, so use '' instead
        self.mvz_sap = self.get_mvzSAP(self.mvz_current.get()) or ''
    def _row1_pack(self):
        """Lay out row 1: MVZ selector, address menu and the area entry."""
        self.mvz_label.pack(side=tk.LEFT)
        self.mvz_box.pack(side=tk.LEFT, padx=10)
        # self.mvz_sap_label.pack(side=tk.LEFT)
        # self.mvz_sap.pack(side=tk.LEFT)
        self.menubutton.pack(side=tk.LEFT, padx=10)
        self.square_entry.pack(side=tk.RIGHT, padx=10)
        self.square_label.pack(side=tk.RIGHT, padx=10)
    def _row2_pack(self):
        """Lay out row 2: business type, contract start date, price per m2."""
        self.type_business_label.pack(side=tk.LEFT)
        self.type_business_box.pack(side=tk.LEFT, padx=17)
        self.date_main_label_start.pack(side=tk.LEFT)
        self.date_main_contract_start.pack(side=tk.LEFT, padx=0)
        self.square_cost_entry.pack(side=tk.RIGHT, padx=10)
        self.square_cost_label.pack(side=tk.RIGHT, padx=10)
    def _row3_pack(self):
        """Lay out row 3: contract number, contract end date, extra sum."""
        self.num_main_contract.pack(side=tk.LEFT, padx=7)
        self.num_main_contract_entry.pack(side=tk.LEFT, padx=19)
        self.date_main_label_end.pack(side=tk.LEFT)
        self.date_main_contract_end.pack(side=tk.LEFT, padx=0)
        self.sum_extra_entry.pack(side=tk.RIGHT, padx=11)
        self.sum_extra_label.pack(side=tk.RIGHT, padx=0)
    def _row4_pack(self):
        """Lay out row 4: addendum number and date, total sum entry."""
        self.num_add_contract.pack(side=tk.LEFT, padx=7)
        self.num_add_contract_entry.pack(side=tk.LEFT, padx=19)
        self.date_add_label.pack(side=tk.LEFT)
        self.date_add_contract.pack(side=tk.LEFT, padx=18)
        self.sum_entry.pack(side=tk.RIGHT, padx=11)
        self.sum_label.pack(side=tk.RIGHT, padx=0)
    def _row5_pack(self):
        """Lay out row 5: lessor, period start date and VAT radio buttons."""
        self.contragent_label.pack(side=tk.LEFT, padx=7)
        self.contragent_entry.pack(side=tk.LEFT, padx=7)
        self.date_start_label.pack(side=tk.LEFT)
        self.date_start_entry.pack(side=tk.LEFT, padx=6)
        self.nds0.pack(side=tk.RIGHT, padx=7)
        self.nds20.pack(side=tk.RIGHT, padx=8)
        self.nds_label.pack(side=tk.RIGHT)
    def _row6_pack(self):
        """Lay out row 6: company id, period end date and file label."""
        self.okpo_label.pack(side=tk.LEFT, padx=7)
        self.okpo_entry.pack(side=tk.LEFT, padx=7)
        self.date_finish_label.pack(side=tk.LEFT)
        self.date_finish_entry.pack(side=tk.LEFT, padx=0)
        self.file_label.pack(side=tk.RIGHT, padx=0)
    def _top_pack(self):
        """Lay out the top frame holding the main header label."""
        self.main_label.pack(side=tk.TOP, expand=False, anchor=tk.NW)
def _validate_request_creation(self, messagetitle, sumtotal):
""" Check if all fields are filled properly. """
if not self.mvz_current.get():
messagebox.showerror(
messagetitle, 'Не выбран объект'
)
return False
if not self.mvz_choice_list:
messagebox.showerror(
messagetitle, 'Не выбраны адреса к договору'
)
return False
if not self.type_business_box.get():
messagebox.showerror(
messagetitle, 'Не выбран тип бизнеса'
)
return False
if not self.num_main_contract_entry.get():
messagebox.showerror(
messagetitle, 'Не указан номер основного договора'
)
return False
if not self.num_add_contract_entry.get():
messagebox.showerror(
messagetitle, 'Не указан номер дополнительного соглашения'
)
return False
if not self.contragent_entry.get():
messagebox.showerror(
messagetitle, 'Не указан арендодатель'
)
return False
if ast.literal_eval(self.square_entry.get()[0]) == 0:
messagebox.showerror(
messagetitle, 'Не указана площадь аренды'
)
return False
if ast.literal_eval(self.square_cost_entry.get()[0]) == 0:
messagebox.showerror(
messagetitle, 'Не указана стоимость за 1 кв.м.'
)
return False
return True
class PreviewForm(PaymentFrame):
def __init__(self, parent, controller, connection, user_info,
mvz, type_business, status_list, **kwargs):
super().__init__(parent, controller, connection, user_info, mvz)
self.statusID, self.status_list = zip(*[(None, 'Все'), ] + status_list)
self.type_businessID, self.type_business = zip(
*[(None, 'Все'), ] + type_business)
# print(self.statusID, self.status_list)
# List of functions to get payments
# determines what payments will be shown when refreshing
self.contracts_load_list = [self._get_all_contracts]
self.get_contracts = self._get_all_contracts
# Parameters for sorting
self.rows = None # store all rows for sorting and redrawing
self.sort_reversed_index = None # reverse sorting for the last sorted | |
# -*- test-case-name: vumi.transports.smpp.tests.test_smpp -*-
from datetime import datetime
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from vumi import log
from vumi.utils import get_operator_number
from vumi.transports.base import Transport
from vumi.transports.smpp.clientserver.client import (
EsmeTransceiverFactory, EsmeTransmitterFactory, EsmeReceiverFactory,
EsmeCallbacks)
from vumi.transports.smpp.clientserver.config import ClientConfig
from vumi.transports.failures import FailureMessage
from vumi.message import Message, TransportUserMessage
from vumi.persist.txredis_manager import TxRedisManager
class SmppTransport(Transport):
"""
An SMPP transport.
The SMPP transport has many configuration parameters. These are
divided up into sections below.
SMPP server account configuration options:
:type system_id: str
:param system_id:
User id used to connect to the SMPP server.
:type password: str
:param password:
Password for the system id.
:type system_type: str, optional
:param system_type:
Additional system metadata that is passed through to the SMPP server
on connect.
:type host: str
:param host:
Hostname of the SMPP server.
:type port: int
:param port:
Port the SMPP server is listening on.
:type initial_reconnect_delay: int, optional
:param initial_reconnect_delay:
Number of seconds to delay before reconnecting to the server after
being disconnected. Default is 5s. Some WASPs, e.g. Clickatell,
require a 30s delay before reconnecting. In these cases a 45s
initial_reconnect_delay is recommended.
:type split_bind_prefix: str, optional
:param split_bind_prefix:
This is the Redis prefix to use for storing things like sequence
numbers and message ids for delivery report handling.
It defaults to `<system_id>@<host>:<port>`.
*ONLY* if the connection is split into two separate binds for RX and TX
then make sure this is the same value for both binds.
This _only_ needs to be done for TX & RX since messages sent via the TX
bind are handled by the RX bind and they need to share the same prefix
for the lookup for message ids in delivery reports to work.
:type throttle_delay: float, optional
:param throttle_delay:
Delay (in seconds) before retrying a message after receiving
`ESME_RTHROTTLED`. Default 0.1
SMPP protocol configuration options:
:type interface_version: str, optional
:param interface_version:
SMPP protocol version. Default is '34' (i.e. version 3.4).
:type dest_addr_ton:
:param dest_addr_ton:
Destination TON (type of number). Default .
:type dest_addr_npi:
:param dest_addr_npi:
Destination NPI (number plan identifier). Default 1 (ISDN/E.164/E.163).
:type source_addr_ton:
:param source_addr_ton:
Source TON (type of number). Default is 0 (Unknown)
:type source_addr_npi:
:param source_addr_npi:
Source NPI (number plan identifier). Default is 0 (Unknown)
:type registered_delivery:
:param registered_delivery:
Whether to ask for delivery reports. Default 1 (request delivery
reports).
:param dict data_coding_overrides:
Overrides for data_coding character set mapping. This is useful for
setting the default encoding (0), adding additional undefined encodings
(such as 4 or 8) or overriding encodings in cases where the SMSC is
violating the spec (which happens a lot). Keys should be integers,
values should be strings containing valid Python character encoding
names.
:param bool send_long_messages:
If `True`, messages longer than 254 characters will be sent in the
`message_payload` optional field instead of the `short_message` field.
Default is `False`, simply because that maintains previous behaviour.
The list of SMPP protocol configuration options given above is not
exhaustive. Any other options specified are passed through to the
python-smpp library PDU (protocol data unit) builder.
Cellphone number routing options:
:type COUNTRY_CODE: str, optional
:param COUNTRY_CODE:
Used to translate a leading zero in a destination MSISDN into a
country code. Default '',
:type OPERATOR_PREFIX: str, optional
:param OPERATOR_PREFIX:
Nested dictionary of prefix to network name mappings. Default {} (set
network to 'UNKNOWN'). E.g. { '27': { '27761': 'NETWORK1' }}.
:type OPERATOR_NUMBER:
:param OPERATOR_NUMBER:
Dictionary of source MSISDN to use for each network listed in
OPERATOR_PREFIX. If a network is not listed, the source MSISDN
specified by the message sender is used. Default {} (always used the
from address specified by the message sender). E.g. { 'NETWORK1':
'27761234567'}.
"""
# We only want to start this after we finish connecting to SMPP.
start_message_consumer = False
# NOTE(review): aliased at class level — presumably so tests can substitute
# the reactor's scheduling call; confirm against the test suite.
callLater = reactor.callLater
def validate_config(self):
    """Parse the raw transport config into a client config and delay."""
    config = self.config
    self.client_config = ClientConfig.from_config(config)
    self.throttle_delay = float(config.get('throttle_delay', 0.1))
@inlineCallbacks
def setup_transport(self):
    """Connect to Redis and start the SMPP client connection."""
    log.msg("Starting the SmppTransport with %s" % self.config)
    # How long SMSC-id -> vumi-id mappings are kept (seconds).
    self.third_party_id_expiry = self.config.get(
        "third_party_id_expiry",
        60 * 60 * 24 * 7  # 1 week
    )
    r_config = self.config.get('redis_manager', {})
    # The default Redis key prefix uniquely identifies this bind.
    default_prefix = "%s@%s:%s" % (
        self.client_config.system_id,
        self.client_config.host,
        self.client_config.port,
    )
    r_prefix = self.config.get('split_bind_prefix', default_prefix)
    redis = yield TxRedisManager.from_config(r_config)
    self.redis = redis.sub_manager(r_prefix)
    self.r_message_prefix = "message_json"
    self.throttled = False
    self.esme_callbacks = EsmeCallbacks(
        connect=self.esme_connected,
        disconnect=self.esme_disconnected,
        submit_sm_resp=self.submit_sm_resp,
        delivery_report=self.delivery_report,
        deliver_sm=self.deliver_sm)
    if not hasattr(self, 'esme_client'):
        # start the Smpp transport (if we don't have one)
        self.factory = self.make_factory()
        reactor.connectTCP(
            self.client_config.host,
            self.client_config.port,
            self.factory)
@inlineCallbacks
def teardown_transport(self):
    """Stop reconnecting, drop the SMPP connection and close Redis."""
    if hasattr(self, 'factory'):
        self.factory.stopTrying()
        self.factory.esme.transport.loseConnection()
    # NOTE(review): uses the private _close() on the redis sub-manager;
    # confirm there is no public close API.
    yield self.redis._close()
def make_factory(self):
    """Build the ESME transceiver factory for this transport's bind."""
    factory = EsmeTransceiverFactory(
        self.client_config, self.redis, self.esme_callbacks)
    return factory
def esme_connected(self, client):
    """Callback fired once the ESME has connected and bound."""
    log.msg("ESME Connected, adding handlers")
    self.esme_client = client
    # Start the consumer
    self.unpause_connectors()
@inlineCallbacks
def handle_outbound_message(self, message):
    """Persist an outbound message, then submit it over SMPP."""
    log.debug("Consumed outgoing message %r" % (message,))
    log.debug("Unacknowledged message count: %s" % (
        (yield self.esme_client.get_unacked_count()),))
    # Store the message first so it can be recovered / nacked on failure.
    yield self.r_set_message(message)
    yield self._submit_outbound_message(message)
@inlineCallbacks
def _submit_outbound_message(self, message):
    """Send over SMPP and record sequence-number -> message-id.

    The mapping lets submit_sm_resp find the original message later.
    """
    sequence_number = yield self.send_smpp(message)
    yield self.r_set_id_for_sequence(
        sequence_number, message.payload.get("message_id"))
def esme_disconnected(self):
    """Callback fired when the ESME connection is lost."""
    log.msg("ESME Disconnected")
    self.pause_connectors()
# Redis message storing methods
def r_message_key(self, message_id):
    """Return the Redis key under which a message's JSON is stored."""
    return "{0}#{1}".format(self.r_message_prefix, message_id)
def r_set_message(self, message):
    """Store the message's JSON in Redis, keyed by its message_id."""
    message_id = message.payload['message_id']
    return self.redis.set(
        self.r_message_key(message_id), message.to_json())
def r_get_message_json(self, message_id):
    """Fetch the raw stored JSON for *message_id* (None if absent)."""
    return self.redis.get(self.r_message_key(message_id))
@inlineCallbacks
def r_get_message(self, message_id):
    """Load a stored message, deserialised, or None if not found."""
    json_string = yield self.r_get_message_json(message_id)
    result = Message.from_json(json_string) if json_string else None
    returnValue(result)
def r_delete_message(self, message_id):
    """Remove a stored message from Redis."""
    return self.redis.delete(self.r_message_key(message_id))
# Redis sequence number storing methods
def r_get_id_for_sequence(self, sequence_number):
    """Look up the vumi message id stored for an SMPP sequence number."""
    return self.redis.get(str(sequence_number))
def r_delete_for_sequence(self, sequence_number):
    """Delete the sequence-number -> message-id mapping."""
    return self.redis.delete(str(sequence_number))
def r_set_id_for_sequence(self, sequence_number, id):
    """Map an SMPP sequence number to a vumi message id."""
    return self.redis.set(str(sequence_number), id)
# Redis 3rd party id to vumi id mapping
def r_third_party_id_key(self, third_party_id):
    """Return the Redis key for an SMSC-assigned message id."""
    return "3rd_party_id#{0}".format(third_party_id)
def r_get_id_for_third_party_id(self, third_party_id):
    """Look up the vumi message id stored for an SMSC message id."""
    return self.redis.get(self.r_third_party_id_key(third_party_id))
def r_delete_for_third_party_id(self, third_party_id):
    """Delete the SMSC-id -> vumi-id mapping."""
    return self.redis.delete(
        self.r_third_party_id_key(third_party_id))
@inlineCallbacks
def r_set_id_for_third_party_id(self, third_party_id, id):
    """Store the SMSC-id mapping with an expiry (third_party_id_expiry)."""
    rkey = self.r_third_party_id_key(third_party_id)
    yield self.redis.set(rkey, id)
    yield self.redis.expire(rkey, self.third_party_id_expiry)
def _start_throttling(self):
    """Enter throttled mode: stop consuming outbound messages."""
    if not self.throttled:
        log.err("Throttling outbound messages.")
        self.throttled = True
        self.pause_connectors()
def _stop_throttling(self):
    """Leave throttled mode: resume consuming outbound messages."""
    if self.throttled:
        log.err("No longer throttling outbound messages.")
        self.throttled = False
        self.unpause_connectors()
@inlineCallbacks
def submit_sm_resp(self, *args, **kwargs):
    """Handle an SMPP submit_sm_resp: ack/nack and manage throttling.

    Expects kwargs: message_id, sequence_number, command_status.
    """
    transport_msg_id = kwargs['message_id']
    sent_sms_id = (
        yield self.r_get_id_for_sequence(kwargs['sequence_number']))
    if sent_sms_id is None:
        log.err("Sequence number lookup failed for:%s" % (
            kwargs['sequence_number'],))
    else:
        yield self.r_set_id_for_third_party_id(
            transport_msg_id, sent_sms_id)
        yield self.r_delete_for_sequence(kwargs['sequence_number'])
    # NOTE(review): if the lookup failed, sent_sms_id is still None below
    # and is passed to the ack/nack handlers — confirm this is intended.
    status = kwargs['command_status']
    if status == 'ESME_ROK':
        # The sms was submitted ok
        yield self.submit_sm_success(sent_sms_id, transport_msg_id)
        yield self._stop_throttling()
    elif status == 'ESME_RTHROTTLED':
        yield self._start_throttling()
        # Requeue the message for a later retry.
        yield self.submit_sm_throttled(sent_sms_id)
    else:
        # We have an error
        yield self.submit_sm_failure(sent_sms_id,
                                     status or 'Unspecified')
        yield self._stop_throttling()
@inlineCallbacks
def submit_sm_success(self, sent_sms_id, transport_msg_id):
    """Drop the stored copy and publish an ack for a successful submit."""
    yield self.r_delete_message(sent_sms_id)
    log.debug("Mapping transport_msg_id=%s to sent_sms_id=%s" % (
        transport_msg_id, sent_sms_id))
    log.debug("PUBLISHING ACK: (%s -> %s)" % (
        sent_sms_id, transport_msg_id))
    self.publish_ack(
        user_message_id=sent_sms_id,
        sent_message_id=transport_msg_id)
@inlineCallbacks
def submit_sm_failure(self, sent_sms_id, reason, failure_code=None):
    """Nack the message and publish a FailureMessage for a failed submit.

    :param sent_sms_id: vumi message id of the failed message.
    :param reason: SMPP command_status (or 'Unspecified').
    :param failure_code: optional failure code propagated to the
        FailureMessage.
    """
    error_message = yield self.r_get_message(sent_sms_id)
    if error_message is None:
        log.err("Could not retrieve failed message:%s" % (
            sent_sms_id))
    else:
        yield self.r_delete_message(sent_sms_id)
        yield self.publish_nack(sent_sms_id, reason)
        yield self.failure_publisher.publish_message(FailureMessage(
            message=error_message.payload,
            # BUG FIX: was hard-coded to None, silently dropping the
            # caller-supplied failure_code.
            failure_code=failure_code,
            reason=reason))
@inlineCallbacks
def submit_sm_throttled(self, sent_sms_id):
    """Reschedule a throttled message for resubmission after a delay."""
    message = yield self.r_get_message(sent_sms_id)
    if message is None:
        log.err("Could not retrieve throttled message:%s" % (
            sent_sms_id))
    else:
        # throttle_delay comes from config (see validate_config).
        self.callLater(self.throttle_delay,
                       self._submit_outbound_message, message)
def delivery_status(self, state):
    """Map an SMSC delivery-report state string to a vumi status."""
    delivered_states = (
        "DELIVRD",
        "0",  # Currently we will accept this for Yo! TODO: investigate
    )
    if state in delivered_states:
        return "delivered"
    if state == "REJECTD":
        return "failed"
    return "pending"
@inlineCallbacks
def delivery_report(self, *args, **kwargs):
    """Translate an SMSC delivery report into a vumi delivery report.

    Expects kwargs['delivery_report'] to be a dict with at least
    'done_date' (format yymmddHHMMSS), 'stat' and 'id' keys.
    """
    transport_metadata = {
        "message": kwargs['delivery_report'],
        "date": datetime.strptime(
            kwargs['delivery_report']['done_date'], "%y%m%d%H%M%S")
    }
    delivery_status = self.delivery_status(
        kwargs['delivery_report']['stat'])
    message_id = yield self.r_get_id_for_third_party_id(
        kwargs['delivery_report']['id'])
    if message_id is None:
        # The mapping may simply have expired (see third_party_id_expiry).
        log.warning("Failed to retrieve message id for delivery report."
                    " Delivery report from %s discarded."
                    % self.transport_name)
        return
    log.msg("PUBLISHING DELIV REPORT: %s %s" % (message_id,
                                                delivery_status))
    returnValue((yield self.publish_delivery_report(
        user_message_id=message_id,
        delivery_status=delivery_status,
        transport_metadata=transport_metadata)))
def deliver_sm(self, *args, **kwargs):
    """Publish an inbound SMS (or USSD) message received via deliver_sm."""
    message_type = kwargs.get('message_type', 'sms')
    message = {
        'message_id': kwargs['message_id'],
        'to_addr': kwargs['destination_addr'],
        'from_addr': kwargs['source_addr'],
        'content': kwargs['short_message'],
        'transport_type': message_type,
        'transport_metadata': {},
    }
    if message_type == 'ussd':
        # Map the SMPP session event onto vumi's session event constants;
        # an unknown event raises KeyError here (message is dropped).
        session_event = {
            'new': TransportUserMessage.SESSION_NEW,
            'continue': TransportUserMessage.SESSION_RESUME,
            'close': TransportUserMessage.SESSION_CLOSE,
        }[kwargs['session_event']]
        message['session_event'] = session_event
        session_info = kwargs.get('session_info')
        message['transport_metadata']['session_info'] = session_info
    log.msg("PUBLISHING INBOUND: %s" % (message,))
    # TODO: This logs messages that fail to serialize to JSON
    #       Usually this happens when an SMPP message has content
    #       we can't decode (e.g. data_coding == 4). We should
    #       remove the try-except once we handle such messages
    #       better.
    return self.publish_message(**message).addErrback(log.err)
def send_smpp(self, message):
log.debug("Sending SMPP message: %s" % (message))
# first do a lookup in our YAML to see if we've got a source_addr
# defined for the given | |
<reponame>mclark58/coexpression
#!/usr/bin/env python
# standard library imports
import os
import sys
import traceback
import argparse
import json
import logging
import time
import pprint
import string
import subprocess
from os import environ
from ConfigParser import ConfigParser
import re
# 3rd party imports
import requests
# KBase imports
import biokbase.workspace.client
import biokbase.Transform.script_utils as script_utils
# External transform scripts that shuttle data between TSV files and
# KBaseFeatureValues.ExpressionMatrix workspace objects.
FVE_2_TSV = 'trns_transform_KBaseFeatureValues_ExpressionMatrix_to_TSV'
# NOTE(review): 'Exspression' looks misspelled but must match the installed
# script's actual name — verify before "fixing" it.
TSV_2_FVE = 'trns_transform_TSV_Exspression_to_KBaseFeatureValues_ExpressionMatrix'

# Working directories for each pipeline stage.
RAWEXPR_DIR = 'raw_dir'
FLTRD_DIR = 'fltr_dir'
CLSTR_DIR = 'clstr_dir'
FINAL_DIR = 'final_dir'
# Well-known file names produced/consumed within those directories.
EXPRESS_FN = 'expression.tsv'
SAMPLE_FN = 'sample.tsv'
# External command-line tools invoked via subprocess.
COEX_FILTER = 'coex_filter'
COEX_CLUSTER = 'coex_cluster2'
FLTRD_FN = 'filtered.tsv'
CLSTR_FN = 'clusters.tsv'
FINAL_FN = 'filtered.json'
GENELST_FN = 'selected.tsv'
def empty_results(err_msg, expr, workspace_service_url, param, logger, ws):
    """Save *expr* back to the workspace as an empty ExpressionMatrix.

    Used for graceful termination: the narrative UI has no error handling,
    so the failure is reported via the object description instead.
    """
    expr.setdefault('description', "Filtered Expression Matrix")
    expr['description'] += (
        " : Empty Expression Matrix by '{0}' method; {1}".format(
            param['method'], err_msg))
    expr['feature_mapping'] = {}
    expr['data'] = {'row_ids': [], 'col_ids': [], 'values': []}
    obj_spec = {
        'type': 'KBaseFeatureValues.ExpressionMatrix',
        'data': expr,
        'name': param['out_expr_object_name'],
    }
    ws.save_objects({'workspace': param['workspace_name'],
                     'objects': [obj_spec]})
def empty_cluster_results(err_msg, expr, workspace_service_url, param, logger, ws):
    """Save an empty FeatureClusters object recording *err_msg*.

    Graceful-termination counterpart of ``empty_results`` for the
    clustering pipeline.
    """
    report = {
        'checkTypeDetected': '',
        'checkUsed': '',
        'checkDescriptions': [],
        'checkResults': [],
        'messages': [],
        'warnings': [],
        'errors': [err_msg],
    }
    clusters = {'feature_clusters': [], 'report': report}
    ws.save_objects({
        'workspace': param['workspace_name'],
        'objects': [{
            'type': 'KBaseFeatureValues.FeatureClusters',
            'data': clusters,
            'name': param['out_object_name'],
        }],
    })
def run_coex_cluster(workspace_service_url=None, param_file=None, level=logging.INFO, logger=None):
    """
    Narrative Job Wrapper script to execute coex_cluster2

    Args:
        workspace_service_url: A url for the KBase Workspace service
        param_file: JSON parameter file; must contain workspace_name,
            object_name and out_object_name, and may contain clustering
            tuning parameters (see the pass-through list below)
        level: Logging level, defaults to logging.INFO.
        logger: optional logger; a stderr logger is created if omitted

    Returns:
        None. Output is written back into the workspace as a
        KBaseFeatureValues.FeatureClusters object.

    Authors:
        <NAME>
    """
    # Working directories may survive from a previous run; ignore errors.
    try:
        os.makedirs(RAWEXPR_DIR)
    except:
        pass
    try:
        os.makedirs(CLSTR_DIR)
    except:
        pass
    try:
        os.makedirs(FINAL_DIR)
    except:
        pass

    if logger is None:
        logger = script_utils.stderrlogger(__file__)

    logger.info("Starting conversion of KBaseFeatureValues.ExpressionMatrix to TSV")
    # NOTE(review): token is read but unused here; the Workspace client
    # below re-reads the same environment variable directly.
    token = os.environ.get("KB_AUTH_TOKEN")

    with open(param_file) as paramh:
        param = json.load(paramh)

    # Download the expression matrix object and convert it to TSV.
    from biokbase.workspace.client import Workspace
    ws = Workspace(url=workspace_service_url, token=os.environ['KB_AUTH_TOKEN'])
    expr = ws.get_objects([{'workspace': param['workspace_name'], 'name': param['object_name']}])[0]['data']

    cmd_dowload_cvt_tsv = [FVE_2_TSV, '--workspace_service_url', workspace_service_url,
                           '--workspace_name', param['workspace_name'],
                           '--object_name', param['object_name'],
                           '--working_directory', RAWEXPR_DIR,
                           '--output_file_name', EXPRESS_FN
                           ]

    # need shell in this case because the java code is depending on finding the KBase token in the environment
    # -- copied from FVE_2_TSV
    tool_process = subprocess.Popen(" ".join(cmd_dowload_cvt_tsv), stderr=subprocess.PIPE, shell=True)
    stdout, stderr = tool_process.communicate()

    if stdout is not None and len(stdout) > 0:
        logger.info(stdout)

    if stderr is not None and len(stderr) > 0:
        logger.info(stderr)
        #raise Exception(stderr)

    logger.info("Coexpression clustering analysis")

    ## Prepare sample file
    # detect num of columns
    with open("{0}/{1}".format(RAWEXPR_DIR, EXPRESS_FN), 'r') as f:
        fl = f.readline()
    ncol = len(fl.split('\t'))

    # Synthetic sample labels 0..ncol-2 (the first column holds gene ids).
    with open("{0}/{1}".format(RAWEXPR_DIR, SAMPLE_FN), 'wt') as s:
        s.write("0")
        for j in range(1, ncol - 1):
            s.write("\t{0}".format(j))
        s.write("\n")

    ## Run coex_cluster
    cmd_coex_cluster = [COEX_CLUSTER, '-t', 'y',
                        '-i', "{0}/{1}".format(RAWEXPR_DIR, EXPRESS_FN),
                        '-o', "{0}/{1}".format(CLSTR_DIR, CLSTR_FN)]

    # Pass through any recognised clustering tuning parameters.
    for p in ['net_method', 'minRsq', 'maxmediank', 'maxpower', 'clust_method', 'minModuleSize', 'detectCutHeight']:
        if p in param:
            cmd_coex_cluster.append("--{0}".format(p))
            cmd_coex_cluster.append(str(param[p]))

    #sys.exit(2) #TODO: No error handling in narrative so we do graceful termination
    #if 'p_value' in param and 'num_features' in param:
    #  logger.error("Both of p_value and num_features cannot be defined together");
    #  sys.exit(3)

    tool_process = subprocess.Popen(cmd_coex_cluster, stderr=subprocess.PIPE)
    stdout, stderr = tool_process.communicate()

    if stdout is not None and len(stdout) > 0:
        logger.info(stdout)

    if stderr is not None and len(stderr) > 0:
        # R's "There were N warnings" chatter on stderr is expected;
        # anything else is treated as fatal.
        if re.search(r'^There were \d+ warnings \(use warnings\(\) to see them\)', stderr):
            logger.info(stderr)
        else:
            logger.error(stderr)
            raise Exception(stderr)

    # build index for gene list
    pos_index = {expr['data']['row_ids'][i]: i for i in range(0, len(expr['data']['row_ids']))}

    # parse clustering results
    cid2genelist = {}
    with open("{0}/{1}".format(CLSTR_DIR, CLSTR_FN), 'r') as glh:
        glh.readline()  # skip header
        for line in glh:
            gene, cluster = line.replace('"', '').split("\t")
            if cluster not in cid2genelist:
                cid2genelist[cluster] = []
            cid2genelist[cluster].append(gene)

    if (len(cid2genelist) < 1):
        logger.error("Clustering failed")
        return empty_results("Error: No cluster output", expr, workspace_service_url, param, logger, ws)
        #sys.exit(4)

    logger.info("Uploading the results onto WS")
    feature_clusters = []
    for cluster in cid2genelist:
        feature_clusters.append({"id_to_pos": {gene: pos_index[gene] for gene in cid2genelist[cluster]}})

    ## Upload Clusters
    feature_clusters = {"original_data": "{0}/{1}".format(param['workspace_name'], param['object_name']),
                        "feature_clusters": feature_clusters}

    ws.save_objects({'workspace': param['workspace_name'], 'objects': [{'type': 'KBaseFeatureValues.FeatureClusters',
                                                                        'data': feature_clusters,
                                                                        'name': (param['out_object_name'])}]})
def run_filter_genes(workspace_service_url=None, param_file = None, level=logging.INFO, logger = None):
"""
Narrative Job Wrapper script to execute coex_filter
Args:
workspace_service_url: A url for the KBase Workspace service
param_file: parameter file
object_name: Name of the object in the workspace
level: Logging level, defaults to logging.INFO.
Returns:
Output is written back in WS
Authors:
<NAME>
"""
try:
os.makedirs(RAWEXPR_DIR)
except:
pass
try:
os.makedirs(FLTRD_DIR)
except:
pass
try:
os.makedirs(FINAL_DIR)
except:
pass
if logger is None:
logger = script_utils.stderrlogger(__file__)
logger.info("Starting conversion of KBaseFeatureValues.ExpressionMatrix to TSV")
token = os.environ.get("KB_AUTH_TOKEN")
with open(param_file) as paramh:
param = json.load(paramh)
from biokbase.workspace.client import Workspace
ws = Workspace(url=workspace_service_url, token=os.environ['KB_AUTH_TOKEN'])
expr = ws.get_objects([{'workspace': param['workspace_name'], 'name' : param['object_name']}])[0]['data']
cmd_dowload_cvt_tsv = [FVE_2_TSV, '--workspace_service_url', workspace_service_url,
'--workspace_name', param['workspace_name'],
'--object_name', param['object_name'],
'--working_directory', RAWEXPR_DIR,
'--output_file_name', EXPRESS_FN
]
# need shell in this case because the java code is depending on finding the KBase token in the environment
# -- copied from FVE_2_TSV
tool_process = subprocess.Popen(" ".join(cmd_dowload_cvt_tsv), stderr=subprocess.PIPE, shell=True)
stdout, stderr = tool_process.communicate()
if stdout is not None and len(stdout) > 0:
logger.info(stdout)
if stderr is not None and len(stderr) > 0:
logger.info(stderr)
logger.info("Identifying differentially expressed genes")
## Prepare sample file
# detect num of columns
with open("{0}/{1}".format(RAWEXPR_DIR, EXPRESS_FN), 'r') as f:
fl = f.readline()
ncol = len(fl.split('\t'))
# force to use ANOVA if the number of sample is two
if(ncol == 3): param['method'] = 'anova'
with open("{0}/{1}".format(RAWEXPR_DIR, SAMPLE_FN), 'wt') as s:
s.write("0")
for j in range(1,ncol-1):
s.write("\t{0}".format(j))
s.write("\n")
## Run coex_filter
cmd_coex_filter = [COEX_FILTER, '-i', "{0}/{1}".format(RAWEXPR_DIR, EXPRESS_FN), '-o', "{0}/{1}".format(FLTRD_DIR, FLTRD_FN),
'-m', param['method'], '-s', "{0}/{1}".format(RAWEXPR_DIR, SAMPLE_FN),
'-x', "{0}/{1}".format(RAWEXPR_DIR, GENELST_FN), '-t', 'y']
if 'num_features' in param:
cmd_coex_filter.append("-n")
cmd_coex_filter.append(str(param['num_features']))
if 'p_value' in param:
cmd_coex_filter.append("-p")
cmd_coex_filter.append(str(param['p_value']))
if 'p_value' not in param and 'num_features' not in param:
logger.error("One of p_value or num_features must be defined");
return empty_results("One of p_value or num_features must be defined", expr,workspace_service_url, param, logger, ws)
#sys.exit(2) #TODO: No error handling in narrative so we do graceful termination
#if 'p_value' in param and 'num_features' in param:
# logger.error("Both of p_value and num_features cannot be defined together");
# sys.exit(3)
tool_process = subprocess.Popen(cmd_coex_filter, stderr=subprocess.PIPE)
stdout, stderr = tool_process.communicate()
if stdout is not None and len(stdout) > 0:
logger.info(stdout)
if stderr is not None and len(stderr) > 0:
logger.info(stderr)
## Header correction
try:
with open("{0}/{1}".format(FLTRD_DIR, FLTRD_FN), 'r') as ff:
fe = ff.readlines()
with open("{0}/{1}".format(FLTRD_DIR, FLTRD_FN), 'w') as ff:
ff.write(fl) # use original first line that has correct header information
fe.pop(0)
ff.writelines(fe)
except:
logger.error("Output was not found");
return empty_results("Increase p_value or specify num_features", expr,workspace_service_url, param, logger, ws)
## checking genelist
with open("{0}/{1}".format(RAWEXPR_DIR, GENELST_FN),'r') as glh:
gl = glh.readlines()
gl = [x.strip('\n') for x in gl]
if(len(gl) < 1) :
logger.error("No genes are selected")
return empty_results("Increase p_value or specify num_features", expr,workspace_service_url, param, logger, ws)
#sys.exit(4)
## Upload FVE
# change workspace to be the referenced object's workspace_name because it may not be in the same working ws due to referencing
# Updates: change missing genome handling strategy by copying reference to working workspace
cmd_upload_expr = [TSV_2_FVE, '--workspace_service_url', workspace_service_url,
'--object_name', param['out_expr_object_name'],
'--working_directory', FINAL_DIR,
'--input_directory', FLTRD_DIR,
'--output_file_name', FINAL_FN
]
tmp_ws = param['workspace_name']
if 'genome_ref' in expr:
obj_infos = ws.get_object_info_new({"objects": [{'ref':expr['genome_ref']}]})[0]
if len(obj_infos) < 1:
logger.error("Couldn't find {0} from the workspace".format(expr['genome_ref']))
raise Exception("Couldn't find {0} from the workspace".format(expr['genome_ref']))
#tmp_ws = "{0}".format(obj_infos[7])
logger.info("{0} => {1} / {2}".format(expr['genome_ref'], obj_infos[7], obj_infos[1]))
if obj_infos[7] != param['workspace_name']:
#we need to copy it from the other workspace
try:
logger.info("trying to copy the referenced genome object : {0}".format(expr['genome_ref']))
ws.copy_object({'from' : {'ref' : expr['genome_ref']},'to' : {'workspace': param['workspace_name'], 'name' : obj_infos[1]}})
# add genome_object_name only after successful copy
cmd_upload_expr.append('--genome_object_name')
cmd_upload_expr.append(obj_infos[1])
except:
# no permission or any issues... then, give up providing genome reference
logger.info("".join(traceback.format_exc()))
pass
else:
# it is local... we can simply add reference without copying genome
cmd_upload_expr.append('--genome_object_name')
cmd_upload_expr.append(obj_infos[1])
# updated ws name
cmd_upload_expr.append('--workspace_name')
cmd_upload_expr.append(tmp_ws)
logger.info(" ".join(cmd_upload_expr))
tool_process = subprocess.Popen(" ".join(cmd_upload_expr), stderr=subprocess.PIPE, shell=True)
stdout, stderr = tool_process.communicate()
if stdout is not None and len(stdout) > 0:
logger.info(stdout)
if stderr is not None and len(stderr) > 0:
logger.info(stderr)
with open("{0}/{1}".format(FINAL_DIR,FINAL_FN),'r') as et:
| |
<filename>ucs_data/ucs_main.py<gh_stars>0
from ucsmsdk.ucshandle import UcsHandle
from colorama import Fore, Back, Style, init
from ucsmsdk.ucsexception import UcsException
import argparse
import sys
# Reset colorama colour state after each print so colours don't bleed.
init(autoreset=True)

# Command-line interface: UCSM endpoint, credentials and spreadsheet path.
parser = argparse.ArgumentParser(description='Configure UCS from spreadsheet')
parser.add_argument('-a', help='UCSM IP (a)ddress (not URL)', type=str,
                    required=True)
parser.add_argument('-u', help='UCSM (u)ser name', type=str, required=True)
parser.add_argument('-p', help='UCSM (p)assword', type=str, required=True)
# NOTE(review): -f is accepted but not referenced in this module's visible
# code — confirm whether the spreadsheet loader lives elsewhere.
parser.add_argument('-f', help='Excel Spreadsheet File Name and Path', type=str,
                    required=False)
args = parser.parse_args()
def ucs_logon(ip_addr=args.a, usr=args.u, pw=args.p):
    """Log in to UCS Manager and return an authenticated handle.

    Defaults are taken from the parsed command-line arguments.

    :param ip_addr: UCSM IP address.
    :param usr: UCSM user name.
    :param pw: UCSM password.
    :return: an authenticated, auto-refreshing UcsHandle.
    """
    handle = UcsHandle(ip_addr, usr, pw, port=443, secure=True)
    handle.get_auth_token()
    handle.login(auto_refresh=True)
    # BUG FIX: the placeholder was never filled in — the message printed
    # literally as 'Connecting to {}'.
    print('Connecting to {}'.format(ip_addr))
    return handle
def configure_organisation(handle, name):
    """Create a sub-organisation under org-root and commit it.

    :param handle: authenticated UcsHandle.
    :param name: organisation name to create.
    """
    from ucsmsdk.mometa.org.OrgOrg import OrgOrg

    mo = OrgOrg(parent_mo_or_dn="org-root", name=name)
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'Organisation {} configured'.format(name))
    except UcsException as err:
        # BUG FIX: previously formatted the UcsException *class* into the
        # message instead of the caught exception instance.
        print(Fore.YELLOW + 'Error: {} Organisation {}, not configured. '.format(err, name))
def configure_uuid_pool(handle, org, name, descr, assgn_order, uuid_to, uuid_from, pref='derived'):
    """Create a UUID suffix pool (with one block) under the given org.

    :param assgn_order: 'default' or 'sequential'.
    :param uuid_to: last UUID suffix of the block.
    :param uuid_from: first UUID suffix of the block.
    :param pref: UUID prefix; 'derived' uses the fabric-derived prefix.
    """
    from ucsmsdk.mometa.uuidpool.UuidpoolPool import UuidpoolPool
    from ucsmsdk.mometa.uuidpool.UuidpoolBlock import UuidpoolBlock

    mo = UuidpoolPool(parent_mo_or_dn="org-root/org-{}".format(org),
                      policy_owner="local", prefix=pref, descr=descr,
                      assignment_order=assgn_order, name=name)
    # The block attaches itself to the pool via parent_mo_or_dn=mo, so
    # adding the pool also adds the block.
    mo_1 = UuidpoolBlock(parent_mo_or_dn=mo, to=uuid_to, r_from=uuid_from)
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'UUID {} configured'.format(name))
    except Exception:
        # BUG FIX: narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt.
        print(Fore.YELLOW + 'Error: UUID {}, {}. '.format(name,
                                                          sys.exc_info()[1]))
def configure_boot_policy(handle, org, name, descr, reboot_on_upd,
                          enforce_vnic_name, boot_mode):
    """Create a boot policy: virtual media (read-only remote) first,
    then local disk (default local image) second."""
    from ucsmsdk.mometa.lsboot.LsbootPolicy import LsbootPolicy
    from ucsmsdk.mometa.lsboot.LsbootVirtualMedia import LsbootVirtualMedia
    from ucsmsdk.mometa.lsboot.LsbootStorage import LsbootStorage
    from ucsmsdk.mometa.lsboot.LsbootLocalStorage import LsbootLocalStorage
    from ucsmsdk.mometa.lsboot.LsbootDefaultLocalImage import LsbootDefaultLocalImage

    mo = LsbootPolicy(parent_mo_or_dn="org-root/org-{}".format(org), name=name,
                      descr=descr, reboot_on_update=reboot_on_upd,
                      policy_owner="local",
                      enforce_vnic_name=enforce_vnic_name,
                      boot_mode=boot_mode)
    # Children attach to `mo` via parent_mo_or_dn and are committed with it.
    mo_1 = LsbootVirtualMedia(parent_mo_or_dn=mo, access="read-only-remote", lun_id="0", mapping_name="", order="1")
    mo_2 = LsbootStorage(parent_mo_or_dn=mo, order="2")
    mo_2_1 = LsbootLocalStorage(parent_mo_or_dn=mo_2, )
    mo_2_1_1 = LsbootDefaultLocalImage(parent_mo_or_dn=mo_2_1, order="2")
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'Boot Policy {} configured'.format(name))
    except:
        print(Fore.YELLOW + 'Error: Boot Policy {}, {}. '.format(name,
                                                                 sys.exc_info()[1]))
def configure_local_disk_conf_policy(handle, org, name, descr, mode, flex_flash,
                                     flex_flash_report, flex_flash_remove):
    """Create a local disk config policy (config protection always 'yes').

    NOTE(review): flex_flash_remove is accepted but never used.
    """
    from ucsmsdk.mometa.storage.StorageLocalDiskConfigPolicy import StorageLocalDiskConfigPolicy

    mo = StorageLocalDiskConfigPolicy(parent_mo_or_dn="org-root/org-{}".format(
        org), protect_config="yes", name=name, descr=descr,
        flex_flash_raid_reporting_state=flex_flash_report,
        flex_flash_state=flex_flash, policy_owner="local", mode=mode)
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'Local Disk Policy {} configured'.format(name))
    except:
        print(Fore.YELLOW + 'Error: Local Disk Policy {}, {}. '.format(name,
                                                                       sys.exc_info()[1]))
def configure_bios_policy(handle, org, name, descr, quiet_boot, cdn_ctrl,
                          post_err_pause, reboot_on_upd):
    """Create a BIOS policy and set its QuietBoot, POSTErrorPause and
    Consistent Device Naming (CDN) tokens.

    :param quiet_boot: settings rn for the QuietBoot token.
    :param cdn_ctrl: settings rn for the cdnEnable token.
    :param post_err_pause: settings rn for the POSTErrorPause token.
    """
    # CLEANUP: removed a stray generated-script marker comment and four
    # duplicate imports of BiosTokenSettings; narrowed bare excepts.
    from ucsmsdk.mometa.bios.BiosVProfile import BiosVProfile
    from ucsmsdk.mometa.bios.BiosTokenSettings import BiosTokenSettings

    mo = BiosVProfile(parent_mo_or_dn="org-root/org-{}".format(org), descr=descr,
                      name=name, reboot_on_update=reboot_on_upd)
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'BIOS Policy {} configured'.format(name))
    except Exception:
        print(Fore.YELLOW + 'Error: BIOS Policy {}, {}. '.format(name,
                                                                 sys.exc_info()[1]))
    # Token settings live under the profile created above; add_mo(..., True)
    # modifies in place when the token object already exists.
    mo = BiosTokenSettings(parent_mo_or_dn="org-root/org-{}/bios-prof-{}/tokn-featr-Quiet Boot/tokn-param-QuietBoot".format(org, name),
                           is_assigned="yes", settings_mo_rn=quiet_boot)
    handle.add_mo(mo, True)
    mo = BiosTokenSettings(parent_mo_or_dn="org-root/org-{}/bios-prof-{}/tokn-featr-POST error pause/tokn-param-POSTErrorPause".format(org, name),
                           is_assigned="yes", settings_mo_rn=post_err_pause)
    handle.add_mo(mo, True)
    mo = BiosTokenSettings(parent_mo_or_dn="org-root/org-{}/bios-prof-{}/tokn-featr-Consistent Device Name Control/tokn-param-cdnEnable".format(org, name),
                           is_assigned="yes", settings_mo_rn=cdn_ctrl)
    handle.add_mo(mo, True)
    try:
        handle.commit()
        print(Fore.GREEN + 'BIOS Policy {} configured'.format(name))
    except Exception:
        print(Fore.YELLOW + 'Error: BIOS Policy {}, {}. '.format(name, sys.exc_info()[1]))
def configure_sol_policy(handle, org, name, descr, baud_speed='115200'):
    """Create an enabled Serial-over-LAN policy at the given baud rate."""
    from ucsmsdk.mometa.sol.SolPolicy import SolPolicy

    policy = SolPolicy(parent_mo_or_dn="org-root/org-{}".format(org),
                       name=name,
                       descr=descr,
                       admin_state='enable',
                       speed=str(baud_speed))
    handle.add_mo(policy)
    try:
        handle.commit()
        print(Fore.GREEN + 'SoL Policy {} configured'.format(name))
    except:
        print(Fore.YELLOW + 'Error: SoL Policy {}, {}. '.format(name, sys.exc_info()[1]))
def configure_scrub_policy(handle, org, name, descr, bios_scrub,
                           flex_flash_scrub, disk_scrub):
    """Create a scrub policy controlling BIOS, FlexFlash and disk scrubbing."""
    from ucsmsdk.mometa.compute.ComputeScrubPolicy import ComputeScrubPolicy

    mo = ComputeScrubPolicy(parent_mo_or_dn="org-root/org-{}".format(org),
                            flex_flash_scrub=flex_flash_scrub, name=name,
                            descr=descr, policy_owner="local",
                            bios_settings_scrub=bios_scrub,
                            disk_scrub=disk_scrub)
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'Scrub Policy {} configured'.format(name))
    except:
        print(Fore.YELLOW + 'Error: Scrub Policy {}, {}. '.format(name,
                                                                  sys.exc_info()[1]))
def configure_maint_policy(handle, org, ss_timer, name='', reboot_pol="user-ack",
                           descr=''):
    """Create a maintenance policy (default: reboot requires user ack).

    NOTE(review): name defaults to '' — confirm callers always pass a name,
    since an empty policy name is unlikely to be valid on UCSM.
    """
    from ucsmsdk.mometa.lsmaint.LsmaintMaintPolicy import LsmaintMaintPolicy

    mo = LsmaintMaintPolicy(parent_mo_or_dn="org-root/org-{}".format(org), uptime_disr=reboot_pol, name=name,
                            descr=descr, trigger_config="", soft_shutdown_timer=ss_timer, sched_name="",
                            policy_owner="local")
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'Maintenance Policy {} configured'.format(name))
    except:
        print(Fore.YELLOW + 'Error: Maintenance Policy {}, {}. '.format(name,
                                                                        sys.exc_info()[1]))
def create_sp_from_template(handle, start_sp_value=611, sp_quantity=12, sp_name_prefix="csvipresx", org="AKL-VI-APP",
                            template_name="AKL-VI-APPLICATION_1"):
    """Instantiate *sp_quantity* service profiles from a template.

    Profiles are named <sp_name_prefix><n> for n starting at
    *start_sp_value*.

    NOTE(review): the site-specific defaults (org, template, prefix) look
    like leftovers from a one-off run — confirm they should remain.
    NOTE(review): indentation reconstructed — the instantiation call is
    assumed to run once per profile inside the loop (each iteration builds
    a fresh single-name DnSet); confirm against the original file.
    """
    from ucsmsdk.ucsmethodfactory import ls_instantiate_n_named_template
    from ucsmsdk.ucsbasetype import DnSet, Dn

    start_value = start_sp_value
    for sp in range(sp_quantity):
        # One XML call per profile: each DnSet carries a single name.
        dn_set = DnSet()
        dn = Dn()
        dn.attr_set("value", sp_name_prefix + str(start_value))
        dn_set.child_add(dn)
        start_value += 1

        elem = ls_instantiate_n_named_template(cookie=handle.cookie, dn="org-root/org-{}/ls-{}".format(org, template_name),
                                               in_error_on_existing="true", in_name_set=dn_set,
                                               in_target_org="org-root/org-{}".format(org), in_hierarchical="false")
        mo_list = handle.process_xml_elem(elem)
def configure_host_fw_policy(handle, org, name, descr, ignore_comp_check,
                             stage_size, upd_trig, mode,
                             override_def_exc, rack_bun_ver='', blade_bun_ver=''):
    """Create a host firmware pack; local disks are excluded from updates."""
    from ucsmsdk.mometa.firmware.FirmwareComputeHostPack import FirmwareComputeHostPack
    from ucsmsdk.mometa.firmware.FirmwareExcludeServerComponent import FirmwareExcludeServerComponent

    mo = FirmwareComputeHostPack(parent_mo_or_dn="org-root/org-{}".format(org),
                                 ignore_comp_check=ignore_comp_check, name=name,
                                 descr=descr, stage_size=stage_size,
                                 rack_bundle_version=rack_bun_ver,
                                 update_trigger=upd_trig,
                                 policy_owner="local", mode=mode,
                                 blade_bundle_version=blade_bun_ver,
                                 override_default_exclusion=override_def_exc)
    # The exclusion attaches via parent_mo_or_dn=mo and is committed with it.
    mo_1 = FirmwareExcludeServerComponent(parent_mo_or_dn=mo,
                                          server_component="local-disk")
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'Host Firmware Policy {} configured'.format(name))
    except:
        print(Fore.YELLOW + 'Error: Host Firmware Policy {}, {}. '.format(name,
                                                                          sys.exc_info()[1]))
def configure_vlans(handle, vlan_id, vlan_name):
    """Create a global LAN-cloud VLAN with the given id and name.

    :param vlan_id: VLAN id (string, per ucsmsdk convention).
    :param vlan_name: VLAN name.
    """
    from ucsmsdk.mometa.fabric.FabricVlan import FabricVlan

    mo = FabricVlan(parent_mo_or_dn="fabric/lan", sharing="none",
                    name=vlan_name,
                    id=vlan_id,
                    mcast_policy_name="", policy_owner="local",
                    default_net="no", pub_nw_name="",
                    compression_type="included")
    handle.add_mo(mo)
    # CLEANUP: removed stale commented-out debug code that referenced an
    # undefined `vlan_id_cleaned`; narrowed the bare `except:`.
    try:
        handle.commit()
        print(Fore.GREEN + 'VLAN {} configured'.format(vlan_id))
    except Exception:
        print(Fore.YELLOW + 'Error: VLAN {}, {}. '.format(vlan_id,
                                                          sys.exc_info()[1]))
def configure_mac_pools(handle, org, description, name, mac_from, mac_to):
    """Create a sequential MAC address pool with a single block."""
    from ucsmsdk.mometa.macpool.MacpoolPool import MacpoolPool
    from ucsmsdk.mometa.macpool.MacpoolBlock import MacpoolBlock

    pool = MacpoolPool(parent_mo_or_dn="org-root/org-{}".format(org),
                       name=name,
                       descr=description,
                       policy_owner="local",
                       assignment_order="sequential")
    # Attaching the block to `pool` means add_mo(pool) carries it along.
    block = MacpoolBlock(parent_mo_or_dn=pool, to=mac_to, r_from=mac_from)
    handle.add_mo(pool)
    try:
        handle.commit()
        print(Fore.GREEN + 'MAC Pool {} configured'.format(name))
    except:
        print(Fore.YELLOW + 'Error: MAC Pool {}, {}. '.format(name,
                                                              sys.exc_info()[1]))
def configure_ip_pools(handle, org, description, name, ip_from, ip_to, ip_gw="10.233.178.1",
                       assignment_ordr="sequential", ip_subnet="255.255.255.0", dns_prim='10.46.116.13',
                       dns_sec="10.50.116.14"):
    """Create an IPv4 management IP pool with a single address block.

    NOTE(review): the gateway/DNS defaults are site-specific — confirm
    they are intended as module-wide defaults.
    """
    from ucsmsdk.mometa.ippool.IppoolPool import IppoolPool
    from ucsmsdk.mometa.ippool.IppoolBlock import IppoolBlock

    mo = IppoolPool(parent_mo_or_dn="org-root/org-{}".format(org), is_net_bios_enabled="disabled", name=name,
                    descr=description, policy_owner="local", ext_managed="internal", supports_dhcp="disabled",
                    assignment_order=assignment_ordr)
    # The block attaches via parent_mo_or_dn=mo and is committed with it.
    mo_1 = IppoolBlock(parent_mo_or_dn=mo, to=ip_to, r_from=ip_from, def_gw=ip_gw, subnet=ip_subnet, prim_dns=dns_prim,
                       sec_dns=dns_sec)
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'IP Pool {} configured'.format(name))
    except:
        print(Fore.YELLOW + 'Unable to configure IP Pool {}. Does it already exist?'.format(name))
    #data = handle.set_dump_xml()
    #print(data)
def configure_qos_policy(handle, org, description, name, priority, burst):
    """Create an egress QoS policy at line rate with the given priority."""
    from ucsmsdk.mometa.epqos.EpqosDefinition import EpqosDefinition
    from ucsmsdk.mometa.epqos.EpqosEgress import EpqosEgress

    mo = EpqosDefinition(parent_mo_or_dn="org-root/org-{}".format(org),
                         policy_owner="local", name=name, descr=description)
    mo_1 = EpqosEgress(parent_mo_or_dn=mo, rate="line-rate",
                       host_control="none", name="", prio=priority,
                       burst=str(burst))
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'QoS Policy {} configured'.format(name))
    except:
        print(Fore.YELLOW + 'Error: QoS Policy {}, {}. '.format(name,
                                                                sys.exc_info()[1]))
    #data = handle.set_dump_xml()
    #print(data)
def configure_cdp_pol(handle, org, description, name, cdp, macreg, actionon,
                      macsec, lldprx, lldptx):
    """Create a network control policy (CDP, LLDP, MAC registration and
    MAC security forge setting)."""
    from ucsmsdk.mometa.nwctrl.NwctrlDefinition import NwctrlDefinition
    from ucsmsdk.mometa.dpsec.DpsecMac import DpsecMac

    mo = NwctrlDefinition(parent_mo_or_dn="org-root/org-{}".format(org),
                          lldp_transmit=lldptx, name=name,
                          lldp_receive=lldprx,
                          mac_register_mode=macreg,
                          policy_owner="local",
                          cdp=cdp, uplink_fail_action=actionon, descr=description)
    # MAC security (forge allow/deny) is a child of the policy.
    mo_1 = DpsecMac(parent_mo_or_dn=mo, forge=macsec, policy_owner="local", name="", descr="")
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'Network Control Policy {} configured'.format(name))
    except:
        print(Fore.YELLOW + 'Unable to configure CDP Policy {}. Does it already exist?'.format(name))
    #data = handle.set_dump_xml()
    #print(data)
def configure_wwnn_pools(handle, org,
                         wwnn_name="AKL-WWNN-Pool",
                         description="Auckland WWNN pool",
                         assignment_order="sequential",
                         from_wwnn="fdf8:f53e:61e4::18",
                         to_wwnn="fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"):
    """Create a WWNN (node WWN) pool with one block under the given org.

    NOTE(review): the default from_wwnn/to_wwnn values look like sanitized
    IPv6 placeholders rather than WWNs (usually 20:00:00:25:B5:xx:xx:xx) --
    confirm before relying on the defaults.

    Commit failures are reported to stdout; nothing is raised to the caller.
    """
    from ucsmsdk.mometa.fcpool.FcpoolInitiators import FcpoolInitiators
    from ucsmsdk.mometa.fcpool.FcpoolBlock import FcpoolBlock
    mo = FcpoolInitiators(parent_mo_or_dn='org-root/org-{}'.format(org),
                          name=wwnn_name,
                          policy_owner="local",
                          descr=description,
                          assignment_order=assignment_order,
                          purpose="node-wwn-assignment")
    # The block is parented on the pool, so add_mo(mo) stages both objects.
    mo_1 = FcpoolBlock(parent_mo_or_dn=mo,
                       to=to_wwnn,
                       r_from=from_wwnn)
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'WWNN Pool {} configured'.format(wwnn_name))
    except Exception:
        # Narrowed from a bare `except:`; sys.exc_info() is still set here.
        print(Fore.YELLOW + 'Error: WWNN Pool {}, {}. '.format(wwnn_name,
                                                               sys.exc_info()[1]))
def configure_kvm_policy(handle, org, description, name, vmedia_encrypt,
                         kvm_port):
    """Create a KVM management policy under the given org.

    Args:
        handle: Active UcsHandle.
        org: Sub-organisation name the policy is created under.
        description: Free-text description for the policy.
        name: Policy name.
        vmedia_encrypt: vMedia encryption setting.
        kvm_port: Accepted but not used by this function (ComputeKvmMgmtPolicy
            is created without a port here) -- kept for call compatibility.

    Commit failures are reported to stdout; nothing is raised to the caller.
    """
    from ucsmsdk.mometa.compute.ComputeKvmMgmtPolicy import ComputeKvmMgmtPolicy
    mo = ComputeKvmMgmtPolicy(parent_mo_or_dn="org-root/org-{}".format(org),
                              descr=description, name=name,
                              vmedia_encryption=vmedia_encrypt)
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'KVM Policy {} configured'.format(name))
    except Exception:
        # Narrowed from a bare `except:`; sys.exc_info() is still set here.
        print(Fore.YELLOW + 'Error: KVM Policy {}, {}. '.format(name,
                                                                sys.exc_info()[1]))
def configure_wwpn_pools(handle, org, description, name, wwpn_from, wwpn_to, assignment_ordr = "sequential"):
    """Create a WWPN (port WWN) pool with a single block under the given org.

    Args:
        handle: Active UcsHandle.
        org: Sub-organisation name the pool is created under.
        description: Free-text description for the pool.
        name: Pool name.
        wwpn_from: First WWPN of the block.
        wwpn_to: Last WWPN of the block.
        assignment_ordr: Pool assignment order.

    Commit failures are reported to stdout; nothing is raised to the caller.
    """
    from ucsmsdk.mometa.fcpool.FcpoolInitiators import FcpoolInitiators
    from ucsmsdk.mometa.fcpool.FcpoolBlock import FcpoolBlock
    mo = FcpoolInitiators(parent_mo_or_dn="org-root/org-{}".format(org), name=name, policy_owner="local",
                          descr=description, assignment_order=assignment_ordr, purpose="port-wwn-assignment")
    # The block is parented on the pool, so add_mo(mo) stages both objects.
    mo_1 = FcpoolBlock(parent_mo_or_dn=mo, to=wwpn_to, r_from=wwpn_from)
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'WWPN Pool {} configured'.format(name))
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        print(Fore.YELLOW + 'Unable to configure WWPN Pool {}. Does it already exist?'.format(name))
def configure_vsans(handle, name='',
                    vsan_id='1280',
                    fabric='A'):
    """Create a VSAN on one SAN fabric (A or B), FCoE VLAN id == VSAN id.

    Args:
        handle: Active UcsHandle.
        name: VSAN name.
        vsan_id: VSAN (and FCoE VLAN) identifier.
        fabric: Fabric interconnect side, "A" or "B".

    Commit failures are reported to stdout; nothing is raised to the caller.
    """
    from ucsmsdk.mometa.fabric.FabricVsan import FabricVsan
    mo = FabricVsan(parent_mo_or_dn="fabric/san/{}".format(fabric),
                    name=str(name),
                    fcoe_vlan=str(vsan_id),
                    policy_owner="local",
                    fc_zone_sharing_mode="coalesce",
                    zoning_state="disabled",
                    id=str(vsan_id))
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'VSAN {} configured'.format(vsan_id))
    except Exception:
        # Narrowed from a bare `except:`; sys.exc_info() is still set here.
        print(Fore.YELLOW + 'Error: VSAN {}, {}. '.format(vsan_id, sys.exc_info()[1]))
def configure_vhba_templates(handle, org, description, name, wwpn_pool,
                             vsan_name, fabric = 'A', qos_pol='VI-FC'):
    """Create an updating vHBA template bound to a VSAN under the given org.

    Args:
        handle: Active UcsHandle.
        org: Sub-organisation name the template is created under.
        description: Free-text description for the template.
        name: Template name.
        wwpn_pool: WWPN pool backing the template.
        vsan_name: VSAN attached via the VnicFcIf child.
        fabric: Fabric side ("A" or "B").
        qos_pol: QoS policy applied to the template.

    Commit failures are reported to stdout; nothing is raised to the caller.
    """
    from ucsmsdk.mometa.vnic.VnicSanConnTempl import VnicSanConnTempl
    from ucsmsdk.mometa.vnic.VnicFcIf import VnicFcIf
    mo = VnicSanConnTempl(parent_mo_or_dn="org-root/org-{}".format(org),
                          redundancy_pair_type="none",
                          name=name,
                          descr=description,
                          stats_policy_name="default",
                          switch_id=fabric,
                          pin_to_group_name="",
                          policy_owner="local",
                          peer_redundancy_templ_name="",
                          templ_type="updating-template",
                          qos_policy_name=qos_pol,
                          ident_pool_name=wwpn_pool,
                          max_data_field_size="2048")
    # VSAN interface is attached via parent_mo_or_dn; staged with mo.
    mo_1 = VnicFcIf(parent_mo_or_dn=mo,
                    name=vsan_name)
    handle.add_mo(mo)
    try:
        handle.commit()
        print(Fore.GREEN + 'vHBA Template {} configured'.format(name))
    except Exception:
        # Narrowed from a bare `except:`; sys.exc_info() is still set here.
        print(Fore.YELLOW + 'Error: vHBA Template {}, {}. '.format(name,
                                                                   sys.exc_info()[1]))
def configure_vnic_templates(handle, org,
                             description='',
                             name='',
                             mac_pool='',
                             mtu='',
                             qos_pol='',
                             network_ctrl_pol='',
                             vlan_name='',
                             switch="A"):
    """Create (or update) an updating vNIC template with a single VLAN.

    On a failed commit the template is re-staged with modify_present=True and
    committed again, so an already-existing template gets updated in place.
    Note the second commit may itself raise; that behaviour is unchanged.
    """
    from ucsmsdk.mometa.vnic.VnicLanConnTempl import VnicLanConnTempl
    from ucsmsdk.mometa.vnic.VnicEtherIf import VnicEtherIf
    mo = VnicLanConnTempl(parent_mo_or_dn="org-root/org-{}".format(org),
                          redundancy_pair_type="none",
                          name=name,
                          descr=description,
                          stats_policy_name="default",
                          admin_cdn_name="",
                          switch_id=switch,
                          pin_to_group_name="",
                          mtu=mtu, policy_owner="local",
                          peer_redundancy_templ_name="",
                          templ_type="updating-template",
                          qos_policy_name=qos_pol,
                          ident_pool_name=mac_pool,
                          cdn_source="vnic-name",
                          nw_ctrl_policy_name=network_ctrl_pol)
    # VLAN interface is attached via parent_mo_or_dn; staged with mo.
    mo_1 = VnicEtherIf(parent_mo_or_dn=mo,
                       default_net="no",
                       name=vlan_name)
    try:
        handle.add_mo(mo)
        handle.commit()
        print(Fore.GREEN + 'vNIC Template {} configured'.format(name))
    except Exception:
        # Narrowed from a bare `except:`; retry as modification of existing MO.
        handle.add_mo(mo, modify_present=True)
        handle.commit()
        print(Fore.YELLOW + 'Error: vNIC Template {}, {}. '.format(name,
                                                                   sys.exc_info()[1]))
def configure_app_vnic_template(handle, org, desc='',
                                name='',
                                mac_pool='',
                                mtu="9000",
                                qos_pol='',
                                network_pol='',
                                vlan_name='',
                                fabric=''):
    """Create (or modify) an application vNIC template with a single VLAN.

    On a failed first commit the template is re-staged with
    modify_present=True and committed again, so an existing template is
    updated in place rather than left untouched.
    """
    from ucsmsdk.mometa.vnic.VnicLanConnTempl import VnicLanConnTempl
    from ucsmsdk.mometa.vnic.VnicEtherIf import VnicEtherIf
    mo = VnicLanConnTempl(parent_mo_or_dn="org-root/org-{}".format(org),
                          redundancy_pair_type="none",
                          name=name,
                          descr=desc,
                          stats_policy_name="default",
                          admin_cdn_name="",
                          switch_id=fabric,
                          pin_to_group_name="",
                          mtu=mtu,
                          policy_owner="local",
                          peer_redundancy_templ_name="",
                          templ_type="updating-template",
                          qos_policy_name=qos_pol,
                          ident_pool_name=mac_pool,
                          cdn_source="vnic-name",
                          nw_ctrl_policy_name=network_pol)
    # VLAN interface is attached via parent_mo_or_dn; staged with mo.
    mo_1 = VnicEtherIf(parent_mo_or_dn=mo,
                       default_net="no",
                       name=vlan_name)
    try:
        handle.add_mo(mo)
        handle.commit()
        print(Fore.GREEN + 'vNIC Template {} configured'.format(name))
    except Exception:
        # Narrowed from a bare `except:`; keyword form replaces positional True.
        print(Fore.GREEN + 'vNIC Template already detected {}. Modifying'.format(name))
        handle.add_mo(mo, modify_present=True)
        handle.commit()
def configure_san_connectivity_policy(handle, org, name, wwnn_pool_name,
                                      adaptor_prof_name, vhba_name, vhba_tmpl_name ):
    """Create (or update) a SAN connectivity policy with one vHBA.

    The policy carries a pool-derived WWNN node and a single vHBA bound to
    the given template on VSAN "default". On a failed first commit it is
    re-staged with modify_present=True and committed again.
    """
    from ucsmsdk.mometa.vnic.VnicSanConnPolicy import VnicSanConnPolicy
    from ucsmsdk.mometa.vnic.VnicFcNode import VnicFcNode
    from ucsmsdk.mometa.vnic.VnicFc import VnicFc
    from ucsmsdk.mometa.vnic.VnicFcIf import VnicFcIf
    mo = VnicSanConnPolicy(parent_mo_or_dn="org-root/org-{}".format(org), name=name)
    # All children are attached via parent_mo_or_dn; add_mo(mo) stages the tree.
    mo_1 = VnicFcNode(parent_mo_or_dn=mo, addr="pool-derived",
                      ident_pool_name=wwnn_pool_name)
    mo_2 = VnicFc(parent_mo_or_dn=mo, adaptor_profile_name=adaptor_prof_name,
                  name=vhba_name, nw_templ_name=vhba_tmpl_name, order="1")
    mo_2_1 = VnicFcIf(parent_mo_or_dn=mo_2, name="default")
    try:
        handle.add_mo(mo)
        handle.commit()
        print(Fore.GREEN + 'SAN Connectivity Policy {} configured'.format(
            name))
    except Exception:
        # Narrowed from a bare `except:`; retry as modification of existing MO.
        handle.add_mo(mo, modify_present=True)
        handle.commit()
        print(Fore.YELLOW + 'Error: SAN Connectivity Policy {}, {}. '.format(
            name, sys.exc_info()[1]))
def configure_lan_connectivity_policy(handle, organisation = "org-root/org-AKL-VI-APP",
vnic_template_name="AKL-VI-MGMT-A",
vnic_order="1",
name="UCS_Lan",
vnic_name="vNIC1",
switch_id="A",
adapter_profile ="VMWare"
):
from | |
<filename>lightautoml/tasks/gpu/common_metric_gpu.py
"""Bunch of metrics with unified interface (GPU version)."""
from functools import partial
from typing import Optional, Callable
import dask_cudf
import cudf
import dask.array as da
import cupy as cp
from cuml.metrics import roc_auc_score
from cuml.metrics import log_loss
from cuml.metrics import accuracy_score
from cuml.metrics.regression import mean_squared_error
from cuml.metrics.regression import r2_score
from cuml.metrics.regression import mean_absolute_error
from cuml.metrics.regression import mean_squared_log_error
from dask_ml.metrics import accuracy_score as dask_accuracy_score
from dask_ml.metrics import mean_absolute_error as dask_mean_absolute_error
def log_loss_gpu(y_true, y_pred,
                 sample_weight = None,
                 eps: float = 1e-15) -> float:
    """Compute log loss for (multi-)GPU data.

    Dask arrays are materialised with ``.compute()`` and scored in one call:
    log loss is not a simple mean of per-chunk log losses, so a
    ``map_blocks``-based decomposition (previously present as dead code
    here) would give a different result.

    Args:
        y_true: True target values (cupy or dask array).
        y_pred: Predicted probabilities.
        sample_weight: Optional per-sample weights.
        eps: Probability clipping value passed to ``cuml.metrics.log_loss``.

    Returns:
        Metric value.
    """
    if isinstance(y_true, da.Array):
        # NOTE(review): this pulls both arrays onto a single GPU; assumes
        # they fit in one device's memory -- confirm for large datasets.
        res = log_loss(y_true.compute(), y_pred.compute(),
                       sample_weight=sample_weight, eps=eps)
    else:
        res = log_loss(y_true, y_pred, sample_weight=sample_weight, eps=eps)
    return res
def r2_score_gpu(y_true, y_pred) -> float:
    """Compute the R^2 score on GPU data.

    For dask arrays the score is computed per block and the block scores
    are averaged; otherwise ``cuml.metrics.regression.r2_score`` is called
    directly.
    """
    if not isinstance(y_true, da.Array):
        return r2_score(y_true, y_pred)
    per_block = da.map_blocks(r2_score, y_true, y_pred,
                              meta=cp.array((), dtype=cp.float32), drop_axis=1)
    return cp.array(per_block.compute()).mean()
def roc_auc_score_gpu(y_true, y_pred, sample_weight=None) -> float:
    """Compute ROC-AUC on GPU data.

    Dask inputs are scored per block and the block scores are averaged.

    NOTE(review): ``sample_weight`` is accepted but never forwarded to the
    underlying metric in either branch -- confirm whether weighting is
    intended here.
    """
    if not isinstance(y_true, da.Array):
        return roc_auc_score(y_true, y_pred)
    per_block = da.map_blocks(roc_auc_score, y_true, y_pred,
                              meta=cp.array((), dtype=cp.float32), drop_axis=1)
    return cp.array(per_block.compute()).mean()
def mean_squared_error_gpu(y_true, y_pred,
                           sample_weight = None) -> float:
    """Compute Mean Squared Error for Multi-GPU data.

    Args:
        y_true: True target values.
        y_pred: Predicted target values.
        sample_weight: Accepted for interface compatibility; currently unused.

    Returns:
        Metric value.
    """
    diff = y_pred - y_true
    if isinstance(y_true, da.Array):
        # square lazily and only materialise the scalar mean
        return da.multiply(diff, diff).mean().compute()
    return cp.square(diff).mean()
def mean_absolute_error_gpu(y_true, y_pred,
                            sample_weight = None):
    """Dispatch MAE to the dask-ml or cuml implementation by input type."""
    use_dask = isinstance(y_true, da.Array)
    metric = dask_mean_absolute_error if use_dask else mean_absolute_error
    return metric(y_true, y_pred, sample_weight)
def mean_quantile_error_gpu(y_true, y_pred,
                            sample_weight = None,
                            q: float = 0.9) -> float:
    """Compute Mean Quantile (pinball) Error for Multi-GPU data.

    Args:
        y_true: True target values.
        y_pred: Predicted target values.
        sample_weight: Optional weights for a weighted mean.
        q: Quantile coefficient.

    Returns:
        Metric value.
    """
    if isinstance(y_true, da.Array):
        resid = y_pred - y_true
        # over-prediction weighted by q, under-prediction by (1 - q)
        loss = da.where(da.sign(resid) > 0, q * resid, (q - 1) * resid)
        if sample_weight is None:
            return loss.mean().compute()
        return ((loss * sample_weight).mean() / sample_weight.mean()).compute()
    resid = y_pred - y_true
    loss = cp.where(cp.sign(resid) > 0, q, 1 - q) * cp.abs(resid)
    if sample_weight is None:
        return loss.mean()
    return (loss * sample_weight).mean() / sample_weight.mean()
def mean_huber_error_gpu(y_true, y_pred,
                         sample_weight = None,
                         a: float = 0.9) -> float:
    """Computes Mean Huber Error for Multi-GPU data.

    Args:
        y_true: True target values.
        y_pred: Predicted target values.
        sample_weight: Specify weighted mean.
        a: Metric coefficient (Huber delta).

    Returns:
        Metric value.
    """
    assert a>=0, "a cannot be negative"
    if isinstance(y_true, da.Array):
        err = y_pred - y_true
        # s == (|err| < a), expressed with da.where to stay lazy
        s = da.where(err < 0, err > -a, err < a)
        # FIX: was a garbled identifier ("ebestclassmulticlassrr"), which
        # raised NameError on the dask path; |err| is intended here.
        abs_err = da.where(err > 0, err, -err)
        # quadratic inside the delta band, linear outside
        err = da.where(s, .5 * (err ** 2), a * abs_err - .5 * (a ** 2))
        if sample_weight is not None:
            return ((err * sample_weight).mean() / sample_weight.mean()).compute()
        return err.mean().compute()
    else:
        err = y_pred - y_true
        s = cp.abs(err) < a
        err = cp.where(s, .5 * (err ** 2), a * cp.abs(err) - .5 * (a ** 2))
        if sample_weight is not None:
            return (err * sample_weight).mean() / sample_weight.mean()
        return err.mean()
def mean_fair_error_gpu(y_true, y_pred,
                        sample_weight = None,
                        c: float = 0.9) -> float:
    """Compute Mean Fair Error for Multi-GPU data.

    Args:
        y_true: True target values.
        y_pred: Predicted target values.
        sample_weight: Optional weights for a weighted mean.
        c: Metric coefficient.

    Returns:
        Metric value.
    """
    if isinstance(y_true, da.Array):
        resid = y_pred - y_true
        scaled = da.where(resid > 0, resid, -resid) / c
        loss = c ** 2 * (scaled - da.log(scaled + 1))
        if sample_weight is None:
            return loss.mean().compute()
        return ((loss * sample_weight).mean() / sample_weight.mean()).compute()
    scaled = cp.abs(y_pred - y_true) / c
    loss = c ** 2 * (scaled - cp.log(scaled + 1))
    if sample_weight is None:
        return loss.mean()
    return (loss * sample_weight).mean() / sample_weight.mean()
def mean_absolute_percentage_error_gpu(y_true, y_pred,
                                       sample_weight = None) -> float:
    """Compute Mean Absolute Percentage Error for Multi-GPU data.

    Args:
        y_true: True target values (division by zero is not guarded).
        y_pred: Predicted target values.
        sample_weight: Optional weights for a weighted mean.

    Returns:
        Metric value.
    """
    rel = (y_true - y_pred) / y_true
    if isinstance(y_true, da.Array):
        loss = da.where(rel > 0, rel, -rel)
        if sample_weight is None:
            return loss.mean().compute()
        return ((loss * sample_weight).mean() / sample_weight.mean()).compute()
    loss = cp.abs(rel)
    if sample_weight is None:
        return loss.mean()
    return (loss * sample_weight).mean() / sample_weight.mean()
def roc_auc_ovr_gpu(y_true, y_pred, sample_weight = None):
    """ROC-AUC One-Versus-Rest for Multi-GPU data.

    Dask inputs recurse per block and the block scores are averaged;
    otherwise each class is scored one-vs-rest and the scores are averaged.
    """
    if isinstance(y_true, da.Array):
        per_block = da.map_blocks(roc_auc_ovr_gpu, y_true, y_pred, sample_weight,
                                  meta=cp.array((), dtype=cp.float32), drop_axis=1)
        return cp.array(per_block.compute()).mean()
    if isinstance(y_true, (cudf.Series, cudf.DataFrame)):
        # work on raw cupy arrays from here on
        y_pred = y_pred.values
        y_true = y_true.values
    n_classes = y_pred.shape[1]
    total = 0.0
    for label in range(n_classes):
        total += roc_auc_score(cp.where(y_true == label, 1, 0), y_pred[:, label])
    return total / n_classes
def rmsle_gpu(y_true, y_pred, sample_weight = None):
    """Root mean squared log error for Multi-GPU data.

    Args:
        y_true: True target values.
        y_pred: Predicted target values.
        sample_weight: Optional weights for a weighted mean (dask path only
            uses them explicitly; cuml handles them internally otherwise).

    Returns:
        Metric value.
    """
    if not isinstance(y_true, da.Array):
        return mean_squared_log_error(y_true, y_pred, sample_weight=sample_weight, squared=False)
    sq_log_err = da.subtract(da.log1p(y_true), da.log1p(y_pred))
    sq_log_err = da.multiply(sq_log_err, sq_log_err)
    if sample_weight is not None:
        weighted = da.multiply(sq_log_err, sample_weight)
        agg = da.divide(da.sum(weighted), sample_weight.sum())
    else:
        agg = da.mean(sq_log_err)
    return cp.sqrt(agg.compute())
def auc_mu_gpu(y_true: cp.ndarray, y_pred: cp.ndarray,
               sample_weight: Optional[cp.ndarray] = None,
               class_weights: Optional[cp.ndarray] = None) -> float:
    """Compute multi-class metric AUC-Mu.
    We assume that confusion matrix full of ones, except diagonal elements.
    All diagonal elements are zeroes.
    By default, for averaging between classes scores we use simple mean.
    Args:
        y_true: True target values.
        y_pred: Predicted target values.
        sample_weight: Not used.
        class_weights: The between classes weight matrix. If ``None``,
            the standard mean will be used. It is expected to be a lower
            triangular matrix (diagonal is also full of zeroes).
            In position (i, j), i > j, there is a partial positive score
            between i-th and j-th classes. All elements must sum up to 1.
    Returns:
        Metric value.
    Note:
        Code was refactored from https://github.com/kleimanr/auc_mu/blob/master/auc_mu.py
    """
    # single-GPU only: the pairwise loop below is not chunk-decomposable
    if isinstance(y_true, da.Array):
        raise NotImplementedError
    # ---- input validation ------------------------------------------------
    if not isinstance(y_pred, cp.ndarray):
        raise TypeError('Expected y_pred to be cp.ndarray, got: {}'.format(type(y_pred)))
    if not y_pred.ndim == 2:
        raise ValueError('Expected array with predictions be a 2-dimentional array')
    if not isinstance(y_true, cp.ndarray):
        raise TypeError('Expected y_true to be cp.ndarray, got: {}'.format(type(y_true)))
    if not y_true.ndim == 1:
        raise ValueError('Expected array with ground truths be a 1-dimentional array')
    if y_true.shape[0] != y_pred.shape[0]:
        raise ValueError('Expected number of samples in y_true and y_pred be same,'
                         ' got {} and {}, respectively'.format(y_true.shape[0], y_pred.shape[0]))
    uniq_labels = cp.unique(y_true)
    n_samples, n_classes = y_pred.shape
    if not cp.all(uniq_labels == cp.arange(n_classes)):
        raise ValueError('Expected classes encoded values 0, ..., N_classes-1')
    if class_weights is None:
        # default: equal weight on every unordered class pair (strict lower triangle)
        class_weights = cp.tri(n_classes, k=-1)
        class_weights /= class_weights.sum()
    if not isinstance(class_weights, cp.ndarray):
        raise TypeError('Expected class_weights to be cp.ndarray, got: {}'.format(type(class_weights)))
    if not class_weights.ndim == 2:
        raise ValueError('Expected class_weights to be a 2-dimentional array')
    if not class_weights.shape == (n_classes, n_classes):
        raise ValueError('Expected class_weights size: {}, got: {}'.format((n_classes, n_classes),
                                                                           class_weights.shape))
    # check sum?
    # all-ones off-diagonal confusion matrix, per the docstring assumption
    confusion_matrix = cp.ones((n_classes, n_classes)) - cp.eye(n_classes)
    auc_full = 0.0
    # weighted sum of pairwise binary AUCs over every pair (class_i > class_j)
    for class_i in range(n_classes):
        preds_i = y_pred[y_true == class_i]
        n_i = preds_i.shape[0]
        for class_j in range(class_i):
            preds_j = y_pred[y_true == class_j]
            n_j = preds_j.shape[0]
            n = n_i + n_j
            # binary relabeling: 0 for class_i samples, 1 for class_j samples
            tmp_labels = cp.zeros((n,), dtype=cp.int32)
            tmp_labels[n_i:] = 1
            tmp_pres = cp.vstack((preds_i, preds_j))
            # project predictions onto the direction separating the two classes
            v = confusion_matrix[class_i, :] - confusion_matrix[class_j, :]
            scores = cp.dot(tmp_pres, v)
            score_ij = roc_auc_score(tmp_labels, scores)
            auc_full += class_weights[class_i, class_j] * score_ij
    return auc_full
# TODO: add the support for F1 score
# class F1Factory:
# """
# Wrapper for :func:`~sklearn.metrics.f1_score` function.
# """
#
# def __init__(self, average: str = 'micro'):
# """
#
# Args:
# average: Averaging type ('micro', 'macro', 'weighted').
#
# """
# self.average = average
#
# def __call__(self, y_true: cp.ndarray, y_pred: cp.ndarray,
# sample_weight: Optional[cp.ndarray] = None) -> float:
# """Compute metric.
#
# Args:
# y_true: Ground truth target values.
# y_pred: Estimated target values.
# sample_weight: Sample weights.
#
# Returns:
# F1 score of the positive class in binary classification
# or weighted average of the F1 scores of each class
# for the multiclass task.
#
# """
# return f1_score(y_true, y_pred, sample_weight=sample_weight, average=self.average)
class BestClassBinaryWrapper_gpu:
"""Metric wrapper to get best class prediction instead of probs.
There is cut-off for prediction by ``0.5``.
"""
def __init__(self, func: Callable):
"""
Args:
func: Metric function. Function format:
func(y_pred, y_true, weights, \*\*kwargs).
"""
self.func | |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains terms related classes."""
import collections
import copy
import pickle # nosec
from typing import Any, Dict, List, Optional, Tuple
from aea.crypto.ledger_apis import LedgerApis
Address = str
class RawTransaction:
    """Container for an unsigned ledger transaction."""
    def __init__(
        self, ledger_id: str, body: Any,
    ):
        """Store ledger identifier and transaction body, then validate."""
        self._ledger_id = ledger_id
        self._body = body
        self._check_consistency()
    def _check_consistency(self) -> None:
        """Validate the stored attributes."""
        assert isinstance(self._ledger_id, str), "ledger_id must be str"
        assert self._body is not None, "body must not be None"
    @property
    def ledger_id(self) -> str:
        """Identifier of the ledger this transaction targets."""
        return self._ledger_id
    @property
    def body(self):
        """Raw transaction payload."""
        return self._body
    @staticmethod
    def encode(
        raw_transaction_protobuf_object, raw_transaction_object: "RawTransaction"
    ) -> None:
        """
        Serialise *raw_transaction_object* into the protocol buffer object.
        :param raw_transaction_protobuf_object: target protocol buffer object.
        :param raw_transaction_object: the RawTransaction instance to encode.
        :return: None
        """
        payload = pickle.dumps(raw_transaction_object)  # nosec
        raw_transaction_protobuf_object.raw_transaction = payload
    @classmethod
    def decode(cls, raw_transaction_protobuf_object) -> "RawTransaction":
        """
        Rebuild a RawTransaction from the protocol buffer object.
        :param raw_transaction_protobuf_object: source protocol buffer object.
        :return: the decoded RawTransaction instance.
        """
        return pickle.loads(  # nosec
            raw_transaction_protobuf_object.raw_transaction
        )
    def __eq__(self, other):
        """Equality on ledger_id and body."""
        if not isinstance(other, RawTransaction):
            return False
        return self.ledger_id == other.ledger_id and self.body == other.body
    def __str__(self):
        """Human-readable representation."""
        return "RawTransaction: ledger_id={}, body={}".format(
            self.ledger_id, self.body,
        )
class RawMessage:
    """Container for an unsigned ledger message."""
    def __init__(
        self, ledger_id: str, body: bytes, is_deprecated_mode: bool = False,
    ):
        """Store ledger identifier, message body and signing mode, then validate."""
        self._ledger_id = ledger_id
        self._body = body
        self._is_deprecated_mode = is_deprecated_mode
        self._check_consistency()
    def _check_consistency(self) -> None:
        """Validate the stored attributes."""
        assert isinstance(self._ledger_id, str), "ledger_id must be str"
        assert self._body is not None, "body must not be None"
        assert isinstance(
            self._is_deprecated_mode, bool
        ), "is_deprecated_mode must be bool"
    @property
    def ledger_id(self) -> str:
        """Identifier of the ledger this message targets."""
        return self._ledger_id
    @property
    def body(self):
        """Raw message payload."""
        return self._body
    @property
    def is_deprecated_mode(self):
        """Whether deprecated signing mode is requested."""
        return self._is_deprecated_mode
    @staticmethod
    def encode(raw_message_protobuf_object, raw_message_object: "RawMessage") -> None:
        """
        Serialise *raw_message_object* into the protocol buffer object.
        :param raw_message_protobuf_object: target protocol buffer object.
        :param raw_message_object: the RawMessage instance to encode.
        :return: None
        """
        payload = pickle.dumps(raw_message_object)  # nosec
        raw_message_protobuf_object.raw_message = payload
    @classmethod
    def decode(cls, raw_message_protobuf_object) -> "RawMessage":
        """
        Rebuild a RawMessage from the protocol buffer object.
        :param raw_message_protobuf_object: source protocol buffer object.
        :return: the decoded RawMessage instance.
        """
        return pickle.loads(raw_message_protobuf_object.raw_message)  # nosec
    def __eq__(self, other):
        """Equality on ledger_id, body and is_deprecated_mode."""
        if not isinstance(other, RawMessage):
            return False
        return (
            self.ledger_id == other.ledger_id
            and self.body == other.body
            and self.is_deprecated_mode == other.is_deprecated_mode
        )
    def __str__(self):
        """Human-readable representation."""
        return "RawMessage: ledger_id={}, body={}, is_deprecated_mode={}".format(
            self.ledger_id, self.body, self.is_deprecated_mode,
        )
class SignedTransaction:
    """Container for a signed ledger transaction."""
    def __init__(
        self, ledger_id: str, body: Any,
    ):
        """Store ledger identifier and signed transaction body, then validate."""
        self._ledger_id = ledger_id
        self._body = body
        self._check_consistency()
    def _check_consistency(self) -> None:
        """Validate the stored attributes."""
        assert isinstance(self._ledger_id, str), "ledger_id must be str"
        assert self._body is not None, "body must not be None"
    @property
    def ledger_id(self) -> str:
        """Identifier of the ledger this transaction targets."""
        return self._ledger_id
    @property
    def body(self):
        """Signed transaction payload."""
        return self._body
    @staticmethod
    def encode(
        signed_transaction_protobuf_object,
        signed_transaction_object: "SignedTransaction",
    ) -> None:
        """
        Serialise *signed_transaction_object* into the protocol buffer object.
        :param signed_transaction_protobuf_object: target protocol buffer object.
        :param signed_transaction_object: the SignedTransaction instance to encode.
        :return: None
        """
        payload = pickle.dumps(signed_transaction_object)  # nosec
        signed_transaction_protobuf_object.signed_transaction = payload
    @classmethod
    def decode(cls, signed_transaction_protobuf_object) -> "SignedTransaction":
        """
        Rebuild a SignedTransaction from the protocol buffer object.
        :param signed_transaction_protobuf_object: source protocol buffer object.
        :return: the decoded SignedTransaction instance.
        """
        return pickle.loads(  # nosec
            signed_transaction_protobuf_object.signed_transaction
        )
    def __eq__(self, other):
        """Equality on ledger_id and body."""
        if not isinstance(other, SignedTransaction):
            return False
        return self.ledger_id == other.ledger_id and self.body == other.body
    def __str__(self):
        """Human-readable representation."""
        return "SignedTransaction: ledger_id={}, body={}".format(
            self.ledger_id, self.body,
        )
class SignedMessage:
    """Container for a signed ledger message."""
    def __init__(
        self, ledger_id: str, body: str, is_deprecated_mode: bool = False,
    ):
        """Store ledger identifier, signature body and signing mode, then validate."""
        self._ledger_id = ledger_id
        self._body = body
        self._is_deprecated_mode = is_deprecated_mode
        self._check_consistency()
    def _check_consistency(self) -> None:
        """Validate the stored attributes."""
        assert isinstance(self._ledger_id, str), "ledger_id must be str"
        assert isinstance(self._body, str), "body must be string"
        assert isinstance(
            self._is_deprecated_mode, bool
        ), "is_deprecated_mode must be bool"
    @property
    def ledger_id(self) -> str:
        """Identifier of the ledger this message targets."""
        return self._ledger_id
    @property
    def body(self):
        """Signed message payload."""
        return self._body
    @property
    def is_deprecated_mode(self):
        """Whether deprecated signing mode was used."""
        return self._is_deprecated_mode
    @staticmethod
    def encode(
        signed_message_protobuf_object, signed_message_object: "SignedMessage"
    ) -> None:
        """
        Serialise *signed_message_object* into the protocol buffer object.
        :param signed_message_protobuf_object: target protocol buffer object.
        :param signed_message_object: the SignedMessage instance to encode.
        :return: None
        """
        payload = pickle.dumps(signed_message_object)  # nosec
        signed_message_protobuf_object.signed_message = payload
    @classmethod
    def decode(cls, signed_message_protobuf_object) -> "SignedMessage":
        """
        Rebuild a SignedMessage from the protocol buffer object.
        :param signed_message_protobuf_object: source protocol buffer object.
        :return: the decoded SignedMessage instance.
        """
        return pickle.loads(  # nosec
            signed_message_protobuf_object.signed_message
        )
    def __eq__(self, other):
        """Equality on ledger_id, body and is_deprecated_mode."""
        if not isinstance(other, SignedMessage):
            return False
        return (
            self.ledger_id == other.ledger_id
            and self.body == other.body
            and self.is_deprecated_mode == other.is_deprecated_mode
        )
    def __str__(self):
        """Human-readable representation."""
        return "SignedMessage: ledger_id={}, body={}, is_deprecated_mode={}".format(
            self.ledger_id, self.body, self.is_deprecated_mode,
        )
class State:
"""This class represents an instance of State."""
def __init__(self, ledger_id: str, body: bytes):
"""Initialise an instance of State."""
self._ledger_id = ledger_id
self._body = body
self._check_consistency()
def _check_consistency(self) -> None:
"""Check consistency of the object."""
assert isinstance(self._ledger_id, str), "ledger_id must | |
= temp_df.astype(float)
return temp_df
# 金十数据中心-经济指标-中国-金融指标-M2货币供应年率
def macro_china_m2_yearly():
    """China yearly M2 money supply YoY, data available from 1998-02-01 onwards.

    https://datacenter.jin10.com/reportType/dc_chinese_m2_money_supply_yoy
    https://cdn.jin10.com/dc/reports/dc_chinese_m2_money_supply_yoy_all.js?v=1578818474

    :return: pandas.Series of reported values (%), named "m2",
        indexed by report date; zero readings are dropped.
    """
    now_ms = int(round(time.time() * 1000))
    res = requests.get(
        JS_CHINA_M2_YEARLY_URL.format(str(now_ms), str(now_ms + 90))
    )
    # strip the JSONP wrapper down to the inner JSON object
    json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
    date_list = [item["date"] for item in json_data["list"]]
    value_list = [item["datas"]["中国M2货币供应年率报告"] for item in json_data["list"]]
    raw = pd.DataFrame(value_list)
    raw.columns = json_data["kinds"]
    raw.index = pd.to_datetime(date_list)
    series = raw["今值(%)"]
    series.name = "m2"
    series = series[series != 0]
    return series.astype(float)
# 金十数据中心-经济指标-中国-金融指标-上海银行业同业拆借报告
def macro_china_shibor_all():
    """Shanghai Interbank Offered Rate report, data from 2017-03-17 onwards.

    https://datacenter.jin10.com/reportType/dc_shibor
    https://cdn.jin10.com/dc/reports/dc_shibor_all.js?v=1578755058

    :return: per-tenor price and change columns, latest values (%)
    :rtype: pandas.DataFrame
    """
    t = time.time()
    params = {"_": t}
    res = requests.get(
        "https://cdn.jin10.com/data_center/reports/il_1.json", params=params
    )
    json_data = res.json()
    temp_df = pd.DataFrame(json_data["values"]).T
    temp_df.fillna(value="--", inplace=True)
    big_df = pd.DataFrame()
    # Each tenor cell holds a [price, change] pair; expand into two columns.
    # The loop replaces 16 copy-pasted assignments and preserves column order.
    for tenor in ["O/N", "1W", "2W", "1M", "3M", "6M", "9M", "1Y"]:
        big_df["{}_定价".format(tenor)] = temp_df[tenor].apply(lambda x: x[0])
        big_df["{}_涨跌幅".format(tenor)] = temp_df[tenor].apply(lambda x: x[1])
    # normalise placeholder values before the numeric cast
    big_df = big_df.apply(lambda x: x.replace("-", np.nan))
    big_df = big_df.apply(lambda x: x.replace([None], np.nan))
    big_df.sort_index(inplace=True)
    big_df = big_df.astype("float")
    return big_df
# 金十数据中心-经济指标-中国-金融指标-人民币香港银行同业拆息
def macro_china_hk_market_info():
    """
    香港同业拆借报告, 数据区间从20170320-至今
    https://datacenter.jin10.com/reportType/dc_hk_market_info
    https://cdn.jin10.com/dc/reports/dc_hk_market_info_all.js?v=1578755471
    :return: 香港同业拆借报告-今值(%) and 涨跌幅 per tenor
    :rtype: pandas.DataFrame
    """
    t = time.time()
    params = {"_": t}  # cache-busting timestamp
    res = requests.get(
        "https://cdn.jin10.com/data_center/reports/il_2.json", params=params
    )
    json_data = res.json()
    temp_df = pd.DataFrame(json_data["values"]).T
    big_df = pd.DataFrame()
    temp_df.fillna(value="--", inplace=True)
    # Each tenor column in the feed holds a [price, change] pair; unpack each
    # into two flat columns. "O/N" and "9M" are deliberately excluded (they
    # were disabled in the original implementation). Column order is preserved.
    tenors = ["1W", "2W", "1M", "3M", "6M", "1Y", "ON", "2M"]
    for tenor in tenors:
        big_df[tenor + "_定价"] = temp_df[tenor].apply(lambda x: x[0])
        big_df[tenor + "_涨跌幅"] = temp_df[tenor].apply(lambda x: x[1])
    # the feed uses "-"/None as missing markers; normalize to NaN before casting
    big_df = big_df.apply(lambda x: x.replace("-", np.nan))
    big_df = big_df.apply(lambda x: x.replace([None], np.nan))
    big_df.sort_index(inplace=True)
    big_df = big_df.astype("float")
    return big_df
# 金十数据中心-经济指标-中国-其他-中国日度沿海六大电库存数据
def macro_china_daily_energy():
    """
    中国日度沿海六大电库存数据, 数据区间从20160101-至今
    https://datacenter.jin10.com/reportType/dc_qihuo_energy_report
    https://cdn.jin10.com/dc/reports/dc_qihuo_energy_report_all.js?v=1578819100
    :return: 沿海六大电库存 / 日耗 / 存煤可用天数, indexed by date
    :rtype: pandas.DataFrame
    """
    millis = int(round(time.time() * 1000))
    url = JS_CHINA_ENERGY_DAILY_URL.format(str(millis), str(millis + 90))
    res = requests.get(url)
    raw = res.text
    # the payload is a JS snippet; slice out the embedded JSON object
    json_data = json.loads(raw[raw.find("{"): raw.rfind("}") + 1])
    records = json_data["list"]
    dates = [entry["date"] for entry in records]
    readings = [entry["datas"]["沿海六大电厂库存动态报告"] for entry in records]
    value_df = pd.DataFrame(readings)
    value_df.columns = json_data["kinds"]
    value_df.index = pd.to_datetime(dates)
    temp_df = value_df[["沿海六大电库存", "日耗", "存煤可用天数"]]
    temp_df.name = "energy"
    temp_df = temp_df.astype(float)
    return temp_df
# 金十数据中心-经济指标-中国-其他-中国人民币汇率中间价报告
def macro_china_rmb():
    """
    中国人民币汇率中间价报告, 数据区间从20170103-至今
    https://datacenter.jin10.com/reportType/dc_rmb_data
    :return: 人民币汇率中间价及涨跌幅, indexed by date
    :rtype: pandas.DataFrame
    """
    t = time.time()
    params = {"_": t}  # cache-busting timestamp
    res = requests.get(
        "https://cdn.jin10.com/data_center/reports/exchange_rate.json", params=params
    )
    json_data = res.json()
    temp_df = pd.DataFrame(json_data["values"]).T
    big_df = pd.DataFrame()
    temp_df.fillna(value="--", inplace=True)
    # Each currency-pair column holds a [mid price, change] pair; unpack each
    # into "<pair>_中间价" and "<pair>_涨跌幅". Order matches the historical
    # column layout of this function.
    pairs = [
        "美元/人民币",
        "欧元/人民币",
        "100日元/人民币",
        "港元/人民币",
        "英镑/人民币",
        "澳元/人民币",
        "新西兰元/人民币",
        "新加坡元/人民币",
        "瑞郎/人民币",
        "加元/人民币",
        "人民币/马来西亚林吉特",
        "人民币/俄罗斯卢布",
        "人民币/南非兰特",
        "人民币/韩元",
        "人民币/阿联酋迪拉姆",
        "人民币/沙特里亚尔",
        "人民币/匈牙利福林",
        "人民币/波兰兹罗提",
        "人民币/丹麦克朗",
        "人民币/瑞典克朗",
        "人民币/挪威克朗",
        "人民币/土耳其里拉",
        "人民币/墨西哥比索",
    ]
    for pair in pairs:
        big_df[pair + "_中间价"] = temp_df[pair].apply(lambda x: x[0])
        big_df[pair + "_涨跌幅"] = temp_df[pair].apply(lambda x: x[1])
    # 人民币/泰铢 historically used the "_定价" suffix instead of "_中间价";
    # keep the old column name for backward compatibility with callers
    big_df["人民币/泰铢_定价"] = temp_df["人民币/泰铢"].apply(lambda x: x[0])
    big_df["人民币/泰铢_涨跌幅"] = temp_df["人民币/泰铢"].apply(lambda x: x[1])
    # the feed uses "-"/None as missing markers; normalize to NaN before casting
    big_df = big_df.apply(lambda x: x.replace("-", np.nan))
    big_df = big_df.apply(lambda x: x.replace([None], np.nan))
    big_df.sort_index(inplace=True)
    big_df = big_df.astype("float")
    return big_df
# 金十数据中心-经济指标-中国-其他-深圳融资融券报告
def macro_china_market_margin_sz():
    """
    深圳融资融券报告, 数据区间从20100331-至今
    https://datacenter.jin10.com/reportType/dc_market_margin_sz
    :return: 融资买入额, 融资余额, 融券卖出量, 融券余量, 融券余额, 融资融券余额 (indexed by date)
    :rtype: pandas.DataFrame
    """
    params = {"_": time.time()}  # cache-busting timestamp
    response = requests.get(
        "https://cdn.jin10.com/data_center/reports/fs_2.json", params=params
    )
    payload = response.json()
    margin_df = pd.DataFrame(payload["values"]).T
    margin_df.columns = ["融资买入额", "融资余额", "融券卖出量", "融券余量", "融券余额", "融资融券余额"]
    margin_df.sort_index(inplace=True)
    margin_df.index = pd.to_datetime(margin_df.index)
    margin_df = margin_df.astype("float")
    return margin_df
# 金十数据中心-经济指标-中国-其他-上海融资融券报告
def macro_china_market_margin_sh():
"""
上海融资融券报告, 数据区间从 20100331-至今
https://datacenter.jin10.com/reportType/dc_market_margin_sse
:return: pandas.DataFrame
"""
t = time.time()
res = requests.get(
JS_CHINA_MARKET_MARGIN_SH_URL.format(
str(int(round(t * 1000))), str(int(round(t * 1000)) + 90)
)
)
json_data = json.loads(res.text[res.text.find("{") : res.text.rfind("}") + 1])
date_list = [item["date"] for item in json_data["list"]]
value_list_1 = [item["datas"]["总量"][0] for item in json_data["list"]]
value_list_2 = [item["datas"]["总量"][1] for item in json_data["list"]]
value_list_3 = [item["datas"]["总量"][2] for item in json_data["list"]]
value_list_4 = [item["datas"]["总量"][3] for item in json_data["list"]]
value_list_5 = [item["datas"]["总量"][4] for item in json_data["list"]]
value_list_6 = [item["datas"]["总量"][5] for item in json_data["list"]]
value_df = pd.DataFrame(
[
value_list_1,
value_list_2,
value_list_3,
value_list_4,
value_list_5,
value_list_6,
]
).T
value_df.columns = [
"融资余额",
"融资买入额",
"融券余量",
"融券余额",
"融券卖出量",
"融资融券余额",
]
value_df.index = pd.to_datetime(date_list)
value_df.name = "market_margin_sh"
value_df.index = pd.to_datetime(value_df.index)
value_df = value_df.astype(float)
url = "https://datacenter-api.jin10.com/reports/list_v2"
params = {
"max_date": "",
"category": "fs",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
"cache-control": "no-cache",
"origin": "https://datacenter.jin10.com",
"pragma": "no-cache",
"referer": "https://datacenter.jin10.com/reportType/dc_market_margin_sse",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"x-app-id": "rU6QIu7JHe2gOUeR",
"x-csrf-token": "",
"x-version": "1.0.0",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36",
}
r = requests.get(url, params=params, headers=headers)
temp_df = pd.DataFrame(r.json()["data"]["values"])
temp_df.index = pd.to_datetime(temp_df.iloc[:, 0])
temp_df = temp_df.iloc[:, 1:]
temp_df.columns = [item["name"] for item in r.json()["data"]["keys"]][1:]
for_times = math.ceil(
int(str((temp_df.index[-1] - value_df.index[-1])).split(" ")[0]) / 20
)
big_df = temp_df
for i in tqdm(range(for_times)):
params = {
"max_date": temp_df.index[-1],
"category": "fs",
"attr_id": "1",
"_": str(int(round(t * 1000))),
}
r = requests.get(url, params=params, headers=headers)
temp_df = pd.DataFrame(r.json()["data"]["values"])
temp_df.index = pd.to_datetime(temp_df.iloc[:, 0])
temp_df = temp_df.iloc[:, 1:]
temp_df.columns = | |
import os
import ast
from collections import namedtuple
#from tensorflow.python import debug as tf_debug
from shutil import copy, copytree
from tqdm import trange
import csv
import random
import tensorflow as tf
import numpy as np
from data import get_dataset
from models import Generator, Discriminator, MappingNetwork
from ops import name_scope, upsample, downsample, downsample_nv
from utils import filter_vars_with_checkpoint, build_label_list_from_file
# Immutable bundle of every training hyperparameter used by this module.
# All fields default to None (set below) so callers only supply what they use.
TrainHps = namedtuple("TrainingHyperparams",
                      ["res_h", "res_w", "current_res_w", "psi_w", "batch_size", "epochs_per_res",
                       "optimizer", "loss_fn", "profile", "ngpus",
                       "learning_rate", "adam_beta1", "adam_beta2", "use_beholder",
                       "model_dir", "gp_fn", "lambda_gp", "ncritic", "cond_uniform_fake",
                       "do_pixel_norm", "start_res_h", "start_res_w", "map_cond",
                       "tboard_debug", "cli_debug", "cond_weight", "cond_layers",
                       "eager", "no_train", "lambda_drift", "conditional_type",
                       "do_equalized_lr", "do_minibatch_stddev", "label_file",
                       "steps_per_save", "save_paths", "do_traditional_input",
                       "do_mapping_network", "do_add_noise", "resize_method"])
# make every field optional by giving each a None default
TrainHps.__new__.__defaults__ = (None,) * len(TrainHps._fields)

# Filesystem locations for model weights, optimizer checkpoints, and the
# alpha/step text files. All fields default to None (set below).
SavePaths = namedtuple("SavePaths",
                       ["gen_model", "dis_model", "mapping_network", "sampling_model",
                        "gen_optim", "dis_optim", "mn_optim", "alpha", "step"])
SavePaths.__new__.__defaults__ = (None,) * len(SavePaths._fields)
@name_scope("non_saturating_loss")
def non_saturating_loss(real_logit, fake_logit):
    """Non-saturating sigmoid cross-entropy GAN loss.

    :param real_logit: logit(s) for real images (if None just return generator loss)
    :param fake_logit: logit(s) for fake images
    :return: loss for discriminator and generator (unless real_logit is None)
    """
    xent = tf.nn.sigmoid_cross_entropy_with_logits
    # generator wants fakes classified as real (label 1)
    loss_generator = .5 * tf.reduce_mean(
        xent(labels=tf.ones_like(fake_logit), logits=fake_logit))
    if real_logit is None:
        return loss_generator
    real_term = tf.reduce_mean(
        xent(labels=tf.ones_like(real_logit), logits=real_logit))
    fake_term = tf.reduce_mean(
        xent(labels=tf.zeros_like(fake_logit), logits=fake_logit))
    loss_discriminator = .5 * real_term + .5 * fake_term
    return loss_discriminator, loss_generator
@name_scope("wasserstein_loss")
def wasserstein_loss(real_logit, fake_logit):
    """Wasserstein (critic) loss; returned per-logit, not reduced.

    :param real_logit: logit(s) for real images (if None just return generator loss)
    :param fake_logit: logit(s) for fake images
    :return: loss for discriminator and generator (unless real_logit is None)
    """
    # generator maximizes the critic score on fakes
    loss_generator = - fake_logit
    if real_logit is None:
        return loss_generator
    # critic loss = fake - real; any bias in the final FC layer cancels out
    loss_discriminator = - real_logit + fake_logit
    return loss_discriminator, loss_generator
@name_scope("drift_penalty")
def drift_penalty(real_logit):
    """Penalize squared magnitude of the real logits (keeps them near zero)."""
    return tf.square(real_logit)
@name_scope("interpolates")
def get_interpolates(real_data, fake_data, alpha_interpolates=None):
    """Linearly interpolate between real and fake batches (used by WGAN-GP).

    When ``alpha_interpolates`` is None, a fresh uniform [0, 1) coefficient is
    drawn per batch element (broadcast over the spatial/channel axes).
    """
    if alpha_interpolates is None:
        batch = real_data.get_shape().as_list()[0]
        alpha_interpolates = tf.random_uniform([batch, 1, 1, 1], 0., 1.)
    return alpha_interpolates * fake_data + (1 - alpha_interpolates) * real_data
@name_scope("R1_gp")
def r1_gp(fake_image, real_image, dis_model, alpha, label_dict=None, conditional_type=None, **kwargs):
    """R1 gradient penalty: 0.5 * ||d D(real) / d real||^2 per batch element.

    :param fake_image: unused here (kept for a uniform gp_fn signature)
    :param real_image: batch of real images to differentiate through
    :param dis_model: discriminator; called as dis_model(x, alpha=..., y=...)
    :param alpha: fade-in alpha forwarded to the discriminator
    :param label_dict: mapping of label name -> tensor (conditional modes only)
    :param conditional_type: "acgan", "proj", or None/other for unconditional
    :return: per-example penalty tensor
    """
    output_sum = 0
    if conditional_type == "acgan":
        output, class_logits = dis_model(real_image, alpha=alpha, y=None)
        # include classifier logits so their gradients are penalized too
        if class_logits is not None:
            for label in label_dict.keys():
                output_sum = output_sum + tf.reduce_sum(class_logits[label])
    elif conditional_type == "proj":
        output, _ = dis_model(real_image,
                              alpha=alpha,
                              y=tf.concat([label_dict[l] for l in label_dict.keys()], axis=-1))
    else:
        output, _ = dis_model(real_image,
                              alpha=alpha,
                              y=None)
    # sum of outputs for each image in batch. The derivative of a output for an image from a different
    # batch should be 0, so this won't scale with batch size
    # todo: is the sum even necessary?
    output_sum = output_sum + tf.reduce_sum(output)
    grads = tf.gradients(output_sum, [real_image])[0]
    # reduce over all but the batch axis, or everything for rank-1 gradients.
    # NOTE: was `ndims is not 1` — identity comparison with an int literal is
    # implementation-dependent (and a SyntaxWarning on Python 3.8+); use `!=`.
    ndims = grads.get_shape().ndims
    axis = np.arange(1, ndims) if ndims != 1 else None
    l2_squared_grads = tf.reduce_sum(tf.square(grads), axis=axis)
    penalty = l2_squared_grads * 0.5
    return penalty
@name_scope("l2_gp")
def l2_gp(input, output):
    """WGAN-GP penalty: (||d output / d input||_2 - 1)^2 per batch element.

    :param input: tensor to differentiate with respect to
    :param output: scalar loss, or rank-1/2 per-example losses
    :raises ValueError: if ``output`` has rank greater than 2
    :return: per-example penalty tensor
    """
    if output.get_shape().ndims not in [0, 1, 2]:
        raise ValueError("output should be ranks 0 to 2 (list of losses or single loss)")
    grads = tf.gradients(output, [input])[0]
    # reduce over all but the batch axis, or everything for rank-1 gradients.
    # NOTE: was `ndims is not 1` — identity comparison with an int literal is
    # implementation-dependent (and a SyntaxWarning on Python 3.8+); use `!=`.
    ndims = grads.get_shape().ndims
    axis = np.arange(1, ndims) if ndims != 1 else None
    l2_grads = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=axis))
    penalty = tf.square(l2_grads - 1.)
    return penalty
@name_scope("wgan_gp")
def wgan_gp(fake_image, real_image, dis_model, alpha, alpha_interpolates=None):
    """WGAN gradient penalty on interpolates between real and fake batches (graph mode)."""
    interps = get_interpolates(real_image, fake_image, alpha_interpolates)
    critic_sum = tf.reduce_sum(dis_model(interps, alpha=alpha))
    return l2_gp(interps, critic_sum)
@name_scope("wgan_gp_eager")
def wgan_gp_eager(fake_image, real_image, dis_model, alpha, alpha_interpolates=None):
    """WGAN gradient penalty on interpolates, eager-mode variant using GradientTape.

    :raises ValueError: if the discriminator output has rank greater than 2
    :return: per-example penalty tensor
    """
    interps = get_interpolates(real_image, fake_image, alpha_interpolates)
    with tf.GradientTape() as tape:
        tape.watch(interps)  # interps is not trainable so not auto-watched
        output = dis_model(interps, alpha=alpha)
    if output.get_shape().ndims not in [0, 1, 2]:
        raise ValueError("output should be ranks 0 to 2 (list of losses or single loss)")
    grads = tape.gradient(output, interps)
    # reduce over all but the batch axis, or everything for rank-1 gradients.
    # NOTE: was `ndims is not 1` — identity comparison with an int literal is
    # implementation-dependent (and a SyntaxWarning on Python 3.8+); use `!=`.
    ndims = grads.get_shape().ndims
    axis = np.arange(1, ndims) if ndims != 1 else None
    l2_grads = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=axis))
    penalty = tf.square(l2_grads - 1.)
    return penalty
def build_models(hps, current_res_w, use_ema_sampling=False, num_classes=None, label_list=None):  # todo: fix num_classes
    """Construct the generator, optional mapping network, discriminator, and
    (when ``use_ema_sampling``) a second generator for EMA-smoothed sampling.

    :return: (gen, mapping_network, dis) or (gen, mapping_network, dis, sampling)
    """
    # both generators are configured identically from the hyperparameters
    generator_kwargs = dict(
        use_pixel_norm=hps.do_pixel_norm,
        start_shape=(hps.start_res_h, hps.start_res_w),
        equalized_lr=hps.do_equalized_lr,
        traditional_input=hps.do_traditional_input,
        add_noise=hps.do_add_noise,
        resize_method=hps.resize_method,
        use_mapping_network=hps.do_mapping_network,
        cond_layers=hps.cond_layers,
        map_cond=hps.map_cond,
    )
    mapping_network = MappingNetwork() if hps.do_mapping_network else None
    gen_model = Generator(current_res_w, hps.res_w, **generator_kwargs)
    dis_model = Discriminator(current_res_w, equalized_lr=hps.do_equalized_lr,
                              do_minibatch_stddev=hps.do_minibatch_stddev,
                              end_shape=(hps.start_res_h, hps.start_res_w),
                              resize_method=hps.resize_method, cgan_nclasses=num_classes,
                              label_list=label_list)
    if not use_ema_sampling:
        return gen_model, mapping_network, dis_model
    sampling_model = Generator(current_res_w, hps.res_w, **generator_kwargs)
    return gen_model, mapping_network, dis_model, sampling_model
def build_optimizers(hps):
    """Create one optimizer each for the generator, discriminator, and mapping network."""
    return (build_optimizer_from_hps(hps),
            build_optimizer_from_hps(hps),
            build_optimizer_from_hps(hps, lr_multiplier=1.))
def build_data_iterator(hps, files, current_res_h, current_res_w, batch_size=None, label_list=None,
                        num_shards=None, shard_index=None):
    """Shuffle ``files`` (in place — the caller's list is mutated) and build a
    one-shot dataset iterator at the current resolution.

    Bug fix: ``num_shards``/``shard_index`` were accepted but hard-coded to
    None in the ``get_dataset`` call, so multi-GPU sharding silently never
    took effect; they are now forwarded.
    """
    random.shuffle(files)
    dataset = get_dataset(files, current_res_h, current_res_w, hps.epochs_per_res, batch_size,
                          label_list=label_list, num_shards=num_shards, shard_index=shard_index)
    it = dataset.make_one_shot_iterator()
    return it
@name_scope("optimizer")
def build_optimizer_from_hps(hps, lr_multiplier=1.):
    """Build an optimizer from the hyperparameters.

    :param hps: TrainHps with ``optimizer``, ``learning_rate`` (and adam betas)
    :param lr_multiplier: scales the configured learning rate
    :raises ValueError: for an unrecognized ``hps.optimizer`` (previously this
        silently returned None, causing a confusing AttributeError later)
    """
    if hps.optimizer == "adam":
        return tf.train.AdamOptimizer(learning_rate=hps.learning_rate * lr_multiplier,
                                      beta1=hps.adam_beta1,
                                      beta2=hps.adam_beta2)
    elif hps.optimizer == "gradient_descent":
        return tf.train.GradientDescentOptimizer(learning_rate=hps.learning_rate * lr_multiplier)
    raise ValueError("unsupported optimizer: %r" % (hps.optimizer,))
@name_scope("generate_summary")
def generate_image_summary(images, name, step=None):
    """
    :param images: images to display (batch_size, h, w, c)
    :param name: name for summary
    :param step: step to specify for summary (eager mode only)
    :return: summary for grid of images
    """
    # pad the batch up to the next multiple of 4 so it fills a 4-wide grid.
    # Bug fix: the old expression `4 - n % 4` padded 4 extra blank images when
    # the batch was already divisible by 4; the trailing `% 4` makes it 0.
    pad_amount = (4 - images.get_shape()[0] % 4) % 4
    images = tf.pad(images, [[0, pad_amount], [0, 0], [0, 0], [0, 0]])
    images = tf.clip_by_value(images, -1., 1.)  # essential due to how tf.summary.image scales values
    grid = tf.contrib.gan.eval.image_grid(
        images,
        grid_shape=[images.get_shape()[0] // 4, 4],
        image_shape=images.get_shape().as_list()[1:3])
    if tf.executing_eagerly():
        return tf.contrib.summary.image(name, grid, step=step)
    else:
        return tf.summary.image(name, grid)
def backup_model_for_this_phase(save_paths, writer_path):
    """Copy the current model weight files, optimizer checkpoint directories,
    and the alpha/step text files into ``writer_path`` as a per-phase backup.

    :param save_paths: SavePaths with the live file/directory locations
    :param writer_path: destination directory (must not already contain the
        optimizer subdirectories — copytree requires a fresh target)
    """
    copy(save_paths.gen_model, writer_path)
    copy(save_paths.dis_model, writer_path)
    copy(save_paths.sampling_model, writer_path)
    # the mapping network is optional; only back it up if it was saved
    if os.path.exists(save_paths.mapping_network):
        copy(save_paths.mapping_network, writer_path)
    copy(save_paths.alpha, os.path.join(writer_path, "alpha.txt"))
    copy(save_paths.step, os.path.join(writer_path, "step.txt"))
    # optimizer state is a checkpoint directory, so copy the whole tree
    copytree(os.path.dirname(save_paths.gen_optim),
             os.path.join(writer_path, os.path.basename(os.path.dirname(save_paths.gen_optim))))
    copytree(os.path.dirname(save_paths.dis_optim),
             os.path.join(writer_path, os.path.basename(os.path.dirname(save_paths.dis_optim))))
    if os.path.exists(save_paths.mn_optim):
        copytree(os.path.dirname(save_paths.mn_optim),
                 os.path.join(writer_path, os.path.basename(os.path.dirname(save_paths.mn_optim))))
def save_alpha_and_step(alpha, step, save_paths):
    """Persist the current fade-in alpha and global step as plain-text files.

    :param alpha: float fade-in alpha to write to ``save_paths.alpha``
    :param step: int global step to write to ``save_paths.step``
    :param save_paths: SavePaths providing the two destination file paths
    """
    for path, value in ((save_paths.alpha, alpha), (save_paths.step, step)):
        with open(path, "w") as fh:
            fh.write(str(value))
def save_models_and_optimizers(sess, gen_model, dis_model, mapping_network, sampling_model,
                               optimizer_g, optimizer_d, optimizer_m, save_paths):
    """Save model weights (Keras h5) and optimizer slot variables (TF savers).

    :param sess: session if in graph mode, otherwise unused
    :param gen_model: generator with defined variables
    :param dis_model: discriminator with defined variables
    :param mapping_network: mapping network, or None to skip saving it
    :param sampling_model: EMA sampling generator
    :param optimizer_g: generator's optimizer
    :param optimizer_d: discriminator's optimizer
    :param optimizer_m: mapping network's optimizer
    :param save_paths: paths containing models, optimizers, and alpha on disk
    """
    gen_model.save_weights(save_paths.gen_model, save_format='h5')
    dis_model.save_weights(save_paths.dis_model, save_format='h5')
    sampling_model.save_weights(save_paths.sampling_model, save_format='h5')
    if mapping_network is not None:
        mapping_network.save_weights(save_paths.mapping_network, save_format='h5')
    # optimizer state (e.g. Adam moments) lives in optimizer.variables();
    # eager and graph modes need different Saver classes
    if tf.executing_eagerly():
        saver_d = tf.contrib.eager.Saver(var_list=optimizer_d.variables())
        saver_d.save(file_prefix=save_paths.dis_optim)
        saver_g = tf.contrib.eager.Saver(var_list=optimizer_g.variables())
        saver_g.save(file_prefix=save_paths.gen_optim)
        saver_g = tf.contrib.eager.Saver(var_list=optimizer_m.variables())
        saver_g.save(file_prefix=save_paths.mn_optim)
    else:
        saver_d = tf.train.Saver(var_list=optimizer_d.variables())
        saver_d.save(sess=sess, save_path=save_paths.dis_optim)
        saver_g = tf.train.Saver(var_list=optimizer_g.variables())
        saver_g.save(sess=sess, save_path=save_paths.gen_optim)
        # tf.train.Saver rejects an empty var_list, so guard the optional
        # mapping-network optimizer
        if len(optimizer_m.variables()) > 0:
            saver_g = tf.train.Saver(var_list=optimizer_m.variables())
            saver_g.save(sess=sess, save_path=save_paths.mn_optim)
def restore_models_and_optimizers(sess, gen_model, dis_model, mapping_network, sampling_model,
                                  optimizer_g, optimizer_d, optimizer_m, save_paths):
    """Restore any non-None models and optimizers from disk.

    :param sess: session if in graph mode, otherwise unused
    :param gen_model: generator with defined variables (or None to skip)
    :param dis_model: discriminator with defined variables (or None to skip)
    :param mapping_network: mapping network (or None to skip)
    :param sampling_model: EMA sampling generator (or None to skip)
    :param optimizer_g: generator's optimizer (or None to skip)
    :param optimizer_d: discriminator's optimizer (or None to skip)
    :param optimizer_m: mapping network's optimizer (or None to skip)
    :param save_paths: paths containing models, optimizers, and alpha on disk
    """
    if gen_model is not None:
        gen_model.load_weights(save_paths.gen_model, by_name=True)
    if dis_model is not None:
        dis_model.load_weights(save_paths.dis_model, by_name=True)
    if mapping_network is not None:
        mapping_network.load_weights(save_paths.mapping_network, by_name=True)
    if sampling_model is not None:
        sampling_model.load_weights(save_paths.sampling_model, by_name=True)
    # checkpoints may contain variables that no longer exist (e.g. after a
    # resolution change); restore only the intersection
    if optimizer_g is not None:
        vars_g = filter_vars_with_checkpoint(chkpt_path=save_paths.gen_optim,
                                             var_list=optimizer_g.variables())
    if optimizer_d is not None:
        vars_d = filter_vars_with_checkpoint(chkpt_path=save_paths.dis_optim,
                                             var_list=optimizer_d.variables())
    # Bug fix: compute this condition once and reuse it. Previously the eager
    # branch checked only optimizer_m and the checkpoint path, so when
    # mapping_network was None, vars_mn was referenced before assignment.
    restore_mn = (optimizer_m is not None and
                  mapping_network is not None and
                  os.path.exists(os.path.dirname(save_paths.mn_optim)))
    if restore_mn:
        vars_mn = filter_vars_with_checkpoint(chkpt_path=save_paths.mn_optim,
                                              var_list=optimizer_m.variables())
    if tf.executing_eagerly():
        if optimizer_d is not None:
            saver_d = tf.contrib.eager.Saver(var_list=vars_d)
            saver_d.restore(file_prefix=tf.train.latest_checkpoint(os.path.dirname(save_paths.dis_optim)))
        if optimizer_g is not None:
            saver_g = tf.contrib.eager.Saver(var_list=vars_g)
            saver_g.restore(file_prefix=tf.train.latest_checkpoint(os.path.dirname(save_paths.gen_optim)))
        if restore_mn:
            saver_m = tf.contrib.eager.Saver(var_list=vars_mn)
            saver_m.restore(file_prefix=tf.train.latest_checkpoint(os.path.dirname(save_paths.mn_optim)))
    else:
        if optimizer_d is not None:
            saver_d = tf.train.Saver(var_list=vars_d)
            saver_d.restore(sess=sess,
                            save_path=tf.train.latest_checkpoint(os.path.dirname(save_paths.dis_optim)))
        if optimizer_g is not None:
            saver_g = tf.train.Saver(var_list=vars_g)
            saver_g.restore(sess=sess,
                            save_path=tf.train.latest_checkpoint(os.path.dirname(save_paths.gen_optim)))
        if restore_mn:
            saver_m = tf.train.Saver(var_list=vars_mn)
            saver_m.restore(sess=sess,
                            save_path=tf.train.latest_checkpoint(os.path.dirname(save_paths.mn_optim)))
def restore_alpha_and_step(save_paths):
    """Read back the alpha/step values written by ``save_alpha_and_step``.

    :param save_paths: SavePaths; a None path leaves that value as None
    :return: (alpha, step) as (float or None, int or None)
    """
    alpha = None
    step = None
    if save_paths.step is not None:
        with open(save_paths.step, "r") as fh:
            step = int(fh.read())
    if save_paths.alpha is not None:
        with open(save_paths.alpha, "r") as fh:
            alpha = float(fh.read())
    return alpha, step
def weight_following_ema_ops(average_model, reference_model, decay=.99):
    """Ops that move each ``average_model`` weight toward the matching
    ``reference_model`` weight with an exponential moving average; weights
    that are not trainable are copied over directly instead of averaged.
    """
    return [tf.assign(average_weight, average_weight*decay + updated_weight*(1-decay)
                      if updated_weight.trainable else updated_weight)
            for average_weight, updated_weight in zip(average_model.weights, reference_model.weights)]
def train(hps, files):
ngpus = hps.ngpus
config = tf.ConfigProto()
if ngpus > 1:
try:
import horovod.tensorflow as hvd
config = tf.ConfigProto()
config.gpu_options.visible_device_list = str(hvd.local_rank())
except ImportError:
hvd = None
print("horovod not available, can only use 1 gpu")
ngpus = 1
# todo: organize
current_res_w = hps.current_res_w
res_multiplier = current_res_w // hps.start_res_w
current_res_h = hps.start_res_h * res_multiplier
tfrecord_input = any('.tfrecords' in fname for fname in files)
# if using tfrecord, assume dataset is duplicated across multiple resolutions
if tfrecord_input:
num_files = 0
for fname in [fname for fname in files | |
# -*- coding: utf-8 -*-
"""System operating cost plots.
This module plots figures related to the cost of operating the power system.
Plots can be broken down by cost categories, generator types etc.
@author: <NAME>
"""
import logging
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import marmot.config.mconfig as mconfig
from marmot.plottingmodules.plotutils.plot_data_helper import PlotDataHelper
from marmot.plottingmodules.plotutils.plot_exceptions import (MissingInputData, MissingZoneData)
class MPlot(PlotDataHelper):
"""production_cost MPlot class.
All the plotting modules use this same class name.
This class contains plotting methods that are grouped based on the
current module name.
    The production_cost.py module contains methods that are
    related to the cost of operating the power system.
MPlot inherits from the PlotDataHelper class to assist in creating figures.
"""
    def __init__(self, argument_dict: dict):
        """
        Args:
            argument_dict (dict): Dictionary containing all
                arguments passed from MarmotPlot.
        """
        # iterate over items in argument_dict and set as properties of class
        # see key_list in Marmot_plot_main for list of properties
        for prop in argument_dict:
            self.__setattr__(prop, argument_dict[prop])

        # Instantiation of MPlotHelperFunctions (attributes used here are set
        # dynamically from argument_dict above)
        super().__init__(self.Marmot_Solutions_folder, self.AGG_BY, self.ordered_gen,
                         self.PLEXOS_color_dict, self.Scenarios, self.ylabels,
                         self.xlabels, self.gen_names_dict, Region_Mapping=self.Region_Mapping)

        self.logger = logging.getLogger('marmot_plot.'+__name__)
        # figure dimensions and y-axis decimal precision from the user config
        self.x = mconfig.parser("figure_size","xdimension")
        self.y = mconfig.parser("figure_size","ydimension")
        self.y_axes_decimalpt = mconfig.parser("axes_options","y_axes_decimalpt")
    def prod_cost(self, start_date_range: str = None,
                  end_date_range: str = None, **_):
        """Plots total system net revenue and cost normalized by the installed capacity of the area.

        Total revenue is made up of reserve and energy revenues which are displayed in a stacked
        bar plot with total generation cost. Net revenue is represented by a dot.
        Each scenario is plotted as a separate bar.

        Args:
            start_date_range (str, optional): Defines a start date at which to represent data from.
                Defaults to None.
            end_date_range (str, optional): Defines a end date at which to represent data to.
                Defaults to None.

        Returns:
            dict: Dictionary containing the created plot and its data table.
        """
        outputs = {}

        # List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
        # required True/False, property name and scenarios required, scenarios must be a list.
        properties = [(True, "generator_Total_Generation_Cost", self.Scenarios),
                      (True, "generator_Pool_Revenue", self.Scenarios),
                      (True, "generator_Reserves_Revenue", self.Scenarios),
                      (True, "generator_Installed_Capacity", self.Scenarios)]

        # Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
        # with all required properties, returns a 1 if required data is missing
        check_input_data = self.get_formatted_data(properties)

        # Checks if all data required by plot is available, if 1 in list required data is missing
        if 1 in check_input_data:
            return MissingInputData()

        for zone_input in self.Zones:
            total_cost_chunk = []
            self.logger.info(f"{self.AGG_BY} = {zone_input}")

            for scenario in self.Scenarios:
                self.logger.info(f"Scenario = {scenario}")

                Total_Systems_Cost = pd.DataFrame()

                Total_Installed_Capacity = self["generator_Installed_Capacity"].get(scenario)
                #Check if zone has installed generation, if not skips
                try:
                    Total_Installed_Capacity = Total_Installed_Capacity.xs(zone_input,level=self.AGG_BY)
                except KeyError:
                    self.logger.warning(f"No installed capacity in : {zone_input}")
                    continue

                Total_Installed_Capacity = self.df_process_gen_inputs(Total_Installed_Capacity)
                Total_Installed_Capacity.reset_index(drop=True, inplace=True)
                # single-row capacity frame -> Series of capacity per gen type
                Total_Installed_Capacity = Total_Installed_Capacity.iloc[0]

                Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
                Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
                Total_Gen_Cost = self.df_process_gen_inputs(Total_Gen_Cost)
                # negated so cost plots below the axis against positive revenues
                Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)*-1
                Total_Gen_Cost = Total_Gen_Cost/Total_Installed_Capacity #Change to $/MW-year
                Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)

                Pool_Revenues = self["generator_Pool_Revenue"].get(scenario)
                Pool_Revenues = Pool_Revenues.xs(zone_input,level=self.AGG_BY)
                Pool_Revenues = self.df_process_gen_inputs(Pool_Revenues)
                Pool_Revenues = Pool_Revenues.sum(axis=0)
                Pool_Revenues = Pool_Revenues/Total_Installed_Capacity #Change to $/MW-year
                Pool_Revenues.rename("Energy_Revenues", inplace=True)

                ### Might change to Net Reserve Revenue at later date
                Reserve_Revenues = self["generator_Reserves_Revenue"].get(scenario)
                Reserve_Revenues = Reserve_Revenues.xs(zone_input,level=self.AGG_BY)
                Reserve_Revenues = self.df_process_gen_inputs(Reserve_Revenues)
                Reserve_Revenues = Reserve_Revenues.sum(axis=0)
                Reserve_Revenues = Reserve_Revenues/Total_Installed_Capacity #Change to $/MW-year
                Reserve_Revenues.rename("Reserve_Revenues", inplace=True)

                Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost, Pool_Revenues, Reserve_Revenues], axis=1, sort=False)
                Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
                # collapse per-gen-type values into one total per cost category
                Total_Systems_Cost = Total_Systems_Cost.sum(axis=0)
                Total_Systems_Cost = Total_Systems_Cost.rename(scenario)

                total_cost_chunk.append(Total_Systems_Cost)

            # one column per scenario, then transpose to one row per scenario
            Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=1, sort=False)
            Total_Systems_Cost_Out = Total_Systems_Cost_Out.T
            Total_Systems_Cost_Out.index = Total_Systems_Cost_Out.index.str.replace('_',' ')

            Total_Systems_Cost_Out = Total_Systems_Cost_Out/1000 #Change to $/kW-year
            # net revenue = revenues + (negative) generation cost
            Net_Revenue = Total_Systems_Cost_Out.sum(axis=1)

            #Checks if Net_Revenue contains data, if not skips zone and does not return a plot
            if Net_Revenue.empty:
                out = MissingZoneData()
                outputs[zone_input] = out
                continue

            # Data table of values to return to main program
            Data_Table_Out = Total_Systems_Cost_Out.add_suffix(" ($/KW-yr)")

            fig1, ax = plt.subplots(figsize=(self.x,self.y))
            # net revenue overlaid as dots on the stacked revenue/cost bars
            net_rev = plt.plot(Net_Revenue.index, Net_Revenue.values, color='black', linestyle='None', marker='o')
            Total_Systems_Cost_Out.plot.bar(stacked=True, edgecolor='black', linewidth='0.1', ax=ax)

            ax.spines['right'].set_visible(False)
            ax.spines['top'].set_visible(False)
            ax.set_ylabel('Total System Net Rev, Rev, & Cost ($/KW-yr)',  color='black', rotation='vertical')

            # Set x-tick labels
            if len(self.custom_xticklabels) > 1:
                tick_labels = self.custom_xticklabels
            else:
                tick_labels = Total_Systems_Cost_Out.index
            PlotDataHelper.set_barplot_xticklabels(tick_labels, ax=ax)

            ax.tick_params(axis='y', which='major', length=5, width=1)
            ax.tick_params(axis='x', which='major', length=5, width=1)
            ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))
            ax.margins(x=0.01)

            handles, labels = ax.get_legend_handles_labels()
            ax.legend(reversed(handles), reversed(labels), loc='upper center',bbox_to_anchor=(0.5,-0.15),
                      facecolor='inherit', frameon=True, ncol=3)

            #Legend 1
            leg1 = ax.legend(reversed(handles), reversed(labels), loc='lower left',bbox_to_anchor=(1,0),
                             facecolor='inherit', frameon=True)
            #Legend 2
            ax.legend(net_rev, ['Net Revenue'], loc='center left',bbox_to_anchor=(1, 0.9),
                      facecolor='inherit', frameon=True)

            # Manually add the first legend back (calling legend() twice replaces it)
            ax.add_artist(leg1)
            if mconfig.parser("plot_title_as_region"):
                ax.set_title(zone_input)
            outputs[zone_input] = {'fig': fig1, 'data_table': Data_Table_Out}
        return outputs
def sys_cost(self, start_date_range: str = None,
             end_date_range: str = None, **_):
    """Creates a stacked bar plot of Total Generation Cost and Cost of Unserved Energy.

    Plot only shows totals and is NOT broken down into technology or cost type
    specific values.
    Each scenario is plotted as a separate bar.

    Args:
        start_date_range (str, optional): Defines a start date at which to represent data from.
            Defaults to None.
        end_date_range (str, optional): Defines a end date at which to represent data to.
            Defaults to None.

    Returns:
        dict: Dictionary keyed by zone containing the created plot and its data
            table, or a MissingZoneData placeholder for zones without data.
    """
    outputs = {}
    # Unserved-energy cost is stored per zone or per region depending on the
    # aggregation level configured for this run.
    if self.AGG_BY == 'zone':
        agg = 'zone'
    else:
        agg = 'region'
    # List of properties needed by the plot, properties are a set of tuples and contain 3 parts:
    # required True/False, property name and scenarios required, scenarios must be a list.
    properties = [(True,"generator_Total_Generation_Cost",self.Scenarios),
                  (False,f"{agg}_Cost_Unserved_Energy",self.Scenarios)]
    # Runs get_formatted_data within PlotDataHelper to populate PlotDataHelper dictionary
    # with all required properties, returns a 1 if required data is missing
    check_input_data = self.get_formatted_data(properties)
    # Checks if all data required by plot is available, if 1 in list required data is missing
    if 1 in check_input_data:
        return MissingInputData()
    for zone_input in self.Zones:
        total_cost_chunk = []
        self.logger.info(f"{self.AGG_BY} = {zone_input}")
        for scenario in self.Scenarios:
            self.logger.info(f"Scenario = {scenario}")
            Total_Systems_Cost = pd.DataFrame()
            Total_Gen_Cost = self["generator_Total_Generation_Cost"].get(scenario)
            try:
                Total_Gen_Cost = Total_Gen_Cost.xs(zone_input,level=self.AGG_BY)
            except KeyError:
                # Zone has no generators in this scenario; skip the scenario.
                self.logger.warning(f"No Generators found for : {zone_input}")
                continue
            Total_Gen_Cost = Total_Gen_Cost.sum(axis=0)
            Total_Gen_Cost.rename("Total_Gen_Cost", inplace=True)
            Cost_Unserved_Energy = self[f"{agg}_Cost_Unserved_Energy"][scenario]
            if Cost_Unserved_Energy.empty:
                # Optional property absent: substitute an all-zero copy of the
                # generation-cost frame so the concat below still lines up.
                Cost_Unserved_Energy = self["generator_Total_Generation_Cost"][scenario].copy()
                Cost_Unserved_Energy.iloc[:,0] = 0
            Cost_Unserved_Energy = Cost_Unserved_Energy.xs(zone_input,level=self.AGG_BY)
            Cost_Unserved_Energy = Cost_Unserved_Energy.sum(axis=0)
            Cost_Unserved_Energy.rename("Cost_Unserved_Energy", inplace=True)
            Total_Systems_Cost = pd.concat([Total_Systems_Cost, Total_Gen_Cost, Cost_Unserved_Energy],
                                           axis=1, sort=False)
            Total_Systems_Cost.columns = Total_Systems_Cost.columns.str.replace('_',' ')
            Total_Systems_Cost.rename({0:scenario}, axis='index', inplace=True)
            total_cost_chunk.append(Total_Systems_Cost)
        # Checks if gen_cost_out_chunks contains data, if not skips zone and does not return a plot
        if not total_cost_chunk:
            outputs[zone_input] = MissingZoneData()
            continue
        Total_Systems_Cost_Out = pd.concat(total_cost_chunk, axis=0, sort=False)
        Total_Systems_Cost_Out = Total_Systems_Cost_Out/1000000 #Convert cost to millions
        Total_Systems_Cost_Out.index = Total_Systems_Cost_Out.index.str.replace('_',' ')
        #Checks if Total_Systems_Cost_Out contains data, if not skips zone and does not return a plot
        if Total_Systems_Cost_Out.empty:
            outputs[zone_input] = MissingZoneData()
            continue
        # Data table of values to return to main program
        Data_Table_Out = Total_Systems_Cost_Out.add_suffix(" (Million $)")
        fig2, ax = plt.subplots(figsize=(self.x,self.y))
        Total_Systems_Cost_Out.plot.bar(stacked=True, edgecolor='black', linewidth='0.1', ax=ax)
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.set_ylabel('Total System Cost (Million $)', color='black', rotation='vertical')
        # Set x-tick labels; user-supplied labels take precedence when present.
        if len(self.custom_xticklabels) > 1:
            tick_labels = self.custom_xticklabels
        else:
            tick_labels = Total_Systems_Cost_Out.index
        PlotDataHelper.set_barplot_xticklabels(tick_labels, ax=ax)
        ax.tick_params(axis='y', which='major', length=5, width=1)
        ax.tick_params(axis='x', which='major', length=5, width=1)
        ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(lambda x, p: format(x, f',.{self.y_axes_decimalpt}f')))
        ax.margins(x=0.01)
        handles, labels = ax.get_legend_handles_labels()
        # Reverse legend entries so they match the visual stacking order.
        ax.legend(reversed(handles), reversed(labels), loc='lower left',bbox_to_anchor=(1,0),
                  facecolor='inherit', frameon=True)
        if mconfig.parser("plot_title_as_region"):
            ax.set_title(zone_input)
        cost_totals = Total_Systems_Cost_Out.sum(axis=1) #holds total of each bar
        #inserts values into bar stacks
        for patch in ax.patches:
            width, height = patch.get_width(), patch.get_height()
            # Skip labels for segments too small to read (values are million $).
            if height<=1:
                continue
            x, y = patch.get_xy()
            ax.text(x+width/2,
                    y+height/2,
                    '{:,.0f}'.format(height),
                    horizontalalignment='center',
                    verticalalignment='center', fontsize=12)
        #inserts total bar value above each bar
        # NOTE(review): only the first len(cost_totals) patches (one per bar)
        # are labelled; the break below stops before cost_totals is
        # over-indexed when there are more patches than bars.
        for k, patch in enumerate(ax.patches):
            height = cost_totals[k]
            width = patch.get_width()
            x, y = patch.get_xy()
            ax.text(x+width/2,
                    y+height + 0.05*max(ax.get_ylim()),
                    '{:,.0f}'.format(height),
                    horizontalalignment='center',
                    verticalalignment='center', fontsize=15, color='red')
            if k>=len(cost_totals)-1:
                break
        outputs[zone_input] = {'fig': fig2, 'data_table': Data_Table_Out}
    return outputs
def detailed_gen_cost(self, start_date_range: str = None,
end_date_range: str = None, **_):
"""Creates stacked bar plot of total generation cost by cost type (fuel, emission, start cost etc.)
Creates a more deatiled system cost plot.
Each sceanrio is plotted as a separate bar.
Args:
start_date_range (str, optional): Defines a start date at which to represent data from.
Defaults to None.
end_date_range (str, optional): Defines a end date at which | |
routine_name: str,
routine_parameter: dict = None,
function_version: str = None,
function_async: bool = False
) -> object:
'''
Invoke given the specified routine of the cloud function.
Args:
region_id: Data center unique identifier.
namespace_name: Namespace name.
function_name: Serverless cloud function name.
routine_name: Routine name for serverless cloud functions.
routine_parameter: Routine parameters for serverless cloud functions.
function_version: Serverless cloud function version name.
function_async: Make Serverless cloud function asynchronous invoke.
Returns:
Returns the actual return value of the given routine for the
given serverless cloud function.
Raises:
ValueError: Parameter values are not as expected.
InvokeError: Invoke Cloud Function failed.
ActionError: Invoke Cloud Function error.
'''
return self._get_event_loop().run_until_complete(self.routine_invoke_async(
region_id, namespace_name, function_name, routine_name,
routine_parameter, function_version, function_async
))
async def select_function_async(self,
                                region_id: str,
                                namespace_name: str,
                                function_name: str,
                                function_version: str = None,
                                function_async: bool = False
                                ):
    '''
    Build a Python native asynchronous abstract function for a
    Cloud Function.

    The returned callable accepts arbitrary keyword arguments, which are
    forwarded as the invoke event to ``easy_invoke_async``.

    Args:
        region_id: Unique identifier of the data center campus.
        namespace_name: Name of the owning namespace.
        function_name: Cloud Function name.
        function_version: Cloud Function version.
        function_async: Make Cloud Function asynchronous invoke.

    Returns:
        Returns a Python native asynchronous function instance.
    '''
    def abstract_function(**function_event):
        # Forward every keyword argument as the invoke event.
        return self.easy_invoke_async(region_id, namespace_name,
                                      function_name, function_event,
                                      function_version, function_async)
    return abstract_function
def select_function(self,
                    region_id: str,
                    namespace_name: str,
                    function_name: str,
                    function_version: str = None,
                    function_async: bool = False
                    ):
    '''
    Build a Python native synchronous abstract function for a
    Cloud Function.

    The returned callable accepts arbitrary keyword arguments, which are
    forwarded as the invoke event to ``easy_invoke``.

    Args:
        region_id: Unique identifier of the data center campus.
        namespace_name: Name of the owning namespace.
        function_name: Cloud Function name.
        function_version: Cloud Function version.
        function_async: Make Cloud Function asynchronous invoke.

    Returns:
        Returns a Python native synchronous function instance.
    '''
    def abstract_function(**function_event):
        # Forward every keyword argument as the invoke event.
        return self.easy_invoke(region_id, namespace_name, function_name,
                                function_event, function_version,
                                function_async)
    return abstract_function
async def select_routine_async(self,
                               region_id: str,
                               namespace_name: str,
                               function_name: str,
                               routine_name: str,
                               function_version: str = None,
                               function_async: bool = False
                               ):
    '''
    Build a Python native asynchronous function bound to a given
    routine of a serverless cloud function.

    The returned callable accepts arbitrary keyword arguments, which are
    forwarded as the routine parameters to ``routine_invoke_async``.

    Args:
        region_id: Data center unique identifier.
        namespace_name: Namespace name.
        function_name: Serverless cloud function name.
        routine_name: Routine name for serverless cloud functions.
        function_version: Serverless cloud function version name.
        function_async: Make Serverless cloud function asynchronous invoke.

    Returns:
        Returns a Python native asynchronous function instance.
    '''
    def routine_proxy(**routine_parameter):
        # Forward keyword arguments as the routine's parameter dict.
        return self.routine_invoke_async(
            region_id, namespace_name, function_name, routine_name,
            routine_parameter, function_version, function_async
        )
    return routine_proxy
def select_routine(self,
                   region_id: str,
                   namespace_name: str,
                   function_name: str,
                   routine_name: str,
                   function_version: str = None,
                   function_async: bool = False
                   ):
    '''
    Build a Python native synchronous function bound to a given
    routine of a serverless cloud function.

    The returned callable accepts arbitrary keyword arguments, which are
    forwarded as the routine parameters to ``routine_invoke``.

    Args:
        region_id: Data center unique identifier.
        namespace_name: Namespace name.
        function_name: Serverless cloud function name.
        routine_name: Routine name for serverless cloud functions.
        function_version: Serverless cloud function version name.
        function_async: Make Serverless cloud function asynchronous invoke.

    Returns:
        Returns a Python native synchronous function instance.
    '''
    def routine_proxy(**routine_parameter):
        # Forward keyword arguments as the routine's parameter dict.
        return self.routine_invoke(
            region_id, namespace_name, function_name, routine_name,
            routine_parameter, function_version, function_async
        )
    return routine_proxy
def bind_function(self,
                  region_id: str,
                  namespace_name: str,
                  function_name: str,
                  function_version: str = None,
                  function_async: bool = False,
                  include_attributes: list = None
                  ) -> object:
    '''
    Create a Python native synchronous or asynchronous function
    binding for a given Cloud Function.

    The bound object must be a function, a class method, or a class
    instance method, otherwise the behavior is undefined.
    The parameter type of the bound callable object must be JSON
    serializable, or a JSON serialization related exception will be thrown.

    Args:
        region_id: Unique identifier of the data center campus.
        namespace_name: Name of the owning namespace.
        function_name: Cloud Function name.
        function_version: Cloud Function version.
        function_async: Make Cloud Function asynchronous invoke.
        include_attributes: Attribute names read from the bound method's
            instance (or class) and merged into the Cloud Function invoke
            event first. Ignored when the bound object is not a class
            method or a class instance method.

    Returns:
        Returns the Python decorator handler function.

    Raises:
        TypeError: The bound object is not supported.
    '''
    def decorator_handler(bound_function: object):
        if not bound_function or not callable(bound_function):
            raise TypeError('invalid binding object type')

        def invoke_handler(*args, **kwargs):
            event: dict = {}
            names: list = inspect.getfullargspec(bound_function).args
            # Methods carry the instance/class as their first positional
            # argument; strip it, optionally harvesting attributes from it.
            if names and names[0] in ('self', 'cls'):
                if include_attributes:
                    for attribute in include_attributes:
                        event[attribute] = getattr(args[0], attribute)
                names = names[1:]
                args = args[1:]
            for position, value in enumerate(args):
                event[names[position]] = value
            event.update(kwargs)
            # Coroutine functions route through the async client entry point.
            invoker = (self.easy_invoke_async
                       if inspect.iscoroutinefunction(bound_function)
                       else self.easy_invoke)
            return invoker(region_id, namespace_name, function_name,
                           event, function_version, function_async)
        return invoke_handler
    return decorator_handler
def bind_routine(self,
                 region_id: str,
                 namespace_name: str,
                 function_name: str,
                 function_version: str = None,
                 function_async: bool = False,
                 routine_name: str = None
                 ) -> object:
    '''
    Binds a given Python native synchronous or asynchronous function
    to a given routine of a serverless cloud function.

    Args:
        region_id: Data center unique identifier.
        namespace_name: Namespace name of serverless cloud function.
        function_name: Serverless cloud function name.
        function_version: Serverless cloud function version name.
        function_async: Make Serverless Cloud Function asynchronous invoke.
        routine_name: Routine name; defaults to the bound function's
            own ``__name__`` when omitted.

    Raises:
        ValueError: Parameter values are not as expected.
        TypeError: The bound object is not supported.
    '''
    if routine_name and not isinstance(routine_name, str):
        raise ValueError('<routine_name> value invalid')

    def decorator_handler(bound_function: object) -> object:
        if not bound_function or not callable(bound_function):
            raise TypeError('invalid binding object type')
        # NOTE(review): callables that are not plain functions (e.g. objects
        # implementing __call__) are rejected with ValueError here, matching
        # the pre-existing contract even though TypeError might seem apter.
        if not inspect.isfunction(bound_function):
            raise ValueError('invalid binding object type')
        invoker: object = (self.routine_invoke_async
                           if inspect.iscoroutinefunction(bound_function)
                           else self.routine_invoke)

        def invoke_handler(*args, **kwargs):
            parameters: dict = {}
            names: list = inspect.getfullargspec(bound_function).args
            for position, value in enumerate(args):
                parameters[names[position]] = value
            parameters.update(kwargs)
            return invoker(region_id, namespace_name, function_name,
                           routine_name if routine_name else bound_function.__name__,
                           parameters, function_version, function_async)
        return invoke_handler
    return decorator_handler
def _schedule_created_callback(self,
                               function_schedule: FunctionSchedule
                               ):
    '''
    Callback fired when a timed invoke task has been created.

    Bumps the outstanding-schedule counter consumed by run_schedule
    and _schedule_completed_callback.
    '''
    self.__schedule_invoke_count = self.__schedule_invoke_count + 1
def _schedule_completed_callback(self,
                                 function_schedule: FunctionSchedule
                                 ):
    '''
    Callback fired when a timed invoke task has completed or been
    cancelled.

    If all timed invoke tasks have finished while the instance method
    run_schedule is blocked, attempt to stop the event loop so that
    run_schedule can return.
    '''
    self.__schedule_invoke_count -= 1
    no_tasks_left = self.__schedule_invoke_count == 0
    if self.__schedule_invoke_waited and no_tasks_left:
        # Release the blocked run_schedule() call.
        self._get_event_loop().stop()
def schedule_invoke(self,
                    region_id: str,
                    namespace_name: str,
                    function_name: str,
                    function_event: dict = None,
                    function_version: str = None,
                    function_async: bool = False,
                    invoke_timestamp: int = None,
                    invoked_callback: object = None
                    ) -> FunctionSchedule:
    '''
    Schedule a Cloud Function to invoke at a specified time.

    Args:
        region_id: Unique identifier of the data center campus.
        namespace_name: Name of the owning namespace.
        function_name: Cloud Function name.
        function_event: Cloud Function invoke event.
        function_version: Cloud Function version.
        function_async: Make Cloud Function asynchronous invoke.
        invoke_timestamp: UNIX timestamp at which the invoke begins;
            when omitted, defaults to roughly 3 seconds from now.
        invoked_callback: Callback function after Invoke is over.

    Returns:
        Returns an instance of the FunctionSchedule type representing this schedule.

    Raises:
        ValueError: Parameter values are not as expected.
    '''
    if not invoke_timestamp:
        # Default: fire about 3 seconds from now.
        invoke_timestamp = int(time.time()) + 3
    elif not isinstance(invoke_timestamp, int):
        raise ValueError('<invoke_timestamp> value invalid')
    invoke_context = {
        'region_id': region_id,
        'namespace_name': namespace_name,
        'function_name': function_name,
        'function_event': function_event,
        'function_version': function_version,
        'function_async': function_async
    }
    callback_context = {
        'created': self._schedule_created_callback,
        'completed': self._schedule_completed_callback,
        'invoked': invoked_callback
    }
    return FunctionSchedule(
        function_client = self,
        invoke_context = invoke_context,
        invoke_timestamp = invoke_timestamp,
        callback_context = callback_context
    )
def run_schedule(self):
    '''
    Run the created scheduled invoke tasks.

    Blocks by running the event loop until _schedule_completed_callback
    observes that no scheduled invokes remain and stops the loop.

    Note that this method should ensure that it is used only
    in synchronous programming mode, otherwise the behavior
    is undefined.

    Raises:
        StatusError: There are no tasks to run or are running.
    '''
    if self.__schedule_invoke_count < 1:
        raise errors.StatusError('no scheduled invoke tasks')
    if self._get_event_loop().is_running():
        raise errors.StatusError('cannot be run repeatedly')
    # Mark that a caller is blocked in run_forever so the completion
    # callback knows to stop the loop once the count reaches zero.
    self.__schedule_invoke_waited = True
    try:
        self._get_event_loop().run_forever()
    finally:
        # Always clear the flag, even if the loop exits via an exception.
        self.__schedule_invoke_waited = False
# Thread-local storage holding at most one built-in Client per thread;
# lazily populated by fetch_client and replaceable via set_client.
__thread_local_attributes: threading.local = threading.local()
def fetch_client() -> Client:
    '''
    Get the built-in serverless cloud function product client
    instance for the current thread. If no built-in client instance
    has ever been created in the current thread, a new one is
    created with default settings and cached for reuse.

    Returns:
        Returns a serverless cloud function product client instance.
    '''
    # No ``global`` statement is needed: the module-level thread-local
    # object is only attribute-mutated, never rebound.
    if not hasattr(__thread_local_attributes, 'builtin_client'):
        __thread_local_attributes.builtin_client = Client()
    return __thread_local_attributes.builtin_client
def set_client(
function_client: Client
):
'''
Sets the given serverless cloud function product client instance
| |
<reponame>StannisZhou/capacity_hopping<filename>golf_course/estimate/capacity.py
import multiprocessing as mp
import numpy as np
from scipy.cluster.vq import kmeans2
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
import golf_course.estimate.numba as nestimate
from golf_course.utils import uniform_on_sphere
from tqdm import tqdm
def estimate_capacity(
    target,
    inner,
    outer,
    num_points,
    num_clusters,
    num_trials,
    time_step=1e-5,
    use_parallel=True,
    n_split=4,
    use_analytical_gradients=True,
    estimate_gradients=False,
    n_surfaces_gradients_estimation=15,
):
    """Estimate the capacity of ``target`` from hitting probabilities on
    concentric surfaces.

    Parameters
    ----------
    target:
        Target region object; must expose ``center``, ``radiuses`` (three
        increasing radii) and ``get_constant()``.
    inner: int
        The number of intermediate layers we are going to use for calculating the inner rate
    outer: int
        The number of intermediate layers we are going to use for calculating the outer rate
    num_points: int
        The number of points we are going to have on each layer. We are going to form clusters
        based on these points.
    num_clusters: int
        The number of clusters we are going to have on each layer
    num_trials: int
        The number of trials we are going to run for each bin in order to decide the transition probabilities
    time_step : float
        The time step we are going to use for the simulation. Default to 1e-5
    use_parallel : bool
        Whether we are going to make the code parallel or not
    n_split : int
        The number of splits we are going to use for making things parallel. Default to 4
    use_analytical_gradients : bool
        Whether we want to use the analytically derived gradients; when False,
        ``estimate_gradients`` must be True.
    estimate_gradients : bool
        Whether to numerically estimate the gradients of the hitting
        probability near the middle surface.
    n_surfaces_gradients_estimation : int
        The number of surfaces we are going to use for numerically estimating the gradients

    Returns
    -------
    capacity : float
        The estimated capacity (scaled by ``target.get_constant()``).
    gradients : np.ndarray or None
        Numerically estimated gradients, or None when
        ``estimate_gradients`` is False.
    """
    # Numerical gradients are mandatory when the analytical form is not used.
    if not use_analytical_gradients:
        assert estimate_gradients
    hitting_prob, cluster_centers, cluster_labels = estimate_hitting_prob(
        target,
        target.radiuses,
        inner,
        outer,
        num_points,
        num_clusters,
        num_trials,
        time_step,
        use_parallel,
        n_split,
    )
    # Keep only the clustering on the middle surface (radius radiuses[1]).
    middle_index = outer + 1
    cluster_labels = cluster_labels[middle_index]
    cluster_centers = cluster_centers[middle_index]
    n_points_in_clusters = np.array(
        [np.sum(cluster_labels == ii) for ii in range(num_clusters)]
    )
    n_dim = target.center.size
    dA = target.radiuses[1]
    if estimate_gradients:
        # Estimate |d hitting_prob / dr| with a thin shell of width `delta`
        # just outside the middle surface.
        delta = (target.radiuses[2] - target.radiuses[1]) / (
            n_surfaces_gradients_estimation + 2
        )
        radiuses_gradients_estimation = np.array(
            [target.radiuses[1], target.radiuses[1] + delta, target.radiuses[2]]
        )
        hitting_prob_gradients, cluster_centers_gradients, cluster_labels_gradients = estimate_hitting_prob(
            target,
            radiuses_gradients_estimation,
            0,
            n_surfaces_gradients_estimation,
            num_points,
            num_clusters,
            num_trials,
            time_step,
            use_parallel,
            n_split,
        )
        cluster_centers_gradients = cluster_centers_gradients[
            n_surfaces_gradients_estimation + 1
        ]
        # Match clusters between the two runs by minimizing total
        # center-to-center distance (Hungarian assignment).
        _, ind = linear_sum_assignment(
            cdist(cluster_centers, cluster_centers_gradients)
        )
        hitting_prob_gradients = hitting_prob_gradients[ind]
        gradients = np.abs(hitting_prob_gradients - 1) / delta
    else:
        gradients = None
    if use_analytical_gradients:
        # Closed-form radial factor for the spherically symmetric case.
        rAtilde = target.radiuses[2]
        capacity = (
            (n_dim - 2)
            / (dA ** (2 - n_dim) - rAtilde ** (2 - n_dim))
            * np.sum(n_points_in_clusters * hitting_prob)
            / num_points
        )
    else:
        capacity = (
            dA ** (n_dim - 1)
            * np.sum(n_points_in_clusters * hitting_prob * gradients)
            / num_points
        )
    capacity *= target.get_constant()
    return capacity, gradients
def estimate_hitting_prob(
    target,
    radiuses,
    inner,
    outer,
    num_points,
    num_clusters,
    num_trials,
    time_step,
    use_parallel,
    n_split,
):
    """Estimate per-cluster hitting probabilities on the intermediate surfaces.

    Propagates and clusters sample points, runs additional simulations to
    obtain forward/backward transition probabilities, and converts those into
    hitting probabilities.

    Returns a tuple ``(hitting_prob, cluster_centers, cluster_labels)`` where
    the latter two are indexed by surface.
    """
    centers, labels, points, propagation_stats = _propagate_and_cluster(
        target, radiuses, inner, outer, num_points, num_clusters, time_step
    )
    fwd, bwd, labels = _get_data_driven_binning_transition_probabilities(
        target,
        radiuses,
        inner,
        outer,
        num_clusters,
        num_trials,
        time_step,
        use_parallel,
        n_split,
        centers,
        labels,
        points,
        propagation_stats,
    )
    print('Transition probabilities calculation done.')
    hitting_prob = _get_data_driven_binning_hitting_probability(
        fwd, bwd, inner, outer, num_clusters
    )
    return hitting_prob, centers, labels
def _get_data_driven_binning_transition_probabilities(
    target,
    radiuses,
    inner,
    outer,
    num_clusters,
    num_trials,
    time_step,
    use_parallel,
    n_split,
    cluster_centers,
    cluster_labels,
    propagated_points,
    statistics_from_propagation,
):
    """Run additional simulations and return transition probabilities.

    Thin wrapper around ``_additional_simulations_for_transition_probabilities``
    that also passes ``cluster_labels`` through unchanged so callers keep a
    handle on the (surface-indexed) labels alongside the probabilities.

    The previous version initialized ``forward_probabilities`` and
    ``backward_probabilities`` to empty lists that were immediately
    overwritten; that dead code has been removed.
    """
    forward_probabilities, backward_probabilities = _additional_simulations_for_transition_probabilities(
        target,
        radiuses,
        cluster_centers,
        cluster_labels,
        propagated_points,
        statistics_from_propagation,
        inner,
        outer,
        num_clusters,
        num_trials,
        time_step,
        use_parallel,
        n_split,
    )
    return forward_probabilities, backward_probabilities, cluster_labels
def _propagate_and_cluster(
    target, radiuses, inner, outer, num_points, num_clusters, time_step
):
    """Seed points on the middle surface, propagate them to every other
    surface, cluster each surface's points, and collect transition statistics.

    Returns ``(cluster_centers, cluster_labels, propagated_points,
    statistics_from_propagation)``, each indexed by surface.
    """
    center = target.center
    # Initial sample: uniform points on the middle sphere (radius radiuses[1]).
    initial_locations = uniform_on_sphere(
        center, radiuses[1], num_samples=num_points, reflecting_boundary_radius=1
    )
    num_surfaces = inner + outer + 3
    middle_index = outer + 1
    surfaces = _get_surfaces(radiuses, inner, outer)
    assert len(surfaces) == num_surfaces, 'The generated surfaces are not right.'
    # Propagate the points and gather information
    propagated_points = [[] for _ in range(num_surfaces)]
    propagated_points[middle_index] = initial_locations
    propagated_information = []
    extra_information = []
    print('Doing propagation.')
    # Do the initial propagation from the middle sphere
    _propagate_and_get_info(
        target,
        surfaces,
        propagated_points,
        propagated_information,
        extra_information,
        middle_index,
        num_points,
        time_step,
    )
    # Do the forward propagation, from the middle sphere to the inner sphere
    for index in range(middle_index + 1, num_surfaces - 1):
        _propagate_and_get_info(
            target,
            surfaces,
            propagated_points,
            propagated_information,
            extra_information,
            index,
            num_points,
            time_step,
        )
    # Do the backward propagation, from the middle sphere to the outer sphere
    for index in range(middle_index - 1, 0, -1):
        _propagate_and_get_info(
            target,
            surfaces,
            propagated_points,
            propagated_information,
            extra_information,
            index,
            num_points,
            time_step,
        )
    # Do the clustering (k-means on each surface's point cloud; 'missing'
    # raises if any cluster ends up empty).
    cluster_centers = [[] for _ in range(num_surfaces)]
    cluster_labels = [[] for _ in range(num_surfaces)]
    print('Doing clustering.')
    for ii in tqdm(range(num_surfaces)):
        cluster_centers[ii], cluster_labels[ii] = kmeans2(
            propagated_points[ii], num_clusters, minit='points', missing='raise'
        )
    # Get the statistics
    print('Getting statistics.')
    statistics_from_propagation = _collect_statistics(
        cluster_centers,
        cluster_labels,
        propagated_information,
        extra_information,
        inner,
        outer,
        num_clusters,
    )
    return (
        cluster_centers,
        cluster_labels,
        propagated_points,
        statistics_from_propagation,
    )
def _get_surfaces(radiuses, inner, outer):
inner_surfaces = np.linspace(radiuses[1], radiuses[0], inner + 2)
outer_surfaces = np.linspace(radiuses[2], radiuses[1], outer + 2)
surfaces = np.concatenate((outer_surfaces, inner_surfaces[1:]))
return surfaces
def _propagate_and_get_info(
    target,
    surfaces,
    propagated_points,
    propagated_information,
    extra_information,
    index,
    num_points,
    time_step,
):
    """Propagate points from surface ``index`` to its two neighbours until
    both neighbouring surfaces hold ``num_points`` points.

    Mutates its arguments in place:

    - ``propagated_points[index +/- 1]``: filled with interpolated crossing
      points and frozen into ``np.ndarray`` once full.
    - ``propagated_information``: one int record per stored crossing:
      (source surface, source point index, destination surface,
      destination point index).
    - ``extra_information``: crossings that landed on an already-full
      surface; these keep the raw coordinates instead of a stored index:
      (source surface, source point index, destination surface, coords...).
    """
    assert (
        propagated_points[index].shape[0] == num_points
    ), 'Number of points not right.'
    # The walk is absorbed at the two neighbouring surfaces' radii.
    boundary_radiuses = np.array([surfaces[index + 1], surfaces[index - 1]])
    with tqdm() as pbar:
        batch_size = 500
        while True:
            flag = False  # set once both neighbour surfaces are full
            # Resample starting points (with replacement) from this surface.
            random_indices = np.random.randint(0, num_points, size=(batch_size,))
            initial_locations = propagated_points[index][random_indices]
            ii = 0  # position within this batch; pairs with random_indices
            for initial_location in initial_locations:
                previous_location, current_location, target_flag = nestimate.advance_within_concentric_spheres(
                    initial_location, target, boundary_radiuses, time_step, 1
                )
                # target_flag selects the neighbour the walk exited towards
                # (+1 is the next, smaller-radius surface; -1 the larger one).
                if target_flag:
                    indicator = 1
                else:
                    indicator = -1
                # Project the crossing point onto the exact surface radius.
                final_point = nestimate._interpolate(
                    previous_location,
                    current_location,
                    target.center,
                    surfaces[index + indicator],
                )
                if len(propagated_points[index + indicator]) == num_points:
                    # Destination surface already full: record the raw
                    # coordinates for later nearest-cluster assignment.
                    extra_temp = np.concatenate(
                        (
                            np.array([index, random_indices[ii], index + indicator]),
                            final_point,
                        )
                    )
                    extra_information.append(extra_temp)
                else:
                    propagated_points[index + indicator].append(final_point)
                    index_temp = len(propagated_points[index + indicator]) - 1
                    propagated_information.append(
                        np.array(
                            [index, random_indices[ii], index + indicator, index_temp],
                            dtype=int,
                        )
                    )
                pbar.update()
                ii += 1
                if (
                    len(propagated_points[index + 1]) == num_points
                    and len(propagated_points[index - 1]) == num_points
                ):
                    # Both neighbours are full: freeze them as arrays
                    # and stop simulating.
                    propagated_points[index + 1] = np.array(
                        propagated_points[index + 1]
                    )
                    propagated_points[index - 1] = np.array(
                        propagated_points[index - 1]
                    )
                    flag = True
                    break
            if flag:
                break
def _collect_statistics(
    cluster_centers,
    cluster_labels,
    propagated_information,
    extra_information,
    inner,
    outer,
    num_clusters,
):
    """Aggregate transition records into per-surface, per-cluster lists.

    Returns a nested list ``stats[surface][cluster]`` of
    ``(destination surface, destination cluster)`` tuples.
    """
    total_surfaces = inner + outer + 3
    stats = [
        [[] for _ in range(num_clusters)]
        for _ in range(total_surfaces)
    ]
    # Records with stored endpoints already have cluster labels...
    _process_propagated_info(cluster_labels, stats, propagated_information)
    # ...overflow records carry raw coordinates and are assigned to the
    # nearest destination-surface cluster center first.
    _process_extra_info(cluster_centers, cluster_labels, stats, extra_information)
    return stats
def _process_extra_info(
    cluster_centers, cluster_labels, statistics_from_propagation, extra_information
):
    """Fold overflow transition records into the statistics structure.

    Each record is a flat float array of (source surface, source point index,
    destination surface, destination coordinates...). The destination point
    was never stored, so it is assigned here to the nearest cluster center on
    the destination surface.
    """
    for record in extra_information:
        src_surface, src_point, dst_surface = record[:3].astype(int)
        dst_cluster = _assign_clusters(record[3:], cluster_centers[dst_surface])
        src_cluster = cluster_labels[src_surface][src_point]
        statistics_from_propagation[src_surface][src_cluster].append(
            (dst_surface, dst_cluster)
        )
def _assign_clusters(point, centers):
distances = np.linalg.norm(point - centers, ord=2, axis=1)
index = np.argmin(distances)
return index
def _process_propagated_info(
cluster_labels, statistics_from_propagation, propagated_information
):
for info in propagated_information:
statistics_from_propagation[info[0]][cluster_labels[info[0]][info[1]]].append(
(info[2], cluster_labels[info[2]][info[3]])
)
def _additional_simulations_for_transition_probabilities(
    target,
    radiuses,
    cluster_centers,
    cluster_labels,
    propagated_points,
    statistics_from_propagation,
    inner,
    outer,
    num_clusters,
    num_trials,
    time_step,
    use_parallel,
    n_split,
):
    """Top up the propagation statistics with additional simulations for
    every (surface, cluster) pair, then convert the aggregated statistics
    into forward and backward transition probabilities.
    """
    surfaces = _get_surfaces(radiuses, inner, outer)
    num_surfaces = len(surfaces)
    if use_parallel:
        # Replace the inner per-cluster lists with manager proxies so worker
        # processes can append to shared statistics.
        manager = mp.Manager()
        statistics_from_propagation = [
            [manager.list(level3) for level3 in level2]
            for level2 in statistics_from_propagation
        ]
    print('Doing additional simulations.')
    # Do more simulations and update statistics_from_propagation
    # (surfaces 0 and num_surfaces - 1 are absorbing and skipped).
    for ii in range(1, num_surfaces - 1):
        for jj in range(num_clusters):
            print('Doing simulations for surface {}, cluster {}.'.format(ii, jj))
            _do_additional_simulations(
                target,
                radiuses,
                ii,
                jj,
                cluster_centers,
                cluster_labels,
                propagated_points,
                statistics_from_propagation,
                inner,
                outer,
                num_trials,
                time_step,
                use_parallel,
                n_split,
            )
    if use_parallel:
        # Convert the manager proxies back to plain in-process lists.
        for ii in range(len(statistics_from_propagation)):
            statistics_from_propagation[ii] = [
                list(level3) for level3 in statistics_from_propagation[ii]
            ]
    # Use statistics_from_propagation to calculate forward and backward probabilities
    forward_probabilities, backward_probabilities = _process_statistics_from_propagation(
        statistics_from_propagation, num_clusters
    )
    return forward_probabilities, backward_probabilities
def _do_additional_simulations(
target,
radiuses,
surface_index,
cluster_index,
cluster_centers,
cluster_labels,
propagated_points,
statistics_from_propagation,
inner,
outer,
num_trials,
time_step,
use_parallel,
n_split,
):
surfaces = _get_surfaces(radiuses, inner, outer)
cluster_points_indices = np.flatnonzero(
cluster_labels[surface_index] == cluster_index
)
cluster_size = cluster_points_indices.size
random_indices = np.random.randint(0, cluster_size, size=(num_trials,))
initial_locations = propagated_points[surface_index][
cluster_points_indices[random_indices]
]
boundary_radiuses = np.array(
[surfaces[surface_index + 1], surfaces[surface_index - 1]]
)
if use_parallel:
n_locations = initial_locations.shape[0]
def worker(indices, q):
for index in indices:
initial_location = initial_locations[index]
previous_location, current_location, target_flag = nestimate.advance_within_concentric_spheres(
initial_location, target, boundary_radiuses, time_step, 1
)
if target_flag:
indicator = 1
else:
indicator = -1
final_point = nestimate._interpolate(
previous_location,
current_location,
target.center,
surfaces[surface_index + indicator],
)
centers = cluster_centers[surface_index + indicator]
index = _assign_clusters(final_point, centers)
statistics_from_propagation[surface_index][cluster_index].append(
(surface_index + indicator, index)
)
q.put(1)
process_list = []
q = mp.Queue()
def listener(q):
with tqdm(total=n_locations) as pbar:
for item in iter(q.get, None):
pbar.update()
listener_process = mp.Process(target=listener, args=(q,))
listener_process.start()
for indices in kfold_split(n_locations, n_split):
process = mp.Process(target=worker, args=(indices, q))
process_list.append(process)
process.start()
for process in process_list:
process.join()
q.put(None)
listener_process.join()
else:
for initial_location in tqdm(initial_locations):
previous_location, current_location, target_flag = nestimate.advance_within_concentric_spheres(
initial_location, target, boundary_radiuses, time_step, 1
)
if target_flag:
indicator = 1
else:
indicator = -1
final_point = | |
<filename>src/xlsxwriter_celldsl/ops/classes.py
import itertools
from io import BytesIO
from pathlib import Path
from typing import Any, ClassVar, Deque, Dict, Generic, List, Mapping, Optional, Tuple, TypeVar, Union
from attr import Factory, attrib, attrs, evolve
from xlsxwriter.utility import xl_range_formula
from . import traits
from ..errors import ExecutionCellDSLError
from ..formats import FormatDict, FormatsNamespace
from ..utils import WorksheetTriplet
# Generic parameter used by AddChartOp to type its `target` helper so IDEs
# autocomplete the wrapped chart class's methods (see comment in AddChartOp).
T = TypeVar('T')
@attrs(auto_attribs=True, frozen=True, order=False)
class Command(object):
    """Base class for all commands.

    Commands are immutable (frozen attrs classes); every mutator returns a
    modified copy via ``evolve`` instead of changing the instance in place.
    """
    # Set to True by subclasses whose execution writes cell contents
    # (e.g. WriteOp, MergeWriteOp, WriteRichOp).
    OVERWRITE_SENSITIVE: ClassVar[bool] = False
    # Section names captured when the command was issued (see SectionBeginOp);
    # excluded from repr to keep debug output readable.
    NAME_STACK_DATA: List[str] = attrib(factory=list, repr=False)
    def absorb_name_stack_data(self, data: Deque):
        """Not for public use; copies `data` to `NAME_STACK_DATA`"""
        # An empty deque records nothing, so the original frozen instance
        # can be returned unchanged.
        if data:
            return evolve(self, NAME_STACK_DATA=[*data])
        return self
@attrs(auto_attribs=True, frozen=True, order=False)
class StackSaveOp(Command):
    """A command to push the current location onto the save stack."""
@attrs(auto_attribs=True, frozen=True, order=False)
class StackLoadOp(Command):
    """A command to pop the last location off the save stack and jump to it."""
@attrs(auto_attribs=True, frozen=True, order=False)
class LoadOp(Command, traits.NamedPoint):
    """A command to jump to the save point named via :func:`at`."""
@attrs(auto_attribs=True, frozen=True, order=False)
class SaveOp(Command, traits.NamedPoint):
    """A command to save the current location under the name given via :func:`at`."""
@attrs(auto_attribs=True, frozen=True, order=False)
class RefArrayOp(Command, traits.Range, traits.NamedPoint):
    """A forward reference to an array of cells :func:`with_name` defined using
    a rectangle with :func:`top_left` and :func:`bottom_right` specified. This is only used in charts.

    This is also a marker for such an array: in commands that support forward references,
    you can use ``RefArray.at('name')`` to use a reference, which will be replaced
    with a string like ``'=SheetName!$C$1:$F$9'``."""
@attrs(auto_attribs=True, frozen=True, order=False)
class SectionBeginOp(Command):
    """A command that does nothing, but may assist in debugging and documentation
    of scripts by providing named segments in the script via :func:`with_name`.

    During execution, if an error occurs, the surrounding names will be displayed, in order from most
    recent to least recent."""
    # Placeholder used until with_name() supplies a real section name.
    name: str = "__UNNAMED"
    def with_name(self, name: str):
        """Return a copy labelled with `name`."""
        return evolve(self, name=name)
@attrs(auto_attribs=True, frozen=True, order=False)
class SectionEndOp(Command):
    """A command that indicates the end of the most recent `SectionBeginOp`."""
@attrs(auto_attribs=True, frozen=True, order=False)
class MoveOp(Command, traits.RelativePosition):
    """A command to move :func:`r` rows and :func:`c` columns away from the current cell."""
@attrs(auto_attribs=True, frozen=True, order=False)
class AtCellOp(Command, traits.AbsolutePosition):
    """A command to jump to the absolute cell at row :func:`r`, column :func:`c`."""
@attrs(auto_attribs=True, frozen=True, order=False)
class BacktrackCellOp(Command):
    """A command to :func:`rewind` the position back in time. 0 stays in current cell, 1 goes to previous cell..."""
    # Number of steps to rewind through the history of visited cells.
    n: int = 0
    def rewind(self, n_cells: int):
        """Return a copy configured to rewind `n_cells` steps."""
        return evolve(self, n=n_cells)
@attrs(auto_attribs=True, frozen=True, order=False)
class WriteOp(Command, traits.Data, traits.DataType, traits.Format, traits.ExecutableCommand):
    """A command to write to this cell :func:`with_data` with data :func:`with_data_type` and :func:`with_format`."""
    OVERWRITE_SENSITIVE = True
    def execute(self, target: WorksheetTriplet, coords: traits.Coords):
        """Perform the write at `coords`, raising on xlsxwriter error codes."""
        write_args = (*coords, self.data, self.ensure_format(target.fmt))
        if self.data_type is None:
            # No explicit type: let xlsxwriter infer via the generic write().
            result = target.ws.write(*write_args)
        else:
            # An explicit type forces the matching typed writer, e.g. write_number.
            result = getattr(target.ws, f'write_{self.data_type}')(*write_args)
        if result == -2:
            raise ExecutionCellDSLError('Write failed because the string is longer than 32k characters')
        if result == -3:
            raise ExecutionCellDSLError('Write failed because the URL is longer than 2079 characters '
                                        'long')
        if result == -4:
            raise ExecutionCellDSLError('Write failed because there are more than 65530 URLs in the '
                                        'sheet')
@attrs(auto_attribs=True, frozen=True, order=False)
class MergeWriteOp(Command, traits.CardinalSize, traits.Data, traits.DataType, traits.Format, traits.ExecutableCommand):
    """
    A command to merge :func:`with_size` cols starting from current col and
    write :func:`with_data` with data :func:`with_data_type` and :func:`with_format` into this cell.
    """
    OVERWRITE_SENSITIVE = True
    def execute(self, target: WorksheetTriplet, coords: traits.Coords):
        """Merge `size` extra columns on the current row and write the data."""
        return_code = target.ws.merge_range(
            *coords,
            coords[0],
            coords[1] + self.size,
            self.data,
            self.ensure_format(target.fmt)
        )
        if self.data_type is not None:
            # In order to force a data type into a merged cell, we have to perform a second write
            # as shown here: <https://xlsxwriter.readthedocs.io/example_merge_rich.html>
            # NOTE(review): this overwrites return_code, so an error from
            # merge_range above is masked if the typed write succeeds — confirm intent.
            return_code = getattr(target.ws, f"write_{self.data_type}")(
                *coords,
                self.data,
                self.ensure_format(target.fmt)
            )
        if return_code == -2:
            raise ExecutionCellDSLError('Merge write failed because the string is longer than 32k '
                                        'characters')
        if return_code == -3:
            raise ExecutionCellDSLError('Merge write failed because the URL is longer than 2079 '
                                        'characters long')
        if return_code == -4:
            raise ExecutionCellDSLError(
                'Merge write failed because there are more than 65530 URLs in the sheet')
@attrs(auto_attribs=True, frozen=True, order=False)
class WriteRichOp(Command, traits.Data, traits.Format, traits.ExecutableCommand):
    """A command to write a text run :func:`with_data` and
    :func:`with_format` to current position, :func:`then` perhaps write some more,
    optionally :func:`with_default_format`.
    Additionally, the first `WriteRichOp` may also set this cell :func:`with_cell_format`."""
    # Format used by fragments that did not set one explicitly.
    default_format: FormatDict = attrib(factory=FormatDict, converter=FormatDict)
    # Optional format for the cell itself, passed last to write_rich_string.
    cell_format: Optional[FormatDict] = None
    # Back-link to the previous fragment; the chain is flattened by `rich_chain`.
    prev_fragment: Optional['WriteRichOp'] = None
    OVERWRITE_SENSITIVE = True
    def execute(self, target: WorksheetTriplet, coords: traits.Coords):
        """Write the whole fragment chain as one rich string at `coords`."""
        fragments: List[WriteRichOp] = self.rich_chain
        # Interleave as [fmt1, data1, fmt2, data2, ...] — the argument shape
        # expected by xlsxwriter's write_rich_string.
        formats_and_data = [*itertools.chain.from_iterable(zip((
            fragment.ensure_format(target.fmt)
            for fragment in fragments
        ), (
            fragment.data
            for fragment in fragments
        )))]
        if self.cell_format is not None:
            # write_rich_string accepts an optional trailing cell format.
            formats_and_data.append(target.fmt.verify_format(self.cell_format))
        return_code = target.ws.write_rich_string(*coords, *formats_and_data)
        if return_code == -5:
            # NOTE(review): -5 appears to signal too few fragments for a rich
            # string; fall back to a plain formatted write of the single
            # fragment (data, then its format) — confirm against xlsxwriter docs.
            return_code = target.ws.write_string(*coords, formats_and_data[1], formats_and_data[0])
        if return_code == -2:
            raise ExecutionCellDSLError('Rich write failed because the string is longer than 32k '
                                        'characters')
        if return_code == -4:
            raise ExecutionCellDSLError('Rich write failed because of an empty string')
    def then(self, fragment: 'WriteRichOp'):
        """Submit additional fragments of the rich string"""
        if isinstance(fragment, WriteRichOp):
            # The new fragment inherits this chain's default format and links
            # back to self, extending the singly-linked fragment chain.
            return evolve(
                fragment,
                set_format=fragment.set_format or self.default_format,
                default_format=self.default_format,
                prev_fragment=self
            )
        else:
            raise TypeError(fragment)
    def with_default_format(self, other):
        """Set format for fragments without a format. Should be applied to the first fragment"""
        return evolve(
            self,
            set_format=self.set_format or other,
            default_format=other
        )
    def with_cell_format(self, format: Optional[FormatDict]):
        """If `format` is not None, then set the cell format to be set to the cell upon writing.
        This method does not implicitly merge with DEFAULT_FORMAT."""
        return evolve(
            self,
            cell_format=format,
        )
    @property
    def rich_chain(self):
        """Not for public use; the flattened chain of segments"""
        # Walk prev_fragment links back to the first fragment, then reverse
        # so the result is in writing order.
        chain = self
        result = []
        while chain.prev_fragment:
            result.append(chain)
            chain = chain.prev_fragment
        result.append(chain)
        result.reverse()
        return result
    @property
    def format_(self):
        """Not for public use; the format to be applied"""
        return self.set_format or self.default_format or self.FALLBACK_FORMAT
@attrs(auto_attribs=True, frozen=True, order=False)
class ImposeFormatOp(Command, traits.Format):
    """A command to merge the format given :func:`with_format` into the current cell's format."""
    set_format: FormatDict = attrib(default=FormatsNamespace.base, converter=FormatDict)
@attrs(auto_attribs=True, frozen=True, order=False)
class OverrideFormatOp(Command, traits.Format):
    """A command to override current cell's format :func:`with_format`."""
@attrs(auto_attribs=True, frozen=True, order=False)
class DrawBoxBorderOp(Command, traits.Range):
    """Draw a box with borders where `top_left_point` and `bottom_right_point` are respective corners using
    `(right|top|left|bottom)_formats`."""
    # Per-side border formats; defaults come from FormatsNamespace.
    right_format: Mapping = FormatsNamespace.right_border
    top_format: Mapping = FormatsNamespace.top_border
    left_format: Mapping = FormatsNamespace.left_border
    bottom_format: Mapping = FormatsNamespace.bottom_border
    def with_right_format(self, format_: Mapping):
        """Return a copy using `format_` for the right edge."""
        return evolve(self, right_format=format_)
    def with_top_format(self, format_: Mapping):
        """Return a copy using `format_` for the top edge."""
        return evolve(self, top_format=format_)
    def with_left_format(self, format_: Mapping):
        """Return a copy using `format_` for the left edge."""
        return evolve(self, left_format=format_)
    def with_bottom_format(self, format_: Mapping):
        """Return a copy using `format_` for the bottom edge."""
        return evolve(self, bottom_format=format_)
@attrs(auto_attribs=True, frozen=True, order=False)
class DefineNamedRangeOp(Command, traits.Range, traits.ExecutableCommand):
    """A command defining a workbook-level named range: the rectangle spanned by
    :func:`with_top_left` and :func:`with_bottom_right` becomes addressable under
    the name given :func:`with_name`."""
    # Placeholder until with_name() supplies the real range name.
    name: str = "__DEFAULT"
    def with_name(self, name: str):
        """Return a copy whose range will be registered under `name`."""
        return evolve(self, name=name)
    def execute(self, target: WorksheetTriplet, coords: traits.Coords):
        """Register the named range on the workbook."""
        # Build an absolute sheet-qualified reference such as 'Sheet1!$A$1:$C$3'.
        range_formula = xl_range_formula(
            target.ws.name,
            *self.top_left_point,
            *self.bottom_right_point
        )
        target.wb.define_name(self.name, '=' + range_formula)
@attrs(auto_attribs=True, frozen=True, order=False)
class SetRowHeightOp(Command, traits.FractionalSize, traits.ExecutableCommand):
    """A command to set current row's height :func:`with_size`."""
    def execute(self, target: WorksheetTriplet, coords: traits.Coords):
        # coords[0] is the current row index.
        target.ws.set_row(row=coords[0], height=self.size)
@attrs(auto_attribs=True, frozen=True, order=False)
class SetColumnWidthOp(Command, traits.FractionalSize, traits.ExecutableCommand):
    """A command to set current column's width with :func:`with_size`."""
    def execute(self, target: WorksheetTriplet, coords: traits.Coords):
        # coords[1] is the current column index (first and last column coincide).
        target.ws.set_column(coords[1], coords[1], self.size)
@attrs(auto_attribs=True, frozen=True, order=False)
class SubmitHPagebreakOp(Command):
    """A command to submit a horizontal page break at current row.
    This is preserved between several cell_dsl_context."""
@attrs(auto_attribs=True, frozen=True, order=False)
class SubmitVPagebreakOp(Command):
    """A command to submit a vertical page break at the current column.
    This is preserved between several cell_dsl_context."""
@attrs(auto_attribs=True, frozen=True, order=False)
class ApplyPagebreaksOp(Command):
    """A command to apply all existing pagebreaks.
    Should come after all `SubmitHPagebreakOp` and `SubmitVPagebreakOp` have been committed."""
@attrs(auto_attribs=True, frozen=True, order=False)
class AddCommentOp(Command, traits.Data, traits.Options, traits.ExecutableCommand):
    """A command to add a comment to this cell :func:`with_data`, configured :func:`with_options`."""
    def execute(self, target: WorksheetTriplet, coords: traits.Coords):
        # `options` is forwarded verbatim to xlsxwriter's write_comment.
        target.ws.write_comment(*coords, self.data, self.options)
class _ChartHelper:
    """Records arbitrary method calls as ``(name, args, kwargs)`` triples.

    Any attribute access yields a callable that simply returns the call
    description; AddChartOp later replays these on a real chart object.
    """
    def __getattr__(self, item):
        def _record(*args, **kwargs):
            # No side effects — just describe the requested call.
            return item, args, kwargs
        return _record
# Chart method names that get special handling when an AddChartOp action chain
# is replayed. NOTE(review): the consuming code lies outside this chunk — confirm
# the exact semantics there.
_CHART_FUNC_EXCEPTIONS = {
    'set_style',
    'show_blanks_as',
    'show_hidden_data',
    'combine',
}
@attrs(auto_attribs=True, frozen=True, order=False)
class AddChartOp(Command, traits.ExecutableCommand, traits.ForwardRef, Generic[T]):
"""
A command to add a chart to this cell perhaps :func:`with_subtype` and then :func:`do`
call some methods on the associated `target` class.
"""
type: str = 'bar'
subtype: Optional[str] = None
# It's typed T to trick PyCharm or other similar systems
# to autocomplete using methods of the associated
# class. It is also appropriate since
# ChartHelper works kinda like a mock of the associated
# class.
target: T = Factory(_ChartHelper)
action_chain: List[Tuple[str, Tuple[Any, ...], Dict[str, Any]]] = Factory(list)
def with_subtype(self, subtype):
return evolve(self, subtype=subtype)
def do(self, command_list):
"""
Add `command_list` to `action_chain`
Example:
>>> from xlsxwriter_celldsl.ops import AddLineChart, AddBarChart, RefArray
... AddLineChart.do([
... # You really should only use `target` attribute of this class
... AddLineChart.target.add_series({'values': '=SheetName!$A$1:$D$1'}),
... # Charts allow to use `RefArray` in place of literal cell ranges
... AddLineChart.target.add_series({'values': RefArray.at('some ref')}),
... # Combine method accepts AddChartOp
... AddLineChart.target.combine(
... # This will combine this line chart with | |
"""
Module to execute the simulation for a given instance.
"""
""" import packages """
import logging
from importlib import import_module
import numpy.random as rdm
import copy
import numpy as np
""" import project configurations """
import configurations.settings_simulation as config
""" import project libraries """
import modules.data.datamgm as dtm
from modules.simulation.entities import Tram, Stop, Passengers, CargoRequest, write_entities_log, init_entities_log
# Global logger
logger = dtm.initialise_logger(__name__)
"""
GLOBAL VARIABLES
----------------
- These variables must be resetted after every simulation run
"""
#: now Simulation Clock
now = -1
#: last_now Last event
last_now = 0
#:event_queue Event queue
event_queue = []
#:trams List of running trams
trams = []
#:stops List of stops
stops = []
#:cargo List of cargo
cargo = []
#:updates List of updates
updates = set()
#:numEvents Number of total events
numEvents = 0
def reset_variables():
    """Restore every module-level simulation variable to its initial state."""
    global now, last_now, numEvents, trams, stops, event_queue, cargo, updates
    now, last_now, numEvents = -1, 0, 0
    # Tram class-level state is cleared through any one instance.
    if trams:
        trams[0].reset()
    trams.clear()
    # Stops carry per-instance state, so each one is reset individually.
    for stop in stops:
        stop.reset()
    stops.clear()
    event_queue.clear()
    Passengers.reset()
    # As with trams, one instance resets the shared cargo state.
    if cargo:
        cargo[0].reset()
    cargo.clear()
    updates.clear()
"""
SIMULATION LOGGING
------------------
- Simluation log (Text File): Includes all information about the events in the simulation
- Entities Log (csv file): Includes the relevant data information of single entities
"""
# "Simulation Log": What does in a single simulation run happen? (Descriptive)
sim_log = logging.getLogger("simulation")
# "Entities Log": How do the variables change during one simulation run?
ent_log = logging.getLogger("entities")
"""
SIMULATION METHODS
------------------
"""
def run(instance, passengerData, seed=False, index_child_seed=False):
    """
    Run the simulation.

    :param instance: Path to the instance file
    :param passengerData: Path to the passenger data file
    :param seed: SeedSequence to replicate the simulation (falsy to draw fresh entropy)
    :param index_child_seed: Index of the child of the global seed sequence
        (logged at the end for replicability)
    """
    # Used global variables
    global inst, now, last_now, event_queue, numEvents
    """ Initialise random generator """
    # Reuse the provided seed sequence if given, otherwise create a new one;
    # the entropy is logged at the end so the run can be replicated.
    if seed:
        entropy = seed.entropy
    else:
        seed = rdm.SeedSequence()
        entropy = seed.entropy
    # Import instance (from .py-file)
    inst = dtm.import_instance(instance)
    # Initialize the simulation
    passenger = initialize(seed, passengerData)
    # Run the simulation
    running = True
    while running:
        # sort the upcoming events according to the time they occur
        event_queue = sorted(event_queue, key=lambda i: i['time'])
        if event_queue:
            if event_queue[0]['time'] != now:
                # The clock advances: flush status and entity logs for the old time.
                if now >= 0:
                    status(now)
                    for entity in updates:
                        # "passenger" is a placeholder for the global passenger object.
                        if entity == "passenger":
                            entity = passenger
                        entity.last_event = now
                        write_entities_log(entity, now)
                    updates.clear()
                last_now = now
                now = event_queue[0]['time']
                sim_log.info("\n-----------------------------------------------------------------------------------")
                sim_log.info(f"Events at {now}:")
                sim_log.info("***")
            next_event()
            numEvents += 1
            event_queue.pop(0)
        # No more events
        else:
            last_time_period(inst.numPeriods-1, passenger)
            running = False
    # Save values for replicability
    sim_log.info(f"\nentropy:\n{entropy}\n")
    # BUG FIX: this previously logged the entropy a second time instead of
    # the child-seed index announced by the label.
    sim_log.info(f"index_child_seed:\n{index_child_seed}\n")
    # Reset after simulation run
    reset_variables()
# Initialisation
def initialize(seed, passengerData):
    """
    Initialise the simulation run: create the needed entities and add the first events to the event queue.

    :param seed: Seed for replicability
    :type seed: numpy.random.SeedSequence
    :param passengerData: Path to passenger data file
    :type passengerData: string or path
    :return: Global passenger object to track number of passengers
    :rtype: Passengers object
    """
    global event_queue
    sim_log.info("Initialisation...\n--------------------------------------")
    # Create child seedsequence per entity
    seeds = seed.spawn(10)
    # Entities Log
    init_entities_log()
    # initialize stops; the first stop is flagged (depot/origin)
    for s in range(inst.numStops):
        distance_to = {"Stop": inst.stops_distance[s], "Customer": [0]}
        distance_from = {"Stop": [inst.stops_distance[j][s] for j in range(inst.numStops)], "Customer": [0]}
        if s == 0:
            stops.append(Stop(distance_to, distance_from, True))
        else:
            stops.append(Stop(distance_to, distance_from))
    pas = dtm.import_instance(passengerData)
    """ Initialize passengers """
    # Each random component gets its own child seed so components stay independent.
    passenger_seeds = seeds[0].spawn(6)
    if config.random_passenger_arrival:
        arriving = pas.arriving_intensity
        config.random_passenger_arrival = passenger_seeds[0]
    else:
        arriving = pas.passenger_arriving
    # instantiate passenger arrival events (event id 6) for every non-zero entry
    nonzero = np.nonzero(arriving)
    for i in range(len(nonzero[0])):
        p = nonzero[0][i]
        s = nonzero[1][i]
        create_event(p, 6, [s])
    if config.random_passenger_boarding:
        config.random_passenger_boarding = passenger_seeds[1]
    if config.random_passenger_alighting:
        # BUG FIX: the alighting seed was previously assigned to
        # config.random_passenger_boarding, overwriting the boarding seed
        # and leaving alighting unseeded.
        config.random_passenger_alighting = passenger_seeds[2]
    if config.random_passenger_changing:
        config.random_passenger_changing = passenger_seeds[3]
    if config.random_boarding_time:
        config.random_boarding_time = passenger_seeds[4]
    if config.random_alighting_time:
        config.random_alighting_time = passenger_seeds[5]
    """ Global passenger variables """
    passenger = Passengers(
        # passenger arrival
        random_arrival = config.random_passenger_arrival,
        arriving_passengers = arriving,
        arriving_passengers_cum = pas.passenger_arriving_acc,
        # passenger boarding
        random_boarding = config.random_passenger_boarding,
        boarding_rate = [1 for tram in range(inst.numTrams)],
        # passenger alighting
        random_alighting = config.random_passenger_alighting,
        alighting_rate = pas.passenger_allighting_rate,
        # passenger changing
        random_changing = config.random_passenger_changing,
        changing_rate = [0 for tram in range(inst.numStops)],
        # time
        random_boarding_time = config.random_boarding_time,
        random_alighting_time = config.random_alighting_time,
        service_time = inst.passenger_service_time_board,
        service_time_alight = inst.passenger_service_time_alight,
    )
    # Initialize the starting times of each tram (event id 1)
    tram_seeds = seeds[1].spawn(inst.numTrams)
    for t in range(inst.numTrams):
        sim_log.info(f"Tram {t} will start at {inst.tram_time_arrival[t][0]}.")
        Tram.numTotal += 1
        create_event(inst.tram_time_arrival[t][0], 1, [t, tram_seeds[t]])
    # Initialize the cargo release (event id 5)
    cargo_seeds = seeds[2].spawn(inst.numCargo)
    for c in range(inst.numCargo):
        sim_log.info(f"Cargo request {c} will start at {inst.cargo_release[c]}.")
        create_event(inst.cargo_release[c], 5, [c, cargo_seeds[c]])
    # sort the event queue according to the time
    event_queue = sorted(event_queue, key=lambda i: i['time'])
    sim_log.info("\n-----------------------------------------------------------------------------------\n")
    return passenger
def last_time_period(time, passenger):
    """
    Flush the entities log for the final period of the simulation.

    :param time: last period
    :type time: float
    :param passenger: passenger object
    :type passenger: Passengers object
    """
    status(time)
    # Trams and stops are logged the same way, so handle them in one pass.
    for entity in trams + stops:
        write_entities_log(entity, time)
    write_entities_log(passenger, time)
    # Cargo requests need their delay estimated before being logged.
    for request in cargo:
        request.estimate_delay(time)
        write_entities_log(request, time)
def status(time):
    """
    Append the current state of every entity to the simulation log.

    :param time: Time of update
    :type time: float
    """
    global updates
    sim_log.info("\n*~* Status *~*")
    for tram in trams:
        tram.info()
        # Record a new snapshot only if none exists yet for this stop count.
        if len(tram.sequences) < tram.stopped:
            tram.sequences.append({"time": time, "cargo": tram.cargosize, "passengers": tram.passengers, "delay": tram.delay})
    for stop in stops:
        stop.info()
        if len(stop.sequences) < stop.stopped:
            stop.sequences.append({"time": time, "cargo": stop.cargosize, "passengers": stop.passengers})
    CargoRequest.info()
    Passengers.info()
"""
METHODS FOR HANDLING EVENTS
---------------------------
"""
def create_event(t, event_id, par):
    """
    Queue a new event, silently dropping it when it lies beyond the horizon.

    :param t: time at which the event occurs
    :type t: float
    :param event_id: numeric event identifier (see :func:`next_event`)
    :type event_id: int
    :param par: parameters forwarded to the event handler
    :type par: list
    """
    # Events scheduled past the simulated horizon are not queued at all.
    if np.ceil(t) < inst.numPeriods:
        event = {"time": t, "id": event_id, "par": par}
        event_queue.append(event)
def next_event():
    """
    Execute the handler for the event at the head of the event queue.

    Event ids: 1 tram starts, 2 tram reaches a stop, 3 tram enters a stop,
    4 tram leaves a stop, 5 cargo is released, 6 passenger update.
    """
    event = event_queue[0]
    event_id = event["id"]
    par = event["par"]
    # Table-driven dispatch from event id to its handler.
    dispatch = {
        1: lambda p: starting_tram(p[0], seed=p[1]),
        2: lambda p: tram_reaches_stop(p[0]),
        3: lambda p: tram_entering_stop(p[0]),
        4: lambda p: tram_leaves_stop(p[0]),
        5: lambda p: starting_cargo(p[0], seed=p[1]),
        6: lambda p: passenger_update(p[0]),
    }
    handler = dispatch.get(event_id)
    if handler is not None:
        handler(par)
"""
EVENT METHODS
-----------------------------------
"""
def starting_tram(index,seed):
"""
Event no. 1: Starting a tram
:param index: Index of the tram
:type index: int
:param seed: Seed for replicability
:type seed: int
"""
global now, updates
tram_id = len(trams)
if config.random_travel_time:
config.random_travel_time = seed
# debugging
#logger.debug(f"tram_travel_deviation: {config.tram_travel_deviation}")
# if passengers and cargo share vehicles
if inst.scheme == "SV":
trams.append(Tram(
tour = inst.tram_tour[index],
capacity_passenger = inst.tram_capacity-inst.tram_capacity_min_cargo,
capacity_cargo = inst.tram_capacity-inst.tram_capacity_min_passenger,
capacity_total = inst.tram_capacity,
schedule_arrival = inst.tram_time_arrival[index],
schedule_departure = inst.tram_time_departure[index],
speed = inst.tram_speed,
# Simulation deterministic by default
random_travel_time = config.random_travel_time,
travel_deviation = config.tram_travel_deviation,
max_service = inst.tram_max_service
)
)
# if passengers and cargo have dedicated vehicles
elif inst.scheme == "SI":
if index in inst.cargo_tram_assignment:
# cargo tram
trams.append(Tram(
tour = inst.tram_tour[index],
capacity_passenger = 0,
capacity_cargo = inst.tram_capacity_cargo,
capacity_total = inst.tram_capacity,
schedule_arrival = inst.tram_time_arrival[index],
schedule_departure = inst.tram_time_departure[index],
speed = inst.tram_speed,
# Simulation deterministic by default
random_travel_time = config.random_travel_time,
travel_deviation = config.tram_travel_deviation,
max_service = inst.tram_max_service
)
)
else:
# passenger tram
trams.append(Tram(
tour = inst.tram_tour[index],
capacity_passenger = inst.tram_capacity,
capacity_cargo = 0,
capacity_total = inst.tram_capacity,
schedule_arrival = inst.tram_time_arrival[index],
schedule_departure = inst.tram_time_departure[index],
speed = inst.tram_speed,
# Simulation deterministic by default
random_travel_time | |
actual_mem = self._get_mem_usage_values(reset=True)
status_format = " | ".join(["%%%ss" % len(header) for header in self._status_headers])
print(status_format % (
self._num_queries_finished.value,
self._num_queries_started.value - self._num_queries_finished.value,
self._num_queries_exceeded_mem_limit.value,
self._num_queries_timedout.value - self._num_queries_cancelled.value,
self._num_queries_cancelled.value,
self._num_other_errors.value,
self._mem_mb_needed_for_next_query.value,
self._mem_broker.total_mem_mb - self._mem_broker.available_mem_mb,
"" if reported_mem == -1 else reported_mem,
"" if actual_mem == -1 else actual_mem))
def _update_from_query_report(self, report):
LOG.debug("Updating runtime stats")
increment(self._num_queries_finished)
if report.mem_limit_exceeded:
increment(self._num_queries_exceeded_mem_limit)
if report.was_cancelled:
increment(self._num_queries_cancelled)
if report.timed_out:
increment(self._num_queries_timedout)
class QueryTimeout(Exception):
  """Raised when a query exceeds its allotted execution time."""
  pass
class Query(object):
  """Contains a SQL statement along with expected runtime information."""

  def __init__(self):
    # Short identifier, e.g. "q19" for TPC workloads.
    self.name = None
    # The SQL text to execute.
    self.sql = None
    # Database to USE before running; None keeps the session default.
    self.db_name = None
    # Order-independent hash of the expected result set
    # (see QueryRunner._hash_result); None until populated.
    self.result_hash = None
    # Memory/runtime expectations, populated by the runtime-info machinery.
    self.required_mem_mb_with_spilling = None
    self.required_mem_mb_without_spilling = None
    self.solo_runtime_secs_with_spilling = None
    self.solo_runtime_secs_without_spilling = None

  def __repr__(self):
    return dedent("""
        <Query
          Mem: %(required_mem_mb_with_spilling)s
          Mem no-spilling: %(required_mem_mb_without_spilling)s
          Solo Runtime: %(solo_runtime_secs_with_spilling)s
          Solo Runtime no-spilling: %(solo_runtime_secs_without_spilling)s
          DB: %(db_name)s
          SQL: %(sql)s>""".strip() % self.__dict__)
class QueryRunner(object):
  """Encapsulates functionality to run a query and provide a runtime report."""
  # Presence of this pattern in a profile means the query spilled to disk.
  SPILLED_PATTERN = re.compile("ExecOption:.*Spilled")
  # Number of rows fetched per round trip when hashing results.
  BATCH_SIZE = 1024
  def __init__(self):
    # The impalad to run against; must be assigned before connect().
    self.impalad = None
    self.impalad_conn = None
    self.use_kerberos = False
    # Directory where per-query result logs are written for debugging.
    self.result_hash_log_dir = gettempdir()
  def connect(self):
    """Open a connection to the configured impalad."""
    self.impalad_conn = self.impalad.impala.connect(impalad=self.impalad)
  def disconnect(self):
    """Close the connection if one is open; safe to call repeatedly."""
    if self.impalad_conn:
      self.impalad_conn.close()
      self.impalad_conn = None
  def run_query(self, query, timeout_secs, mem_limit_mb):
    """Run a query and return an execution report."""
    if not self.impalad_conn:
      raise Exception("connect() must first be called")
    timeout_unix_time = time() + timeout_secs
    report = QueryReport()
    try:
      with self.impalad_conn.cursor() as cursor:
        start_time = time()
        cursor.execute("SET ABORT_ON_ERROR=1")
        LOG.debug("Setting mem limit to %s MB", mem_limit_mb)
        cursor.execute("SET MEM_LIMIT=%sM" % mem_limit_mb)
        if query.db_name:
          LOG.debug("Using %s database", query.db_name)
          cursor.execute("USE %s" % query.db_name)
        LOG.debug("Running query with %s MB mem limit at %s with timeout secs %s:\n%s",
            mem_limit_mb, self.impalad.host_name, timeout_secs, query.sql)
        error = None
        try:
          # The leading comment embeds run metadata in the query text itself.
          cursor.execute_async("/* Mem: %s MB. Coordinator: %s. */\n"
              % (mem_limit_mb, self.impalad.host_name) + query.sql)
          LOG.debug("Query id is %s",
              op_handle_to_query_id(cursor._last_operation_handle))
          sleep_secs = 0.1
          secs_since_log = 0
          # Poll until the query finishes, cancelling it if the deadline passes.
          while cursor.is_executing():
            if time() > timeout_unix_time:
              self._cancel(cursor, report)
              return report
            if secs_since_log > 5:
              secs_since_log = 0
              LOG.debug("Waiting for query to execute")
            sleep(sleep_secs)
            secs_since_log += sleep_secs
          try:
            report.result_hash = self._hash_result(cursor, timeout_unix_time, query)
          except QueryTimeout:
            # Fetching results exceeded the deadline; cancel and report timeout.
            self._cancel(cursor, report)
            return report
        except Exception as error:
          LOG.debug("Error running query with id %s: %s",
              op_handle_to_query_id(cursor._last_operation_handle), error)
          self._check_for_mem_limit_exceeded(report, cursor, error)
          if report.non_mem_limit_error or report.mem_limit_exceeded:
            return report
        report.runtime_secs = time() - start_time
        report.profile = cursor.get_profile()
        report.mem_was_spilled = \
            QueryRunner.SPILLED_PATTERN.search(report.profile) is not None
    except Exception as error:
      # A mem limit error would have been caught above, no need to check for that here.
      report.non_mem_limit_error = error
    return report
  def _cancel(self, cursor, report):
    """Mark the report as timed out and try to cancel the running query."""
    report.timed_out = True
    # Copy the operation handle in case another thread causes the handle to be reset.
    operation_handle = cursor._last_operation_handle
    if not operation_handle:
      return
    query_id = op_handle_to_query_id(operation_handle)
    try:
      LOG.debug("Attempting cancellation of query with id %s", query_id)
      cursor.cancel_operation()
      LOG.debug("Sent cancellation request for query with id %s", query_id)
    except Exception as e:
      LOG.debug("Error cancelling query with id %s: %s", query_id, e)
      try:
        # Fall back to the impalad's web endpoint when the client API fails.
        LOG.debug("Attempting to cancel query through the web server.")
        self.impalad.cancel_query(query_id)
      except Exception as e:
        LOG.debug("Error cancelling query %s through the web server: %s", query_id, e)
  def _check_for_mem_limit_exceeded(self, report, cursor, caught_exception):
    """To be called after a query failure to check for signs of failed due to a
    mem limit. The report will be updated accordingly.
    """
    if cursor._last_operation_handle:
      try:
        report.profile = cursor.get_profile()
      except Exception as e:
        LOG.debug("Error getting profile for query with id %s: %s",
            op_handle_to_query_id(cursor._last_operation_handle), e)
    caught_msg = str(caught_exception).lower().strip()
    # Exceeding a mem limit may result in the message "cancelled".
    # https://issues.cloudera.org/browse/IMPALA-2234
    if "memory limit exceeded" in caught_msg or caught_msg == "cancelled":
      report.mem_limit_exceeded = True
      return
    # If the mem limit is very low and abort_on_error is enabled, the message from
    # exceeding the mem_limit could be something like:
    # Metadata states that in group hdfs://<node>:8020<path> there are <X> rows,
    # but only <Y> rows were read.
    if "metadata states that in group" in caught_msg \
        and "rows were read" in caught_msg:
      report.mem_limit_exceeded = True
      return
    LOG.error("Non-mem limit error for query with id %s: %s",
        op_handle_to_query_id(cursor._last_operation_handle), caught_exception,
        exc_info=True)
    report.non_mem_limit_error = caught_exception
  def _hash_result(self, cursor, timeout_unix_time, query):
    """Returns a hash that is independent of row order. 'query' is only used for debug
    logging purposes (if the result is not as expected a log file will be left for
    investigations).
    """
    query_id = op_handle_to_query_id(cursor._last_operation_handle)
    # A value of 1 indicates that the hash thread should continue to work.
    should_continue = Value("i", 1)
    def hash_result_impl():
      # Runs on a daemon thread; the running total and any error are stored
      # on the thread object itself (see create_and_start_daemon_thread).
      result_log = None
      try:
        file_name = query_id.replace(":", "_")
        if query.result_hash is None:
          file_name += "_initial"
        file_name += "_results.txt"
        result_log = open(os.path.join(self.result_hash_log_dir, file_name), "w")
        result_log.write(query.sql)
        result_log.write("\n")
        current_thread().result = 1
        while should_continue.value:
          LOG.debug("Fetching result for query with id %s",
              op_handle_to_query_id(cursor._last_operation_handle))
          rows = cursor.fetchmany(self.BATCH_SIZE)
          if not rows:
            LOG.debug("No more results for query with id %s",
                op_handle_to_query_id(cursor._last_operation_handle))
            return
          for row in rows:
            for idx, val in enumerate(row):
              if val is None:
                # The hash() of None can change from run to run since it's based on
                # a memory address. A chosen value will be used instead.
                val = 38463209
              elif isinstance(val, float):
                # Floats returned by Impala may not be deterministic, the ending
                # insignificant digits may differ. Only the first 6 digits will be used
                # after rounding.
                sval = "%f" % val
                dot_idx = sval.find(".")
                val = round(val, 6 - dot_idx)
              # Weight by column position so the hash is column-order sensitive
              # but row-order independent.
              current_thread().result += (idx + 1) * hash(val)
              # Modulo the result to keep it "small" otherwise the math ops can be slow
              # since python does infinite precision math.
              current_thread().result %= maxint
              if result_log:
                result_log.write(str(val))
                result_log.write("\t")
                result_log.write(str(current_thread().result))
                result_log.write("\n")
      except Exception as e:
        current_thread().error = e
      finally:
        if result_log is not None:
          result_log.close()
          # NOTE(review): the log is removed when an error occurred AND the
          # hash matches the expectation; this looks inverted (removal on a
          # clean, matching run would be expected) — confirm intent.
          if current_thread().error is not None \
              and current_thread().result == query.result_hash:
            os.remove(result_log.name)
    hash_thread = create_and_start_daemon_thread(hash_result_impl,
        "Fetch Results %s" % query_id)
    # Wait only until the overall deadline; a still-running thread means timeout.
    hash_thread.join(max(timeout_unix_time - time(), 0))
    if hash_thread.is_alive():
      should_continue.value = 0
      raise QueryTimeout()
    if hash_thread.error:
      raise hash_thread.error
    return hash_thread.result
def load_tpc_queries(workload):
  """Return the list of TPC queries for 'workload' ('tpch' or 'tpcds').

  Each query lives in its own .test file under the workload's query
  directory; the query name (e.g. "q12") is derived from the file name.
  """
  LOG.info("Loading %s queries", workload)
  query_dir = os.path.join(os.path.dirname(__file__), "..", "..",
      "testdata", "workloads", workload, "queries")
  name_pattern = re.compile(r"-(q\d+).test$")
  queries = list()
  for file_name in os.listdir(query_dir):
    name_match = name_pattern.search(file_name)
    if not name_match:
      continue
    file_path = os.path.join(query_dir, file_name)
    queries_in_file = load_queries_from_test_file(file_path)
    if len(queries_in_file) != 1:
      raise Exception("Expected exactly 1 query to be in file %s but got %s"
          % (file_path, len(queries_in_file)))
    query = queries_in_file[0]
    query.name = name_match.group(1)
    queries.append(query)
  return queries
def load_queries_from_test_file(file_path, db_name=None):
  """Parse 'file_path' as a query .test file and return a list of Query objects.

  'db_name' is stored on each query so the runner can later select a database.
  """
  LOG.debug("Loading queries from %s", file_path)
  def to_query(test_case):
    # Build one Query from a parsed test-file section.
    query = Query()
    query.sql = test_file_parser.remove_comments(test_case["QUERY"])
    query.db_name = db_name
    return query
  return [to_query(test_case)
      for test_case in test_file_parser.parse_query_test_file(file_path)]
def load_random_queries_and_populate_runtime_info(query_generator, model_translator,
    tables, db_name, impala, use_kerberos, query_count, query_timeout_secs,
    result_hash_log_dir):
  """Returns a list of random queries. Each query will also have its runtime info
  populated. The runtime info population also serves to validate the query.
  """
  LOG.info("Generating random queries")
  def candidate_query_iter():
    # Endless stream of freshly generated candidate queries; the consumer
    # decides when enough valid queries have been collected.
    while True:
      query_model = query_generator.create_query(tables)
      candidate = Query()
      candidate.sql = model_translator.write_query(query_model)
      candidate.db_name = db_name
      yield candidate
  return populate_runtime_info_for_random_queries(impala, use_kerberos,
      candidate_query_iter(), query_count, query_timeout_secs, result_hash_log_dir)
def populate_runtime_info_for_random_queries(impala, use_kerberos, candidate_queries,
    query_count, query_timeout_secs, result_hash_log_dir):
  """Returns a list of random queries. Each query will also have its runtime info
  populated. The runtime info population also serves to validate the query.

  Candidates that fail are skipped (timeouts, query-generator bugs, ...) unless
  the failure coincides with an Impala crash, in which case it is fatal.
  Consumes 'candidate_queries' until 'query_count' queries have validated.
  """
  start_time = datetime.now()
  queries = list()
  for query in candidate_queries:
    try:
      populate_runtime_info(query, impala, use_kerberos, result_hash_log_dir,
          timeout_secs=query_timeout_secs)
      queries.append(query)
    except Exception as e:
      # Ignore any non-fatal errors. These could be query timeouts or bad queries (
      # query generator bugs).
      if print_crash_info_if_exists(impala, start_time):
        # A crash was detected; re-raise with the original traceback intact.
        raise
      # Logger.warn() is a deprecated alias; warning() is the supported name.
      LOG.warning("Error running query (the test will continue)\n%s\n%s", e, query.sql,
          exc_info=True)
    if len(queries) == query_count:
      break
  return queries
def populate_runtime_info(query, impala, use_kerberos, result_hash_log_dir,
timeout_secs=maxint, samples=1, max_conflicting_samples=0):
"""Runs the given query by itself repeatedly until the minimum memory is determined
with and without spilling. Potentially all fields in the Query class (except
'sql') will be populated by this method. 'required_mem_mb_without_spilling' and
the corresponding runtime field may still be None if the query could not be run
without spilling.
"""
LOG.info("Collecting runtime info for query %s: \n%s", query.name, query.sql)
runner = QueryRunner()
runner.impalad = impala.impalads[0]
runner.result_hash_log_dir = result_hash_log_dir
runner.use_kerberos = use_kerberos
runner.connect()
limit_exceeded_mem = 0
non_spill_mem = None
spill_mem = None
report = None
mem_limit = None
old_required_mem_mb_without_spilling = query.required_mem_mb_without_spilling
old_required_mem_mb_with_spilling = query.required_mem_mb_with_spilling
# TODO: This | |
import numpy as np
import scipy as sp
import logging
import doctest
from pysnptools.snpreader import Bed
from pysnptools.snpreader import SnpHdf5
from pysnptools.snpreader import Dat
from pysnptools.snpreader import Dense
from pysnptools.snpreader import Pheno
from pysnptools.snpreader import Ped
from pysnptools.standardizer import Unit
from pysnptools.standardizer import Beta
from pysnptools.util import create_directory_if_necessary
from pysnptools.kernelreader.test import TestLoader as KrTestLoader
from pysnptools.kernelreader.test import TestDocStrings as KrDocStrings
from pysnptools.pstreader.test import TestLoader as PstTestLoader
from pysnptools.pstreader.test import TestDocStrings as PstDocStrings
from pysnptools.kernelreader.test import _fortesting_JustCheckExists
import unittest
import os.path
import time
class TestLoader(unittest.TestCase):
def xtest_aaa_hdf5_speed(self): #!!too slow to use all the time
    # Manual benchmark comparing Bed read, SnpHdf5 write, and SnpHdf5 reads
    # (F- and C-order) on a large dataset. Named with a leading 'x' so
    # unittest discovery skips it; depends on machine-local files below and
    # can only be run by hand.
    #currentFolder + "/examples/toydata"
    #currentFolder + "/examples/delme.hdf5"
    bedFileName = r"c:\Source\carlk\fastlmm\tests\datasets\all_chr.maf0.001.N300" #!! local paths
    hdf5Pattern = r"c:\Source\carlk\fastlmm\tests\datasets\del.{0}.hdf5"#!!
    tt0 = time.time()
    snpreader_bed = Bed(bedFileName,count_A1=False)
    S0 = snpreader_bed.sid_count
    # Cap the benchmark at 15000 SNPs.
    snp_index_list0 = list(range(min(S0,15000)))
    hdf5FileName = hdf5Pattern.format(len(snp_index_list0))
    snpDataBed = snpreader_bed[:,snp_index_list0].read()
    tt1 = time.time()
    logging.info("Read bed %.2f seconds" % (tt1 - tt0))
    SnpHdf5.write(hdf5FileName, snpDataBed)
    tt2 = time.time()
    logging.info("write SnpHdf5 bed %.2f seconds" % (tt2 - tt1))
    snpreader_hdf5 = SnpHdf5(hdf5FileName)
    assert(snpreader_hdf5.iid_count == snpreader_bed.iid_count)
    S = snpreader_hdf5.sid_count
    N_original = snpreader_hdf5.iid_count
    # Strided, reversed-then-sorted index lists stress non-contiguous access.
    iid_index_list = sorted(range(N_original - 1,0,-2))
    snp_index_list = sorted(range(S - 1,0,-2))#!!
    #snp_index_list = range(S/2)
    snpreader_hdf5 = snpreader_hdf5[iid_index_list,:]
    snpDataHdf5 = snpreader_hdf5[:,snp_index_list].read()
    tt3 = time.time()
    logging.info("read SnpHdf5 with reversed indexes bed %.2f seconds" % (tt3 - tt2))
    snpDataHdf5C = snpreader_hdf5[:,snp_index_list].read(order = "C")
    tt4 = time.time()
    logging.info("read SnpHdf5 C with reversed indexes bed %.2f seconds" % (tt4 - tt3))
    print("the end")
@classmethod
def setUpClass(cls):
    """Load the shared toydata Bed file once for the whole test class.

    The python-only, Fortran-order read becomes the reference data
    ('cls.snps') that the C-reader tests compare against.
    """
    # The first parameter of a classmethod is conventionally named 'cls'.
    cls.currentFolder = os.path.dirname(os.path.realpath(__file__))
    #TODO: get data set with NANs!
    snpreader = Bed(cls.currentFolder + "/examples/toydata",count_A1=False)
    cls.pheno_fn = cls.currentFolder + "/examples/toydata.phe"
    cls.snpdata = snpreader.read(order='F',force_python_only=True)
    cls.snps = cls.snpdata.val
def test_diagKtoN(self):
    """
    make sure standardization on SNPs results in sum(diag(K))=N
    """
    from pysnptools.standardizer import DiagKtoN
    np.random.seed(42)
    snps = np.random.random((100,1000))
    DiagKtoN().standardize(snps)
    kernel = snps.dot(snps.T)
    trace = np.sum(np.diag(kernel))
    np.testing.assert_almost_equal(100, trace)
def test_c_reader_bed(self):
    """The C bed reader must reproduce the reference data."""
    bed_reader = Bed(self.currentFolder + "/examples/toydata", count_A1=False)
    self.c_reader(bed_reader)
def test_c_reader_bed_count_A1(self):
    """Reading with count_A1=True then flipping 0<->2 must match the reference."""
    reader = Bed(self.currentFolder + "/examples/toydata", count_A1=True)
    flipped = reader.read()
    flipped.val = 2 - flipped.val
    self.c_reader(flipped)
def test_p_reader_bed(self):
    """Pure-python bed read must also satisfy the c_reader checks."""
    snpdata = Bed(self.currentFolder + "/examples/toydata", count_A1=False).read(force_python_only=True)
    self.c_reader(snpdata)
def test_p_reader_bed_count_A1(self):
    """Pure-python count_A1=True read, flipped 0<->2, must match the reference."""
    reader = Bed(self.currentFolder + "/examples/toydata", count_A1=True)
    flipped = reader.read(force_python_only=True)
    flipped.val = 2 - flipped.val
    self.c_reader(flipped)
def test_scalar_index(self):
    """Indexing a reader with numpy scalar ints must be accepted."""
    reader = Bed(self.currentFolder + "/examples/toydata", count_A1=False)
    scalar = np.int64(1)
    reader[scalar, scalar]
def test_c_reader_hdf5(self):
    """The SnpHdf5 reader must reproduce the reference data."""
    hdf5_reader = SnpHdf5(self.currentFolder + "/examples/toydata.snpmajor.snp.hdf5")
    self.c_reader(hdf5_reader)
def test_hdf5_case3(self):
    """Every-other-iid slices of the hdf5 and bed readers must agree."""
    hdf5_half = SnpHdf5(self.currentFolder + "/examples/toydata.snpmajor.snp.hdf5")[::2, :]
    bed_half = Bed(self.currentFolder + "/examples/toydata", count_A1=False)[::2, :]
    self.assertTrue(np.allclose(hdf5_half.read().val, bed_half.read().val, rtol=1e-05, atol=1e-05))
def test_c_reader_dat(self):
    """Round-trip a Dat file: read, write (with an injected NaN), and re-read.

    Also checks writing/reading a SnpData with zero sids.
    """
    snpreader = Dat(self.currentFolder + "/examples/toydata.dat")[:,::100]
    _fortesting_JustCheckExists().input(snpreader)
    snpdata1 = snpreader.read()
    self.assertEqual(np.float64, snpdata1.val.dtype)
    self.assertTrue(np.allclose(self.snps[:,::100], snpdata1.val, rtol=1e-05, atol=1e-05))
    # np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.
    snpdata1.val[1,2] = np.nan # Inject a missing value to test writing and reading missing values
    output = "tempdir/snpreader/toydata.dat"
    create_directory_if_necessary(output)
    Dat.write(output,snpdata1)
    snpdata2 = Dat(output).read()
    np.testing.assert_array_almost_equal(snpdata1.val, snpdata2.val, decimal=10)
    snpdata3 = snpdata1[:,0:0].read() #create snpdata with no sids
    output = "tempdir/snpreader/toydata3.dat"
    Dat.write(output,snpdata3)
    snpdata4 = Dat(output).read()
    assert snpdata3 == snpdata4
@staticmethod
def assert_match_012_210(snpdata1, snpdata2):
    """Assert the two SnpData objects match column-by-column, treating an
    allele coding of 0,1,2 and its flip 2,1,0 as equivalent.

    Missing values (NaN) must occupy the same positions in both.
    """
    for sid_index in range(snpdata1.sid_count): #Check that every row matches (except OK if 0,1,2 can be 2,1,0)
        # NaN != NaN, so a self-equality test marks the non-missing entries.
        goods1 = (snpdata1.val[:,sid_index] == snpdata1.val[:,sid_index]) # find non-missing
        goods2= (snpdata2.val[:,sid_index] == snpdata2.val[:,sid_index]) # find non-missing
        assert (goods1==goods2).all() #assert that they agree on non-missing
        # Accept either an exact match or the flipped coding (x -> 2 - x).
        is_ok = (snpdata1.val[goods1,sid_index] == snpdata2.val[goods2,sid_index]).all() or (snpdata1.val[goods1,sid_index] == snpdata2.val[goods2,sid_index]*-1+2).all()
        assert is_ok
def test_c_reader_ped(self):
    """Round-trip Ped write/read, comparing with 0,1,2 vs 2,1,0 tolerance.

    The direct Ped-read branch is disabled ('if False') because it is too
    slow for routine testing; the reference data is used instead.
    """
    if False: #Too slow for routine testing
        snpdata1 = Ped(self.currentFolder + "/examples/toydata.ped")[::25,::1000].read()
        self.assertEqual(np.float64, snpdata1.val.dtype)
        TestLoader.assert_match_012_210(self.snpdata[::25,::1000].read(),snpdata1)
    else:
        snpdata1 = self.snpdata[::25,::1000].read()
    output = "tempdir/snpreader/toydata.ped"
    create_directory_if_necessary(output)
    # np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.
    snpdata1.val[1,2] = np.nan # Inject a missing value to test writing and reading missing values
    Ped.write(output, snpdata1)
    snpreader = Ped(output)
    _fortesting_JustCheckExists().input(snpreader)
    s = str(snpreader)
    snpdata2 = snpreader.read()
    TestLoader.assert_match_012_210(snpdata1,snpdata2)
def test_c_reader_pheno(self):
    """Round-trip Pheno write/read plus the dict- and iid-based constructors.

    Checks the file path, the loadOnePhen dict path (plain and vectorized),
    the None-with-iid path, and the alternate id/fid file layouts.
    """
    snpdata1 = Pheno(self.currentFolder + "/examples/toydata.phe").read()
    self.assertEqual(np.float64, snpdata1.val.dtype)
    # np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.
    snpdata1.val[1,0] = np.nan # Inject a missing value to test writing and reading missing values
    output = "tempdir/snpreader/toydata.phe"
    create_directory_if_necessary(output)
    Pheno.write(output, snpdata1)
    snpreader = Pheno(output)
    _fortesting_JustCheckExists().input(snpreader)
    s = str(snpreader)
    snpdata2 = snpreader.read()
    np.testing.assert_array_almost_equal(snpdata1.val, snpdata2.val, decimal=10)
    snpdata1 = Pheno(self.currentFolder + "/examples/toydata.phe").read()
    import pysnptools.util.pheno as pstpheno
    # 'pheno_dict' (was 'dict') avoids shadowing the builtin.
    pheno_dict = pstpheno.loadOnePhen(self.currentFolder + "/examples/toydata.phe",missing="")
    snpdata3 = Pheno(pheno_dict).read()
    np.testing.assert_array_almost_equal(snpdata1.val, snpdata3.val, decimal=10)
    pheno_dict = pstpheno.loadOnePhen(self.currentFolder + "/examples/toydata.phe",missing="",vectorize=True)
    assert len(pheno_dict['vals'].shape)==1, "test 1-d array of values"
    snpdata3 = Pheno(pheno_dict).read()
    np.testing.assert_array_almost_equal(snpdata1.val, snpdata3.val, decimal=10)
    snpdata4 = Pheno(None,iid_if_none=snpdata1.iid)
    assert (snpdata4.row == snpdata1.row).all() and snpdata4.col_count == 0
    snpdata5 = Pheno(self.currentFolder + "/examples/toydata.id.phe").read()
    np.testing.assert_array_almost_equal(snpdata1.val, snpdata5.val, decimal=10)
    snpdata6 = Pheno(self.currentFolder + "/examples/toydata.fid.phe").read()
    np.testing.assert_array_almost_equal(snpdata1.val, snpdata6.val, decimal=10)
def test_c_reader_dense(self):
    """Round-trip a Dense file with an injected missing value."""
    snpdata1 = self.snpdata[:,::100].read()
    # np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.
    snpdata1.val[1,2] = np.nan # Inject a missing value to test writing and reading missing values
    output = "tempdir/snpreader/toydata.dense.txt"
    create_directory_if_necessary(output)
    Dense.write(output, snpdata1)
    snpreader = Dense(output)
    _fortesting_JustCheckExists().input(snpreader)
    s = str(snpreader)
    snpdata2 = snpreader.read()
    np.testing.assert_array_almost_equal(snpdata1.val, snpdata2.val, decimal=10)
def test_some_std(self):
    """Kernel computation and standardizers must agree across code paths.

    Compares direct vs blocked kernel reads and smoke-tests each
    standardizer over both float dtypes.
    """
    k0 = self.snpdata.read_kernel(standardizer=Unit()).val
    from pysnptools.kernelreader import SnpKernel
    k1 = self.snpdata.read_kernel(standardizer=Unit())
    np.testing.assert_array_almost_equal(k0, k1.val, decimal=10)
    from pysnptools.snpreader import SnpData
    snpdata2 = SnpData(iid=self.snpdata.iid,sid=self.snpdata.sid,pos=self.snpdata.pos,val=np.array(self.snpdata.val))
    s = str(snpdata2)
    snpdata2.standardize()
    s = str(snpdata2)
    snpreader = Bed(self.currentFolder + "/examples/toydata",count_A1=False)
    # Blocked kernel computation must match the all-at-once result.
    k2 = snpreader.read_kernel(standardizer=Unit(),block_size=500).val
    np.testing.assert_array_almost_equal(k0, k2, decimal=10)
    from pysnptools.standardizer.identity import Identity
    from pysnptools.standardizer.diag_K_to_N import DiagKtoN
    # scipy's float64/float32 aliases were deprecated and removed; use numpy's.
    for dtype in [np.float64,np.float32]:
        for std in [Unit(),Beta(1,25),Identity(),DiagKtoN()]:
            s = str(std)
            np.random.seed(0)
            x = np.array(np.random.randint(3,size=[60,100]),dtype=dtype)
            x2 = x[:,::2]
            x2b = np.array(x2)
            #LATER what's this about? It doesn't do non-contiguous?
            #assert not x2.flags['C_CONTIGUOUS'] and not x2.flags['F_CONTIGUOUS'] #set up to test non contiguous
            #assert x2b.flags['C_CONTIGUOUS'] or x2b.flags['F_CONTIGUOUS'] #set up to test non contiguous
            #a,b = std.standardize(x2b),std.standardize(x2)
            #np.testing.assert_array_almost_equal(a,b)
    logging.info("done")
def c_reader(self,snpreader):
    """
    make sure c-reader yields same result
    """
    snpdata = snpreader.read(order='F',force_python_only=False)
    values = snpdata.val
    self.assertEqual(np.float64, values.dtype)
    self.assertTrue(np.allclose(self.snps, values, rtol=1e-05, atol=1e-05))
    return snpdata
def test_standardize_bed(self):
    """Blocked vs plain standardize must agree for the Bed reader."""
    self.standardize(Bed(self.currentFolder + "/examples/toydata", count_A1=False))
def test_standardize_hdf5(self):
    """Blocked vs plain standardize must agree for the SnpHdf5 reader."""
    self.standardize(SnpHdf5(self.currentFolder + "/examples/toydata.iidmajor.snp.hdf5"))
def test_standardize_dat(self):
    """Blocked vs plain standardize must agree for the Dat reader."""
    self.standardize(Dat(self.currentFolder + "/examples/toydata.dat"))
def test_standardize_ped(self):
    """Blocked vs plain standardize must agree for the Ped reader."""
    self.standardize(Ped(self.currentFolder + "/examples/toydata"))
def standardize(self,snpreader):
    """
    make sure blocked standardize yields same result as regular standardize

    Runs Unit() (plain, blocked, F-order, C-order) and Beta(1,25)
    (plain, F-order, C-order) over both float dtypes, checking shapes
    and values agree.
    """
    # scipy's float64/float32 aliases were deprecated and removed; use numpy's.
    for dtype in [np.float64,np.float32]:
        snps = snpreader.read(order='F',force_python_only=True,dtype=dtype).val
        self.assertEqual(dtype, snps.dtype)
        snp_s1 = Unit().standardize(snps.copy(), force_python_only=True)
        snp_s2 = Unit().standardize(snps.copy(), block_size=100, force_python_only=True)
        snps_F = np.array(snps, dtype=dtype, order="F")
        snp_s3 = Unit().standardize(snps_F)
        snps_C = np.array(snps, dtype=dtype, order="C")
        snp_s4 = Unit().standardize(snps_C)
        snp_beta1 = Beta(1, 25).standardize(snps.copy(), force_python_only=True)
        snps_F = np.array(snps, dtype=dtype, order="F")
        snp_beta2 = Beta(1, 25).standardize(snps_F)
        snps_C = np.array(snps, dtype=dtype, order="C")
        snp_beta3 = Beta(1, 25).standardize(snps_C)
        # All Unit() variants must agree in shape and values.
        for other in (snp_s2, snp_s3, snp_s4):
            self.assertEqual(snp_s1.shape[0], other.shape[0])
            self.assertEqual(snp_s1.shape[1], other.shape[1])
        for other in (snp_s2, snp_s3, snp_s4):
            self.assertTrue(np.allclose(snp_s1, other, rtol=1e-05, atol=1e-05))
        # Likewise for the Beta(1,25) variants.
        for other in (snp_beta2, snp_beta3):
            self.assertEqual(snp_beta1.shape[0], other.shape[0])
            self.assertEqual(snp_beta1.shape[1], other.shape[1])
        for other in (snp_beta2, snp_beta3):
            self.assertTrue(np.allclose(snp_beta1, other, rtol=1e-05, atol=1e-05))
def test_load_and_standardize_bed(self):
    """load_and_standardize with the same Bed reader for both arguments."""
    bed_reader = Bed(self.currentFolder + "/examples/toydata", count_A1=False)
    self.load_and_standardize(bed_reader, bed_reader)
def too_slow_test_write_bedbig(self):
    """Manual/slow: round-trip a 100k x 50k all-zero SnpData through Bed."""
    iid_count = 100000
    sid_count = 50000
    from pysnptools.snpreader import SnpData
    iid = np.array([[str(i), str(i)] for i in range(iid_count)])
    sid = np.array(["sid_{0}".format(i) for i in range(sid_count)])
    pos = np.array([[i, i, i] for i in range(sid_count)])
    np.random.seed(0)
    big_data = SnpData(iid, sid, np.zeros((iid_count, sid_count)), pos=pos)
    out_path = "tempdir/bedbig.{0}.{1}".format(iid_count, sid_count)
    create_directory_if_necessary(out_path)
    Bed.write(out_path, big_data, count_A1=False)
    round_trip = Bed(out_path, count_A1=False).read()
    np.testing.assert_array_almost_equal(big_data.val, round_trip.val, decimal=10)
def test_write_bed_f64cpp_0(self):
    """Round-trip an empty (0-iid) slice through the C++ bed writer."""
    snpreader = Bed(self.currentFolder + "/examples/toydata", count_A1=False)
    iid_index = 0
    logging.info("iid={0}".format(iid_index))
    snpdata = snpreader[0:iid_index, :].read(order='F', dtype=np.float64)
    if snpdata.iid_count > 0:
        # Inject a missing value when there is at least one row.
        snpdata.val[-1, 0] = float("NAN")
    output = "tempdir/toydata.F64cpp.{0}".format(iid_index)
    create_directory_if_necessary(output)
    Bed.write(output, snpdata, count_A1=False)
    round_trip = Bed(output, count_A1=False).read()
    np.testing.assert_array_almost_equal(snpdata.val, round_trip.val, decimal=10)
def test_write_bed_f64cpp_1(self):
    """Round-trip a single-iid slice through the C++ bed writer."""
    snpreader = Bed(self.currentFolder + "/examples/toydata", count_A1=False)
    iid_index = 1
    logging.info("iid={0}".format(iid_index))
    snpdata = snpreader[0:iid_index, :].read(order='F', dtype=np.float64)
    if snpdata.iid_count > 0:
        # Inject a missing value to exercise NaN handling on write/read.
        snpdata.val[-1, 0] = float("NAN")
    output = "tempdir/toydata.F64cpp.{0}".format(iid_index)
    create_directory_if_necessary(output)
    Bed.write(output, snpdata, count_A1=False)
    round_trip = Bed(output, count_A1=False).read()
    np.testing.assert_array_almost_equal(snpdata.val, round_trip.val, decimal=10)
def test_write_bed_f64cpp_5(self):
    """Round-trip a five-iid slice (with one NaN) through the C++ bed writer."""
    from pysnptools.kernelreader.test import _fortesting_JustCheckExists
    snpreader = Bed(self.currentFolder + "/examples/toydata", count_A1=False)
    _fortesting_JustCheckExists().input(snpreader)
    iid_index = 5
    logging.info("iid={0}".format(iid_index))
    snpdata = snpreader[0:iid_index, :].read(order='F', dtype=np.float64)
    if snpdata.iid_count > 0:
        # Inject a missing value to exercise NaN handling on write/read.
        snpdata.val[-1, 0] = float("NAN")
    output = "tempdir/toydata.F64cpp.{0}".format(iid_index)
    create_directory_if_necessary(output)
    Bed.write(output, snpdata, count_A1=False)
    round_trip = Bed(output, count_A1=False).read()
    np.testing.assert_array_almost_equal(snpdata.val, round_trip.val, decimal=10)
def test_write_bed_f64cpp_5_python(self):
    """Round-trip a five-iid slice through the pure-python bed writer."""
    snpreader = Bed(self.currentFolder + "/examples/toydata", count_A1=False)
    iid_index = 5
    logging.info("iid={0}".format(iid_index))
    snpdata = snpreader[0:iid_index, :].read(order='F', dtype=np.float64)
    if snpdata.iid_count > 0:
        # Inject a missing value to exercise NaN handling on write/read.
        snpdata.val[-1, 0] = float("NAN")
    output = "tempdir/toydata.F64python.{0}".format(iid_index)
    create_directory_if_necessary(output)
    # Note: writes via the python path with the default count_A1.
    Bed.write(output, snpdata, force_python_only=True)
    round_trip = Bed(output, count_A1=False).read()
    np.testing.assert_array_almost_equal(snpdata.val, round_trip.val, decimal=10)
def test_write_x_x_cpp(self):
    """Bed round-trip for every combination of count_A1, order, and dtype."""
    for count_A1 in [False, True]:
        snpreader = Bed(self.currentFolder + "/examples/toydata", count_A1=count_A1)
        for order in ['C', 'F']:
            for dtype in [np.float32, np.float64]:
                snpdata = snpreader.read(order=order, dtype=dtype)
                # Inject a missing value to exercise NaN handling on write/read.
                snpdata.val[-1, 0] = float("NAN")
                bits = "32" if dtype == np.float32 else "64"
                output = "tempdir/toydata.{0}{1}.cpp".format(order, bits)
                create_directory_if_necessary(output)
                Bed.write(output, snpdata, count_A1=count_A1)
                round_trip = Bed(output, count_A1=count_A1).read()
                np.testing.assert_array_almost_equal(snpdata.val, round_trip.val, decimal=10)
def test_subset_view(self):
| |
B 3 46.550 -68.560 64.661 1.00 25.93 N
ATOM 17 CA GLN B 3 46.391 -69.970 64.997 1.00 24.46 C
ATOM 18 C GLN B 3 44.959 -70.433 64.750 1.00 23.33 C
ATOM 19 O GLN B 3 44.010 -69.863 65.287 1.00 20.10 O
ATOM 20 CB GLN B 3 46.782 -70.222 66.455 1.00 29.74 C
ATOM 21 CG GLN B 3 46.646 -71.671 66.893 1.00 39.72 C
ATOM 22 CD GLN B 3 47.041 -71.883 68.341 1.00 51.31 C
ATOM 23 OE1 GLN B 3 47.031 -73.007 68.841 1.00 57.11 O
ATOM 24 NE2 GLN B 3 47.393 -70.799 69.023 1.00 54.00 N
ATOM 25 N MET B 4 44.812 -71.472 63.933 1.00 24.27 N
ATOM 26 CA MET B 4 43.496 -72.013 63.613 1.00 22.89 C
ATOM 27 C MET B 4 42.969 -72.880 64.752 1.00 21.48 C
ATOM 28 O MET B 4 43.379 -74.029 64.912 1.00 23.73 O
ATOM 29 CB MET B 4 43.551 -72.824 62.316 1.00 22.77 C
ATOM 30 CG MET B 4 43.996 -72.024 61.103 1.00 20.45 C
ATOM 31 SD MET B 4 42.904 -70.635 60.747 1.00 23.95 S
ATOM 32 CE MET B 4 41.353 -71.486 60.468 1.00 24.76 C
ATOM 33 N THR B 5 42.057 -72.320 65.541 1.00 20.55 N
ATOM 34 CA THR B 5 41.472 -73.040 66.666 1.00 20.64 C
ATOM 35 C THR B 5 40.063 -73.521 66.335 1.00 20.08 C
ATOM 36 O THR B 5 39.176 -72.720 66.042 1.00 20.87 O
ATOM 37 CB THR B 5 41.422 -72.165 67.932 1.00 23.39 C
ATOM 38 OG1 THR B 5 42.751 -71.766 68.293 1.00 30.91 O
ATOM 39 CG2 THR B 5 40.798 -72.934 69.087 1.00 26.97 C
TER
ATOM 1 N ASP A 1 46.675 -60.355 58.113 1.00 34.60 N
ATOM 2 CA ASP A 1 46.807 -61.464 59.051 1.00 33.86 C
ATOM 3 C ASP A 1 45.597 -61.546 59.975 1.00 34.02 C
ATOM 4 O ASP A 1 45.693 -62.048 61.095 1.00 32.23 O
ATOM 5 CB ASP A 1 48.090 -61.322 59.873 1.00 33.99 C
ATOM 6 CG ASP A 1 49.337 -61.310 59.010 1.00 36.93 C
ATOM 7 OD1 ASP A 1 49.255 -60.851 57.851 1.00 36.67 O
ATOM 8 OD2 ASP A 1 50.398 -61.759 59.491 1.00 38.81 O
ATOM 9 N VAL A 2 44.460 -61.050 59.499 1.00 31.94 N
ATOM 10 CA VAL A 2 43.231 -61.067 60.281 1.00 29.81 C
ATOM 11 C VAL A 2 42.306 -62.174 59.789 1.00 28.28 C
ATOM 12 O VAL A 2 41.817 -62.132 58.660 1.00 27.24 O
ATOM 13 CB VAL A 2 42.520 -59.704 60.230 1.00 30.66 C
ATOM 14 CG1 VAL A 2 41.241 -59.742 61.052 1.00 29.42 C
ATOM 15 CG2 VAL A 2 43.447 -58.604 60.725 1.00 31.43 C
ATOM 16 N GLN A 3 42.071 -63.165 60.643 1.00 25.93 N
ATOM 17 CA GLN A 3 41.205 -64.286 60.296 1.00 24.46 C
ATOM 18 C GLN A 3 40.490 -64.827 61.530 1.00 23.33 C
ATOM 19 O GLN A 3 41.120 -65.383 62.430 1.00 20.10 O
ATOM 20 CB GLN A 3 42.011 -65.399 59.624 1.00 29.74 C
ATOM 21 CG GLN A 3 41.185 -66.611 59.227 1.00 39.72 C
ATOM 22 CD GLN A 3 42.016 -67.692 58.564 1.00 51.31 C
ATOM 23 OE1 GLN A 3 43.226 -67.542 58.391 1.00 57.11 O
ATOM 24 NE2 GLN A 3 41.370 -68.790 58.190 1.00 54.00 N
ATOM 25 N MET A 4 39.172 -64.662 61.564 1.00 24.27 N
ATOM 26 CA MET A 4 38.369 -65.133 62.686 1.00 22.89 C
ATOM 27 C MET A 4 37.658 -66.437 62.341 1.00 21.48 C
ATOM 28 O MET A 4 37.275 -67.196 63.233 1.00 23.73 O
ATOM 29 CB MET A 4 37.349 -64.070 63.100 1.00 22.77 C
ATOM 30 CG MET A 4 37.968 -62.752 63.535 1.00 20.45 C
ATOM 31 SD MET A 4 39.065 -62.933 64.954 1.00 23.95 S
ATOM 32 CE MET A 4 37.932 -63.588 66.177 1.00 24.76 C
ATOM 33 N THR A 5 37.492 -66.682 61.043 1.00 20.55 N
ATOM 34 CA THR A 5 36.832 -67.882 60.528 1.00 20.64 C
ATOM 35 C THR A 5 35.420 -68.037 61.091 1.00 20.08 C
ATOM 36 O THR A 5 35.211 -68.701 62.108 1.00 20.87 O
ATOM 37 CB THR A 5 37.647 -69.156 60.837 1.00 23.39 C
ATOM 38 OG1 THR A 5 39.009 -68.964 60.435 1.00 30.91 O
ATOM 39 CG2 THR A 5 37.073 -70.352 60.091 1.00 26.97 C
TER
"""
pdb_str_18 = """
ATOM 1 N MET 1 158.070 173.095 147.115 1.00 50.00 A16S N
ATOM 2 CA MET 1 157.408 172.627 148.359 1.00 50.00 A16S C
ATOM 3 CB MET 1 157.550 171.094 148.516 1.00 50.00 A16S C
ATOM 4 CG MET 1 156.748 170.503 149.691 1.00 50.00 A16S C
ATOM 5 SD MET 1 154.968 170.855 149.612 1.00 50.00 A16S S
ATOM 6 CE MET 1 154.505 169.913 151.091 1.00 50.00 A16S C
ATOM 7 C MET 1 157.958 173.331 149.563 1.00 50.00 A16S C
ATOM 8 O MET 1 157.196 173.814 150.399 1.00 50.00 A16S O
TER
ATOM 9 N MET 1 174.781 155.306 150.054 1.00 50.00 B16S N
ATOM 10 CA MET 1 174.332 154.630 151.298 1.00 50.00 B16S C
ATOM 11 CB MET 1 175.016 153.251 151.453 1.00 50.00 B16S C
ATOM 12 CG MET 1 174.481 152.410 152.628 1.00 50.00 B16S C
ATOM 13 SD MET 1 172.693 152.099 152.550 1.00 50.00 B16S S
ATOM 14 CE MET 1 172.601 151.052 154.028 1.00 50.00 B16S C
ATOM 15 C MET 1 174.594 155.484 152.502 1.00 50.00 B16S C
ATOM 16 O MET 1 173.710 155.660 153.339 1.00 50.00 B16S O
TER
END
"""
pdb_str_19 = """
ATOM 1 N MET A 1 158.070 173.095 147.115 1.00 50.00 A16S N
ATOM 2 CA MET A 1 157.408 172.627 148.359 1.00 50.00 A16S C
ATOM 3 CB MET A 1 157.550 171.094 148.516 1.00 50.00 A16S C
ATOM 4 CG MET A 1 156.748 170.503 149.691 1.00 50.00 A16S C
ATOM 5 SD MET A 1 154.968 170.855 149.612 1.00 50.00 A16S S
ATOM 6 CE MET A 1 154.505 169.913 151.091 1.00 50.00 A16S C
ATOM 7 C MET A 1 157.958 173.331 149.563 1.00 50.00 A16S C
ATOM 8 O MET A 1 157.196 173.814 150.399 1.00 50.00 A16S O
TER
ATOM 9 N MET 1 174.781 155.306 150.054 1.00 50.00 B16S N
ATOM 10 CA MET 1 174.332 154.630 151.298 1.00 50.00 B16S C
ATOM 11 CB MET 1 175.016 153.251 151.453 1.00 50.00 B16S C
ATOM 12 CG MET 1 174.481 152.410 152.628 1.00 50.00 B16S C
ATOM 13 SD MET 1 172.693 152.099 152.550 1.00 50.00 B16S S
ATOM 14 CE MET 1 172.601 151.052 154.028 1.00 50.00 B16S C
ATOM 15 C MET 1 174.594 155.484 152.502 1.00 50.00 B16S C
ATOM 16 O MET 1 173.710 155.660 153.339 1.00 50.00 B16S O
TER
END
"""
pdb_str_20 = """
CRYST1 25.287 40.217 65.471 90.00 90.00 90.00 P 21 21 21
SCALE1 0.039546 0.000000 0.000000 0.00000
SCALE2 0.000000 0.024865 0.000000 0.00000
SCALE3 0.000000 0.000000 0.015274 0.00000
ATOM 1 P DA A 5 -8.062 -5.965 -15.755 1.00 42.17 P
ATOM 2 OP1 DA A 5 -8.426 -7.228 -16.405 1.00 50.61 O
ATOM 3 OP2 DA A 5 -8.689 -5.557 -14.457 1.00 51.75 O
ATOM 4 O5' DA A 5 -6.496 -5.961 -15.638 1.00 34.89 O
ATOM 5 C5' DA A 5 -5.791 -6.321 -16.790 1.00 30.71 C
ATOM 6 C4' DA A 5 -4.355 -5.917 -16.600 1.00 34.43 C
ATOM 7 O4' DA A 5 -4.303 -4.509 -16.239 1.00 33.96 O
ATOM 8 C3' DA A 5 -3.630 -6.687 -15.491 1.00 35.56 C
ATOM 9 O3' DA A 5 -2.407 -7.257 -16.020 1.00 33.08 O
ATOM 10 C2' DA A 5 -3.531 -5.654 -14.384 1.00 32.41 C
ATOM 11 C1' DA A 5 -3.435 -4.334 -15.130 1.00 28.44 C
ATOM 12 N9 DA A 5 -3.904 -3.143 -14.449 1.00 28.37 N
ATOM 13 C8 DA A 5 -5.187 -2.933 -14.022 1.00 27.53 C
ATOM 14 N7 DA A 5 -5.401 -1.724 -13.565 1.00 29.33 N
ATOM 15 C5 DA A 5 -4.187 -1.082 -13.747 1.00 23.78 C
ATOM 16 C6 DA A 5 -3.761 0.226 -13.474 1.00 25.22 C
ATOM 17 N6 DA A 5 -4.519 1.150 -12.896 1.00 25.69 N
ATOM 18 N1 DA A 5 -2.485 0.535 -13.749 1.00 24.39 N
ATOM 19 C2 DA A | |
# This file populates the database
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Genre, Movie
"""
SQLAlchemy executes CRUD operations via an interface called a session
Sessions allow us to write down all commands we want to execute but not
send to the database until calling a commit
Use these to begin interpreter queries:
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Genre, Movie
engine = create_engine('sqlite:///catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
This gives us a staging zone for all the objects loaded into the DBSession
object
Any change made to the objects in the session won't be persisted into the
database until we call session.commit()
Some Categories:
Action
Adventure
Comedy
Drama
Fantasy
Horror
Mystery
Romance
Science Fiction
Thriller
CREATE
Syntax for making a New Entry (Genre)
>>> new_entry = ClassName(property='value', ...)
>>> session.add(new_entry)
>>> session.commit()
Add a Movie
>>> alien = Movie(name='Alien', description='A 1979 film directed by Ridley
... Scott, it follows the crew of the commercial space tug Nostromo who
... encounter the eponymous Alien, a deadly and aggressive extraterrestrial
... set loose on the ship.', genre=first_genre)
>>> session.add(alien)
>>> session.commit()
READ
Use the Session to Interact with the Database
We can check that a new entry was added by using
>>> session.query(Genre).all()
[<database_setup.Genre object at 0x10d4aeef0>]
This goes into the database and finds the table that corresponds to the
Genre class and finds all the entries in the table and returns them in a
list
Check that the Movie was added
>>> session.query(Movie).all()
[<database_setup.Movie object at 0x10e47c400>]
UPDATE
If we want to edit anything we can query the database for a specific item
The .one() at the end makes sure SQLAlchemy only gives the one object we want
instead of a list we have to iterate over
>>> alien = session.query(Movie).filter_by(id=1).one()
Just to check
>>> alien.name
'Alien'
>>> alien.id
1
>>> alien.description
'A 1979 film directed by <NAME>, it follows the crew of the commercial
space tug Nostromo who encounter the eponymous Alien, a deadly and aggressive
extraterrestrial set loose on the ship'
>>> alien.description = 'A 1979 film directed by <NAME>, it follows the
... crew of the commercial space tug Nostromo who encounter the eponymous
... alien, a deadly and aggressive extraterrestrial set loose on the ship'
>>> session.add(alien)
>>> session.commit()
To check the change was made
>>> alien = session.query(Movie).filter_by(id=1).one()
>>> alien.description
'A 1979 film directed by <NAME>, it follows the crew of the commercial
space tug Nostromo who encounter the eponymous alien, a deadly and aggressive
extraterrestrial set loose on the ship'
DELETE
In order to delete from our database, we want to query for the object we want
to delete, delete it, then commit the session
>>> schindler = session.query(Movie).filter_by(name="Schindler's List").one()
>>> session.delete(schindler)
>>> session.commit()
Now we can search again to see if the movie was deleted
>>> schindler = session.query(Movie).filter_by(name="Schindler's List").one()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python3.7/site-packages/sqlalchemy/orm/query.py",
line 3282, in one
raise orm_exc.NoResultFound("No row was found for one()")
sqlalchemy.orm.exc.NoResultFound: No row was found for one()
"""
# This lets program know which database to communicate with
engine = create_engine('sqlite:///catalog.db')
# This binds the engine to the Base class which makes the connection between
# the class definitions and their corresponding tables within the database
Base.metadata.bind = engine
# Establish a link between code executions and the engine
DBSession = sessionmaker(bind=engine)
# This gives a staging zone for all objects loaded into DBSession object
session = DBSession()
# BUGFIX: `User` is referenced below but was missing from the imports at the
# top of this file, so creating User1 raised NameError.  It is imported here
# to keep this fix self-contained (assumes database_setup defines User, as
# the user_id foreign keys on the Movie rows below imply -- TODO confirm).
from database_setup import User
# Create dummy User
User1 = User(name="<NAME>", email="<EMAIL>",
             picture='https://pbs.twimg.com/profile_images/2671170543/18debd694829ed78203a5a36dd364160_300x300.png')
session.add(User1)
session.commit()
# The Action Genre and Alien (as Action) has been added to the database
# Add the rest of the genres and some movies as part of those genres
# NOTE(review): every Movie below hard-codes user_id=1, which assumes the
# dummy user created above received primary key 1 (true only for a fresh
# catalog.db) -- confirm before re-running against an existing database.
# Each add() is followed by an immediate commit(), so a failure partway
# through leaves the earlier rows persisted.
# Action Genre
action = Genre(name="Action")
session.add(action)
session.commit()
# Action Movies
alien = Movie(user_id=1, name="Alien",
              description=("A 1979 film directed by <NAME>, it follows the crew"
                           " of the commercial space tug Nostromo who encounter the"
                           " eponymous Alien, a deadly and aggressive extraterrestrial"
                           " set loose on the ship"),
              genre=action)
session.add(alien)
session.commit()
die_hard = Movie(user_id=1, name="Die Hard",
                 description=("A 1988 film directed by <NAME> that follows"
                              " off-duty New York City Police Department officer John"
                              " McClane (<NAME>) who is caught in a Los Angeles"
                              " skyscraper during a heist led by <NAME> (<NAME>)"),
                 genre=action)
session.add(die_hard)
session.commit()
predator = Movie(user_id=1, name="Predator",
                 description=("A 1987 film directed by <NAME> that follows an"
                              " elite military rescue team on a mission to save hostages"
                              " in guerrilla-held territory in Central America. The"
                              " Predator, a technologically advanced space alien, stalks"
                              " and hunts the main characters"),
                 genre=action)
session.add(predator)
session.commit()
matrix = Movie(user_id=1, name="The Matrix",
               description=("A 1999 film directed by the Wachowskis, it depicts a"
                            " dystopian future in which humanity is unknowingly"
                            " trapped inside a simulated reality, the Matrix, created"
                            " by thought-capable machines to distract humans while"
                            " using their bodies as an energy source"),
               genre=action)
session.add(matrix)
session.commit()
gladiator = Movie(user_id=1, name="Gladiator",
                  description=("A 2000 film directed by <NAME> that follows general"
                               " Maximus <NAME>, who is betrayed when Commodus,"
                               " the ambitious son of Emperor <NAME>, murders his"
                               " father and seizes the throne"),
                  genre=action)
session.add(gladiator)
session.commit()
# NOTE(review): unlike the other sections there is no
# print("Action genre added!") confirmation for the Action genre.
print("Action movies added!")
# Adventure Genre
adventure = Genre(name="Adventure")
session.add(adventure)
session.commit()
print("Adventure genre added!")
# Adventure Movies
kong = Movie(user_id=1, name="Kong: Skull Island",
             description=("A 2017 film directed by <NAME> that is a reboot"
                          " of the King Kong franchise"),
             genre=adventure)
session.add(kong)
session.commit()
captain_america = Movie(user_id=1, name="Captain America: The First Avenger",
                        description=("A 2011 film based on the Marvel Comics character Captain"
                                     " America. The film tells the story of <NAME>, a man"
                                     " from Brooklyn who is transformed into the super-soldier"
                                     " Captain America and must stop the Red Skull, who intends"
                                     " to use an artifact called the 'Tesseract' as a source for"
                                     " world domination"),
                        genre=adventure)
session.add(captain_america)
session.commit()
avengers = Movie(user_id=1, name="The Avengers",
                 description=("A 2012 film based on the Marvel Comics superhero team of"
                              " the same name. In the film, <NAME>, director of the"
                              " spy agency S.H.I.E.L.D., recruits <NAME>, <NAME>,"
                              " <NAME>, and Thor to form a team that must stop Thor's"
                              " brother Loki from subjugating Earth"),
                 genre=adventure)
session.add(avengers)
session.commit()
guardians = Movie(user_id=1, name="Guardians of the Galaxy",
                  description=("A 2014 film based on the Marvel Comics superhero team of"
                               " the same name. In the film, <NAME> forms an uneasy"
                               " alliance with a group of extraterrestrial criminals who"
                               " are on the run after stealing a powerful artifact"),
                  genre=adventure)
session.add(guardians)
session.commit()
doctor_strange = Movie(user_id=1, name="<NAME>",
                       description=("A 2016 film based on the Marvel Comics character of the"
                                    " same name. In the film, former surgeon <NAME>"
                                    " learns the mystic arts after a career-ending car crash"),
                       genre=adventure)
session.add(doctor_strange)
session.commit()
print("Adventure movies added!")
# Comedy Genre
comedy = Genre(name="Comedy")
session.add(comedy)
session.commit()
print("Comedy genre added!")
# Comedy Movies
jump_street = Movie(user_id=1, name="2<NAME>",
                    description=("A 2012 film directed by <NAME> and <NAME>,"
                                 " it follows two police officers who are forced to relive"
                                 " high school when they are assigned to go undercover as"
                                 " high school students to prevent the outbreak of a new"
                                 " synthetic drug and arrest its supplier"),
                    genre=comedy)
session.add(jump_street)
session.commit()
bridesmaids = Movie(user_id=1, name="Bridesmaids",
                    description=("A 2011 film directed by <NAME>, it centers on Annie"
                                 " (<NAME>), who suffers a series of misfortunes after"
                                 " being asked to serve as maid of honor for her best friend,"
                                 " Lillian (<NAME>)"),
                    genre=comedy)
session.add(bridesmaids)
session.commit()
hangover = Movie(user_id=1, name="<NAME>",
                 description=("A 2009 film directed by <NAME>, it tells the story of"
                              " <NAME>, <NAME>, <NAME>, and <NAME>,"
                              " who travel to Las Vegas for a bachelor party to celebrate"
                              " Doug's impending marriage"),
                 genre=comedy)
session.add(hangover)
session.commit()
step_brothers = Movie(user_id=1, name="Step Brothers",
                      description=("A 2008 film directed by <NAME>, it follows Brennan"
                                   " (<NAME>) and Dale (<NAME>), two grown men who"
                                   " are forced to live together as brothers after their single"
                                   " parents marry each other"),
                      genre=comedy)
session.add(step_brothers)
session.commit()
tropic_thunder = Movie(user_id=1, name="<NAME>",
                       description=("A 2008 film directed by <NAME>, it follows a group of"
                                    " prima donna actors who, when their frustrated director"
                                    " (<NAME>) drops them in the middle of a jungle, are"
                                    " forced to rely on their acting skills to survive the real"
                                    " action and danger"),
                       genre=comedy)
session.add(tropic_thunder)
session.commit()
print("Comedy movies added!")
# Drama Genre
drama = Genre(name="Drama")
session.add(drama)
session.commit()
print("Drama genre added!")
# Drama Movies
first_man = Movie(user_id=1, name="<NAME>",
                  description=("A 2018 film directed by <NAME> that follows the"
                               " years leading up to the Apollo 11 mission to the Moon"
                               " in 1969"),
                  genre=drama)
session.add(first_man)
session.commit()
true_story = Movie(user_id=1, name="True Story",
description=("A 2015 film directed by <NAME>, it follows the story"
" of <NAME>, a man on the FBI's most wanted list"
" accused of murdering his wife and three children in Oregon."
" He hid in Mexico using the identity | |
"121243": ("Distance from skin", [6165]),
},
"DistancePupillaryDistance": {
"111679": ("Distance Pupillary Distance", []),
},
"DistanceSourceToDetector": {
"113750": ("Distance Source to Detector", [10008]),
},
"DistanceSourceToIsocenter": {
"113748": ("Distance Source to Isocenter", [10008]),
},
"DistanceSourceToReferencePoint": {
"113737": ("Distance Source to Reference Point", [10008]),
},
"DistanceSourceToTablePlane": {
"113792": ("Distance Source to Table Plane", [10008]),
},
"DistinctlyDefined": {
"112138": ("Distinctly defined", [6120]),
},
"DistortedImplant": {
"111506": ("Distorted implant", [6072]),
},
"DistortionArtifactInTheAreaOfInterest": {
"130586": ("Distortion artifact in the area of interest", [6315, 6318]),
},
"DistributedDiffusionCoefficient": {
"113298": ("Distributed Diffusion Coefficient", [218, 7180, 7272, 7469]),
},
"DistributionDescriptor": {
"112006": ("Distribution Descriptor", []),
},
"DistributionRepresentation": {
"128413": ("Distribution Representation", []),
},
"DivideByZero": {
"114003": ("Divide by zero", [42, 43]),
},
"DoctorCanceledProcedure": {
"110500": ("Doctor canceled procedure", [9300, 9301, 9303, 9561]),
},
"Document": {
"121003": ("Document", []),
"DOC": ("Document", [32, 33]),
},
"DocumentDigitizerEquipment": {
"DOCD": ("Document Digitizer Equipment", [7005]),
},
"DocumentTitle": {
"121144": ("Document Title", []),
},
"DocumentTitleModifier": {
"113011": ("Document Title Modifier", []),
},
"DogRegistryOfAmerica": {
"109212": ("Dog Registry of America", [7481]),
},
"DoorInterlock": {
"130478": ("Door Interlock", [9561, 9568]),
},
"DopplerAngle": {
"125106": ("Doppler Angle", []),
},
"DopplerAudio": {
"109113": ("Doppler audio", [3000]),
},
"DopplerOCTA": {
"128256": ("Doppler OCT-A", [4270]),
},
"DopplerVolumeFlow": {
"125219": ("Doppler Volume Flow", [12227, 12231]),
},
"DoseAreaProduct": {
"122130": ("Dose Area Product", []),
},
"DoseAreaProductTotal": {
"113722": ("Dose Area Product Total", [10050]),
},
"DoseCalculationBoundingVolume": {
"130065": ("Dose Calculation Bounding Volume", [9535]),
},
"DoseCalculationImageSeries": {
"128186": ("Dose Calculation Image Series", [7010, 7023]),
},
"DoseCalibrator": {
"113541": ("Dose Calibrator", [10041]),
},
"DoseCheckAlertDetails": {
"113900": ("Dose Check Alert Details", []),
},
"DoseCheckNotificationDetails": {
"113908": ("Dose Check Notification Details", []),
},
"DoseFrequency": {
"111578": ("Dose frequency", [6092]),
},
"DoseGradient": {
"128488": ("Dose Gradient", [10063]),
},
"DoseImage": {
"121342": ("Dose Image", [10063]),
},
"DoseMeasurementDevice": {
"113794": ("Dose Measurement Device", []),
},
"DosePointCloud": {
"128496": ("Dose Point Cloud", [10063]),
},
"DoseRP": {
"113738": ("Dose (RP)", []),
},
"DoseRPTotal": {
"113725": ("Dose (RP) Total", []),
},
"DoseRateToleranceViolation": {
"130474": ("Dose Rate Tolerance Violation", [9567]),
},
"DosimeterType": {
"113818": ("Dosimeter Type", []),
},
"DosimetricResult": {
"128190": ("Dosimetric Result", [7010, 7023]),
},
"DosingFactor": {
"130190": ("Dosing Factor", []),
},
"DoubleExposure": {
"113026": ("Double exposure", [7011]),
},
"DoubleProduct": {
"122708": ("Double Product", []),
},
"DrugAdministered": {
"122083": ("Drug administered", [3409]),
},
"DrugEnd": {
"122082": ("Drug end", [3409]),
},
"DrugProductIdentifier": {
"113510": ("Drug Product Identifier", []),
},
"DrugStart": {
"122081": ("Drug start", [3409]),
},
"DuctNarrowing": {
"111429": ("Duct narrowing", [6054, 6057]),
},
"DuctalAdenoma": {
"111258": ("Ductal adenoma", [6030, 6031]),
},
"DuctalHyperplasia": {
"111249": ("Ductal hyperplasia", []),
},
"Ductography": {
"111139": ("Ductography", []),
},
"DuplexDoppler": {
"DD": ("Duplex Doppler", []),
},
"DuplicateOrder": {
"110510": ("Duplicate order", [9300, 9301, 9302, 9303]),
},
"DurationOfAdministration": {
"122095": ("Duration of administration", [3410]),
},
"DurationOfIntervention": {
"122106": ("Duration of Intervention", []),
},
"DurationOfTimePeriod": {
"130532": ("Duration of Time Period", [10073]),
},
"DuringTransport": {
"127102": ("During transport", [634]),
},
"DuringVoiding": {
"109137": ("During voiding", [91]),
},
"DwConversionFactorCoefficients": {
"113991": ("Dw Conversion Factor Coefficients", []),
},
"DynamicContrastEnhancedAcquisition": {
"130567": ("Dynamic Contrast-Enhanced Acquisition", [6311]),
},
"DynamicContrastEnhancedAcquisitionSubtractionImage": {
"130568": ("Dynamic Contrast-Enhanced Acquisition Subtraction image", [6311]),
},
"DynamicContrastEnhancedTemporalResolution": {
"130547": ("Dynamic contrast-enhanced temporal resolution", []),
},
"DynamicContrastMRMeasurementReport": {
"126002": ("Dynamic Contrast MR Measurement Report", [7021]),
},
"DynamicLight": {
"111630": ("Dynamic light", [4203]),
},
"DynamicWedge": {
"130348": ("Dynamic Wedge", [9546]),
},
"DyssynchronyIndex": {
"125325": ("Dyssynchrony Index", [12304]),
},
"E4G1089Zr": {
"126519": ("E4G10 ^89^Zr", [4021]),
},
"ECGBasedGatingSignalProcessed": {
"109002": ("ECG-based gating signal, processed", [3090]),
},
"ECGGlobalMeasurements": {
"122158": ("ECG Global Measurements", []),
},
"ECGLeadMeasurements": {
"122159": ("ECG Lead Measurements", []),
},
"ECST": {
"122656": ("ECST", [3804]),
},
"EEMCrossSectionalArea": {
"122333": ("EEM Cross-Sectional Area", [3482]),
},
"EEMDiameter": {
"122330": ("EEM Diameter", [3481]),
},
"EEMDiameterRatio": {
"122352": ("EEM Diameter Ratio", [3484]),
},
"EEMVolume": {
"122371": ("EEM Volume", [3485]),
},
"EPProcedurePhase": {
"109061": ("EP Procedure Phase", []),
},
"ESUR2012ProstateMRIAcquisitionRequirements": {
"130606": ("ESUR 2012 prostate MRI acquisition requirements", [6326, 6353]),
},
"EarlyContrastEnhancement": {
"130602": ("Early Contrast Enhancement", [6335, 6345, 6346]),
},
"EarlyGestation": {
"125009": ("Early Gestation", []),
},
"EchoPattern": {
"111360": ("Echo pattern", []),
},
"Echocardiography": {
"EC": ("Echocardiography", []),
},
"EchogenicHalo": {
"111359": ("Echogenic halo", [6153]),
},
"Echogenicity": {
"110849": ("Echogenicity", [218, 7180, 7469]),
},
"Echovist": {
"125902": ("Echovist", [12030]),
},
"Ecromeximab89Zr": {
"126732": ("Ecromeximab ^89^Zr", [4021]),
},
"EdgeDetection": {
"123103": ("Edge Detection", [7162]),
},
"EdgeEnhancement": {
"113086": ("Edge enhancement", [7203]),
},
"EditedModel": {
"129010": ("Edited Model", [7062]),
},
"EducationalIntent": {
"129012": ("Educational Intent", [7064]),
},
"EffectiveAtomicNumber": {
"129320": ("Effective Atomic Number", [301]),
},
"EffectiveDose": {
"113839": ("Effective Dose", [10050]),
},
"EffectiveDoseConversionFactor": {
"113840": ("Effective Dose Conversion Factor", []),
},
"EffectiveDosePhantomType": {
"113817": ("Effective Dose Phantom Type", []),
},
"EffectiveOrificeArea": {
"125326": ("Effective Orifice Area", [12304]),
},
"EffectiveZ": {
"130084": ("Effective Z", []),
},
"EffectiveZPerA": {
"130085": ("Effective Z per A", []),
},
"Elasticity": {
"110830": ("Elasticity", [218, 7180, 7469, 12308]),
},
"ElectricalFailure": {
"111217": ("Electrical failure", [6041, 6135, 7011]),
},
"Electrocardiography": {
"ECG": ("Electrocardiography", [29, 30, 33]),
},
"ElectrodePlacement": {
"122141": ("Electrode Placement", []),
},
"Electroencephalography": {
"EEG": ("Electroencephalography", [29, 30, 33]),
},
"ElectromechanicalDelay": {
"125320": ("Electromechanical Delay", [12307]),
},
"Electromyography": {
"EMG": ("Electromyography", [29, 30, 33]),
},
"ElectronFixedAperture": {
"130343": ("Electron Fixed Aperture", [9541, 9545]),
},
"Electrooculography": {
"EOG": ("Electrooculography", [29, 30, 33]),
},
"ElementalCompositionAtomicMassFraction": {
"130094": ("Elemental Composition Atomic Mass Fraction", []),
},
"ElevationBasedCornealTomographer": {
"111945": ("Elevation-based corneal tomographer", [4210]),
},
"Eligibility": {
"126075": ("Eligibility", [6146]),
},
"EligibilityReader": {
"129001": ("Eligibility Reader", [7453]),
},
"EligibleForReimbursement": {
"128613": ("Eligible for reimbursement", [800]),
},
"EligibleForReimbursementOnPerPatientBasis": {
"128614": ("Eligible for reimbursement on per patient basis", [800]),
},
"Elliptic": {
"112134": ("Elliptic", [6119]),
},
"Email": {
"110031": ("Email", [405]),
},
"EmbryonicKidney": {
"127801": ("<NAME>", [645, 8134]),
},
"EmergencyOverrideStarted": {
"110127": ("Emergency Override Started", [401, 403]),
},
"EmergencyOverrideStopped": {
"110138": ("Emergency Override Stopped", [401, 403]),
},
"EmpiricalAlgorithm": {
"128481": ("Empirical Algorithm", [10068]),
},
"EndCirculatorySupport": {
"121158": ("End Circulatory Support", [3550]),
},
"EndDiastole": {
"109022": ("End diastole", []),
},
"EndOfExpiration": {
"109023": ("End of expiration", [3337]),
},
"EndOfInspiration": {
"109024": ("End of inspiration", [3337]),
},
"EndOfSystole": {
"109070": ("End of systole", [3337]),
},
"EndOfTimePeriod": {
"130534": ("End of Time Period", [10073]),
},
"EndOfXRayIrradiation": {
"113810": ("End of X-Ray Irradiation", []),
},
"EndOxygenAdministration": {
"121162": ("End oxygen administration", [3530]),
},
"EndPacing": {
"121167": ("End Pacing", [3552]),
},
"EndProcedureAction": {
"121131": ("End Procedure Action", [3421]),
},
"EndProcedureActionItem": {
"121131": ("End Procedure Action Item", []),
},
"EndVentilation": {
"121169": ("End Ventilation", [3551]),
},
"EndingFlowRateOfAdministration": {
"130209": ("Ending Flow Rate of administration", []),
},
"Endoleak": {
"122680": ("endoleak", [3813]),
},
"EndorectalCoilType": {
"130544": ("Endorectal coil type", []),
},
"EndorectalCoilUsed": {
"130543": ("Endorectal coil used", []),
},
"EndorectalTransducer": {
"125264": ("Endorectal Transducer", [12035]),
},
"Endoscopy": {
"ES": ("Endoscopy", [29, 30, 33]),
},
"EndovaginalTransducer": {
"125263": ("Endovaginal Transducer", [12035]),
},
"EnhancedMultiFrameConversionEquipment": {
"109106": ("Enhanced Multi-frame Conversion Equipment", [7005]),
},
"EnrichmentManufacturer": {
"127191": ("Enrichment manufacturer", []),
},
"EnrichmentMaterial": {
"127190": ("Enrichment material", []),
},
"EnrichmentMaterialPresent": {
"127192": ("Enrichment material present", []),
},
"EntireBodyTargetVolume": {
"130055": ("Entire Body Target Volume", [9534]),
},
"EntirePullback": {
"122383": ("Entire Pullback", []),
"122384": ("Entire Pullback", [3487]),
},
"EntranceExposureAtRP": {
"111636": ("Entrance Exposure at RP", [10050]),
},
"EntranceExposureToA4Point2cmBreastThickness": {
"113865": ("Entrance exposure to a 4.2 cm breast thickness", [10025]),
},
"EnvironmentalExposureHistory": {
"111547": ("Environmental Exposure History", []),
},
"EnvironmentalFactor": {
"111548": ("Environmental Factor", []),
},
"EpidermalInclusionCyst": {
"111262": ("Epidermal inclusion cyst", []),
},
"EpifluorescenceIllumination": {
"111743": ("Epifluorescence illumination", [8123]),
},
"Equation": {
"121420": ("Equation", [228]),
},
"EquationCitation": {
"121421": ("Equation Citation", [228]),
},
"EquidistantMethod": {
"122574": ("Equidistant method", [3456]),
},
"EquipmentBroughtToProcedureRoom": {
"122047": ("Equipment brought to procedure room", [3427]),
},
"EquipmentChange": {
"110516": ("Equipment change", [9300, 9301]),
},
"EquipmentFailure": {
"110501": ("Equipment failure", [71, 3427, 4221, 4222, 9300, 9301, 9302, 9561]),
"122046": ("Equipment failure", []),
},
"EquipmentIdentification": {
"121122": ("Equipment Identification", []),
},
"EquipmentLandmark": {
"128750": ("Equipment Landmark", []),
},
"EquipmentLandmarkToPatientFiducialZDistance": {
"128756": ("Equipment Landmark to Patient Fiducial Z Distance", []),
},
"EquipmentLandmarkXPosition": {
"128752": | |
<filename>tag_to_header.py
#!/usr/bin/env python3
#
# Tag To Header
# Version 2.0.1
# By <NAME>, <NAME>(1), <NAME> (1) and <NAME>(1)
# (1) Department of Pathology, University of Washington School of
# Medicine, Seattle, WA 98195
# March 24, 2014
#
"""
BERKELEY SOFTWARE DISTRIBUTION LICENSE
Copyright (c) 2016, <NAME>, <NAME>edy University of Washington
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
# Isolate duplex tags, move them from within the sequenced read to the
# header region, and remove the spacer region.
#
# usage: tag_to_header.py [-h] --infile1 INFILE1 --infile2 INFILE2
#                         --outprefix OUTFILE [--taglen TAGLEN]
#                         [--spacerlen SPCLEN] [--loclen LOCLEN]
#                         [--readout READOUT] [--filtspacer SPACER_SEQ]
#                         [--tagstats] [--reduce]
#
# Required arguments:
#   --infile1 INFILE1        First input raw fastq file.
#   --infile2 INFILE2        Second input raw fastq file.
#   --outprefix OUTFILE      Prefix for output files; ".seq1.smi.fq" and
#                            ".seq2.smi.fq" names are derived from it.
# Optional arguments:
#   -h, --help               Show this help message and exit.
#   --taglen TAGLEN          Length of the duplex tag sequence. [12]
#   --spacerlen SPCLEN       Length of the spacer sequences used. [5]
#   --loclen LOCLEN          Number of base pairs to add to barcode for
#                            location specificity. [0]
#   --readout READOUT        How often you want to be told what the
#                            program is doing. [1000000]
#   --filtspacer SPACER_SEQ  Optional: Spacer sequence for filtering on
#                            the presence of the spacer. This could be
#                            thrown off by low quality scores.
#   --tagstats               Optional: Output tagstats file and make
#                            distribution plot of tag family sizes.
#                            Requires matplotlib to be installed.
#   --reduce                 Optional: Only output reads that will make a
#                            final DCS read (requires --tagstats).
import sys
import gzip
from argparse import ArgumentParser
from collections import defaultdict
def fastq_general_iterator(read1_fastq, read2_fastq):
    """Iterate two paired FASTQ file handles in lockstep.

    Yields one tuple per read pair:
    ``(title1, title2, seq1, seq2, qual1, qual2)`` with the leading '@'
    stripped from the titles.  Multi-line sequence and quality sections
    are concatenated.  Raises ValueError when the files look binary,
    malformed, or out of synch.
    """
    read1_readline = read1_fastq.readline
    read2_readline = read2_fastq.readline
    # Skip any leading junk until the first record header in both files.
    while True:
        read1_line = read1_readline()
        read2_line = read2_readline()
        # BUGFIX: was `not read1_line and read2_line`, which (because `not`
        # binds tighter than `and`) never detected two empty files and then
        # crashed with IndexError on read1_line[0].
        if not read1_line or not read2_line:
            if read1_line or read2_line:
                # Exactly one file ran out: the inputs are not paired.
                raise ValueError("FASTQ files are out of synch: one file "
                                 "ended before the other"
                                 )
            return
        if read1_line[0] == '@' and read2_line[0] == '@':
            break
        if (
            isinstance(read1_line[0], int)
            or isinstance(read2_line[0], int)
        ):
            # Indexing a bytes object yields ints: the handle was opened in
            # binary mode or the file is still compressed.
            raise ValueError(("FASTQ files may contain binary "
                              "information or are compressed"
                              ))
    while read1_line and read2_line:
        if read1_line[0] != '@' or read2_line[0] != '@':
            print(f"{read1_line}, {read2_line}")
            raise ValueError(("Records in FASTQ files should start "
                              "with a '@' character. Files may be "
                              "malformed or out of synch."
                              ))
        title_read1_line = read1_line[1:].rstrip()
        title_read2_line = read2_line[1:].rstrip()
        read1_seq_string = read1_readline().rstrip()
        read2_seq_string = read2_readline().rstrip()
        # Accumulate (possibly multi-line) sequence until the '+' separator.
        while True:
            read1_line = read1_readline()
            read2_line = read2_readline()
            # BUGFIX: was `not read1_line and read2_line`; either file
            # ending here means the record is missing its quality section.
            if not read1_line or not read2_line:
                raise ValueError(("End of file without quality "
                                  "information. Files may be malformed"
                                  " or out of synch"
                                  ))
            if read1_line[0] == '+' and read2_line[0] == '+':
                break
            read1_seq_string += read1_line.rstrip()
            read2_seq_string += read2_line.rstrip()
        read1_quality_string = read1_readline().rstrip()
        read2_quality_string = read2_readline().rstrip()
        # Accumulate (possibly multi-line) quality until the next record
        # header or the end of both files.
        while True:
            read1_line = read1_readline()
            read2_line = read2_readline()
            # BUGFIX: was `not read1_line or read2_line`, which was true for
            # every non-empty line and therefore never concatenated
            # multi-line quality strings.
            if not read1_line or not read2_line:
                break  # end of file
            if (
                read1_line[0] == '@'
                and read2_line[0] == '@'
                and read1_line.isalpha() is not True
                and read2_line.isalpha() is not True
            ):
                # Heuristic carried over from the original: a quality line
                # may also start with '@', so require both files to agree.
                break
            read1_quality_string += read1_line.rstrip()
            read2_quality_string += read2_line.rstrip()
        yield (title_read1_line,
               title_read2_line,
               read1_seq_string,
               read2_seq_string,
               read1_quality_string,
               read2_quality_string
               )
def tag_extract_fxn(read_seq, blen, slen, llen):
    """Pull the duplex UID out of a pair of raw read sequences.

    For each of the two sequences in *read_seq*, the tag is the first
    *blen* bases plus the *llen* locator bases that sit immediately after
    the *slen*-base spacer.  Returns a (tag1, tag2) tuple.
    """
    extracted = []
    for sequence in read_seq:
        barcode = sequence[:blen]
        locator_start = blen + slen
        locator = sequence[locator_start:locator_start + llen]
        extracted.append(barcode + locator)
    return (extracted[0], extracted[1])
def hdr_rename_fxn(read_title, read1_tag, read2_tag):
    """Rewrite a FASTQ header so that it carries the duplex tag.

    The output layout is ``<coordinates>|<tag1><tag2>/<read number>``.
    Supports Illumina CASAVA >= 1.8 headers (seven colon-separated fields
    plus a space-delimited description) and CASAVA >= 1.4 headers (five
    colon-separated fields with a trailing ``/1`` or ``/2``).
    """
    fields = read_title.split(" ")[0].split(":")
    combined_tag = read1_tag + read2_tag
    if len(fields) == 7:
        # Illumina CASAVA >= 1.8, e.g.
        # @EAS139:136:FC706VJ:2:2104:15343:197393 1:Y:18:ATCACG
        coordinates, _, description = read_title.partition(" ")
        read_number = description.split(":")[0]
        return f"{coordinates}|{combined_tag}/{read_number}"
    if len(fields) == 5:
        # Illumina CASAVA >= 1.4, e.g.
        # @HWUSI-EAS100R:6:73:941:1973#ATCGAT/1
        normalized = read_title.replace(' ', '_')
        base, _, read_number = normalized.partition('/')
        return f"{base}|{combined_tag}/{read_number}"
    raise ValueError("Unknown read name format: %s" % read_title)
def tag_stats(barcode_counts, outfile):
    """Summarize tag family sizes and write ``<outfile>.tagstats``.

    *barcode_counts* is an iterable of per-tag family sizes (presumably
    the values of the tag-count dictionary built in main -- TODO confirm
    against the caller).  Each output line is
    ``<family size>\t<fraction of all tags in families of that size>``.
    Returns ``(family_size_dict, total_tags)`` where family_size_dict maps
    family size -> number of tags in families of that size.
    """
    family_size_dict = defaultdict(lambda: 0)
    total_tags = 0
    # First pass: how many families exist at each size.
    for value in barcode_counts:
        family_size_dict[value] += 1
    # Convert family counts into tag counts (families * size).
    for family_size in family_size_dict.keys():
        family_size_dict[family_size] *= int(family_size)
        total_tags += int(family_size_dict[family_size])
    tagstat_file = open(outfile + '.tagstats', 'w')
    for family_size in sorted(family_size_dict.keys()):
        tagstat_file.write(
            f"{family_size}\t"
            f"{float(family_size_dict[family_size])/float(total_tags)}"
            f"\n"
        )
    tagstat_file.close()
    return (family_size_dict, total_tags)
def open_fastq(infile, outfile):
    """Open an input FASTQ for reading and an output FASTQ for writing.

    Gzip-aware: when *infile* ends in ".gz" both handles are gzip text
    streams and ".gz" is appended to *outfile*.  Returns the pair
    ``(input_handle, output_handle)``.
    """
    if infile.endswith(".gz"):
        return (gzip.open(infile, 'rt'), gzip.open(outfile + ".gz", 'wt'))
    return (open(infile, 'r'), open(outfile, 'w'))
def main():
parser = ArgumentParser()
parser.add_argument(
'--infile1',
dest = 'infile1',
help = 'Path to FASTQ file for Read 1.',
required = True
)
parser.add_argument(
'--infile2',
dest = 'infile2',
help = 'Path to FASTQ file for Read 2.',
required = True
)
parser.add_argument(
'--outprefix',
dest = 'outfile',
help = (f'Prefix for output files. Will prepend onto file name '
f'of ".fq.smi"'
),
required=True
)
parser.add_argument(
'--taglen',
dest = 'taglen',
type = int,
default = 12,
help = 'Length in bases of the tag sequences.[12]'
)
parser.add_argument(
'--spacerlen',
dest = 'spclen',
type = int,
default = 5,
help = (f'Length in bases of the spacer sequence between the'
f' tag and the start of target DNA. [5]'
)
)
parser.add_argument(
"--loclen",
dest = 'loclen',
type = int,
default = 0,
action = "store",
help = (f"Number of base pairs to add to barcode for location "
f"specificity. Bases are not removed from read. [0]"
)
)
parser.add_argument(
'--readout',
dest = 'readout',
type = int,
default = 1000000,
help = (f'How many reads are processed before progress is '
f'reported. [1000000]'
)
)
parser.add_argument(
'--filtspacer',
dest = 'spacer_seq',
type = str,
default = None,
help = (f'Optional: Filter out sequences lacking the inputed '
f'spacer sequence. Not recommended due to significant '
f'base calling issues with the invariant spacer '
f'sequence'
)
)
parser.add_argument(
'--tagstats',
dest = 'tagstats',
action = "store_true",
help = (f'Optional: Output tagstats file and make distribution '
f'plot of tag family sizes. Requires matplotlib to be '
f'installed.'
)
)
parser.add_argument(
'--reduce',
dest = 'reduce',
action = "store_true",
help = (f'Optional: Only output reads that will make a final '
f'DCS read. Will only work when the --tagstats option '
f'is invoked.'
)
)
o = parser.parse_args()
if o.reduce and not o.tagstats:
raise ValueError(f"--reduce option must be invoked with the "
f"--tagstats option."
)
(read1_fastq, read1_output) = open_fastq(o.infile1,
o.outfile + '.seq1.smi.fq'
)
(read2_fastq, read2_output) = open_fastq(o.infile2,
o.outfile + '.seq2.smi.fq'
)
readctr = 0
nospacer = 0
goodreads = 0
badtag = 0
oldBad = 0
barcode_dict = defaultdict(lambda:0)
for readParts in fastq_general_iterator(read1_fastq, read2_fastq):
(read1_title, read2_title, read1_seq,
read2_seq, read1_qual, read2_qual) = readParts
readctr += 1
if (
o.spacer_seq != None
and (
read1_seq[o.taglen:o.taglen + o.spclen]
!= o.spacer_seq
or read2_seq[o.taglen:o.taglen + o.spclen]
!= o.spacer_seq
)
):
nospacer += 1
else:
tag1, tag2 = tag_extract_fxn((read1_seq, read2_seq),
o.taglen, o.spclen, o.loclen
)
if | |
<filename>wbia/web/apis_microsoft.py<gh_stars>0
# -*- coding: utf-8 -*-
"""Dependencies: flask, tornado."""
import logging
from wbia.control import controller_inject
from flask_swagger import swagger
import wbia.constants as const
from flask import current_app
from flask import jsonify
from flask import url_for
import utool as ut
import traceback
import uuid
(print, rrr, profile) = ut.inject2(__name__)
logger = logging.getLogger('wbia')
CLASS_INJECT_KEY, register_ibs_method = controller_inject.make_ibs_register_decorator(
__name__
)
PREFIX = controller_inject.MICROSOFT_API_PREFIX
register_api = controller_inject.get_wbia_flask_api(__name__)
register_route = controller_inject.get_wbia_flask_route(__name__)
def _prefix(route=''):
    """Build a full API rule for ``route`` under the Microsoft API PREFIX.

    Duplicate slashes (e.g. from an empty route) are collapsed so the
    resulting rule is always of the form '/<PREFIX>/<route>/'.
    """
    path = '/%s/%s/' % (PREFIX, route)
    while '//' in path:
        path = path.replace('//', '/')
    return path
def _image(ibs, gid):
return {
'uuid': str(ibs.get_image_uuids(gid)),
}
def _annotation(ibs, aid):
return {
'uuid': str(ibs.get_annot_uuids(aid)),
}
def _name(ibs, nid):
return {
'uuid': str(ibs.get_name_uuids(nid)),
}
# DEPRECATED
# def _detection(ibs, aid):
# gid = ibs.get_annot_gids(aid)
# bbox = ibs.get_annot_bboxes(aid)
# return {
# '_image' : _image(ibs, gid),
# '_annotation' : _annotation(ibs, aid),
# 'xtl' : bbox[0],
# 'ytl' : bbox[1],
# 'width' : bbox[2],
# 'height' : bbox[3],
# 'theta' : ibs.get_annot_thetas(aid),
# 'label' : ibs.get_annot_species_texts(aid),
# 'score' : ibs.get_annot_detect_confidence(aid),
# }
def _detection(ibs, gid, result):
    """Serialize one raw detector result into the public Detection form.

    Args:
        ibs: controller used to serialize the parent image.
        gid: rowid of the image the detection was found in.
        result: a (bbox, theta, conf, label) tuple, where bbox is
            (xtl, ytl, width, height) in pixels of the parent image.

    Returns:
        dict: the Detection payload (bottom-right corner derived from bbox).
    """
    bbox, theta, conf, label = result
    x0, y0 = bbox[0], bbox[1]
    w, h = bbox[2], bbox[3]
    return {
        'image': _image(ibs, gid),
        'bbox': bbox,
        'xtl': x0,
        'ytl': y0,
        'xbr': x0 + w,
        'ybr': y0 + h,
        'width': w,
        'height': h,
        'theta': theta,
        'species': label,
        'viewpoint': None,
        'score': conf,
    }
def _task(ibs, taskid):
return {
'uuid': taskid,
}
def _ensure_general(ibs, models, tag, rowid_from_uuid_func, unpack=True, *args, **kwargs):
if isinstance(models, dict):
models = [models]
length = len(models)
single = length == 1
if length == 0:
return []
rowid_list = []
for index, model in enumerate(models):
try:
if single:
parameter = 'models'
else:
parameter = 'models:%d' % (index,)
assert 'uuid' in model, '%s Model provided is invalid, missing UUID key' % (
tag,
)
uuid_ = uuid.UUID(model['uuid'])
assert uuid_ is not None, "%s Model's UUID is invalid" % (tag,)
rowid = rowid_from_uuid_func(uuid_)
assert rowid is not None, '%s Model is unrecognized, please upload' % (tag,)
except AssertionError as ex:
raise controller_inject.WebInvalidInput(str(ex), parameter)
rowid_list.append(rowid)
if single and unpack:
return rowid_list[0]
else:
return rowid_list
def _ensure_images(ibs, images, *args, **kwargs):
    """Resolve Image model dicts (by UUID) into image rowids (gids)."""
    resolver = ibs.get_image_gids_from_uuid
    return _ensure_general(ibs, images, 'Image', resolver, *args, **kwargs)
def _ensure_annotations(ibs, annotations, *args, **kwargs):
    """Resolve Annotation model dicts (by UUID) into annotation rowids (aids)."""
    resolver = ibs.get_annot_aids_from_uuid
    return _ensure_general(ibs, annotations, 'Annotation', resolver, *args, **kwargs)
def _ensure_names(ibs, names, *args, **kwargs):
    """Resolve Name model dicts (by UUID) into name rowids (nids)."""
    resolver = ibs.get_name_rowids_from_uuid
    return _ensure_general(ibs, names, 'Name', resolver, *args, **kwargs)
@register_route(_prefix('swagger'), methods=['GET'])
def microsoft_core_specification_swagger(*args, **kwargs):
    r"""
    Returns the API specification in the Swagger 2.0 (OpenAPI) JSON format.

    The Swagger API specification (https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md) provides a standardized method to export REST API documentation and examples.  Our documentation is built on-demand with the help of the Python package flask-swagger (https://github.com/gangverk/flask-swagger).

    The API specification includes GET, POST, PUT, and DELETE methods and Model definitions.
    ---
    definitions:
    - schema:
        id: Image
        description: An Image is a semantic construct that represents an uploaded image.  Images can be uploaded for later processing or be used immediately for detection.  Object detection will create Annotation models, which have a required Image parent.  An Image can have multiple detections (i.e. Annotation models).
        required:
          - uuid
        properties:
          uuid:
            description: a deterministically-derived UUID based on the image pixels, which can be used to identify duplicate Images.
            type: string
            format: uuid
    - schema:
        id: Annotation
        description: An Annotation is a semantic construct that represents a *committed* detection, with a bounding box and species classification assigned as stored attributes.  An Annotation is required to have a parent Image.  All bounding box coordinates are relative to the size of the parent Image that was uploaded.
        required:
          - uuid
          - bbox
        properties:
          uuid:
            description: a deterministically-derived UUID based on the parent image's UUID and the bounding box coordinate (xtl, ytl, width, height) and orientation (theta), which can be used to identify duplicate Annotations.
            type: string
            format: uuid
          bbox:
            description: a 4-tuple of coordinates that defines a rectangle in the format (x-axis top left corner, y-axis top left corner, width, height) in pixels.  These values are expected to be bounded by the size of the parent image.
            type: array
          theta:
            description: a rotation around the center of the annotation, in radians
            type: number
          species:
            description: a user-defined string to specify the species of the annotation (e.g. 'zebra' or 'massai_giraffe').  This value is used to filter matches and run-time models for ID.
            type: string
          viewpoint:
            description: a user-defined string to specify the viewpoint of the annotation (e.g. 'right' or 'front_left').  This value is used to filter matches and run-time models for ID.
            type: string
          name:
            description: the name of the individual
            format: uuid
            type: string
    - schema:
        id: Detection
        description: A Detection is a semantic constrict that represents an *un-committed* detection.  A Detection can be committed to an Annotation to be stored permanently on the parent Image.
        required:
          - _image
          - score
          - bbox
          - xtl
          - ytl
          - xbr
          - ybr
          - width
          - height
          - theta
          - label
        properties:
          image:
            description: The Image that this Detection was found in
            $ref: "#/definitions/Image"
          score:
            description: The detection's classification score
            type: integer
            format: int32
          bbox:
            description: The bounding box for this annotation, represented in the format (xtl, ytl, width, height)
            type: array
            items:
              type: number
              format: float
          xtl:
            description: The pixel coordinate for the top-left corner along the x-axis (xtl = x-axis top left) for the bounding box
            type: integer
            format: int32
          ytl:
            description: The pixel coordinate for the top-left corner along the y-axis (ytl = y-axis top left) for the bounding box
            type: integer
            format: int32
          xbr:
            description: The pixel coordinate for the bottom-right corner along the x-axis (ytl = x-axis bottom right) for the bounding box
            type: integer
            format: int32
          ybr:
            description: The pixel coordinate for the bottom-right corner along the y-axis (ytl = y-axis bottom right) for the bounding box
            type: integer
            format: int32
          width:
            description: The pixel width for the bounding box
            type: integer
            format: int32
          height:
            description: The pixel height for the bounding box
            type: integer
            format: int32
          theta:
            description: The rotation of the bounding box around its center, represented in radians
            type: number
            format: float
          species:
            description: The semantic species classification (class label) of the bounding box
            type: string
          viewpoint:
            description: The semantic viewpoint classification (class label) of the bounding box
            type: string
    - schema:
        id: Name
        description: A Name is the identification label for a group of Annotations
        required:
          - uuid
          - alias
        properties:
          uuid:
            description: a deterministically-derived UUID based on the image pixels, which can be used to identify duplicate Images.
            type: string
            format: uuid
          alias:
            description: a string alias for this individual, helpful for user-facing interfaces
            type: string
    - schema:
        id: Task
        description: A Task is a semantic construct that represents a background task (i.e. detection) in an asynchronous call.  A Task has an optional callback on completion or the status (and result) can be checked via the API
        required:
          - uuid
        properties:
          uuid:
            description: a random UUID to identify a given asynchronous call, used to check status and results of a background task
            type: string
            format: uuid
    produces:
    - application/json
    responses:
      200:
        description: Returns the Swagger 2.0 JSON format
    """
    try:
        swag = swagger(current_app)
    except Exception:
        logger.info(str(traceback.format_exc()))
        # ut.embed()
        # BUGFIX: `swag` is unbound when swagger() fails; falling through
        # used to crash below with a confusing NameError.  Re-raise so the
        # original error (already logged above) propagates instead.
        raise
    swag['info']['title'] = 'Wild Me - IA (Image Analysis)'
    swag['info'][
        'description'
    ] = 'Documentation for all classification, detection, and identification calls provided by Wild Me for the AI for Earth (AI4E) collaboration'
    swag['info']['version'] = 'v0.1'
    swag['info']['contact'] = {
        'name': '<NAME> (AI4E)',
        'url': 'http://wildme.org',
        'email': '<EMAIL>',
    }
    swag['info']['license'] = {
        'name': 'Apache 2.0',
        'url': 'http://www.apache.org/licenses/LICENSE-2.0.html',
    }
    swag['host'] = 'demo.wildbook.org:5010'
    swag['schemes'] = [
        'http',
    ]
    # swag['basePath'] = PREFIX
    # "securityDefinitions": {
    #     "apiKeyHeader": {
    #         "type": "apiKey",
    #         "name": "Ocp-Apim-Subscription-Key",
    #         "in": "header"
    #     },
    #     "apiKeyQuery": {
    #         "type": "apiKey",
    #         "name": "subscription-key",
    #         "in": "query"
    #     }
    # },
    # "security": [
    #     {
    #         "apiKeyHeader": []
    #     },
    #     {
    #         "apiKeyQuery": []
    #     }
    # ],
    response = jsonify(swag)
    return response
@register_api(_prefix('status'), methods=['GET'], __api_plural_check__=False)
def microsoft_core_status(ibs, *args, **kwargs):
r"""
Returns the health status of the API back-end; optionally can be used as a service heatbeat.
---
produces:
- application/json
responses:
200:
description: Returns the status of the server
schema:
type: object
properties:
status:
type: string
enum:
- healthy
- warning
- critical
examples:
application/json:
| |
# MIT License
#
# Copyright (c) 2020 <NAME> (Talendar)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ==============================================================================
""" Implements the genome and its main operations.
A genome is a collection of genes that encode a neural network (the genome's
phenotype). In this implementation, there is no distinction between a genome and
the network it encodes. In NEAT, the genome is the entity subject to evolution.
"""
import logging
import os
from typing import Any, cast, Dict, List, Optional, Sequence, Tuple
import numpy as np
np.warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning) \
# pylint: disable=wrong-import-position
from tensorflow import reshape
import nevopy as ne
_logger = logging.getLogger(__name__)
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
class NeatGenome(ne.base_genome.BaseGenome):
""" Linear representation of a neural network's connectivity.
In the context of NEAT, a genome is a collection of genes that encode a
neural network (the genome's phenotype). In this implementation, there is no
distinction between a genome and the network it encodes. A genome processes
inputs based on its nodes and connections in order to produce an output,
emulating a neural network.
Note:
The instances of this class are the entities subject to evolution by the
NEAT algorithm.
Note:
The encoded networks are Graph Neural Networks (GNNs), connectionist
models that capture the dependence of graphs via message passing between
the nodes of graphs.
Note:
When declaring a subclass of this class, you should always override the
methods :meth:`.simple_copy()`, :meth:`deep_copy()` and
:meth:`random_copy()`, so that they return an instance of your subclass
and not of :class:`.NeatGenome`. It's recommended (although optional) to
also override the methods :meth:`.distance()` and :meth:`.mate()`.
Args:
num_inputs (int): Number of input nodes in the network.
num_outputs (int): Number of output nodes in the network.
config (NeatConfig): Settings of the current evolution session.
initial_connections (bool): If True, connections between the input nodes
and the output nodes of the network will be created.
Attributes:
species_id (int): Indicates the species to which the genome belongs.
fitness (float): The last calculated fitness of the genome.
adj_fitness (float): The last calculated adjusted fitness of the genome.
hidden_nodes (:obj:`list` of :obj:`.NodeGene`): List with all the node
genes of the type :attr:`.NodeGene.Type.HIDDEN` in the genome.
connections (:obj:`list` of :obj:`.ConnectionGene`): List with all the
connection genes in the genome.
_existing_connections_dict (Dict[int, Set]): Used as a fast lookup table
to consult existing connections in the network. Given a node N, it
maps N's ID to the IDs of all the nodes that have a connection with
N as the source.
"""
    def __init__(self,
                 num_inputs: int,
                 num_outputs: int,
                 config: "ne.neat.config.NeatConfig",
                 initial_connections: bool = True) -> None:
        """ Builds a minimal genome: input, bias and output node genes.

        Node IDs are assigned sequentially: inputs first (starting at 0),
        then the bias node (only if the config enables one) and, finally,
        the output nodes.  Connection IDs (innovation numbers) start at 1.
        """
        super().__init__()
        self._config = config
        # Assigned later, during speciation; None while unassigned.
        self.species_id = None  # type: Optional[int]
        # Per-node "already activated this pass" bookkeeping; built lazily.
        self._activated_nodes = None  # type: Optional[Dict[int, bool]]
        self.fitness = 0.0
        self.adj_fitness = 0.0
        self.input_nodes = []  # type: List["ne.neat.NodeGene"]
        self.hidden_nodes = []  # type: List["ne.neat.NodeGene"]
        self.output_nodes = []  # type: List["ne.neat.NodeGene"]
        self.bias_node = None  # type: Optional["ne.neat.NodeGene"]
        self.connections = []  # type: List["ne.neat.ConnectionGene"]
        # Fast lookup table: source node ID -> {dest node ID: connection}.
        self._existing_connections_dict = {} \
            # type: Dict[int, Dict[int, "ne.neat.ConnectionGene"]]
        self._output_activation = self.config.out_nodes_activation
        self._hidden_activation = self.config.hidden_nodes_activation
        # init input nodes
        # Input nodes are linear: they simply relay the fed value.
        node_counter = 0
        for _ in range(num_inputs):
            self.input_nodes.append(
                ne.neat.NodeGene(
                    node_id=node_counter,
                    node_type=ne.neat.NodeGene.Type.INPUT,
                    activation_func=ne.activations.linear,
                    initial_activation=self.config.initial_node_activation)
            )
            node_counter += 1
        # init bias node
        # A bias node only exists when the config specifies a bias value;
        # its "activation" is fixed at that value.
        if self.config.bias_value is not None:
            self.bias_node = ne.neat.NodeGene(
                node_id=node_counter,
                node_type=ne.neat.NodeGene.Type.BIAS,
                activation_func=ne.activations.linear,
                initial_activation=self.config.bias_value,
            )
            node_counter += 1
        # init output nodes
        connection_counter = 0
        for _ in range(num_outputs):
            out_node = ne.neat.NodeGene(
                node_id=node_counter,
                node_type=ne.neat.NodeGene.Type.OUTPUT,
                activation_func=self._output_activation,
                initial_activation=self.config.initial_node_activation,
            )
            self.output_nodes.append(out_node)
            node_counter += 1
            # connecting all input nodes to all output nodes
            # NOTE(review): only input nodes are wired here -- the bias node
            # gets no initial connections; presumably those are added later
            # by mutation.  Confirm against the mutation operators.
            if initial_connections:
                for in_node in self.input_nodes:
                    connection_counter += 1
                    self.add_connection(connection_counter, in_node, out_node)
@property
def input_shape(self) -> int:
""" Number of input nodes in the genome. """
return len(self.input_nodes)
@property
def output_shape(self) -> int:
""" Number of output nodes in the genome. """
return len(self.output_nodes)
@property
def config(self) -> Any:
return self._config
@config.setter
def config(self, c) -> None:
self._config = c
def reset_activations(self) -> None:
""" Resets cached activations of the genome's nodes.
It restores the current activation value of all the nodes in the network
to their initial value.
"""
self._activated_nodes = None
for n in self.nodes():
n.reset_activation()
def reset(self) -> None:
""" Wrapper for :meth:`.reset_activations`. """
self.reset_activations()
def distance(self, other: "NeatGenome") -> float:
""" Calculates the distance between two genomes.
The shorter the distance between two genomes, the greater the similarity
between them is. In the context of NEAT, the similarity between genomes
increases as:
1) the number of matching connection genes increases;
2) the absolute difference between the matching connections weights
decreases;
The distance between genomes is used for speciation and for sexual
reproduction (mating).
The formula used is shown below. It's the same as the one presented in
the original NEAT paper :cite:`stanley:ec02`. All the coefficients are
configurable.
.. math::
\\delta = c_1 \\cdot \\frac{E}{N} + c_2 \\cdot \\frac{D}{N} \\
+ c_3 \\cdot W
:label: neat_genome_distance
Args:
other (NeatGenome): The other genome (an instance of
:class:`.NeatGenome` or one of its subclasses).
Returns:
The distance between the genomes.
"""
genes = ne.neat.align_connections(self.connections, other.connections)
excess = disjoint = num_matches = 0
weight_diff = 0.0
g1_max_innov = np.amax([c.id for c in self.connections])
g2_max_innov = np.amax([c.id for c in other.connections])
for cn1, cn2 in zip(*genes):
# non-matching genes:
if cn1 is None or cn2 is None:
# if c1 is None, c2 can't be None (and vice-versa)
# noinspection PyUnresolvedReferences
if ((cn1 is None and cn2.id > g1_max_innov)
or (cn2 is None and cn1.id > g2_max_innov)):
excess += 1
else:
disjoint += 1
# matching genes:
else:
num_matches += 1
weight_diff += abs(cn1.weight - cn2.weight)
c1 = self.config.excess_genes_coefficient
c2 = self.config.disjoint_genes_coefficient
c3 = self.config.weight_difference_coefficient
n = max(len(self.connections), len(other.connections))
return (((c1 * excess + c2 * disjoint) / n)
+ c3 * weight_diff / num_matches)
def connection_exists(self, src_id: int, dest_id: int) -> bool:
""" Checks whether a connection between the given nodes exists.
Args:
src_id (int): ID of the connection's source node.
dest_id (int): ID of the connection's destination node.
Returns:
`True` if the specified connection exists in the genome's network
and `False` otherwise.
"""
try:
return dest_id in self._existing_connections_dict[src_id]
except KeyError:
return False
def add_connection(self,
cid: int,
src_node: "ne.neat.genes.NodeGene",
dest_node: "ne.neat.genes.NodeGene",
enabled: bool = True,
weight: Optional[float] = None) -> None:
""" Adds a new connection gene to the genome.
Args:
cid (int): ID of the connection. It's used as a historical marker
of the connection's creation, acting as an "innovation number".
src_node (NodeGene): Node from where the connection leaves (source
node).
dest_node (NodeGene): Node to where the connection is headed
(destination node).
enabled (bool): Whether the new connection should be enabled or not.
weight (Optional[float]): The weight of the connection. If `None`, a
random value (within the interval specified in the settings)
will be chosen.
Raises:
ConnectionExistsError: If the connection `src_node->dest_node`
already exists in the genome.
ConnectionToBiasNodeError: If `dest_node` is an input or bias node
(nodes of these types do not process inputs!).
"""
if self.connection_exists(src_node.id, dest_node.id):
raise ConnectionExistsError(
f"Attempt to create an already existing connection "
f"({src_node.id}->{dest_node.id}).")
if (dest_node.type == ne.neat.NodeGene.Type.BIAS
or dest_node.type == ne.neat.NodeGene.Type.INPUT):
raise ConnectionToBiasNodeError(
f"Attempt to create a connection pointing to a | |
'')
elif 'access_token' in request.headers:
token = request.headers['access_token']
if secret and token:
# ENV variables sometimes don't pass newlines, spec says white space
# doesn't matter, but pyjwt cares about it, so fix it
secret = secret.replace(' PUBLIC ', '_PLACEHOLDER_')
secret = secret.replace(' ', '\n')
secret = secret.replace('_PLACEHOLDER_', ' PUBLIC ')
try:
r = jwt.decode(token, secret, algorithms='RS256')
#TODO(hikevin): Check scope is valid for InterUSS Platform
uss_id = r['client_id'] if 'client_id' in r else r['sub']
except jwt.ExpiredSignatureError:
log.error('Access token has expired.')
abort(status.HTTP_401_UNAUTHORIZED,
'OAuth access_token is invalid: token has expired.')
except jwt.DecodeError:
log.error('Access token is invalid and cannot be decoded.')
abort(status.HTTP_400_BAD_REQUEST,
'OAuth access_token is invalid: token cannot be decoded.')
else:
log.error('Attempt to access resource without access_token in header.')
abort(status.HTTP_403_FORBIDDEN,
'Valid OAuth access_token must be provided in header.')
return uss_id
def _GetGridCellMetaData(zoom, x, y):
  """Instantaneous snapshot of the metadata for one GridCell.

  The returned snapshot includes a sync token to be used when updating.

  Args:
    zoom: zoom level in slippy tile format
    x: x tile number in slippy tile format
    y: y tile number in slippy tile format
  Returns:
    200 with token and JSON metadata,
    or the nominal 4xx error codes as necessary.
  """
  log.info('Grid cell metadata request instantiated for %sz, %s,%s...', zoom, x,
           y)
  return wrapper.get(zoom, x, y)
def _PutGridCellMetaData(zoom, x, y, uss_id):
  """Updates the metadata stored in a specific slippy GridCell.

  Uses optimistic locking: the write fails if the metadata changed since
  GET GridCellMetadata was originally called (based on the sync token).

  Args:
    zoom: zoom level in slippy tile format
    x: x tile number in slippy tile format
    y: y tile number in slippy tile format
    uss_id: the plain text identifier for the USS from OAuth
  Plus posted webargs:
    sync_token: the token retrieved in the original GET GridCellMetadata,
    scope: The submitting USS scope for the web service endpoint,
    operation_endpoint: the submitting USS endpoint where all flights in this
      cell can be retrieved from,
    operation_format: The output format for the USS web service endpoint,
    minimum_operation_timestamp: lower time bound of the USS's flights here,
    maximum_operation_timestamp: upper time bound of the USS's flights here.
  Returns:
    200 and a new token if updated successfully,
    409 if there is a locking conflict that could not be resolved, or
    the other nominal 4xx error codes as necessary.
  """
  log.info('Grid cell metadata submit instantiated for %sz, %s,%s...', zoom, x,
           y)
  sync_token = _GetRequestParameter('sync_token', None)
  if not sync_token and 'sync_token' in request.headers:
    sync_token = request.headers['sync_token']
  scope = _GetRequestParameter('scope', None)
  operation_endpoint = _GetRequestParameter('operation_endpoint', None)
  operation_format = _GetRequestParameter('operation_format', None)
  minimum_operation_timestamp = _GetRequestParameter(
      'minimum_operation_timestamp', None)
  maximum_operation_timestamp = _GetRequestParameter(
      'maximum_operation_timestamp', None)
  # Validate required fields in order, reporting the first missing one.
  # A field may carry a custom message; otherwise a generic one is built.
  checks = (
      ('sync_token', sync_token, None),
      ('uss_id', uss_id, 'USS identifier not received from OAuth token check.'),
      ('scope', scope, None),
      ('operation_endpoint', operation_endpoint, None),
      ('operation_format', operation_format, None),
      ('minimum_operation_timestamp', minimum_operation_timestamp, None),
      ('maximum_operation_timestamp', maximum_operation_timestamp, None),
  )
  for fieldname, value, custom_msg in checks:
    if not value:
      errormsg = custom_msg or (
          fieldname + ' must be provided in the form data request to add to a '
          'GridCell.')
      return {
          'status': 'error',
          'code': status.HTTP_400_BAD_REQUEST,
          'message': errormsg
      }
  return wrapper.set(zoom, x, y, sync_token, uss_id, scope,
                     operation_format, operation_endpoint,
                     minimum_operation_timestamp,
                     maximum_operation_timestamp)
def _DeleteGridCellMetaData(zoom, x, y, uss_id):
  """Removes the USS entry from the metadata of a specific GridCell.

  Uses optimistic locking behavior.

  Args:
    zoom: zoom level in slippy tile format
    x: x tile number in slippy tile format
    y: y tile number in slippy tile format
    uss_id: the plain text identifier for the USS from OAuth
  Returns:
    200 and a new sync_token if updated successfully,
    409 if there is a locking conflict that could not be resolved, or
    the other nominal 4xx error codes as necessary.
  """
  log.info('Grid cell metadata delete instantiated for %sz, %s,%s...', zoom, x,
           y)
  # Guard clause: without a USS identifier there is nothing to delete.
  if not uss_id:
    return {
        'status':
            'fail',
        'code':
            status.HTTP_400_BAD_REQUEST,
        'message':
            """uss_id must be provided in the request to
            delete a USS from a GridCell."""
    }
  return wrapper.delete(zoom, x, y, uss_id)
def _GetGridCellsMetaData(zoom, tiles):
  """Instantaneous snapshot of the metadata for multiple GridCells.

  Args:
    zoom: zoom level in slippy tile format
    tiles: array of x,y tiles to retrieve
  Returns:
    200 with token and JSON metadata,
    or the nominal 4xx error codes as necessary.
  """
  log.info('Grid cells metadata request instantiated for %sz, %s...',
           zoom, str(tiles))
  return wrapper.get_multi(zoom, tiles)
def _PutGridCellsMetaData(zoom, tiles, uss_id):
  """Updates the metadata stored in multiple GridCells.

  Uses optimistic locking: the write fails if the metadata changed since
  GET GridCellsMetadata was originally called (based on the composite
  sync_token).

  Args:
    zoom: zoom level in slippy tile format
    tiles: array of x,y tiles to update
    uss_id: the plain text identifier for the USS from OAuth
  Plus posted webargs:
    sync_token: the composite sync_token retrieved in the
      original GET GridCellsMetadata,
    scope: The submitting USS scope for the web service endpoint,
    operation_endpoint: the submitting USS endpoint where all flights in these
      cells can be retrieved from (variables {zoom}, {x}, and {y} can be used
      in the endpoint, and will be replaced with the actual grid values),
    operation_format: The output format for the USS web service endpoint,
    minimum_operation_timestamp: lower time bound of the USS's flights here,
    maximum_operation_timestamp: upper time bound of the USS's flights here.
  Returns:
    200 and a new composite token if updated successfully,
    409 if there is a locking conflict that could not be resolved, or
    the other nominal 4xx error codes as necessary.
  """
  log.info('Grid cells metadata submit instantiated for %s at %sz, %s...',
           uss_id, zoom, str(tiles))
  sync_token = _GetRequestParameter('sync_token', None)
  if not sync_token and 'sync_token' in request.headers:
    sync_token = request.headers['sync_token']
  scope = _GetRequestParameter('scope', None)
  operation_endpoint = _GetRequestParameter('operation_endpoint', None)
  operation_format = _GetRequestParameter('operation_format', None)
  minimum_operation_timestamp = _GetRequestParameter(
      'minimum_operation_timestamp', None)
  maximum_operation_timestamp = _GetRequestParameter(
      'maximum_operation_timestamp', None)
  # Validate required fields in order, reporting the first missing one.
  # A field may carry a custom message; otherwise a generic one is built.
  checks = (
      ('sync_token', sync_token, None),
      ('uss_id', uss_id, 'USS identifier not received from OAuth token check.'),
      ('scope', scope, None),
      ('operation_endpoint', operation_endpoint, None),
      ('operation_format', operation_format, None),
      ('minimum_operation_timestamp', minimum_operation_timestamp, None),
      ('maximum_operation_timestamp', maximum_operation_timestamp, None),
  )
  for fieldname, value, custom_msg in checks:
    if not value:
      errormsg = custom_msg or (
          fieldname + ' must be provided in the form data request to add to a '
          'GridCell.')
      return {
          'status': 'error',
          'code': status.HTTP_400_BAD_REQUEST,
          'message': errormsg
      }
  return wrapper.set_multi(zoom, tiles, sync_token, uss_id, scope,
                           operation_format, operation_endpoint,
                           minimum_operation_timestamp,
                           maximum_operation_timestamp)
def _DeleteGridCellsMetaData(zoom, tiles, uss_id):
  """Removes the USS entry from multiple GridCells.

  Args:
    zoom: zoom level in slippy tile format
    tiles: array of x,y tiles to delete the uss from
    uss_id: the plain text identifier for the USS from OAuth
  Returns:
    200 and a new sync_token if updated successfully,
    409 if there is a locking conflict that could not be resolved, or
    the other nominal 4xx error codes as necessary.
  """
  log.info('Grid cells metadata delete instantiated for %s, %sz, %s...',
           uss_id, zoom, str(tiles))
  # Guard clause: without a USS identifier there is nothing to delete.
  if not uss_id:
    return {
        'status':
            'fail',
        'code':
            status.HTTP_400_BAD_REQUEST,
        'message':
            """uss_id must be provided in the request to
            delete a USS from a GridCell."""
    }
  return wrapper.delete_multi(zoom, tiles, uss_id)
def _ConvertRequestToTiles(zoom):
"""Converts an CSV of coords into slippy tile format at the specified zoom
and the specified coordinate type (path, polygon, point) """
tiles = []
coords = _GetRequestParameter('coords', '')
coord_type = _GetRequestParameter('coord_type', 'point').lower()
log.debug('Retrieved coords from web params and split to %s...', coords)
coordinates = slippy_util.convert_csv_to_coordinates(coords)
| |
= 2**len(qubits) # e.g., 00, 01 ,10 and 11 in the case of 2q
# nr_shots = nr_shots_per_case*nr_cases
# off and on, not including post selection init measurements yet
nr_cases = 2 ** len(qubits) # e.g., 00, 01 ,10 and 11 in the case of 2q
if initialize:
nr_shots = 2 * nr_shots_per_case * nr_cases
else:
nr_shots = nr_shots_per_case * nr_cases
self.ro_acq_digitized(False)
if prepare_for_timedomain:
self.prepare_for_timedomain(qubits, bypass_flux=True)
if MC is None:
MC = self.instr_MC.get_instr()
qubit_idxs = [self.find_instrument(qn).cfg_qubit_nr() for qn in qubits]
p = mqo.multi_qubit_off_on(
qubit_idxs,
initialize=initialize,
second_excited_state=False,
platf_cfg=self.cfg_openql_platform_fn(),
)
s = swf.OpenQL_Sweep(openql_program=p, CCL=self.instr_CC.get_instr())
# right is LSQ
d = self.get_int_logging_detector(
qubits, result_logging_mode=result_logging_mode
)
# This assumes qubit names do not contain spaces
det_qubits = [v.split()[-1] for v in d.value_names]
if (qubits != det_qubits) and (self.ro_acq_weight_type() == 'optimal'):
# this occurs because the detector groups qubits per feedline.
# If you do not pay attention, this will mess up the analysis of
# this experiment.
raise ValueError('Detector qubits do not match order specified.{} vs {}'.format(qubits, det_qubits))
shots_per_meas = int(
np.floor(np.min([shots_per_meas, nr_shots]) / nr_cases) * nr_cases
)
d.set_child_attr("nr_shots", shots_per_meas)
old_soft_avg = MC.soft_avg()
old_live_plot_enabled = MC.live_plot_enabled()
MC.soft_avg(1)
MC.live_plot_enabled(False)
MC.set_sweep_function(s)
MC.set_sweep_points(np.arange(nr_shots))
MC.set_detector_function(d)
MC.run("{}_{}_{}".format(label, qubits, self.msmt_suffix))
MC.soft_avg(old_soft_avg)
MC.live_plot_enabled(old_live_plot_enabled)
if analyze:
if initialize:
thresholds = [
self.find_instrument(qubit).ro_acq_threshold()
for qubit in qubits]
a = ma2.Multiplexed_Readout_Analysis(
label=label,
nr_qubits=len(qubits),
post_selection=True,
post_selec_thresholds=thresholds)
# Print fraction of discarded shots
# Dict = a.proc_data_dict['Post_selected_shots']
# key = next(iter(Dict))
# fraction=0
# for comb in Dict[key].keys():
# fraction += len(Dict[key][comb])/(2**12 * 4)
# print('Fraction of discarded results was {:.2f}'.format(1-fraction))
else:
a = ma2.Multiplexed_Readout_Analysis(
label=label,
nr_qubits=len(qubits))
# Set thresholds
for i, qubit in enumerate(qubits):
label = a.Channels[i]
threshold = a.qoi[label]['threshold_raw']
self.find_instrument(qubit).ro_acq_threshold(threshold)
return
def measure_ssro_single_qubit(
self,
qubits: list,
q_target: str,
nr_shots: int = 2 ** 13, # 8192
prepare_for_timedomain: bool = True,
second_excited_state: bool = False,
result_logging_mode='raw',
initialize: bool = False,
analyze=True,
shots_per_meas: int = 2 ** 16,
nr_flux_dance: int = None,
wait_time: float = None,
label='Mux_SSRO',
MC=None):
'''
Performs MUX single shot readout experiments of all possible
combinations of prepared states of <qubits>. Outputs analysis
of a single qubit <q_target>. This function is meant to
assess a particular qubit readout in the multiplexed context.
Args:
qubits: List of qubits adressed in the mux readout.
q_target: Qubit targeted in the analysis.
nr_shots: number of shots for each prepared state of
q_target. That is the experiment will include
<nr_shots> shots of the qubit prepared in the ground state
and <nr_shots> shots of the qubit prepared in the excited
state. The remaining qubits will be prepared such that the
experiment goes through all 2**n possible combinations of
computational states.
initialize: Include measurement post-selection by
initialization.
'''
log.info('{}.measure_ssro_multi_qubit for qubits{}'.format(
self.name, qubits))
# off and on, not including post selection init measurements yet
nr_cases = 2 ** len(qubits) # e.g., 00, 01 ,10 and 11 in the case of 2q
if second_excited_state:
nr_cases = 3 ** len(qubits)
if initialize == True:
nr_shots = 4 * nr_shots
else:
nr_shots = 2 * nr_shots
if prepare_for_timedomain:
self.prepare_for_timedomain(qubits)
if MC is None:
MC = self.instr_MC.get_instr()
qubit_idxs = [self.find_instrument(qn).cfg_qubit_nr()
for qn in qubits]
p = mqo.multi_qubit_off_on(qubit_idxs,
initialize=initialize,
nr_flux_dance=nr_flux_dance,
wait_time=wait_time,
second_excited_state=second_excited_state,
platf_cfg=self.cfg_openql_platform_fn())
s = swf.OpenQL_Sweep(openql_program=p,
CCL=self.instr_CC.get_instr())
# right is LSQ
d = self.get_int_logging_detector(qubits,
result_logging_mode=result_logging_mode)
# This assumes qubit names do not contain spaces
det_qubits = [v.split()[-1] for v in d.value_names]
if (qubits != det_qubits) and (self.ro_acq_weight_type() == 'optimal'):
# this occurs because the detector groups qubits per feedline.
# If you do not pay attention, this will mess up the analysis of
# this experiment.
raise ValueError('Detector qubits do not match order specified.{} vs {}'.format(qubits, det_qubits))
shots_per_meas = int(np.floor(
np.min([shots_per_meas, nr_shots]) / nr_cases) * nr_cases)
d.set_child_attr('nr_shots', shots_per_meas)
old_soft_avg = MC.soft_avg()
old_live_plot_enabled = MC.live_plot_enabled()
MC.soft_avg(1)
MC.live_plot_enabled(False)
MC.set_sweep_function(s)
MC.set_sweep_points(np.arange(nr_shots))
MC.set_detector_function(d)
MC.run('{}_{}_{}'.format(label, q_target, self.msmt_suffix))
MC.soft_avg(old_soft_avg)
MC.live_plot_enabled(old_live_plot_enabled)
if analyze:
if initialize == True:
thresholds = [self.find_instrument(qubit).ro_acq_threshold() \
for qubit in qubits]
a = ma2.Multiplexed_Readout_Analysis(label=label,
nr_qubits=len(qubits),
q_target=q_target,
post_selection=True,
post_selec_thresholds=thresholds)
# Print fraction of discarded shots
# Dict = a.proc_data_dict['Post_selected_shots']
# key = next(iter(Dict))
# fraction=0
# for comb in Dict[key].keys():
# fraction += len(Dict[key][comb])/(2**12 * 4)
# print('Fraction of discarded results was {:.2f}'.format(1-fraction))
else:
a = ma2.Multiplexed_Readout_Analysis(label=label,
nr_qubits=len(qubits),
q_target=q_target)
q_ch = [ch for ch in a.Channels if q_target in ch.decode()][0]
# Set thresholds
for i, qubit in enumerate(qubits):
label = a.raw_data_dict['value_names'][i]
threshold = a.qoi[label]['threshold_raw']
self.find_instrument(qubit).ro_acq_threshold(threshold)
return a.qoi[q_ch]
    def measure_transients(self,
                           qubits: list,
                           q_target: str,
                           cases: list = ['off', 'on'],
                           MC=None,
                           prepare_for_timedomain: bool = True,
                           analyze: bool = True):
        '''
        Measure the raw acquisition transients of <q_target> in the
        multiplexed-readout context of <qubits>, once per entry in <cases>.
        The 'off'/'on' case is realized by switching the target qubit's
        microwave LO off/on while running the same OpenQL program.

        Returns a list with one Multiplexed_Transient_Analysis per case
        (entries stay None when analyze is False).
        '''
        if q_target not in qubits:
            raise ValueError("q_target must be included in qubits.")

        # Ensure all qubits use same acquisition instrument
        instruments = [self.find_instrument(q).instr_acquisition() for q in qubits]
        if instruments[1:] != instruments[:-1]:
            raise ValueError("All qubits must have common acquisition instrument")

        qubits_nr = [self.find_instrument(q).cfg_qubit_nr() for q in qubits]
        q_target_nr = self.find_instrument(q_target).cfg_qubit_nr()

        if MC is None:
            MC = self.instr_MC.get_instr()
        if prepare_for_timedomain:
            self.prepare_for_timedomain(qubits)

        # NOTE(review): the program is always compiled with pulse_comb='on';
        # the 'off' case is produced below by gating the LO -- confirm.
        p = mqo.targeted_off_on(
            qubits=qubits_nr,
            q_target=q_target_nr,
            pulse_comb='on',
            platf_cfg=self.cfg_openql_platform_fn()
        )

        analysis = [None for case in cases]
        for i, pulse_comb in enumerate(cases):
            if 'off' in pulse_comb.lower():
                self.find_instrument(q_target).instr_LO_mw.get_instr().off()
            elif 'on' in pulse_comb.lower():
                self.find_instrument(q_target).instr_LO_mw.get_instr().on()
            else:
                raise ValueError(
                    "pulse_comb {} not understood: Only 'on' and 'off' allowed.".
                    format(pulse_comb))

            s = swf.OpenQL_Sweep(openql_program=p,
                                 parameter_name='Transient time', unit='s',
                                 CCL=self.instr_CC.get_instr())

            # hardware sampling rate of the acquisition instrument
            if 'UHFQC' in instruments[0]:
                sampling_rate = 1.8e9
            else:
                raise NotImplementedError()
            # number of raw samples spanned by the integration window
            nr_samples = self.ro_acq_integration_length() * sampling_rate

            d = det.UHFQC_input_average_detector(
                UHFQC=self.find_instrument(instruments[0]),
                AWG=self.instr_CC.get_instr(),
                nr_averages=self.ro_acq_averages(),
                nr_samples=int(nr_samples))

            MC.set_sweep_function(s)
            MC.set_sweep_points(np.arange(nr_samples) / sampling_rate)
            MC.set_detector_function(d)
            MC.run('Mux_transients_{}_{}_{}'.format(q_target, pulse_comb,
                                                    self.msmt_suffix))
            if analyze:
                analysis[i] = ma2.Multiplexed_Transient_Analysis(
                    q_target='{}_{}'.format(q_target, pulse_comb))
        return analysis
    def measure_msmt_induced_dephasing_matrix(self, qubits: list,
                                              analyze=True, MC=None,
                                              prepare_for_timedomain=True,
                                              amps_rel=np.linspace(0, 1, 11),
                                              verbose=True,
                                              get_quantum_eff: bool = False,
                                              dephasing_sequence='ramsey',
                                              selected_target=None,
                                              selected_measured=None,
                                              target_qubit_excited=False,
                                              extra_echo=False,
                                              echo_delay=0e-9):
        """
        Measures the msmt induced dephasing for readout the readout of qubits
        i on qubit j. Additionally measures the SNR as a function of amplitude
        for the diagonal elements to obtain the quantum efficiency.
        In order to use this: make sure that
        - all readout_and_depletion pulses are of equal total length
        - the cc light to has the readout time configured equal to the
          measurement and depletion time + 60 ns buffer
        FIXME: not sure if the weight function assignment is working correctly.

        the qubit objects will use SSB for the dephasing measurements.

        NOTE(review): 'echo_delay' is accepted but never used in this body,
        and the CrossDephasingAnalysis result ('ca') is not returned --
        confirm whether callers rely on these side effects only.
        """
        # label pattern filled in per (target, measured) qubit pair below
        lpatt = "_trgt_{TQ}_measured_{RQ}"
        if prepare_for_timedomain:
            # for q in qubits:
            #     q.prepare_for_timedomain()
            self.prepare_for_timedomain(qubits=qubits)

        # Save old qubit suffixes
        old_suffixes = [self.find_instrument(q).msmt_suffix for q in qubits]
        old_suffix = self.msmt_suffix

        # Save the start-time of the experiment for analysis
        start = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")

        # Loop over all target and measurement qubits
        target_qubits = [self.find_instrument(q) for q in qubits]
        measured_qubits = [self.find_instrument(q) for q in qubits]
        # optionally restrict the matrix to a single row/column
        if selected_target != None:
            target_qubits = [target_qubits[selected_target]]
        if selected_measured != None:
            measured_qubits = [measured_qubits[selected_measured]]
        for target_qubit in target_qubits:
            for measured_qubit in measured_qubits:
                # Set measurement label suffix
                s = lpatt.replace("{TQ}", target_qubit.name)
                s = s.replace("{RQ}", measured_qubit.name)
                measured_qubit.msmt_suffix = s
                target_qubit.msmt_suffix = s

                # Print label
                if verbose:
                    print(s)

                # Slight differences if diagonal element
                if target_qubit == measured_qubit:
                    amps_rel = amps_rel  # no-op, kept for symmetry with else
                    mqp = None
                    list_target_qubits = None
                else:
                    # t_amp_max = max(target_qubit.ro_pulse_down_amp0(),
                    #                 target_qubit.ro_pulse_down_amp1(),
                    #                 target_qubit.ro_pulse_amp())
                    # amp_max = max(t_amp_max, measured_qubit.ro_pulse_amp())
                    # amps_rel = np.linspace(0, 0.49/(amp_max), n_amps_rel)
                    amps_rel = amps_rel  # no-op, see commented rescaling above
                    mqp = self.cfg_openql_platform_fn()
                    list_target_qubits = [
                        target_qubit,
                    ]

                # If a diagonal element, consider doing the full quantum
                # efficiency matrix.
                if target_qubit == measured_qubit and get_quantum_eff:
                    res = measured_qubit.measure_quantum_efficiency(
                        verbose=verbose,
                        amps_rel=amps_rel,
                        dephasing_sequence=dephasing_sequence,
                    )
                else:
                    res = measured_qubit.measure_msmt_induced_dephasing_sweeping_amps(
                        verbose=verbose,
                        amps_rel=amps_rel,
                        cross_target_qubits=list_target_qubits,
                        multi_qubit_platf_cfg=mqp,
                        analyze=True,
                        sequence=dephasing_sequence,
                        target_qubit_excited=target_qubit_excited,
                        extra_echo=extra_echo,
                        # buffer_time=buffer_time
                    )
                # Print the result of the measurement
                if verbose:
                    print(res)

        # Save the end-time of the experiment
        stop = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")

        # reset the msmt_suffix'es
        for qi, q in enumerate(qubits):
            self.find_instrument(q).msmt_suffix = old_suffixes[qi]
        self.msmt_suffix = old_suffix

        # Run the analysis for this experiment
        if analyze:
            options_dict = {
                "verbose": True,
            }
            qarr = qubits
            labelpatt = 'ro_amp_sweep_dephasing'+lpatt
            # NOTE(review): 'ca' is created for its side effects only and is
            # never returned
            ca = ma2.CrossDephasingAnalysis(t_start=start, t_stop=stop,
                                            label_pattern=labelpatt,
                                            qubit_labels=qarr,
                                            options_dict=options_dict)
def measure_chevron(
self,
q0: str,
q_spec: str,
q_parks=None,
amps=np.arange(0, 1, 0.05),
lengths=np.arange(5e-9, 51e-9, 5e-9),
adaptive_sampling=False,
adaptive_sampling_pts=None,
adaptive_pars: dict = None,
prepare_for_timedomain=True,
MC=None,
freq_tone=6e9,
pow_tone=-10,
spec_tone=False,
target_qubit_sequence: str = "ramsey",
waveform_name="square",
recover_q_spec: bool = | |
= sys.exc_info()
print "ERROR checkMergeGenerationStatus : %s %s" % (type,value)
return EC_Failed,None
# get full job status
def getFullJobStatus(ids,verbose):
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getFullJobStatus'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR getFullJobStatus : %s %s" % (type,value)
return EC_Failed,None
# get slimmed file info
def getSlimmedFileInfoPandaIDs(ids,verbose):
# serialize
strIDs = pickle.dumps(ids)
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getSlimmedFileInfoPandaIDs'
data = {'ids':strIDs}
status,output = curl.post(url,data)
try:
return status,pickle.loads(output)
except:
type, value, traceBack = sys.exc_info()
print "ERROR getSlimmedFileInfoPandaIDs : %s %s" % (type,value)
return EC_Failed,None
# get input files currently in used for analysis
def getFilesInUseForAnal(outDataset,verbose):
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getDisInUseForAnal'
data = {'outDataset':outDataset}
status,output = curl.post(url,data)
try:
inputDisList = pickle.loads(output)
# failed
if inputDisList == None:
print "ERROR getFilesInUseForAnal : failed to get shadow dis list from the panda server"
sys.exit(EC_Failed)
# split to small chunks to avoid timeout
retLFNs = []
nDis = 3
iDis = 0
while iDis < len(inputDisList):
# serialize
strInputDisList = pickle.dumps(inputDisList[iDis:iDis+nDis])
# get LFNs
url = baseURLSSL + '/getLFNsInUseForAnal'
data = {'inputDisList':strInputDisList}
status,output = curl.post(url,data)
tmpLFNs = pickle.loads(output)
if tmpLFNs == None:
print "ERROR getFilesInUseForAnal : failed to get LFNs in shadow dis from the panda server"
sys.exit(EC_Failed)
retLFNs += tmpLFNs
iDis += nDis
time.sleep(1)
return retLFNs
except:
type, value, traceBack = sys.exc_info()
print "ERROR getFilesInUseForAnal : %s %s" % (type,value)
sys.exit(EC_Failed)
# set debug mode
def setDebugMode(pandaID, modeOn, verbose):
    """Turn server-side debug mode on/off for job *pandaID*.

    Returns the (status, output) pair from the server.
    """
    # instantiate curl
    curl = _Curl()
    curl.sslCert = _x509()
    curl.sslKey = _x509()
    curl.verbose = verbose
    # execute
    url = baseURLSSL + '/setDebugMode'
    data = {'pandaID': pandaID, 'modeOn': modeOn}
    status, output = curl.post(url, data)
    # the old try/except around this return was dead code: returning a
    # tuple cannot raise, so the EC_Failed fallback was unreachable
    return status, output
# set tmp dir
def setGlobalTmpDir(tmpDir):
    """Set the module-level temporary directory used for scratch files."""
    global globalTmpDir
    globalTmpDir = tmpDir
# exclude site
def excludeSite(excludedSiteList, origFullExecString='', infoList=None):
    """Mark sites matching the given patterns as excluded in PandaSites.

    excludedSiteList   : list of comma-separated site-pattern strings
    origFullExecString : URL-quoted original command line; when non-empty,
                         its --excludedSite options define which exclusions
                         count as user-specified
    infoList           : optional out-parameter collecting brokerage
                         messages for user-specified exclusions
    """
    # BUGFIX: the default was a mutable list ([]) shared between calls,
    # which silently accumulated messages across submission cycles
    if infoList is None:
        infoList = []
    if excludedSiteList == []:
        return
    # decompose comma-separated patterns into a flat, de-duplicated list
    excludedSite = []
    for tmpItemList in excludedSiteList:
        for tmpItem in tmpItemList.split(','):
            if tmpItem != '' and tmpItem not in excludedSite:
                excludedSite.append(tmpItem)
    # get list of original excludedSites
    origExcludedSite = []
    if origFullExecString != '':
        # extract original excludedSite
        origFullExecString = urllib.unquote(origFullExecString)
        matchItr = re.finditer(r'--excludedSite\s*=*([^ "]+)', origFullExecString)
        for match in matchItr:
            origExcludedSite += match.group(1).split(',')
    else:
        # use excludedSite since this is the first loop
        origExcludedSite = excludedSite
    # remove empty
    if '' in origExcludedSite:
        origExcludedSite.remove('')
    # sites composed of long/short queues
    compSites = ['CERN', 'LYON', 'BNL']
    # remove sites
    global PandaSites
    for tmpPatt in excludedSite:
        # skip empty
        if tmpPatt == '':
            continue
        # check if the user specified this pattern explicitly
        userSpecified = False
        if tmpPatt in origExcludedSite:
            userSpecified = True
        # check if it is a composite
        for tmpComp in compSites:
            if tmpComp in tmpPatt:
                # use generic ID to remove all queues
                tmpPatt = tmpComp
                break
        sites = PandaSites.keys()
        for site in sites:
            # look for pattern
            if tmpPatt in site:
                try:
                    # add brokerage info
                    if userSpecified and PandaSites[site]['status'] == 'online' and not isExcudedSite(site):
                        msgBody = 'action=exclude site=%s reason=useroption - excluded by user' % site
                        if msgBody not in infoList:
                            infoList.append(msgBody)
                        PandaSites[site]['status'] = 'excluded'
                    else:
                        # already used by previous submission cycles
                        PandaSites[site]['status'] = 'panda_excluded'
                except Exception:
                    # best-effort: ignore malformed site entries
                    pass
# use certain sites
def useCertainSites(sitePat):
    """Restrict PandaSites to sites matching a comma-separated pattern.

    A pattern without a comma is returned unchanged together with an
    empty cloud list. Otherwise non-matching sites are marked 'skip'
    and ('AUTO', clouds) is returned, where clouds lists the clouds
    that still contain usable sites.
    """
    # single pattern: nothing to filter (was '== None' on a match object)
    if re.search(',', sitePat) is None:
        return sitePat, []
    # remove sites
    global PandaSites
    sites = PandaSites.keys()
    cloudsForRandom = []
    for site in sites:
        # look for pattern
        useThisSite = False
        for tmpPatt in sitePat.split(','):
            if tmpPatt in site:
                useThisSite = True
                break
        # delete
        if not useThisSite:
            PandaSites[site]['status'] = 'skip'
        else:
            if PandaSites[site]['cloud'] not in cloudsForRandom:
                cloudsForRandom.append(PandaSites[site]['cloud'])
    # return
    return 'AUTO', cloudsForRandom
# get client version
def getPandaClientVer(verbose):
    """Ask the panda server for its client version string.

    Returns (status, version) on success, (status, raw_output) on a
    transport failure, or (EC_Failed, message) for a malformed version.
    """
    client = _Curl()
    client.verbose = verbose
    status, output = client.get(baseURL + '/getPandaClientVer', {})
    if status != 0:
        # transport-level failure: pass the raw response through
        return status, output
    # the version must look like X.Y.Z
    if re.search(r'^\d+\.\d+\.\d+$', output) is None:
        return EC_Failed, "invalid version '%s'" % output
    return status, output
# get list of cache prefix
def getCachePrefixes(verbose):
# instantiate curl
curl = _Curl()
curl.verbose = verbose
# execute
url = baseURL + '/getCachePrefixes'
status,output = curl.get(url,{})
# failed
if status != 0:
print output
errStr = "cannot get the list of Athena projects"
tmpLog.error(errStr)
sys.exit(EC_Failed)
# return
try:
tmpList = pickle.loads(output)
tmpList.append('AthAnalysisBase')
return tmpList
except:
print output
errType,errValue = sys.exc_info()[:2]
print "ERROR: getCachePrefixes : %s %s" % (errType,errValue)
sys.exit(EC_Failed)
# get list of cmtConfig
def getCmtConfigList(athenaVer,verbose):
# instantiate curl
curl = _Curl()
curl.verbose = verbose
# execute
url = baseURL + '/getCmtConfigList'
data = {}
data['relaseVer'] = athenaVer
status,output = curl.get(url,data)
# failed
if status != 0:
print output
errStr = "cannot get the list of cmtconfig for %s" % athenaVer
tmpLog.error(errStr)
sys.exit(EC_Failed)
# return
try:
return pickle.loads(output)
except:
print output
errType,errValue = sys.exc_info()[:2]
print "ERROR: getCmtConfigList : %s %s" % (errType,errValue)
sys.exit(EC_Failed)
# get files in dataset with filter
def getFilesInDatasetWithFilter(inDS,filter,shadowList,inputFileListName,verbose,dsStringFlag=False,isRecursive=False,
                                antiFilter='',notSkipLog=False):
    """Query the files in *inDS* and prune them by several criteria.

    Removed from the result map, in order: log files (unless notSkipLog),
    names not matching *filter*, names matching *antiFilter*, names not
    listed in *inputFileListName* (when given), and names already present
    in *shadowList*. Exits with EC_Failed when the filters leave nothing.

    Returns the surviving {LFN: info} map, plus the dataset string when
    dsStringFlag is True.
    """
    # get logger
    tmpLog = PLogger.getPandaLogger()
    # query files in dataset
    if not isRecursive or verbose:
        tmpLog.info("query files in %s" % inDS)
    if dsStringFlag:
        inputFileMap,inputDsString = queryFilesInDataset(inDS,verbose,getDsString=True)
    else:
        inputFileMap = queryFilesInDataset(inDS,verbose)
    # read list of files to be used
    filesToBeUsed = []
    if inputFileListName != '':
        rFile = open(inputFileListName)
        for line in rFile:
            line = re.sub('\n','',line)
            line = line.strip()
            if line != '':
                filesToBeUsed.append(line)
        rFile.close()
    # get list of filters
    filters = []
    if filter != '':
        filters = filter.split(',')
    antifilters = []
    if antiFilter != '':
        antifilters = antiFilter.split(',')
    # remove redundant files
    # (iterating a snapshot of the keys so entries can be deleted in-place;
    # Python 2 .keys() already returns a list)
    tmpKeys = inputFileMap.keys()
    filesPassFilter = []
    for tmpLFN in tmpKeys:
        # remove log
        if not notSkipLog:
            if re.search('\.log(\.tgz)*(\.\d+)*$',tmpLFN) != None or \
               re.search('\.log(\.\d+)*(\.tgz)*$',tmpLFN) != None:
                del inputFileMap[tmpLFN]
                continue
        # filename matching
        if filter != '':
            matchFlag = False
            for tmpFilter in filters:
                if re.search(tmpFilter,tmpLFN) != None:
                    matchFlag = True
                    break
            if not matchFlag:
                del inputFileMap[tmpLFN]
                continue
        # anti matching
        if antiFilter != '':
            antiMatchFlag = False
            for tmpFilter in antifilters:
                if re.search(tmpFilter,tmpLFN) != None:
                    antiMatchFlag = True
                    break
            if antiMatchFlag:
                del inputFileMap[tmpLFN]
                continue
        # files to be used
        if filesToBeUsed != []:
            # check matching
            matchFlag = False
            for pattern in filesToBeUsed:
                # normal matching
                if pattern == tmpLFN:
                    matchFlag =True
                    break
            # doesn't match
            if not matchFlag:
                del inputFileMap[tmpLFN]
                continue
        # files which pass the matching filters
        filesPassFilter.append(tmpLFN)
        # files in shadow
        if tmpLFN in shadowList:
            if inputFileMap.has_key(tmpLFN):
                del inputFileMap[tmpLFN]
            continue
    # no files in filelist are available
    if inputFileMap == {} and (filter != '' or antiFilter != '' or inputFileListName != '') and filesPassFilter == []:
        if inputFileListName != '':
            errStr = "Files specified in %s are unavailable in %s. " % (inputFileListName,inDS)
        elif filter != '':
            errStr = "Files matching with %s are unavailable in %s. " % (filters,inDS)
        else:
            errStr = "Files unmatching with %s are unavailable in %s. " % (antifilters,inDS)
        errStr += "Make sure that you specify correct file names or matching patterns"
        tmpLog.error(errStr)
        sys.exit(EC_Failed)
    # return
    if dsStringFlag:
        return inputFileMap,inputDsString
    return inputFileMap
# check if DQ2-free site
def isDQ2free(site):
    """Return True when *site* is known and uses local ('DQ2-free') DDM."""
    if not PandaSites.has_key(site):
        return False
    return PandaSites[site]['ddm'] == 'local'
# check queued analysis jobs at a site
def checkQueuedAnalJobs(site,verbose=False):
# get logger
tmpLog = PLogger.getPandaLogger()
# instantiate curl
curl = _Curl()
curl.sslCert = _x509()
curl.sslKey = _x509()
curl.verbose = verbose
# execute
url = baseURLSSL + '/getQueuedAnalJobs'
data = {'site':site}
status,output = curl.post(url,data)
try:
# get queued analysis
queuedMap = pickle.loads(output)
if queuedMap.has_key('running') and queuedMap.has_key('queued'):
if queuedMap['running'] > 20 and queuedMap['queued'] > 2 * queuedMap['running']:
warStr = 'Your job might be delayed since %s is busy. ' % site
warStr += 'There are %s jobs already queued by other users while %s jobs are running. ' \
% (queuedMap['queued'],queuedMap['running'])
warStr += 'Please consider replicating the input dataset to a free site '
warStr += 'or avoiding the | |
21168 * uk_138
+ 1500282 * uk_139
+ 8949717 * uk_14
+ 1722546 * uk_140
+ 333396 * uk_141
+ 1977738 * uk_142
+ 382788 * uk_143
+ 74088 * uk_144
+ 1728 * uk_145
+ 27216 * uk_146
+ 31248 * uk_147
+ 6048 * uk_148
+ 428652 * uk_149
+ 10275601 * uk_15
+ 492156 * uk_150
+ 95256 * uk_151
+ 565068 * uk_152
+ 109368 * uk_153
+ 21168 * uk_154
+ 6751269 * uk_155
+ 7751457 * uk_156
+ 1500282 * uk_157
+ 8899821 * uk_158
+ 1722546 * uk_159
+ 1988826 * uk_16
+ 333396 * uk_160
+ 10218313 * uk_161
+ 1977738 * uk_162
+ 382788 * uk_163
+ 74088 * uk_164
+ 3969 * uk_17
+ 6489 * uk_18
+ 2646 * uk_19
+ 63 * uk_2
+ 756 * uk_20
+ 11907 * uk_21
+ 13671 * uk_22
+ 2646 * uk_23
+ 10609 * uk_24
+ 4326 * uk_25
+ 1236 * uk_26
+ 19467 * uk_27
+ 22351 * uk_28
+ 4326 * uk_29
+ 103 * uk_3
+ 1764 * uk_30
+ 504 * uk_31
+ 7938 * uk_32
+ 9114 * uk_33
+ 1764 * uk_34
+ 144 * uk_35
+ 2268 * uk_36
+ 2604 * uk_37
+ 504 * uk_38
+ 35721 * uk_39
+ 42 * uk_4
+ 41013 * uk_40
+ 7938 * uk_41
+ 47089 * uk_42
+ 9114 * uk_43
+ 1764 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 230957580727 * uk_47
+ 94176877578 * uk_48
+ 26907679308 * uk_49
+ 12 * uk_5
+ 423795949101 * uk_50
+ 486580534153 * uk_51
+ 94176877578 * uk_52
+ 187944057 * uk_53
+ 307273617 * uk_54
+ 125296038 * uk_55
+ 35798868 * uk_56
+ 563832171 * uk_57
+ 647362863 * uk_58
+ 125296038 * uk_59
+ 189 * uk_6
+ 502367977 * uk_60
+ 204849078 * uk_61
+ 58528308 * uk_62
+ 921820851 * uk_63
+ 1058386903 * uk_64
+ 204849078 * uk_65
+ 83530692 * uk_66
+ 23865912 * uk_67
+ 375888114 * uk_68
+ 431575242 * uk_69
+ 217 * uk_7
+ 83530692 * uk_70
+ 6818832 * uk_71
+ 107396604 * uk_72
+ 123307212 * uk_73
+ 23865912 * uk_74
+ 1691496513 * uk_75
+ 1942088589 * uk_76
+ 375888114 * uk_77
+ 2229805417 * uk_78
+ 431575242 * uk_79
+ 42 * uk_8
+ 83530692 * uk_80
+ 250047 * uk_81
+ 408807 * uk_82
+ 166698 * uk_83
+ 47628 * uk_84
+ 750141 * uk_85
+ 861273 * uk_86
+ 166698 * uk_87
+ 668367 * uk_88
+ 272538 * uk_89
+ 2242306609 * uk_9
+ 77868 * uk_90
+ 1226421 * uk_91
+ 1408113 * uk_92
+ 272538 * uk_93
+ 111132 * uk_94
+ 31752 * uk_95
+ 500094 * uk_96
+ 574182 * uk_97
+ 111132 * uk_98
+ 9072 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 144396 * uk_100
+ 164052 * uk_101
+ 77868 * uk_102
+ 2298303 * uk_103
+ 2611161 * uk_104
+ 1239399 * uk_105
+ 2966607 * uk_106
+ 1408113 * uk_107
+ 668367 * uk_108
+ 5451776 * uk_109
+ 8334128 * uk_11
+ 3190528 * uk_110
+ 371712 * uk_111
+ 5916416 * uk_112
+ 6721792 * uk_113
+ 3190528 * uk_114
+ 1867184 * uk_115
+ 217536 * uk_116
+ 3462448 * uk_117
+ 3933776 * uk_118
+ 1867184 * uk_119
+ 4877359 * uk_12
+ 25344 * uk_120
+ 403392 * uk_121
+ 458304 * uk_122
+ 217536 * uk_123
+ 6420656 * uk_124
+ 7294672 * uk_125
+ 3462448 * uk_126
+ 8287664 * uk_127
+ 3933776 * uk_128
+ 1867184 * uk_129
+ 568236 * uk_13
+ 1092727 * uk_130
+ 127308 * uk_131
+ 2026319 * uk_132
+ 2302153 * uk_133
+ 1092727 * uk_134
+ 14832 * uk_135
+ 236076 * uk_136
+ 268212 * uk_137
+ 127308 * uk_138
+ 3757543 * uk_139
+ 9044423 * uk_14
+ 4269041 * uk_140
+ 2026319 * uk_141
+ 4850167 * uk_142
+ 2302153 * uk_143
+ 1092727 * uk_144
+ 1728 * uk_145
+ 27504 * uk_146
+ 31248 * uk_147
+ 14832 * uk_148
+ 437772 * uk_149
+ 10275601 * uk_15
+ 497364 * uk_150
+ 236076 * uk_151
+ 565068 * uk_152
+ 268212 * uk_153
+ 127308 * uk_154
+ 6967871 * uk_155
+ 7916377 * uk_156
+ 3757543 * uk_157
+ 8993999 * uk_158
+ 4269041 * uk_159
+ 4877359 * uk_16
+ 2026319 * uk_160
+ 10218313 * uk_161
+ 4850167 * uk_162
+ 2302153 * uk_163
+ 1092727 * uk_164
+ 3969 * uk_17
+ 11088 * uk_18
+ 6489 * uk_19
+ 63 * uk_2
+ 756 * uk_20
+ 12033 * uk_21
+ 13671 * uk_22
+ 6489 * uk_23
+ 30976 * uk_24
+ 18128 * uk_25
+ 2112 * uk_26
+ 33616 * uk_27
+ 38192 * uk_28
+ 18128 * uk_29
+ 176 * uk_3
+ 10609 * uk_30
+ 1236 * uk_31
+ 19673 * uk_32
+ 22351 * uk_33
+ 10609 * uk_34
+ 144 * uk_35
+ 2292 * uk_36
+ 2604 * uk_37
+ 1236 * uk_38
+ 36481 * uk_39
+ 103 * uk_4
+ 41447 * uk_40
+ 19673 * uk_41
+ 47089 * uk_42
+ 22351 * uk_43
+ 10609 * uk_44
+ 106179944855977 * uk_45
+ 141265316367 * uk_46
+ 394645963184 * uk_47
+ 230957580727 * uk_48
+ 26907679308 * uk_49
+ 12 * uk_5
+ 428280562319 * uk_50
+ 486580534153 * uk_51
+ 230957580727 * uk_52
+ 187944057 * uk_53
+ 525050064 * uk_54
+ 307273617 * uk_55
+ 35798868 * uk_56
+ 569798649 * uk_57
+ 647362863 * uk_58
+ 307273617 * uk_59
+ 191 * uk_6
+ 1466806528 * uk_60
+ 858415184 * uk_61
+ 100009536 * uk_62
+ 1591818448 * uk_63
+ 1808505776 * uk_64
+ 858415184 * uk_65
+ 502367977 * uk_66
+ 58528308 * uk_67
+ 931575569 * uk_68
+ 1058386903 * uk_69
+ 217 * uk_7
+ 502367977 * uk_70
+ 6818832 * uk_71
+ 108533076 * uk_72
+ 123307212 * uk_73
+ 58528308 * uk_74
+ 1727484793 * uk_75
+ 1962639791 * uk_76
+ 931575569 * uk_77
+ 2229805417 * uk_78
+ 1058386903 * uk_79
+ 103 * uk_8
+ 502367977 * uk_80
+ 250047 * uk_81
+ 698544 * uk_82
+ 408807 * uk_83
+ 47628 * uk_84
+ 758079 * uk_85
+ 861273 * uk_86
+ 408807 * uk_87
+ 1951488 * uk_88
+ 1142064 * uk_89
+ 2242306609 * uk_9
+ 133056 * uk_90
+ 2117808 * uk_91
+ 2406096 * uk_92
+ 1142064 * uk_93
+ 668367 * uk_94
+ 77868 * uk_95
+ 1239399 * uk_96
+ 1408113 * uk_97
+ 668367 * uk_98
+ 9072 * uk_99,
uk_0
+ 47353 * uk_1
+ 2983239 * uk_10
+ 97272 * uk_100
+ 109368 * uk_101
+ 88704 * uk_102
+ 2346687 * uk_103
+ 2638503 * uk_104
+ 2139984 * uk_105
+ 2966607 * uk_106
+ 2406096 * uk_107
+ 1951488 * uk_108
+ 314432 * uk_109
+ 3220004 * uk_11
+ 813824 * uk_110
+ 36992 * uk_111
+ 892432 * uk_112
+ 1003408 * uk_113
+ 813824 * uk_114
+ 2106368 * uk_115
+ 95744 * uk_116
+ 2309824 * uk_117
+ 2597056 * uk_118
+ 2106368 * uk_119
+ 8334128 * uk_12
+ 4352 * uk_120
+ 104992 * uk_121
+ 118048 * uk_122
+ 95744 * uk_123
+ 2532932 * uk_124
+ 2847908 * uk_125
+ 2309824 * uk_126
+ 3202052 * uk_127
+ 2597056 * uk_128
+ 2106368 * uk_129
+ 378824 * | |
###
# Copyright (c) 2002-2005, <NAME>
# Copyright (c) 2008-2009,2011, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import sys
import time
import socket
import discord
from . import registry, utils
###
# The standard registry.
###
# Root group of the standard configuration registry; all bot-wide
# settings below hang off this 'red' namespace.
red = registry.Group()
red.setName('red')
def validServer(name):
    """Return True if *name* looks like a Discord server (guild) ID.

    Discord IDs are large integer snowflakes; anything that cannot be
    converted to an int above the threshold is rejected.
    """
    try:
        # int() raises ValueError/TypeError for non-numeric input; the old
        # bare 'except' also swallowed KeyboardInterrupt/SystemExit
        return int(name) > 1000000000000000
    except (ValueError, TypeError):
        return False
def registerGroup(Group, name, group=None, **kwargs):
    """Register a subgroup called *name* on *Group*.

    Keyword arguments, when present, build a fresh registry.Group that
    takes precedence over the *group* argument.
    """
    new_group = registry.Group(**kwargs) if kwargs else group
    return Group.register(name, new_group)
def registerGlobalValue(group, name, value):
    """Register *value* as a global setting (no per-server or
    per-channel overrides)."""
    # global values are scoped to neither a channel nor a server
    value.channelValue = False
    value.serverValue = False
    return group.register(name, value)
def registerServerValue(group, name, value):
    """Register *value* as a per-server setting and eagerly load any
    server-specific overrides already present in the registry cache so
    they always persist."""
    value._supplyDefault = True
    value.serverValue = True
    value.channelValue = False
    registered = group.register(name, value)
    prefix = registered._name.lower()
    for cached in registry._cache.keys():
        if not cached.lower().startswith(prefix) or len(cached) <= len(prefix):
            continue
        remainder = cached[len(prefix) + 1:]  # +1 skips the joining '.'
        parts = registry.split(remainder)
        if len(parts) == 1 and validServer(parts[0]):
            # touching the per-server value keeps it alive across flushes
            registered.get(parts[0])()
def registerChannelValue(group, name, value):
    """Register *value* as a per-channel setting and eagerly load any
    overrides already present in the registry cache so they always
    persist."""
    value._supplyDefault = True
    value.serverValue = False
    value.channelValue = True
    registered = group.register(name, value)
    prefix = registered._name.lower()
    for cached in registry._cache.keys():
        if not cached.lower().startswith(prefix) or len(cached) <= len(prefix):
            continue
        remainder = cached[len(prefix) + 1:]  # +1 skips the joining '.'
        parts = registry.split(remainder)
        if len(parts) == 1 and validServer(parts[0]):
            # touching the cached value keeps it alive across flushes
            registered.get(parts[0])()
def registerPlugin(name, currentValue=None, public=True):
    """Register plugin *name* in the config tree.

    Creates red.plugins.<name> (load-on-startup flag, optionally forced
    to *currentValue*), its 'public' sub-value, and a matching group
    under users.plugins. Returns the plugin's config group.
    """
    group = registerGlobalValue(red.plugins, name,
        registry.Boolean(False, ("""Determines whether this plugin is loaded
        by default."""), showDefault=False))
    red.plugins().add(name)
    registerGlobalValue(group, 'public',
        registry.Boolean(public, ("""Determines whether this plugin is
        publicly visible.""")))
    if currentValue is not None:
        red.plugins.get(name).setValue(currentValue)
    # mirror group for per-user plugin settings
    registerGroup(users.plugins, name)
    return group
def get(group, server=None, channel=None):
    """Resolve *group* to a concrete value, preferring a server-specific
    value, then a channel-specific one, then the global default."""
    if group.serverValue and server is not None:
        return group.get(server)()
    if group.channelValue and channel is not None:
        return group.get(channel)()
    return group()
###
# The user info registry.
###
# Root group for per-user settings; plugin user values live under
# users.plugins.<PluginName>.
users = registry.Group()
users.setName('users')
registerGroup(users, 'plugins', orderAlphabetically=True)
def registerUserValue(group, name, value):
    """Register *value* under the per-user ('users') registry tree."""
    assert group._name.startswith('users')
    # user values always supply a default so lookups never fail
    value._supplyDefault = True
    group.register(name, value)
registerGlobalValue(red, 'nick',
registry.String('red', ("""Determines the bot's default nick.""")))
registerGlobalValue(red, 'owner',
registry.String('red', ("""Owner ID used to identify an owner on
Discord network.""")))
registerGlobalValue(red, 'email',
registry.String('red', ("""Email used to login."""),private=True))
registerGlobalValue(red, 'password',
registry.String('red', ("""Password used to login."""),private=True))
###
# Reply/error tweaking.
###
registerGroup(red, 'reply')
# red.reply.format: how pieces of replies (urls, timestamps, ...) are rendered
registerGroup(red.reply, 'format')
registerServerValue(red.reply.format, 'url',
    registry.String('<%s>', ("""Determines how urls should be formatted.""")))
def url(s):
    """Wrap *s* in the configured URL format; empty input yields ''."""
    if not s:
        return ''
    return red.reply.format.url() % s
utils.str.url = url
registerServerValue(red.reply.format, 'time',
registry.String('%Y-%m-%dT%H:%M:%S%z', ("""Determines how timestamps
printed for human reading should be formatted. Refer to the Python
documentation for the time module to see valid formatting characters for
time formats.""")))
def timestamp(t):
    """Format timestamp *t* (epoch seconds/float, struct_time, or None
    for 'now') using the server-configurable red.reply.format.time
    pattern."""
    if t is None:
        t = time.time()
    if isinstance(t, float) or isinstance(t, int):
        t = time.localtime(t)
    # NOTE(review): 'dynamic' is not imported in this file -- presumably a
    # dynamicScope-style global injected elsewhere; confirm before relying
    # on this call path.
    format = get(red.reply.format.time, dynamic.server)
    return time.strftime(format, t)
utils.str.timestamp = timestamp
registerGroup(red.reply.format.time, 'elapsed')
registerServerValue(red.reply.format.time.elapsed, 'short',
registry.Boolean(False, ("""Determines whether elapsed times will be given
as "1 day, 2 hours, 3 minutes, and 15 seconds" or as "1d 2h 3m 15s".""")))
# Monkey-patch utils.timeElapsed so the configured 'short' flag is
# injected on every call; the original is kept for delegation.
originalTimeElapsed = utils.timeElapsed
def timeElapsed(*args, **kwargs):
    """Wrapper around utils.timeElapsed that forces the configured
    elapsed-time 'short' formatting flag."""
    kwargs['short'] = red.reply.format.time.elapsed.short()
    return originalTimeElapsed(*args, **kwargs)
utils.timeElapsed = timeElapsed
registerGlobalValue(red.reply, 'maximumLength',
registry.Integer(2000, ("""Determines the absolute maximum length of
the bot's reply -- no reply will be passed through the bot with a length
greater than this.""")))
registerServerValue(red.reply, 'mores',
registry.Boolean(True, ("""Determines whether the bot will break up long
messages into chunks and allow users to use the 'more' command to get the
remaining chunks.""")))
registerServerValue(red.reply.mores, 'maximum',
registry.PositiveInteger(50, ("""Determines what the maximum number of
chunks (for use with the 'more' command) will be.""")))
registerServerValue(red.reply.mores, 'length',
registry.NonNegativeInteger(0, ("""Determines how long individual chunks
will be. If set to 0, uses our super-tweaked,
get-the-most-out-of-an-individual-message default.""")))
registerServerValue(red.reply.mores, 'instant',
registry.PositiveInteger(1, ("""Determines how many mores will be sent
instantly (i.e., without the use of the more command, immediately when
they are formed). Defaults to 1, which means that a more command will be
required for all but the first chunk.""")))
registerServerValue(red.reply, 'oneToOne',
registry.Boolean(True, ("""Determines whether the bot will send
multi-message replies in a single message. This defaults to True
in order to prevent the bot from flooding. If this is set to False
the bot will send multi-message replies on multiple lines.""")))
registerServerValue(red.reply, 'whenNotCommand',
registry.Boolean(True, ("""Determines whether the bot will reply with an
error message when it is addressed but not given a valid command. If this
value is False, the bot will remain silent, as long as no other plugins
override the normal behavior.""")))
registerGroup(red.reply, 'error')
registerGlobalValue(red.reply.error, 'detailed',
registry.Boolean(False, ("""Determines whether error messages that result
from bugs in the bot will show a detailed error message (the uncaught
exception) or a generic error message.""")))
registerServerValue(red.reply.error, 'inPrivate',
registry.Boolean(False, ("""Determines whether the bot will send error
messages to users in private. You might want to do this in order to keep
server traffic to minimum. This can be used in combination with
red.reply.error.withNotice.""")))
registerServerValue(red.reply.error, 'noCapability',
registry.Boolean(False, ("""Determines whether the bot will *not* provide
details in the error
message to users who attempt to call a command for which they do not have
the necessary capability. You may wish to make this True if you don't want
users to understand the underlying security system preventing them from
running certain commands.""")))
registerServerValue(red.reply, 'inPrivate',
registry.Boolean(False, ("""Determines whether the bot will reply
privately when replying in a server, rather than replying to the whole
server.""")))
registerServerValue(red.reply, 'withNickPrefix',
registry.Boolean(True, ("""Determines whether the bot will always prefix
the user's nick to its reply to that user's command.""")))
registerServerValue(red.reply, 'whenNotAddressed',
registry.Boolean(False, ("""Determines whether the bot should attempt to
reply to all messages even if they don't address it (either via its nick
or a prefix character). If you set this to True, you almost certainly want
to set red.reply.whenNotCommand to False.""")))
registerServerValue(red.reply, 'requireServerCommandsToBeSentInServer',
registry.Boolean(False, ("""Determines whether the bot will allow you to
send server-related commands outside of that server. Sometimes people
find it confusing if a server-related command (like Filter.outfilter)
changes the behavior of the server but was sent outside the server
itself.""")))
registerServerValue(red, 'alwaysJoinOnInvite',
registry.Boolean(False, ("""Determines whether the bot will always join a
server when it's invited. If this value is False, the bot will only join
a server if the user inviting it has the 'admin' capability (or if it's
explicitly told to join the server using the Admin.join command).""")))
registerServerValue(red.reply, 'showSimpleSyntax',
registry.Boolean(False, ("""red normally replies with the full help
whenever a user misuses a command. If this value is set to True, the bot
will only reply with the syntax of the command (the first line of the
help) rather than the full help.""")))
class ValidPrefixChars(registry.String):
    """Value must contain only `~!@#$%^&*()_-+=[{}]\\|'\";:,<.>/?"""
    # The docstring doubles as the user-facing validation message, so it must
    # list exactly the characters setValue() accepts.  The original message
    # omitted the backtick even though the check below allows it.
    def setValue(self, v):
        # Reject the value if any character falls outside the allowed set.
        # (Generator form avoids building a throwaway list.)
        if any(x not in '`~!@#$%^&*()_-+=[{}]\\|\'";:,<.>/?' for x in v):
            self.error()
        registry.String.setValue(self, v)
registerGroup(red.reply, 'whenAddressedBy')
registerServerValue(red.reply.whenAddressedBy, 'chars',
ValidPrefixChars('', ("""Determines what prefix characters the bot will
reply to. A prefix character is a single character that the bot will use
to determine what messages are addressed to it; when there are no prefix
characters set, it just uses its nick. Each character in this string is
interpreted individually; you can have multiple prefix chars
simultaneously, and if any one of them is used as a prefix the bot will
assume it is being addressed.""")))
registerServerValue(red.reply.whenAddressedBy, 'strings',
registry.SpaceSeparatedSetOfStrings([], ("""Determines what strings the
bot will reply to when they | |
<reponame>SomiAfiuni/CPplugins
# coding=utf-8
"""
ColorToGray
===========
**ColorToGray** converts an image with multiple color channels to one or more
grayscale images.
This module converts color and channel-stacked
images to grayscale. All channels can be merged into one grayscale image
(*Combine*), or each channel can be extracted into a separate grayscale image
(*Split*). If you use *Combine*, the relative weights you provide allow
adjusting the contribution of the colors relative to each other.
Note that all **Identify** modules require grayscale images.
|
============ ============ ===============
Supports 2D? Supports 3D? Respects masks?
============ ============ ===============
YES NO NO
============ ============ ===============
See also
^^^^^^^^
See also **GrayToColor**.
"""
import re
import matplotlib.colors
import numpy as np
import cellprofiler.image as cpi
import cellprofiler.module as cpm
import cellprofiler.setting as cps
from cellprofiler.setting import YES, NO
# Conversion-method choices shown in the UI.
COMBINE = "Combine"
SPLIT = "Split"
# Image-type choices.
CH_RGB = "RGB"
CH_HSV = "HSV"
CH_CHANNELS = "Channels"
# Upper bound on selectable channel indices in the per-channel dropdown.
MAX_CHANNELS_PER_IMAGE = 60
# NOTE(review): slot-layout constants -- presumably describe the legacy
# settings layout for pipeline upgrades; they are not used in the visible code.
SLOT_CHANNEL_COUNT = 19
SLOT_FIXED_COUNT = 20
SLOTS_PER_CHANNEL = 3
SLOT_CHANNEL_CHOICE = 0
class ColorToGray(cpm.Module):
    # NOTE(review): the " bb" suffix in the display name looks like a leftover
    # test/debug marker -- confirm before shipping (it is a runtime string, so
    # it is left untouched here).
    module_name = "ColorToGray bb"
    variable_revision_number = 3
    category = "Image Processing"
    def create_settings(self):
        """Construct every setting this module exposes.

        Settings fall into three groups: the always-visible input/method
        choices, the "combine" weights, and the per-channel "split" settings
        (RGB, HSV, or an arbitrary channel list).
        """
        self.image_name = cps.ImageNameSubscriber(
            "Select the input image", cps.NONE, doc="""Select the multichannel image you want to convert to grayscale.""")
        self.combine_or_split = cps.Choice(
            "Conversion method",
            [COMBINE, SPLIT], doc='''\
How do you want to convert the color image?
- *%(SPLIT)s:* Splits the channels of a color
image (e.g., red, green, blue) into separate grayscale images.
- *%(COMBINE)s:* Converts a color image to a grayscale image by
combining channels together (e.g., red, green, blue).''' % globals())
        self.rgb_or_channels = cps.Choice(
            "Image type", [CH_RGB, CH_HSV, CH_CHANNELS], doc="""\
This setting provides three options to choose from:
- *%(CH_RGB)s:* The RGB (red, green, blue) color space is the typical
model in which color images are stored. Choosing this option will
split the image into red, green, and blue component images.
- *%(CH_HSV)s:* The HSV (hue, saturation, value) color space is based
on color characteristics such as tint, shade, and tone.
Choosing this option will split the image into the hue,
saturation, and value component images.
- *%(CH_CHANNELS)s:* Many images contain color channels other than RGB
or HSV. For instance, GIF and PNG formats can have an alpha
channel that encodes transparency. TIF formats can have an arbitrary
number of channels which represent pixel measurements made by
different detectors, filters or lighting conditions. This setting
allows you to handle a more complex model for images that
have more than three channels.""" % globals())
        # The following settings are used for the combine option
        self.grayscale_name = cps.ImageNameProvider(
            "Name the output image", "OrigGray", doc="""\
*(Used only when combining channels)*
Enter a name for the resulting grayscale image.""")
        self.red_contribution = cps.Float(
            "Relative weight of the red channel",
            1, 0, doc='''\
*(Used only when combining channels)*
Relative weights: If all relative weights are equal, all three colors
contribute equally in the final image. To weight colors relative to each
other, increase or decrease the relative weights.''')
        self.green_contribution = cps.Float(
            "Relative weight of the green channel",
            1, 0, doc='''\
*(Used only when combining channels)*
Relative weights: If all relative weights are equal, all three colors
contribute equally in the final image. To weight colors relative to each
other, increase or decrease the relative weights.''')
        self.blue_contribution = cps.Float(
            "Relative weight of the blue channel",
            1, 0, doc='''\
*(Used only when combining channels)*
Relative weights: If all relative weights are equal, all three colors
contribute equally in the final image. To weight colors relative to each
other, increase or decrease the relative weights.''')
        # The following settings are used for the split RGB option
        self.use_red = cps.Binary('Convert red to gray?', True, doc="""\
*(Used only when splitting RGB images)*
Select *"%(YES)s"* to extract the red channel to grayscale. Otherwise, the
red channel will be ignored.
""" % globals())
        self.red_name = cps.ImageNameProvider('Name the output image', "OrigRed", doc="""\
*(Used only when splitting RGB images)*
Enter a name for the resulting grayscale image coming from the red channel.""")
        self.use_green = cps.Binary('Convert green to gray?', True, doc="""\
*(Used only when splitting RGB images)*
Select *"%(YES)s"* to extract the green channel to grayscale. Otherwise, the
green channel will be ignored.
""" % globals())
        self.green_name = cps.ImageNameProvider('Name the output image', "OrigGreen", doc="""\
*(Used only when splitting RGB images)*
Enter a name for the resulting grayscale image coming from the green channel.""")
        self.use_blue = cps.Binary('Convert blue to gray?', True, doc="""\
*(Used only when splitting RGB images)*
Select *"%(YES)s"* to extract the blue channel to grayscale. Otherwise, the
blue channel will be ignored.
""" % globals())
        self.blue_name = cps.ImageNameProvider('Name the output image', "OrigBlue", doc="""\
*(Used only when splitting RGB images)*
Enter a name for the resulting grayscale image coming from the blue channel.""")
        # The following settings are used for the split HSV option
        self.use_hue = cps.Binary('Convert hue to gray?', True, doc="""\
*(Used only when splitting HSV images)*
Select *"%(YES)s"* to extract the hue to grayscale. Otherwise, the hue
will be ignored.
""" % globals())
        self.hue_name = cps.ImageNameProvider('Name the output image', "OrigHue", doc="""\
*(Used only when splitting HSV images)*
Enter a name for the resulting grayscale image coming from the hue.""")
        self.use_saturation = cps.Binary('Convert saturation to gray?', True, doc="""\
*(Used only when splitting HSV images)*
Select *"%(YES)s"* to extract the saturation to grayscale. Otherwise, the
saturation will be ignored.
""" % globals())
        self.saturation_name = cps.ImageNameProvider('Name the output image', "OrigSaturation", doc="""\
*(Used only when splitting HSV images)*
Enter a name for the resulting grayscale image coming from the saturation.""")
        self.use_value = cps.Binary('Convert value to gray?', True, doc="""\
*(Used only when splitting HSV images)*
Select *"%(YES)s"* to extract the value to grayscale. Otherwise, the
value will be ignored.
""" % globals())
        self.value_name = cps.ImageNameProvider('Name the output image', "OrigValue", doc="""\
*(Used only when splitting HSV images)*
Enter a name for the resulting grayscale image coming from the value.""")
        # The alternative model:
        self.channels = []
        # Seed with one non-removable channel (can_remove=False).
        self.add_channel(False)
        self.channel_button = cps.DoSomething(
            "", "Add another channel", self.add_channel)
        self.channel_count = cps.HiddenCount(self.channels, "Channel count")
channel_names = (["Red: 1", "Green: 2", "Blue: 3", "Alpha: 4"] +
[str(x) for x in range(5, MAX_CHANNELS_PER_IMAGE+1)])
    def add_channel(self, can_remove=True):
        '''Add another channel to the channels list

        can_remove -- True (the default) to append a "Remove this channel"
                      button to the group; the initial channel is created
                      with False so at least one channel always remains.
        '''
        group = cps.SettingsGroup()
        group.can_remove = can_remove
        # Default choice cycles through channel_names so each added channel
        # starts on a different channel number.
        group.append("channel_choice", cps.Choice(
            "Channel number", self.channel_names,
            self.channel_names[len(self.channels) % len(self.channel_names)], doc="""\
*(Used only when splitting images)*
This setting chooses a channel to be processed. For example, *Red: 1*
is the first
channel in a .TIF or the red channel in a traditional image file.
*Green: 2* and *Blue: 3* are the second and third channels of a TIF or
the green and blue channels in other formats. *Alpha: 4* is the
transparency channel for image formats that support transparency and is
channel # 4 for a .TIF file. **ColorToGray** will fail to process an
image if you select a channel that is not supported by that image, for
example, “5” for a three-channel .PNG file."""))
        group.append("contribution", cps.Float(
            "Relative weight of the channel", 1, 0, doc='''\
*(Used only when combining channels)*
Relative weights: If all relative weights are equal, all three colors
contribute equally in the final image. To weight colors relative to each
other, increase or decrease the relative weights.'''))
        group.append("image_name", cps.ImageNameProvider(
            "Image name", value="Channel%d" % (len(self.channels) + 1), doc="""\
*(Used only when splitting images)*
Select the name of the output grayscale image."""))
        if group.can_remove:
            group.append("remover", cps.RemoveSettingButton(
                "", "Remove this channel", self.channels, group))
        self.channels.append(group)
def visible_settings(self):
"""Return either the "combine" or the "split" settings"""
vv = [self.image_name, self.combine_or_split]
if self.should_combine():
vv += [self.grayscale_name, self.rgb_or_channels]
if self.rgb_or_channels in (CH_RGB, CH_HSV):
vv.extend([self.red_contribution,
self.green_contribution, self.blue_contribution])
else:
for channel in self.channels:
vv += [channel.channel_choice, channel.contribution]
if channel.can_remove:
vv += [channel.remover]
vv += [self.channel_button]
else:
vv += [self.rgb_or_channels]
if self.rgb_or_channels == CH_RGB:
for v_use, v_name in ((self.use_red, self.red_name),
(self.use_green, self.green_name),
(self.use_blue, self.blue_name)):
vv.append(v_use)
if v_use.value:
vv.append(v_name)
elif self.rgb_or_channels == CH_HSV:
for v_use, v_name in ((self.use_hue, self.hue_name),
(self.use_saturation, self.saturation_name),
(self.use_value, self.value_name)):
vv.append(v_use)
if v_use.value:
vv.append(v_name)
else:
for channel in self.channels:
vv += [channel.channel_choice, channel.image_name]
if channel.can_remove:
vv += [channel.remover]
vv += [self.channel_button]
return vv
def settings(self):
"""Return all of the settings in a consistent order"""
return [self.image_name, self.combine_or_split,
self.rgb_or_channels,
self.grayscale_name, self.red_contribution,
self.green_contribution, self.blue_contribution,
self.use_red, self.red_name,
self.use_green, self.green_name,
self.use_blue, self.blue_name,
self.use_hue, self.hue_name,
self.use_saturation, self.saturation_name,
self.use_value, self.value_name,
self.channel_count
] + sum([[channel.channel_choice, channel.contribution,
channel.image_name] for channel in self.channels],
[])
    def should_combine(self):
        """True if we are supposed to combine RGB to gray
        (i.e. the conversion-method choice is set to COMBINE)."""
        return self.combine_or_split == COMBINE
    def should_split(self):
        """True if we are supposed to split each color into an image
        (i.e. the conversion-method choice is set to SPLIT)."""
        return self.combine_or_split == SPLIT
def validate_module(self, pipeline):
"""Test to see if the module is in a valid state to run
Throw a ValidationError exception with an | |
None:
transform = Matrix3()
result = dll.pixie_path_compute_bounds(self, transform)
if check_error():
raise PixieError(take_error())
return result
def fill_overlaps(self, test, transform = None, winding_rule = WR_NON_ZERO):
"""
Returns whether or not the specified point is contained in the current path.
"""
if transform is None:
transform = Matrix3()
result = dll.pixie_path_fill_overlaps(self, test, transform, winding_rule)
if check_error():
raise PixieError(take_error())
return result
def stroke_overlaps(self, test, transform = None, stroke_width = 1.0, line_cap = LC_BUTT, line_join = LJ_MITER, miter_limit = DEFAULT_MITER_LIMIT, dashes = None):
"""
Returns whether or not the specified point is inside the area contained
by the stroking of a path.
"""
if transform is None:
transform = Matrix3()
if dashes is None:
dashes = SeqFloat32()
result = dll.pixie_path_stroke_overlaps(self, test, transform, stroke_width, line_cap, line_join, miter_limit, dashes)
if check_error():
raise PixieError(take_error())
return result
    # Path-construction wrappers: each forwards directly to the native
    # library and mutates the current path in place (no return value).
    def move_to(self, x, y):
        """
        Begins a new sub-path at the point (x, y).
        """
        dll.pixie_path_move_to(self, x, y)

    def line_to(self, x, y):
        """
        Adds a straight line to the current sub-path by connecting the sub-path's
        last point to the specified (x, y) coordinates.
        """
        dll.pixie_path_line_to(self, x, y)

    def bezier_curve_to(self, x_1, y_1, x_2, y_2, x_3, y_3):
        """
        Adds a cubic Bézier curve to the current sub-path. It requires three
        points: the first two are control points and the third one is the end
        point. The starting point is the latest point in the current path,
        which can be changed using moveTo() before creating the Bézier curve.
        """
        dll.pixie_path_bezier_curve_to(self, x_1, y_1, x_2, y_2, x_3, y_3)

    def quadratic_curve_to(self, x_1, y_1, x_2, y_2):
        """
        Adds a quadratic Bézier curve to the current sub-path. It requires two
        points: the first one is a control point and the second one is the end
        point. The starting point is the latest point in the current path,
        which can be changed using moveTo() before creating the quadratic
        Bézier curve.
        """
        dll.pixie_path_quadratic_curve_to(self, x_1, y_1, x_2, y_2)

    def elliptical_arc_to(self, rx, ry, x_axis_rotation, large_arc_flag, sweep_flag, x, y):
        """
        Adds an elliptical arc to the current sub-path, using the given radius
        ratios, sweep flags, and end position.
        """
        dll.pixie_path_elliptical_arc_to(self, rx, ry, x_axis_rotation, large_arc_flag, sweep_flag, x, y)

    # NOTE(review): arc() and arc_to() are the only builders here that consult
    # check_error() -- presumably the native side validates their arguments.
    def arc(self, x, y, r, a_0, a_1, ccw = False):
        """
        Adds a circular arc to the current sub-path.
        Raises PixieError if the native call reports an error.
        """
        dll.pixie_path_arc(self, x, y, r, a_0, a_1, ccw)
        if check_error():
            raise PixieError(take_error())

    def arc_to(self, x_1, y_1, x_2, y_2, r):
        """
        Adds a circular arc using the given control points and radius.
        Commonly used for making rounded corners.
        Raises PixieError if the native call reports an error.
        """
        dll.pixie_path_arc_to(self, x_1, y_1, x_2, y_2, r)
        if check_error():
            raise PixieError(take_error())

    def rect(self, x, y, w, h, clockwise = True):
        """
        Adds a rectangle.
        Clockwise param can be used to subtract a rect from a path when using
        even-odd winding rule.
        """
        dll.pixie_path_rect(self, x, y, w, h, clockwise)

    def rounded_rect(self, x, y, w, h, nw, ne, se, sw, clockwise = True):
        """
        Adds a rounded rectangle.
        Clockwise param can be used to subtract a rect from a path when using
        even-odd winding rule.
        """
        dll.pixie_path_rounded_rect(self, x, y, w, h, nw, ne, se, sw, clockwise)

    def ellipse(self, cx, cy, rx, ry):
        """
        Adds a ellipse.
        """
        dll.pixie_path_ellipse(self, cx, cy, rx, ry)

    def circle(self, cx, cy, r):
        """
        Adds a circle.
        """
        dll.pixie_path_circle(self, cx, cy, r)

    def polygon(self, x, y, size, sides):
        """
        Adds an n-sided regular polygon at (x, y) with the parameter size.
        """
        dll.pixie_path_polygon(self, x, y, size, sides)
class Typeface(Structure):
    """Reference-counted handle to a typeface in the native pixie library."""
    _fields_ = [("ref", c_ulonglong)]

    def __bool__(self):
        # BUG FIX: the original compared the integer ref against None, which
        # is always True.  A null (0) handle must evaluate as falsy.
        return self.ref != 0

    def __eq__(self, obj):
        # Tolerate comparison against None / unrelated types instead of
        # raising AttributeError on obj.ref.
        if not isinstance(obj, Typeface):
            return NotImplemented
        return self.ref == obj.ref

    def __del__(self):
        dll.pixie_typeface_unref(self)

    @property
    def file_path(self):
        return dll.pixie_typeface_get_file_path(self).decode("utf8")

    @file_path.setter
    def file_path(self, file_path):
        dll.pixie_typeface_set_file_path(self, file_path.encode("utf8"))

    def ascent(self):
        """
        The font ascender value in font units.
        """
        return dll.pixie_typeface_ascent(self)

    def descent(self):
        """
        The font descender value in font units.
        """
        return dll.pixie_typeface_descent(self)

    def line_gap(self):
        """
        The font line gap value in font units.
        """
        return dll.pixie_typeface_line_gap(self)

    def line_height(self):
        """
        The default line height in font units.
        """
        return dll.pixie_typeface_line_height(self)

    def has_glyph(self, rune):
        """
        Returns if there is a glyph for this rune.
        """
        return dll.pixie_typeface_has_glyph(self, rune)

    def get_glyph_path(self, rune):
        """
        The glyph path for the rune.
        Raises PixieError if the native call reports an error.
        """
        result = dll.pixie_typeface_get_glyph_path(self, rune)
        if check_error():
            raise PixieError(take_error())
        return result

    def get_advance(self, rune):
        """
        The advance for the rune in pixels.
        """
        return dll.pixie_typeface_get_advance(self, rune)

    def get_kerning_adjustment(self, left, right):
        """
        The kerning adjustment for the rune pair, in pixels.
        """
        return dll.pixie_typeface_get_kerning_adjustment(self, left, right)

    def new_font(self):
        """Create a new Font wrapping this typeface."""
        return dll.pixie_typeface_new_font(self)
class Font(Structure):
    """Reference-counted handle pairing a typeface with size/style state."""
    _fields_ = [("ref", c_ulonglong)]

    def __bool__(self):
        # BUG FIX: the original compared the integer ref against None, which
        # is always True.  A null (0) handle must evaluate as falsy.
        return self.ref != 0

    def __eq__(self, obj):
        # Tolerate comparison against None / unrelated types instead of
        # raising AttributeError on obj.ref.
        if not isinstance(obj, Font):
            return NotImplemented
        return self.ref == obj.ref

    def __del__(self):
        dll.pixie_font_unref(self)

    @property
    def typeface(self):
        return dll.pixie_font_get_typeface(self)

    @typeface.setter
    def typeface(self, typeface):
        dll.pixie_font_set_typeface(self, typeface)

    @property
    def size(self):
        return dll.pixie_font_get_size(self)

    @size.setter
    def size(self, size):
        dll.pixie_font_set_size(self, size)

    @property
    def line_height(self):
        return dll.pixie_font_get_line_height(self)

    @line_height.setter
    def line_height(self, line_height):
        dll.pixie_font_set_line_height(self, line_height)

    class FontPaints:
        """List-like live view over the font's paint sequence."""
        def __init__(self, font):
            self.font = font

        def __len__(self):
            return dll.pixie_font_paints_len(self.font)

        def __getitem__(self, index):
            return dll.pixie_font_paints_get(self.font, index)

        def __setitem__(self, index, value):
            dll.pixie_font_paints_set(self.font, index, value)

        def __delitem__(self, index):
            dll.pixie_font_paints_delete(self.font, index)

        def append(self, value):
            dll.pixie_font_paints_add(self.font, value)

        def clear(self):
            dll.pixie_font_paints_clear(self.font)

    @property
    def paints(self):
        return self.FontPaints(self)

    @property
    def paint(self):
        return dll.pixie_font_get_paint(self)

    @paint.setter
    def paint(self, paint):
        dll.pixie_font_set_paint(self, paint)

    @property
    def text_case(self):
        return dll.pixie_font_get_text_case(self)

    @text_case.setter
    def text_case(self, text_case):
        dll.pixie_font_set_text_case(self, text_case)

    @property
    def underline(self):
        return dll.pixie_font_get_underline(self)

    @underline.setter
    def underline(self, underline):
        dll.pixie_font_set_underline(self, underline)

    @property
    def strikethrough(self):
        return dll.pixie_font_get_strikethrough(self)

    @strikethrough.setter
    def strikethrough(self, strikethrough):
        dll.pixie_font_set_strikethrough(self, strikethrough)

    @property
    def no_kerning_adjustments(self):
        return dll.pixie_font_get_no_kerning_adjustments(self)

    @no_kerning_adjustments.setter
    def no_kerning_adjustments(self, no_kerning_adjustments):
        dll.pixie_font_set_no_kerning_adjustments(self, no_kerning_adjustments)

    def scale(self):
        """
        The scale factor to transform font units into pixels.
        """
        return dll.pixie_font_scale(self)

    def default_line_height(self):
        """
        The default line height in pixels for the current font size.
        """
        return dll.pixie_font_default_line_height(self)

    def typeset(self, text, bounds = None, h_align = HA_LEFT, v_align = VA_TOP, wrap = True):
        """
        Lays out the character glyphs and returns the arrangement.
        Optional parameters:
        bounds: width determines wrapping and hAlign, height for vAlign
        hAlign: horizontal alignment of the text
        vAlign: vertical alignment of the text
        wrap: enable/disable text wrapping
        """
        if bounds is None:
            bounds = Vector2(0, 0)
        return dll.pixie_font_typeset(self, text.encode("utf8"), bounds, h_align, v_align, wrap)

    def compute_bounds(self, text):
        """
        Computes the width and height of the text in pixels.
        """
        return dll.pixie_font_compute_bounds(self, text.encode("utf8"))
class Span(Structure):
    """Reference-counted handle to a (text, font) span for rich typesetting."""
    _fields_ = [("ref", c_ulonglong)]

    def __bool__(self):
        # BUG FIX: the original compared the integer ref against None, which
        # is always True.  A null (0) handle must evaluate as falsy.
        return self.ref != 0

    def __eq__(self, obj):
        # Tolerate comparison against None / unrelated types instead of
        # raising AttributeError on obj.ref.
        if not isinstance(obj, Span):
            return NotImplemented
        return self.ref == obj.ref

    def __del__(self):
        dll.pixie_span_unref(self)

    def __init__(self, text, font):
        result = dll.pixie_new_span(text.encode("utf8"), font)
        self.ref = result

    @property
    def text(self):
        return dll.pixie_span_get_text(self).decode("utf8")

    @text.setter
    def text(self, text):
        dll.pixie_span_set_text(self, text.encode("utf8"))

    @property
    def font(self):
        return dll.pixie_span_get_font(self)

    @font.setter
    def font(self, font):
        dll.pixie_span_set_font(self, font)
class Arrangement(Structure):
    """Reference-counted handle to a laid-out block of glyphs."""
    _fields_ = [("ref", c_ulonglong)]

    def __bool__(self):
        # BUG FIX: the original compared the integer ref against None, which
        # is always True.  A null (0) handle must evaluate as falsy.
        return self.ref != 0

    def __eq__(self, obj):
        # Tolerate comparison against None / unrelated types instead of
        # raising AttributeError on obj.ref.
        if not isinstance(obj, Arrangement):
            return NotImplemented
        return self.ref == obj.ref

    def __del__(self):
        dll.pixie_arrangement_unref(self)

    def compute_bounds(self):
        """
        Computes the width and height of the arrangement in pixels.
        """
        return dll.pixie_arrangement_compute_bounds(self)
class Context(Structure):
    """Reference-counted handle to a canvas-style 2D drawing context."""
    _fields_ = [("ref", c_ulonglong)]

    def __bool__(self):
        # BUG FIX: the original compared the integer ref against None, which
        # is always True.  A null (0) handle must evaluate as falsy.
        return self.ref != 0

    def __eq__(self, obj):
        # Tolerate comparison against None / unrelated types instead of
        # raising AttributeError on obj.ref.
        if not isinstance(obj, Context):
            return NotImplemented
        return self.ref == obj.ref

    def __del__(self):
        dll.pixie_context_unref(self)

    def __init__(self, width, height):
        """Create a new context backed by a width x height image.

        Raises PixieError if the native allocation fails.
        """
        result = dll.pixie_new_context(width, height)
        if check_error():
            raise PixieError(take_error())
        self.ref = result
    # Thin property wrappers over the native canvas state getters/setters.
    # NOTE(review): `font` is a UTF-8 string (presumably a font file path --
    # confirm against the native API), not a Font object.
    @property
    def image(self):
        return dll.pixie_context_get_image(self)

    @image.setter
    def image(self, image):
        dll.pixie_context_set_image(self, image)

    @property
    def fill_style(self):
        return dll.pixie_context_get_fill_style(self)

    @fill_style.setter
    def fill_style(self, fill_style):
        dll.pixie_context_set_fill_style(self, fill_style)

    @property
    def stroke_style(self):
        return dll.pixie_context_get_stroke_style(self)

    @stroke_style.setter
    def stroke_style(self, stroke_style):
        dll.pixie_context_set_stroke_style(self, stroke_style)

    @property
    def global_alpha(self):
        return dll.pixie_context_get_global_alpha(self)

    @global_alpha.setter
    def global_alpha(self, global_alpha):
        dll.pixie_context_set_global_alpha(self, global_alpha)

    @property
    def line_width(self):
        return dll.pixie_context_get_line_width(self)

    @line_width.setter
    def line_width(self, line_width):
        dll.pixie_context_set_line_width(self, line_width)

    @property
    def miter_limit(self):
        return dll.pixie_context_get_miter_limit(self)

    @miter_limit.setter
    def miter_limit(self, miter_limit):
        dll.pixie_context_set_miter_limit(self, miter_limit)

    @property
    def line_cap(self):
        return dll.pixie_context_get_line_cap(self)

    @line_cap.setter
    def line_cap(self, line_cap):
        dll.pixie_context_set_line_cap(self, line_cap)

    @property
    def line_join(self):
        return dll.pixie_context_get_line_join(self)

    @line_join.setter
    def line_join(self, line_join):
        dll.pixie_context_set_line_join(self, line_join)

    @property
    def font(self):
        return dll.pixie_context_get_font(self).decode("utf8")

    @font.setter
    def font(self, font):
        dll.pixie_context_set_font(self, font.encode("utf8"))

    @property
    def font_size(self):
        return dll.pixie_context_get_font_size(self)

    @font_size.setter
    def font_size(self, font_size):
        dll.pixie_context_set_font_size(self, font_size)

    @property
    def text_align(self):
        return dll.pixie_context_get_text_align(self)

    @text_align.setter
    def text_align(self, text_align):
        dll.pixie_context_set_text_align(self, text_align)
def save(self):
"""
Saves the entire state of the context by pushing the current state | |
<reponame>guochaoxu2019/POVME3.0
#!python
# POVME 3.0 is released under the GNU General Public License (see http://www.gnu.org/licenses/gpl.html).
# If you have any questions, comments, or suggestions, please don't hesitate to contact me,
# <NAME>, at j5wagner [at] ucsd [dot] edu.
#
# If you use POVME in your work, please cite <NAME>., <NAME>, et al.
# (2011). "POVME: An algorithm for measuring binding-pocket volumes." J Mol Graph
# Model 29(5): 773-776.
import math
import sys
import time
import numpy
import POVME.packages.pymolecule.pymolecule as pymolecule
import gzip
import os
import shutil
#import random
import POVME.packages.binana.peel as peel
import multiprocessing
import platform
#from guppy import hpy
#hp=hpy()
try: from cStringIO import StringIO
except: from StringIO import StringIO
from scipy.spatial.distance import cdist
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
version = "3.0.34"

# Keys of the user-parameters dictionary.  The ALL-CAPS values are the
# keywords as they appear in a POVME input file.
GridSpacing = 'GRIDSPACING'
PointsIncludeRegions = 'POINTSINCLUDEREGIONS'
PointsExcludeRegions = 'POINTSEXCLUDEREGIONS'
SaveRegions = 'SAVEREGIONS'
LoadInclusionPointsFilename = 'LOADINCLUSIONPOINTSFILENAME'
LoadSeedPointsFilename = 'LOADSEEDPOINTSFILENAME'
PDBFileName = 'PDBFILENAME'
DistanceCutoff = 'DISTANCECUTOFF'
DefinePocketByLigand = 'DEFINEPOCKETBYLIGAND'
ConvexHullExclusion = 'CONVEXHULLEXCLUSION'
ContiguousPocketSeedRegions = 'CONTIGUOUSPOCKETSEEDREGIONS'
ContiguousPointsCriteria = 'CONTIGUOUSPOINTSCRITERIA'
NumProcessors = 'NUMPROCESSORS'
MaxGrowIterations = 'MAXGROWITERATIONS'
UseDiskNotMemory = 'USEDISKNOTMEMORY'
UsePyhull = 'USEPYHULL'
UseScipyConvexHull = 'USESCIPYCONVEXHULL'
OutputFilenamePrefix = 'OUTPUTFILENAMEPREFIX'
SaveIndividualPocketVolumes = 'SAVEINDIVIDUALPOCKETVOLUMES'
SavePocketVolumesTrajectory = 'SAVEPOCKETVOLUMESTRAJECTORY'
SavePocketVolumesNumpy = 'SAVEPOCKETVOLUMESNUMPY'
OutputEqualNumPointsPerFrame = 'OUTPUTEQUALNUMPOINTSPERFRAME'
SaveTabbedVolumeFile = 'SAVETABBEDVOLUMEFILE'
SaveVolumetricDensityDX = 'SAVEVOLUMETRICDENSITYDX'
SaveVolumetricDensityNpy = 'SAVEVOLUMETRICDENSITYNPY'
SaveColoredMap = 'SAVECOLOREDMAP'
CalculateSurfaceArea = 'CALCULATESURFACEAREA'
CompressOutput = 'COMPRESSOUTPUT'
NumFrames = 'NUMFRAMES'
OutputBasename = 'OUTPUTBASENAME'
# Mixed-case keys -- NOTE(review): presumably internal bookkeeping entries
# rather than input-file keywords; confirm against the parameter parser.
OutputFrameFilenamePrefix = 'OutputFrameFilenamePrefix'
ConvexHullTriangles = 'ConvexHullTriangles'
ConvexHullCrossProducts = 'ConvexHullCrossProducts'
# Keywords for the geometric region specifications.
InclusionSphere = "INCLUSIONSPHERE"
InclusionBox = "INCLUSIONBOX"
InclusionCylinder = "INCLUSIONCYLINDER"
ExclusionSphere = "EXCLUSIONSPHERE"
ExclusionBox = "EXCLUSIONBOX"
ExclusionCylinder = "EXCLUSIONCYLINDER"
SeedSphere = "SEEDSPHERE"
SeedBox = "SEEDBOX"
SeedCylinder = "SEEDCYLINDER"
def log(astr, parameters):
    '''Output POVME statements, either to the screen or to a file

    Arguments:
    astr -- The string to output.
    parameters -- The user-defined parameters.
    '''
    # Print the output to the screen.  The parenthesized form is valid under
    # both Python 2 and Python 3 (the original "print astr" is py2-only).
    print(astr)

    # Mirror the line into the run's output file.  Failures here are
    # deliberately best-effort (the original swallowed everything with a bare
    # except); we narrow to Exception so Ctrl-C still interrupts, and close
    # the handle even if the write fails.
    try:
        if parameters[CompressOutput] == True:
            f = gzip.open(parameters[OutputFilenamePrefix] + 'output.txt.gz', 'ab')
        else:
            f = open(parameters[OutputFilenamePrefix] + 'output.txt', 'a')
        try:
            f.write(astr + "\n")
        finally:
            f.close()
    except Exception:
        pass
def clearLog(parameters):
    '''Remove the log file that may be left over from previous run

    Arguments:
    parameters -- The user-defined parameters.
    '''
    # Open for (over)write to truncate any stale log; close the handle even
    # if the write fails (the original leaked it on error).
    if parameters[CompressOutput] == True:
        f = gzip.open(parameters[OutputFilenamePrefix] + 'output.txt.gz', 'wb')
    else:
        f = open(parameters[OutputFilenamePrefix] + 'output.txt', 'w')
    try:
        f.write('')
    finally:
        f.close()
class Multithreading():
    """A class for running calculations on multiple processors"""

    results = []

    def __init__(self, inputs, num_processors, task_class):
        """Launches a calculation on multiple processors

        Arguments:
        inputs -- A list, containing all the input required for the calculation
        num_processors -- An integer, the requested number of processors to use
        task_class -- A class, the class governing what calculations will be run on a given thread

        Returns:
        Nothing, though the object's self.results list is populated with the calculation results
        """
        self.results = []

        # Multiprocessing is unsupported on Windows here; fall back to serial.
        if num_processors != 1 and (platform.system().upper()[:3] == "WIN" or "NT" in platform.system().upper()):
            print("WARNING: Use of multiple processors is not supported in Windows. Proceeding with one processor...")
            num_processors = 1

        if num_processors == 1:
            # Serial path: run every task in this process.
            single_thread = task_class()
            single_thread.total_num_tasks = len(inputs)
            single_thread.results = []
            for item in inputs:
                single_thread.value_func(item, None)
            self.results = single_thread.results
        else:
            # Parallel path.  num_processors <= 0 means "use every core".
            if num_processors <= 0:
                num_processors = multiprocessing.cpu_count()

            # Reduce the worker count if more were requested than there are tasks.
            if len(inputs) < num_processors:
                print('Too many processors requested (%i requested, vs %i frames to analyze). Lowering number requested.' % (num_processors, len(inputs)))
                num_processors = len(inputs)

            if len(inputs) == 0:
                # Nothing to do.
                self.results = []
                return

            # Deal the inputs round-robin across the workers.
            inputs_divided = {t: [] for t in range(num_processors)}
            for t in range(0, len(inputs), num_processors):
                for t2 in range(num_processors):
                    index = t + t2
                    if index < len(inputs):
                        inputs_divided[t2].append(inputs[index])

            # Shared countdown of still-running workers, plus a results queue.
            running = multiprocessing.Value('i', num_processors)
            mutex = multiprocessing.Lock()
            results_queue = multiprocessing.Queue()  # to keep track of the results

            threads = []
            for i in range(num_processors):
                athread = task_class()
                athread.total_num_tasks = len(inputs)
                threads.append(athread)

            processes = []
            for i in range(num_processors):
                p = multiprocessing.Process(target=threads[i].runit, args=(running, mutex, results_queue, inputs_divided[i]))
                p.daemon = True
                p.start()
                processes.append(p)

            # Poll until every worker has decremented the shared counter.
            # (The original also allocated an unused list of
            # multiprocessing.Array objects and a dead loop variable; both
            # removed.)
            while running.value > 0:
                time.sleep(1)

            # Drain one result chunk per worker into a flat list.
            for thread in threads:
                chunk = results_queue.get()
                self.results.extend(chunk)
class MultithreadingTaskGeneral:
    """A parent class of others that governs what calculations are run on
    each thread.

    Subclasses override value_func() to do the actual work. Results are
    accumulated in self.results and handed back to the parent process via
    the results queue at the end of runit().
    """

    results = []  # kept for backward compatibility; instances get their own list in __init__

    def __init__(self):
        # Per-instance result list: avoids the shared-mutable-class-attribute
        # bug where every instance appended into the same list.
        self.results = []
        # Set by the dispatcher before runit() is invoked.
        self.total_num_tasks = 0

    def runit(self, running, mutex, results_queue, items):
        """Launches the calculations on this thread
        Arguments:
        running -- A multiprocessing.Value object counting live workers
        mutex -- A multiprocessing.Lock object guarding `running`
        results_queue -- A multiprocessing.Queue() object for storing the calculation output
        items -- A list, the input data required for the calculation
        """
        for item in items:
            self.value_func(item, results_queue)
        mutex.acquire()
        running.value -= 1
        if self.results == 'ERROR':
            # BUG FIX: the original did `running = -1`, which only rebound the
            # local name and never signalled the parent; the shared Value must
            # be set so the parent's `while running.value > 0` loop exits.
            running.value = -1
        mutex.release()
        results_queue.put(self.results)

    def value_func(self, item, results_queue):  # so overwriting this function
        """The definition that actually does the work.
        Arguments:
        item -- A list or tuple, the input data required for the calculation
        results_queue -- A multiprocessing.Queue() object for storing the calculation output
        """
        # Subclasses compute a result from `item` and append it to
        # self.results (and/or put it on results_queue).
        pass
class ConvexHull():
    """A class to handle convex-hull calculations"""

    def get_seg_dict_num(self, seg_dict, seg_index):
        """Look up how many triangles a segment currently belongs to.

        seg_dict maps 2x3 tuples (the two endpoints of a segment in space)
        to the number of times the segment has been part of a triangle,
        either 1 or 2; a segment absent from the dictionary has implicitly
        been used zero times.

        Arguments:
        seg_dict -- the dictionary of segment 2x3 tuples as keys, integers as values
        seg_index -- the key of the dictionary member we are going to retrieve
        Returns:
        the stored count for seg_index, or 0 if it is not present
        """
        # Canonicalize so the endpoint with the greater x-value comes first;
        # this keeps each segment under a single key regardless of the order
        # its endpoints were supplied in.
        if seg_index[0][0] > seg_index[1][0]:
            index = seg_index
        else:
            index = seg_index[::-1]
        # Idiomatic single lookup instead of membership test + separate index.
        return seg_dict.get(index, 0)
def increment_seg_dict(self, seg_dict, seg_index):
"""seg_dict is a dictionary object that contains information about segments within the convex hull. The keys are 2x3 tuples, which represent two ends of a segment in space. The values of seg_dict are the number of times a segment has been part of a triangle, either 1 or 2. (Zero times would mean that the segment doesn't exist in the dictionary yet). This function increments the values within seg_dict, or initiates them if they dont exist yet.
Arguments:
seg_dict -- the dictionary of segment 2x3 tuples as keys, integers as values
seg_index -- the key of the dictionary member we are going to increment
Returns:
None: the values of seg_dict are received and modified by reference
"""
if seg_index[0][0] > seg_index[1][0]: # we want the index with the greater x-value, so we don't get identical segments in the dictionary more than once
index = seg_index
else:
index = seg_index[::-1]
#"putting index:", index, "into seg_dict because", index[0][0], ">", index[1][0]
if index in seg_dict: # if the entry already exists in seg_dict
seg_dict[index] += 1 # increment
else:
seg_dict[index] = 1 # initiate with a value of 1 because it now exists on a triangle
return
def gift_wrapping_3d(self, raw_points):
"""Gift wrapping for 3d convex hull
Arguments:
raw_points -- A nx3 array of points, where each row corresponds to an x,y,z point coordinate
Returns:
A convex hull represented by a list of triangles. Each triangle is a 3x3 array, where each row is an x,y,z coordinate in space. The 3 rows describe the location of the 3 corners of the triangle. Each of the 3 points are arranged so that a cross product will point outwards from the hull
"""
n = numpy.shape(raw_points)[0] # number of points
point1 = raw_points[0] # take the first point
xaxis = numpy.array([1,0,0]) # create a ref vector pointing along x axis
maxx = raw_points[0][0] # initiate highest x value
points = [] # a list of tuples for easy | |
<filename>question_generation/generate_questions.py
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import print_function
import argparse
import json
import os
import itertools
import random
import shutil
import time
import re
import question_engine as qeng
"""
Generate synthetic questions and answers for CLEVR images. Input is a single
JSON file containing ground-truth scene information for all images, and output
is a single JSON file containing all generated questions, answers, and programs.
Questions are generated by expanding templates. Each template contains a single
program template and one or more text templates, both with the same set of typed
slots; by convention <Z> = Size, <C> = Color, <M> = Material, <S> = Shape.
Program templates may contain special nodes that expand into multiple functions
during instantiation; for example a "filter" node in a program template will
expand into a combination of "filter_size", "filter_color", "filter_material",
and "filter_shape" nodes after instantiation, and a "filter_unique" node in a
template will expand into some combination of filtering nodes followed by a
"unique" node.
Templates are instantiated using depth-first search; we are looking for template
instantiations where (1) each "unique" node actually refers to a single object,
(2) constraints in the template are satisfied, and (3) the answer to the question
passes our rejection sampling heuristics.
To efficiently handle (1) and (2), we keep track of partial evaluations of the
program during each step of template expansion. This together with the use of
composite nodes in program templates (filter_unique, relate_filter_unique) allow
us to efficiently prune the search space and terminate early when we know that
(1) or (2) will be violated.
"""
# Command-line interface. Parsing itself is deferred (see the commented-out
# parse_args below) so the module can be imported without side effects.
parser = argparse.ArgumentParser()
# Inputs
parser.add_argument('--input_scene_file', default='../output/CLEVR_scenes.json',
    help="JSON file containing ground-truth scene information for all images " +
         "from render_images.py")
parser.add_argument('--metadata_file', default='metadata.json',
    help="JSON file containing metadata about functions")
parser.add_argument('--synonyms_json', default='synonyms.json',
    help="JSON file defining synonyms for parameter values")
parser.add_argument('--template_dir', default='CLEVR_1.0_templates',
    help="Directory containing JSON templates for questions")
# Output
parser.add_argument('--output_questions_file',
    default='../output/CLEVR_questions.json',
    help="The output file to write containing generated questions")
# Control which and how many images to process
parser.add_argument('--scene_start_idx', default=0, type=int,
    help="The image at which to start generating questions; this allows " +
         "question generation to be split across many workers")
parser.add_argument('--num_scenes', default=0, type=int,
    help="The number of images for which to generate questions. Setting to 0 " +
         "generates questions for all scenes in the input file starting from " +
         "--scene_start_idx")
# NOTE(review): dashed flag name; argparse exposes it as args.max_num_objects.
parser.add_argument('--max-num-objects', type=int, required=True,
    help="The maximum number of objects in a scene. Should be the same as the parameter you used in image generation.")
# Control the number of questions per image; we will attempt to generate
# templates_per_image * instances_per_template questions per image.
parser.add_argument('--templates_per_image', default=10, type=int,
    help="The number of different templates that should be instantiated " +
         "on each image")
parser.add_argument('--instances_per_template', default=1, type=int,
    help="The number of times each template should be instantiated on an image")
# Misc
parser.add_argument('--reset_counts_every', default=250, type=int,
    help="How often to reset template and answer counts. Higher values will " +
         "result in flatter distributions over templates and answers, but " +
         "will result in longer runtimes.")
parser.add_argument('--verbose', action='store_true',
    help="Print more verbose output")
parser.add_argument('--time_dfs', action='store_true',
    help="Time each depth-first search; must be given with --verbose")
parser.add_argument('--profile', action='store_true',
    help="If given then run inside cProfile")
# args = parser.parse_args()
def precompute_filter_options(scene_struct, metadata):
  """For every partially-specified attribute tuple (size, color, material,
  shape) -- where unspecified slots are None -- record the set of object
  indices matching it, and cache the mapping on scene_struct under
  '_filter_options'.
  """
  attribute_map = {}
  if metadata['dataset'] == 'CLEVR-v1.0':
    attr_keys = ['size', 'color', 'material', 'shape']
  else:
    assert False, 'Unrecognized dataset'
  # Enumerate all 2**k attribute subsets as bit masks: bit j of i decides
  # whether attribute j is kept or wildcarded to None.
  masks = []
  for i in range(2 ** len(attr_keys)):
    masks.append([(i >> j) & 1 for j in range(len(attr_keys))])
  for object_idx, obj in enumerate(scene_struct['objects']):
    if metadata['dataset'] == 'CLEVR-v1.0':
      keys = [tuple(obj[k] for k in attr_keys)]
    for mask in masks:
      for key in keys:
        masked_key = tuple(a if b == 1 else None for a, b in zip(key, mask))
        attribute_map.setdefault(masked_key, set()).add(object_idx)
  scene_struct['_filter_options'] = attribute_map
def find_filter_options(object_idxs, scene_struct, metadata):
  """Return the precomputed '_filter_options' map with each value
  restricted (and sorted) to the indices appearing in object_idxs.
  Keys are (size, color, material, shape) tuples, possibly with Nones."""
  if '_filter_options' not in scene_struct:
    precompute_filter_options(scene_struct, metadata)
  idx_set = set(object_idxs)
  return {
      filt: sorted(idx_set & matching)
      for filt, matching in scene_struct['_filter_options'].items()
  }
def add_empty_filter_options(attribute_map, metadata, num_to_add):
  """Add num_to_add randomly chosen filtering criteria that do NOT
  correspond to any objects (mapped to an empty list), mutating
  attribute_map in place.

  Note: loops until enough *distinct* new keys are drawn, so num_to_add
  must not exceed the number of unused attribute combinations.
  """
  if metadata['dataset'] == 'CLEVR-v1.0':
    attr_keys = ['Size', 'Color', 'Material', 'Shape']
  else:
    assert False, 'Unrecognized dataset'
  attr_vals = [metadata['types'][t] + [None] for t in attr_keys]
  if '_filter_options' in metadata:
    attr_vals = metadata['_filter_options']
  target_size = len(attribute_map) + num_to_add
  while len(attribute_map) < target_size:
    # BUG FIX: the original wrote `k = (random.choice(v) for v in attr_vals)`,
    # which builds a generator object -- the `k not in attribute_map` test
    # then never matches (generators hash by identity) and generator objects
    # were stored as keys. The key must be a tuple of attribute values.
    k = tuple(random.choice(v) for v in attr_vals)
    if k not in attribute_map:
      attribute_map[k] = []
def find_relate_filter_options(object_idx, scene_struct, metadata,
    unique=False, include_zero=False, trivial_frac=0.1):
  """For each (relationship, filter) pair, list the objects related to
  object_idx that also match the filter. Pairs where the filter adds
  nothing beyond the relationship ("trivial") are mostly discarded;
  roughly a trivial_frac fraction of the result may be trivial.
  """
  if '_filter_options' not in scene_struct:
    precompute_filter_options(scene_struct, metadata)
  # TODO: Right now this is only looking for nontrivial combinations; in some
  # cases I may want to add trivial combinations, either where the intersection
  # is empty or where the intersection is equal to the filtering output.
  options = {}
  trivial_options = {}
  for relationship in scene_struct['relationships']:
    related = set(scene_struct['relationships'][relationship][object_idx])
    for filters, filtered in scene_struct['_filter_options'].items():
      intersection = related & filtered
      if unique and len(intersection) != 1:
        continue
      if not include_zero and not intersection:
        continue
      # A pair is "trivial" when filtering does not shrink the related set.
      bucket = trivial_options if intersection == filtered else options
      bucket[(relationship, filters)] = sorted(intersection)
  # Keep a random sample of trivial pairs sized so they make up about
  # trivial_frac of the returned options.
  N, f = len(options), trivial_frac
  num_trivial = int(round(N * f / (1 - f)))
  sampled = list(trivial_options.items())
  random.shuffle(sampled)
  options.update(sampled[:num_trivial])
  return options
def node_shallow_copy(node):
  """Return a copy of a program node keeping only 'type', 'inputs' and,
  when present, 'side_inputs'. Values are shared with the original
  (shallow copy), so callers must not mutate them in place."""
  copied = {'type': node['type'], 'inputs': node['inputs']}
  if 'side_inputs' in node:
    copied['side_inputs'] = node['side_inputs']
  return copied
def other_heuristic(text, param_vals):
  """
  Post-processing heuristic to handle the word "other": when the two
  referenced objects are known to differ in some attribute, "other" /
  "another" would be misleading, so those words are removed.
  """
  # Fast exit when neither trigger word appears.
  if ' other ' not in text and ' another ' not in text:
    return text
  target_keys = {
      '<Z>', '<C>', '<M>', '<S>',
      '<Z2>', '<C2>', '<M2>', '<S2>',
  }
  # Only applies to templates using exactly the two standard object slots.
  if param_vals.keys() != target_keys:
    return text
  remove_other = False
  for attr in ('Z', 'C', 'M', 'S'):
    k1, k2 = '<%s>' % attr, '<%s2>' % attr
    v1 = param_vals.get(k1, None)
    v2 = param_vals.get(k2, None)
    # Both slots filled with differing values -> the objects are distinct.
    if v1 != '' and v2 != '' and v1 != v2:
      print('other has got to go! %s = %s but %s = %s'
            % (k1, v1, k2, v2))
      remove_other = True
      break
  if not remove_other:
    return text
  if ' other ' in text:
    text = text.replace(' other ', ' ')
  if ' another ' in text:
    text = text.replace(' another ', ' a ')
  return text
def instantiate_templates_dfs(scene_struct, template, metadata, answer_counts,
synonyms, max_instances=None, verbose=False):
param_name_to_type = {p['name']: p['type'] for p in template['params']}
initial_state = {
'nodes': [node_shallow_copy(template['nodes'][0])],
'vals': {},
'input_map': {0: 0},
'next_template_node': 1,
}
states = [initial_state]
final_states = []
while states:
state = states.pop()
# Check to make sure the current state is valid
q = {'nodes': state['nodes']}
outputs = qeng.answer_question(
q, metadata, scene_struct, all_outputs=True)
answer = outputs[-1]
if answer == '__INVALID__':
continue
# Check to make sure constraints are satisfied for the current state
skip_state = False
for constraint in template['constraints']:
if constraint['type'] == 'EQ':
p1, p2 = constraint["params"]
v1 = state['vals'].get(p1)
if v1 is not None and p2 is not None and v1 != p2:
skip_state = True
break
elif constraint['type'] == 'NEQ':
p1, p2 = constraint['params']
v1, v2 = state['vals'].get(p1), state['vals'].get(p2)
if v1 is not None and v2 is not None and v1 != v2:
if verbose:
print('skipping due to NEQ constraint')
print(constraint)
print(state['vals'])
skip_state = True
break
elif constraint['type'] == 'NULL':
p = constraint['params'][0]
p_type = param_name_to_type[p]
v = state['vals'].get(p)
if v is not None:
skip = False
if p_type == 'Shape' and v != 'thing':
skip = True
if p_type != 'Shape' and v != '':
skip = True
if skip:
if verbose:
print('skipping due to NULL constraint')
print(constraint)
print(state['vals'])
skip_state = True
break
elif constraint['type'] == 'OUT_NEQ':
i, j = constraint['params']
i = state['input_map'].get(i, None)
j = state['input_map'].get(j, None)
if i is not | |
self.master_map = self.reference_map.deep_copy()
if self.model.ncs_constraints_present():
# here we are negating non-master part of the model
# self.master_sel=master_sel
# self.master_map = self.reference_map.deep_copy()
mask = maptbx.mask(
xray_structure=xrs.select(self.model.get_master_selection()),
n_real=self.master_map.focus(),
mask_value_inside_molecule=1,
mask_value_outside_molecule=-1,
solvent_radius=0,
atom_radius=1.)
self.master_map = self.reference_map * mask
if self.params.debug:
iotbx.mrcfile.write_ccp4_map(
file_name="%s_3_master.map" % self.params.output_prefix,
unit_cell=xrs.unit_cell(),
space_group=xrs.space_group(),
map_data=self.master_map,
labels=flex.std_string([""]))
  def update_ss_in_grm(self, ss_annotation):
    """Refresh the secondary-structure restraints in the geometry restraints
    manager from *ss_annotation* (thin wrapper over set_ss_restraints)."""
    self.set_ss_restraints(ss_annotation)
def set_ss_restraints(self, ss_annotation, params=None):
log = self.log
if not self.verbose:
log = null_out()
if self.params.use_ss_restraints and ss_annotation is not None:
ss_manager = manager(
pdb_hierarchy=self.model.get_hierarchy(),
geometry_restraints_manager=self.model.get_restraints_manager().geometry,
sec_str_from_pdb_file=ss_annotation,
params=None,
mon_lib_srv=self.model.get_mon_lib_srv(),
verbose=-1,
log=log)
self.model.get_restraints_manager().geometry.set_secondary_structure_restraints(
ss_manager=ss_manager,
hierarchy=self.model.get_hierarchy(),
log=log)
  def _setup_model_h(self):
    """Create self.model_h -- a copy of the working model guaranteed to
    carry hydrogens -- and prepare its restraints and NCS bookkeeping.
    No-op if model_h already exists."""
    if self.model_h is not None:
      return
    if not self.model.has_hd():
      # runs reduce internally
      assert (libtbx.env.has_module(name="reduce"))
      assert (libtbx.env.has_module(name="elbow"))
      self.model_h = ready_set_model_interface(
          model=self.model,
          params=["add_h_to_water=False",
              "optimise_final_geometry_of_hydrogens=False"],
          )
    else:
      self.model_h = self.model.deep_copy()
    params_h = mmtbx.model.manager.get_default_pdb_interpretation_params()
    params_h.pdb_interpretation = self.model._pdb_interpretation_params.pdb_interpretation
    # customization for model with H
    params_h.pdb_interpretation.clash_guard.nonbonded_distance_threshold=None
    params_h.pdb_interpretation.max_reasonable_bond_distance = None
    params_h.pdb_interpretation.use_neutron_distances=True
    params_h.pdb_interpretation.ncs_search = self.params_for_model.pdb_interpretation.ncs_search
    params_h.pdb_interpretation.ncs_search.exclude_selection="water"
    self.model_h.set_pdb_interpretation_params(params_h)
    self.model_h.get_restraints_manager()
    # put riding hydrogens into idealized positions and set up NCS to
    # mirror the working model's constraint groups
    self.model_h.idealize_h_riding()
    self.model_h.setup_ncs_constraints_groups(filter_groups=True)
    self.model_h._update_master_sel()
    if self.params.debug:
      self.shift_and_write_result(
          model = self.model_h,
          fname_suffix="model_h")
  def _update_model_h(self):
    """Propagate current non-hydrogen coordinates from self.model into
    self.model_h, then re-idealize the riding hydrogens to match."""
    if self.model_h is None:
      self._setup_model_h()
    # transfer coords model -> model_h (only the non-H positions change)
    sc = self.model_h.get_sites_cart()
    sc.set_selected(~self.model_h.get_hd_selection(), self.model.get_sites_cart())
    self.model_h.set_sites_cart(sc)
    self.model_h.idealize_h_riding()
  def _update_model_from_model_h(self):
    """Copy coordinates back from model_h (hydrogens stripped) into the
    working model."""
    # NOTE(review): sites are set twice -- first directly from model_h's
    # non-H atoms, then re-derived from the hierarchy with NCS expansion;
    # presumably the second call propagates the master copy onto the NCS
    # copies -- confirm against set_sites_cart_from_hierarchy semantics.
    self.model.set_sites_cart(
        sites_cart = self.model_h.get_hierarchy().select(~self.model_h.get_hd_selection()).atoms().extract_xyz())
    self.model.set_sites_cart_from_hierarchy(multiply_ncs=True)
  def idealize_rotamers(self):
    """Fix remaining rotamer outliers by refitting side chains (on the
    hydrogen-bearing copy of the model) against the master map, then copy
    the refined coordinates back into the working model."""
    print("Fixing rotamers...", file=self.log)
    self.log.flush()
    if self.params.debug:
      self.shift_and_write_result(
          model = self.model,
          fname_suffix="just_before_rota")
    self._update_model_h()
    # rotamer library restricted to favored conformations only
    rotman = mmtbx.idealized_aa_residues.rotamer_manager.load(
        rotamers="favored")
    self.model_h.process_input_model(make_restraints=True)
    # evaluator decides which side chains are worth refitting
    o = mmtbx.refinement.real_space.side_chain_fit_evaluator(
        pdb_hierarchy = self.model_h.get_hierarchy(),
        crystal_symmetry = self.model.crystal_symmetry(),
        rotamer_evaluator = rotman.rotamer_evaluator,
        map_data = self.master_map)
    result = mmtbx.refinement.real_space.fit_residues.run(
        vdw_radii = self.model_h.get_vdw_radii(),
        bselection = o.sel_all(),
        pdb_hierarchy = self.model_h.get_hierarchy(),
        crystal_symmetry = self.model.crystal_symmetry(),
        map_data = self.master_map,
        rotamer_manager = rotman,
        rotatable_hd = self.model_h.rotatable_hd_selection(iselection=False),
        sin_cos_table = scitbx.math.sin_cos_table(n=10000),
        backbone_sample = False,
        mon_lib_srv = self.model_h.get_mon_lib_srv(),
        log = self.log)
    self.model_h.set_sites_cart_from_hierarchy()
    self._update_model_from_model_h()
    if self.params.debug:
      self.shift_and_write_result(
          model = self.model,
          fname_suffix="rota_ideal")
  def run(self):
    """Main idealization driver: CaBLAM fixes, optional initial
    minimization (with early exit if that alone yields a clean model),
    secondary-structure idealization, loop idealization, rotamer fixing
    and final minimization, collecting validation statistics after every
    stage and writing intermediate models in debug mode."""
    t_0 = time()
    self.ann = self.model.get_ss_annotation()
    self._setup_model_h()
    self.model.set_restraint_objects(self.model_h.get_restraint_objects())
    self.model.process_input_model(make_restraints=True)
    # set SS restraints
    self.set_ss_restraints(self.ann)
    self.model.setup_ncs_constraints_groups()
    self.init_model_statistics = self.get_statistics(self.model)
    #
    # Cablam idealization
    #
    if self.params.debug:
      self.shift_and_write_result(
          model = self.model,
          fname_suffix="start")
      self.shift_and_write_result(
          model = self.model_h,
          fname_suffix="start_h")
    self.params.cablam_idealization.find_ss_after_fixes = False
    ci_results = cablam_idealization(
        model=self.model,
        params=self.params.cablam_idealization,
        log=self.log).get_results()
    self.model = ci_results.model
    self.after_cablam_statistics = self.get_statistics(self.model)
    if self.params.debug:
      self.shift_and_write_result(
          model = self.model,
          fname_suffix="cablam_id")
    # Here we are preparing maps if needed.
    if self.user_supplied_map is not None:
      self.prepare_user_map()
    if self.reference_map is None and self.params.use_map_for_reference:
      self.prepare_reference_map_3()
    if self.params.run_minimization_first:
      # running simple minimization and updating all
      # self.master, self.working, etc...
      # self.whole_pdb_h.reset_atom_i_seqs()
      if self.init_ref_map is None:
        self.prepare_init_reference_map()
      print("Minimization first", file=self.log)
      self.minimize(
          model=self.model,
          original_pdb_h=self.original_hierarchy,
          excl_string_selection=None, # don't need if we have map
          reference_map=self.init_ref_map,
          )
      self.init_gm_model_statistics = self.get_statistics(self.model)
      if self.params.debug:
        self.shift_and_write_result(
            model = self.model,
            fname_suffix="init_gm")
    # Early exit when plain minimization already produced a clean model
    # (no Ramachandran / omega / rotamer problems above threshold).
    if (self.init_gm_model_statistics is not None
        and self.init_gm_model_statistics.ramachandran.outliers == 0
        and self.init_gm_model_statistics.omega.twisted_general <= 0.01
        and self.init_gm_model_statistics.omega.twisted_proline <= 0.01
        and self.init_gm_model_statistics.omega.cis_general <= 0.01
        and self.init_gm_model_statistics.omega.cis_proline <= 0.01
        and self.init_gm_model_statistics.rotamer.outliers <= 0.01):
      print("Simple minimization was enough", file=self.log)
      # Early exit!!!
      self.shift_and_write_result(
          model=self.model,
          fname_suffix="all_idealized")
      if self.params.output_model_h:
        self.shift_and_write_result(
            model=self.model_h,
            fname_suffix="all_idealized_h")
      self.final_model_statistics = self.get_statistics(self.model)
      # self.original_boxed_hierarchy.write_pdb_file(file_name="original_boxed_end.pdb")
      self.time_for_run = time() - t_0
      if self.params.output_pkl:
        easy_pickle.dump(
            file_name="%s.pkl" % self.params.output_prefix,
            obj = self.get_stats_obj())
      return
    self.filtered_whole_ann = None
    if self.ann is not None:
      self.filtered_whole_ann = self.ann.deep_copy()
      print("Original SS annotation", file=self.log)
      print(self.ann.as_pdb_str(), file=self.log)
      if self.params.filter_input_ss:
        self.filtered_whole_ann = self.ann.filter_annotation(
            hierarchy=self.model.get_hierarchy(),
            asc=self.model.get_atom_selection_cache())
      print("Filtered SS annotation", file=self.log)
      print(self.filtered_whole_ann.as_pdb_str(), file=self.log)
      self.model.set_ss_annotation(self.filtered_whole_ann)
      # getting grm with SS restraints
      self.update_ss_in_grm(self.filtered_whole_ann)
    if (self.ann is None or
        self.ann.get_n_helices() + self.ann.get_n_sheets() == 0 or
        not self.params.ss_idealization.enabled):
      print("No secondary structure annotations found or SS idealization is disabled.", file=self.log)
      print("Secondary structure substitution step will be skipped", file=self.log)
      self.log.flush()
      # here we want to do geometry minimization anyway!
      negate_selection = None
      if self.reference_map is None:
        outlier_selection_txt = mmtbx.building.loop_closure.utils. \
            rama_score_selection(self.model.get_hierarchy(), self.model.get_ramachandran_manager(), "outlier",1)
        print("outlier_selection_txt", outlier_selection_txt, file=self.log)
        negate_selection = "all"
        if outlier_selection_txt != "" and outlier_selection_txt is not None:
          negate_selection = "not (%s)" % outlier_selection_txt
      # if self.params.run_minimization_first:
      #   self.minimize(
      #       model=self.model,
      #       original_pdb_h=self.whole_pdb_h,
      #       ncs_restraints_group_list=self.filtered_ncs_restr_group_list,
      #       excl_string_selection=negate_selection,
      #       reference_map=self.reference_map)
      # self.original_boxed_hierarchy.write_pdb_file(file_name="original_boxed_h_1.pdb")
    else:
      if self.params.debug:
        self.params.ss_idealization.file_name_before_regularization = \
            "%s_ss_before_reg.pdb" % self.params.output_prefix
      self.params.ss_idealization.skip_good_ss_elements = True
      ssb.substitute_ss(
          model = self.model,
          params=self.params.ss_idealization,
          reference_map=self.master_map,
          log=self.log)
      self.log.flush()
    self.after_ss_idealization = self.get_statistics(self.model)
    self.shift_and_write_result(
        model=self.model,
        fname_suffix="ss_ideal_stat")
    # Write resulting pdb file.
    if self.params.debug:
      self.shift_and_write_result(
          model=self.model,
          fname_suffix="ss_ideal",
          )
    # self.params.loop_idealization.minimize_whole = not self.model.ncs_constraints_present() and self.params.loop_idealization.minimize_whole
    self.params.loop_idealization.debug = self.params.debug or self.params.loop_idealization.debug
    # self.params.loop_idealization.enabled = False
    # self.params.loop_idealization.variant_search_level = 0
    print("Starting loop idealization", file=self.log)
    loop_ideal = loop_idealization(
        self.model,
        params=self.params.loop_idealization,
        reference_map=self.master_map,
        log=self.log,
        verbose=True)
    self.log.flush()
    if self.params.debug:
      self.shift_and_write_result(
          model = self.model,
          fname_suffix="rama_ideal")
    self.after_loop_idealization = self.get_statistics(self.model)
    # fixing remaining rotamer outliers
    if (self.params.additionally_fix_rotamer_outliers and
        self.after_loop_idealization.rotamer.outliers > 0.004):
      self.idealize_rotamers()
    self.after_rotamer_fixing = self.get_statistics(self.model)
    ref_hierarchy_for_final_gm = self.original_boxed_hierarchy
    if not self.params.use_starting_model_for_final_gm:
      ref_hierarchy_for_final_gm = self.model.get_hierarchy().deep_copy()
    ref_hierarchy_for_final_gm.reset_atom_i_seqs()
    if self.model.ncs_constraints_present():
      print("Using ncs", file=self.log)
      # assert 0
    else:
      print("Not using ncs", file=self.log)
      # assert 0
    # need to update SS manager for the whole model here.
    if self.params.use_ss_restraints:
      ss_params = sec_str_master_phil.fetch().extract()
      ss_params.secondary_structure.protein.remove_outliers = not self.params.ss_idealization.enabled
      self.set_ss_restraints(
          ss_annotation=self.filtered_whole_ann,
          params=ss_params.secondary_structure)
    if self.params.run_minimization_last:
      print("loop_ideal.ref_exclusion_selection", loop_ideal.ref_exclusion_selection, file=self.log)
      print("Minimizing whole model", file=self.log)
      self.minimize(
          model = self.model,
          original_pdb_h=ref_hierarchy_for_final_gm,
          excl_string_selection=loop_ideal.ref_exclusion_selection,
          reference_map = self.reference_map)
    self.shift_and_write_result(
        model = self.model,
        fname_suffix="all_idealized")
    if self.params.output_model_h:
      self.shift_and_write_result(
          model=self.model_h,
          fname_suffix="all_idealized_h")
    self.final_model_statistics = self.get_statistics(self.model)
    self.time_for_run = time() - t_0
    if self.params.output_pkl or self.params.debug:
      easy_pickle.dump(
          file_name="%s.pkl" % self.params.output_prefix,
          obj = self.get_stats_obj())
  def minimize(self,
      model,
      original_pdb_h,
      excl_string_selection,
      reference_map):
    """Geometry-minimize *model*, either with Ramachandran-restrained
    minimization (no map) or against *reference_map* (optionally on the
    hydrogen-bearing copy), keeping model and model_h in sync afterwards.

    Arguments:
      model -- model to minimize (modified in place)
      original_pdb_h -- reference hierarchy for restraints when no map given
      excl_string_selection -- atom selection excluded from reference
        restraints (only used in the no-map branch)
      reference_map -- real-space map to minimize against, or None
    """
    # print "ncs_restraints_group_list", ncs_restraints_group_list
    # assert 0
    if reference_map is None:
      minimize_wrapper_for_ramachandran(
          model=model,
          original_pdb_h=original_pdb_h,
          excl_string_selection=excl_string_selection,
          number_of_cycles=self.params.number_of_refinement_cycles,
          log=self.log,
          )
      self._update_model_h()
    else:
      print("Using map as reference", file=self.log)
      self.log.flush()
      if self.params.use_hydrogens_in_minimization:
        # minimize the H-bearing copy, then push coordinates back to model
        self._update_model_h()
        mwwm = minimize_wrapper_with_map(
            model=self.model_h,
            target_map=reference_map,
            number_of_cycles=self.params.number_of_refinement_cycles,
            cycles_to_converge=self.params.cycles_to_converge,
            log=self.log)
        self._update_model_from_model_h()
      else:
        mwwm = minimize_wrapper_with_map(
            model=model,
            target_map=reference_map,
            number_of_cycles=self.params.number_of_refinement_cycles,
            cycles_to_converge=self.params.cycles_to_converge,
            log=self.log)
        self._update_model_h()
def shift_and_write_result(self, model, fname_suffix=""):
pdb_str = model.model_as_pdb()
fname = "%s_%s.pdb" % (self.params.output_prefix, fname_suffix)
with open(fname, 'w') as f:
f.write(pdb_str)
if self.params.debug:
fname = "%s_%s_nosh.pdb" % (self.params.output_prefix, fname_suffix)
pdb_str = model.model_as_pdb(do_not_shift_back=True)
with open(fname, 'w') as f:
f.write(pdb_str)
def get_rmsd_from_start(self):
if self.rmsd_from_start is not None:
return self.rmsd_from_start
# calculate rmsd
self.rmsd_from_start = ssb.calculate_rmsd_smart(
self.original_boxed_hierarchy,
self.model.get_hierarchy(),
backbone_only=True)
return self.rmsd_from_start
  def get_rmsd_from_start2(self):
    """All-atom (not backbone-only) RMSD from the original boxed hierarchy;
    recomputed on every call, unlike get_rmsd_from_start."""
    return ssb.calculate_rmsd_smart(
        self.original_boxed_hierarchy,
        self.model.get_hierarchy(),
        backbone_only=False)
def get_stats_obj(self):
if self.params.run_minimization_first:
stat_obj_list = [self.init_model_statistics,
self.init_gm_model_statistics,
self.after_ss_idealization,
self.after_loop_idealization,
self.after_rotamer_fixing,
self.final_model_statistics,]
else:
stat_obj_list = [self.init_model_statistics,
self.after_ss_idealization,
self.after_loop_idealization,
self.after_rotamer_fixing,
self.final_model_statistics,]
if self.after_cablam_statistics is not None:
stat_obj_list.insert(1, self.after_cablam_statistics)
return group_args(
geoms=stat_obj_list,
rmsds=(self.get_rmsd_from_start(), self.get_rmsd_from_start2()),
runtime=self.time_for_init + self.time_for_run)
  def print_stat_comparison(self):
    """Print a table comparing validation metrics across all recorded
    stages; columns follow get_stats_obj() ordering. A missing stats object
    prints as 0 for every metric; a missing individual value as 99999."""
    stat_obj_list = self.get_stats_obj()
    # header: column set depends on which optional stages were recorded
    if self.after_cablam_statistics is None:
      if self.params.run_minimization_first:
        print(" Starting Init GM SS ideal Rama Rota Final", file=self.log)
      else:
        print(" Starting SS ideal Rama Rota Final", file=self.log)
    else:
      if self.params.run_minimization_first:
        print(" Starting Cablam Init GM SS ideal Rama Rota Final", file=self.log)
      else:
        print(" Starting Cablam SS ideal Rama Rota Final", file=self.log)
    # Starting SS ideal Rama Rota Final
    # Molprobity Score : 4.50 3.27 2.66 2.32 2.54
    for val_caption, val_name, val_subname, val_format in [
        ("Molprobity Score", "molprobity_score", "", "{:10.2f}"),
        ("Clashscore", "clash", "score", "{:10.2f}"),
        ("CBeta deviations", "c_beta", "outliers", "{:10.2f}"),
        ("Ramachandran outliers", "ramachandran", "outliers", "{:10.2f}"),
        ("Ramachandran allowed", "ramachandran", "allowed", "{:10.2f}"),
        ("Rotamer outliers", "rotamer", "outliers", "{:10.2f}"),
        ("Cis-prolines", "omega", "cis_proline", "{:10.2f}"),
        ("Cis-general", "omega", "cis_general", "{:10.2f}"),
        ("Twisted prolines", "omega", "twisted_proline", "{:10.2f}"),
        ("Twisted general", "omega", "twisted_general", "{:10.2f}"),
        ("CaBLAM outliers", "cablam", "outliers", "{:10.2f}"),
        ("CaBLAM disfavored", "cablam", "disfavored", "{:10.2f}"),
        ("CaBLAM CA outliers", "cablam", "ca_outliers", "{:10.2f}"),
        ("phi-psy2: Motif(10)", "MOTIF", "", "{:10.2f}"),
        ("phi-psy2: Motif(20)", "MOTIF20", "", "{:10.2f}"),
        ("phi-psy2: Motif(->)", "MOTIF...", "", "{:10.2f}"),
        ("phi-psy2: General", "GENERAL", "", "{:10.2f}"),
        ("phi-psy2: Outlier", "OUTLIER", "", "{:10.2f}"),
        ]:
      l = "%-21s:" % val_caption
      for stat_obj in stat_obj_list.geoms:
        # sentinel shown when the attribute chain exists but the value doesn't
        value = 99999
        if stat_obj is not None:
          sub_class = getattr(stat_obj, val_name, None)
          if sub_class is not None:
            if val_subname != "":
              value = getattr(sub_class, val_subname, None)
            else:
              value = sub_class
          l += val_format.format(value)
        else:
          l += val_format.format(0)
      print(l, file=self.log)
  def print_runtime(self):
    """Log total wall-clock time (init + run) formatted as H:MM:SS."""
    print("Time taken for idealization: %s" % str(
        datetime.timedelta(seconds=int(self.time_for_init + self.time_for_run))), file=self.log)
def get_map_from_hkl(hkl_file_object, params, xrs, log):
print("Processing input hkl file...", file=log)
crystal_symmetry = hkl_file_object.crystal_symmetry()
rfs = reflection_file_utils.reflection_file_server(
crystal_symmetry = crystal_symmetry,
force_symmetry = True,
reflection_files = [hkl_file_object.file_content],
err = StringIO())
parameters = mmtbx.utils.data_and_flags_master_params().extract()
if (params.data_labels is not None):
parameters.labels = params.data_labels
if (params.r_free_flags_labels is not None):
parameters.r_free_flags.label = params.r_free_flags_labels
determined_data_and_flags = mmtbx.utils.determine_data_and_flags(
reflection_file_server = rfs,
parameters = parameters,
keep_going = True,
working_point_group = crystal_symmetry.space_group().build_derived_point_group(),
log = StringIO(),
symmetry_safety_check = True)
f_obs = determined_data_and_flags.f_obs
if (params.data_labels is None):
params.data_labels = f_obs.info().label_string()
r_free_flags = determined_data_and_flags.r_free_flags
assert f_obs is not None
print("Input data:", file=log)
print(" Iobs or Fobs:", f_obs.info().labels, file=log)
if (r_free_flags is not None):
print(" Free-R flags:", r_free_flags.info().labels, file=log)
params.r_free_flags_labels = r_free_flags.info().label_string()
else:
print(" Free-R flags: Not present", file=log)
fmodel = mmtbx.f_model.manager(
f_obs = f_obs,
r_free_flags = r_free_flags,
xray_structure = xrs)
fmodel.update_all_scales()
fft_map = fmodel.electron_density_map().fft_map(
resolution_factor = 0.25,
map_type | |
'attribute_map': {
'month': 'month',
},
'location_map': {
'month': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json;datetime-format=rfc3339'
],
'content_type': [],
},
api_client=api_client,
callable=__get_usage_billable_summary
)
def __get_usage_fargate(
self,
start_hr,
**kwargs
):
"""Get hourly usage for Fargate # noqa: E501
Get hourly usage for [Fargate](https://docs.datadoghq.com/integrations/ecs_fargate/). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_usage_fargate(start_hr, async_req=True)
>>> result = thread.get()
Args:
start_hr (datetime): Datetime in ISO-8601 format, UTC, precise to hour: [YYYY-MM-DDThh] for usage beginning at this hour.
Keyword Args:
end_hr (datetime): Datetime in ISO-8601 format, UTC, precise to hour: [YYYY-MM-DDThh] for usage ending **before** this hour.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UsageFargateResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['start_hr'] = \
start_hr
return self.call_with_http_info(**kwargs)
self.get_usage_fargate = Endpoint(
settings={
'response_type': (UsageFargateResponse,),
'auth': [
'apiKeyAuth',
'appKeyAuth'
],
'endpoint_path': '/api/v1/usage/fargate',
'operation_id': 'get_usage_fargate',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'start_hr',
'end_hr',
],
'required': [
'start_hr',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'start_hr':
(datetime,),
'end_hr':
(datetime,),
},
'attribute_map': {
'start_hr': 'start_hr',
'end_hr': 'end_hr',
},
'location_map': {
'start_hr': 'query',
'end_hr': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json;datetime-format=rfc3339'
],
'content_type': [],
},
api_client=api_client,
callable=__get_usage_fargate
)
def __get_usage_hosts(
self,
start_hr,
**kwargs
):
"""Get hourly usage for hosts and containers # noqa: E501
Get hourly usage for hosts and containers. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_usage_hosts(start_hr, async_req=True)
>>> result = thread.get()
Args:
start_hr (datetime): Datetime in ISO-8601 format, UTC, precise to hour: [YYYY-MM-DDThh] for usage beginning at this hour.
Keyword Args:
end_hr (datetime): Datetime in ISO-8601 format, UTC, precise to hour: [YYYY-MM-DDThh] for usage ending **before** this hour.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UsageHostsResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['start_hr'] = \
start_hr
return self.call_with_http_info(**kwargs)
self.get_usage_hosts = Endpoint(
settings={
'response_type': (UsageHostsResponse,),
'auth': [
'apiKeyAuth',
'appKeyAuth'
],
'endpoint_path': '/api/v1/usage/hosts',
'operation_id': 'get_usage_hosts',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'start_hr',
'end_hr',
],
'required': [
'start_hr',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'start_hr':
(datetime,),
'end_hr':
(datetime,),
},
'attribute_map': {
'start_hr': 'start_hr',
'end_hr': 'end_hr',
},
'location_map': {
'start_hr': 'query',
'end_hr': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json;datetime-format=rfc3339'
],
'content_type': [],
},
api_client=api_client,
callable=__get_usage_hosts
)
def __get_usage_lambda(
self,
start_hr,
**kwargs
):
"""Get hourly usage for Lambda # noqa: E501
Get hourly usage for lambda. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_usage_lambda(start_hr, async_req=True)
>>> result = thread.get()
Args:
start_hr (datetime): Datetime in ISO-8601 format, UTC, precise to hour: [YYYY-MM-DDThh] for usage beginning at this hour.
Keyword Args:
end_hr (datetime): Datetime in ISO-8601 format, UTC, precise to hour: [YYYY-MM-DDThh] for usage ending **before** this hour.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to use.
Default is read from the configuration.
async_req (bool): execute request asynchronously
Returns:
UsageLambdaResponse
If the method is called asynchronously, returns the request
thread.
"""
kwargs['async_req'] = kwargs.get(
'async_req', False
)
kwargs['_return_http_data_only'] = kwargs.get(
'_return_http_data_only', True
)
kwargs['_preload_content'] = kwargs.get(
'_preload_content', True
)
kwargs['_request_timeout'] = kwargs.get(
'_request_timeout', None
)
kwargs['_check_input_type'] = kwargs.get(
'_check_input_type', True
)
kwargs['_check_return_type'] = kwargs.get(
'_check_return_type', True
)
kwargs['_host_index'] = kwargs.get('_host_index')
kwargs['start_hr'] = \
start_hr
return self.call_with_http_info(**kwargs)
self.get_usage_lambda = Endpoint(
settings={
'response_type': (UsageLambdaResponse,),
'auth': [
'apiKeyAuth',
'appKeyAuth'
],
'endpoint_path': '/api/v1/usage/aws_lambda',
'operation_id': 'get_usage_lambda',
'http_method': 'GET',
'servers': None,
},
params_map={
'all': [
'start_hr',
'end_hr',
],
'required': [
'start_hr',
],
'nullable': [
],
'enum': [
],
'validation': [
]
},
root_map={
'validations': {
},
'allowed_values': {
},
'openapi_types': {
'start_hr':
(datetime,),
'end_hr':
(datetime,),
},
'attribute_map': {
'start_hr': 'start_hr',
'end_hr': 'end_hr',
},
'location_map': {
'start_hr': 'query',
'end_hr': 'query',
},
'collection_format_map': {
}
},
headers_map={
'accept': [
'application/json;datetime-format=rfc3339'
],
'content_type': [],
},
api_client=api_client,
callable=__get_usage_lambda
)
def __get_usage_logs(
self,
start_hr,
**kwargs
):
"""Get hourly usage for Logs # noqa: E501
Get hourly usage for logs. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_usage_logs(start_hr, async_req=True)
>>> result = thread.get()
Args:
start_hr (datetime): Datetime in ISO-8601 format, UTC, precise to hour: [YYYY-MM-DDThh] for usage beginning at this hour.
Keyword Args:
end_hr (datetime): Datetime in ISO-8601 format, UTC, precise to hour: [YYYY-MM-DDThh] for usage ending **before** this hour.. [optional]
_return_http_data_only (bool): response data without head status
code and headers. Default is True.
_preload_content (bool): if False, the urllib3.HTTPResponse object
will be returned without reading/decoding response data.
Default is True.
_request_timeout (float/tuple): timeout setting for this request. If one
number provided, it will be total request timeout. It can also
be a pair (tuple) of (connection, read) timeouts.
Default is None.
_check_input_type (bool): specifies if type checking
should be done one the data sent to the server.
Default is True.
_check_return_type (bool): specifies if type checking
should be done one the data received from the server.
Default is True.
_host_index (int/None): specifies the index of the server
that we want to | |
match
raise: NullArgument - ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_avatar_id
self._add_match('gradeId', str(grade_id), match)
    def clear_grade_id_terms(self):
        """Clears the grade ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('gradeId')

    # Delete-only property: ``del query.grade_id_terms`` clears the terms.
    grade_id_terms = property(fdel=clear_grade_id_terms)

    def supports_grade_query(self):
        """Tests if a ``GradeQuery`` is available for querying grades.

        return: (boolean) - ``true`` if a grade query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        # Not supported by this implementation; kept for interface compliance.
        raise errors.Unimplemented()

    def get_grade_query(self):
        """Gets the query for a grade.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.grading.GradeQuery) - the grade query
        raise:  Unimplemented - ``supports_grade_query()`` is ``false``
        *compliance: optional -- This method must be implemented if
                ``supports_grade_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    grade_query = property(fget=get_grade_query)

    @utilities.arguments_not_none
    def match_any_grade(self, match):
        """Matches grade systems with any grade.

        arg:    match (boolean): ``true`` to match grade systems with
                any grade, ``false`` to match systems with no grade
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_grade_terms(self):
        """Clears the grade terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    grade_terms = property(fdel=clear_grade_terms)
    @utilities.arguments_not_none
    def match_lowest_numeric_score(self, start, end, match):
        """Matches grade systems whose low end score falls in the specified range inclusive.

        arg:    start (decimal): low end of range
        arg:    end (decimal): high end of range
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for negative match
        raise:  InvalidArgument - ``end`` is less than ``start``
        raise:  NullArgument - ``grade_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_lowest_numeric_score_terms(self):
        """Clears the lowest numeric score terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('lowestNumericScore')

    # Delete-only property clearing the lowest-numeric-score terms.
    lowest_numeric_score_terms = property(fdel=clear_lowest_numeric_score_terms)

    @utilities.arguments_not_none
    def match_numeric_score_increment(self, start, end, match):
        """Matches grade systems numeric score increment is between the specified range inclusive.

        arg:    start (decimal): low end of range
        arg:    end (decimal): high end of range
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for negative match
        raise:  InvalidArgument - ``end`` is less than ``start``
        raise:  NullArgument - ``grade_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_numeric_score_increment_terms(self):
        """Clears the numeric score increment terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('numericScoreIncrement')

    # Delete-only property clearing the numeric-score-increment terms.
    numeric_score_increment_terms = property(fdel=clear_numeric_score_increment_terms)

    @utilities.arguments_not_none
    def match_highest_numeric_score(self, start, end, match):
        """Matches grade systems whose high end score falls in the specified range inclusive.

        arg:    start (decimal): low end of range
        arg:    end (decimal): high end of range
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for negative match
        raise:  InvalidArgument - ``end`` is less than ``start``
        raise:  NullArgument - ``grade_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_highest_numeric_score_terms(self):
        """Clears the highest numeric score terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('highestNumericScore')

    # Delete-only property clearing the highest-numeric-score terms.
    highest_numeric_score_terms = property(fdel=clear_highest_numeric_score_terms)
    @utilities.arguments_not_none
    def match_gradebook_column_id(self, gradebook_column_id, match):
        """Sets the gradebook column ``Id`` for this query.

        arg:    gradebook_column_id (osid.id.Id): a gradebook column
                ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for negative match
        raise:  NullArgument - ``gradebook_column_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('gradebookColumnId', str(gradebook_column_id), match)

    def clear_gradebook_column_id_terms(self):
        """Clears the gradebook column ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('gradebookColumnId')

    # Delete-only property clearing the gradebook-column-Id terms.
    gradebook_column_id_terms = property(fdel=clear_gradebook_column_id_terms)

    def supports_gradebook_column_query(self):
        """Tests if a ``GradebookColumnQuery`` is available.

        return: (boolean) - ``true`` if a gradebook column query is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_gradebook_column_query(self):
        """Gets the query for a gradebook column.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.grading.GradebookColumnQuery) - the gradebook
                column query
        raise:  Unimplemented - ``supports_gradebook_column_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
                ``supports_gradebook_column_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    gradebook_column_query = property(fget=get_gradebook_column_query)

    @utilities.arguments_not_none
    def match_any_gradebook_column(self, match):
        """Matches grade systems assigned to any gradebook column.

        arg:    match (boolean): ``true`` to match grade systems mapped
                to any column, ``false`` to match systems mapped to no
                columns
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def clear_gradebook_column_terms(self):
        """Clears the gradebook column terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    gradebook_column_terms = property(fdel=clear_gradebook_column_terms)
    @utilities.arguments_not_none
    def match_gradebook_id(self, gradebook_id, match):
        """Sets the gradebook ``Id`` for this query.

        arg:    gradebook_id (osid.id.Id): a gradebook ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for negative match
        raise:  NullArgument - ``gradebook_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_bin_id
        self._add_match('assignedGradebookIds', str(gradebook_id), match)

    def clear_gradebook_id_terms(self):
        """Clears the gradebook ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_bin_id_terms
        self._clear_terms('assignedGradebookIds')

    # Delete-only property clearing the gradebook-Id terms.
    gradebook_id_terms = property(fdel=clear_gradebook_id_terms)

    def supports_gradebook_query(self):
        """Tests if a ``GradebookQuery`` is available.

        return: (boolean) - ``true`` if a gradebook query is available,
                ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_gradebook_query(self):
        """Gets the query for a gradebook.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.grading.GradebookQuery) - the gradebook query
        raise:  Unimplemented - ``supports_gradebook_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
                ``supports_gradebook_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    gradebook_query = property(fget=get_gradebook_query)

    def clear_gradebook_terms(self):
        """Clears the gradebook terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('gradebook')

    gradebook_terms = property(fdel=clear_gradebook_terms)

    @utilities.arguments_not_none
    def get_grade_system_query_record(self, grade_system_record_type):
        """Gets the grade system query record corresponding to the given ``GradeSystem`` record ``Type``.

        Multiple retrievals produce a nested ``OR`` term.

        arg:    grade_system_record_type (osid.type.Type): a grade
                system record type
        return: (osid.grading.records.GradeSystemQueryRecord) - the
                grade system query record
        raise:  NullArgument - ``grade_system_record_type`` is ``null``
        raise:  OperationFailed - unable to complete request
        raise:  Unsupported -
                ``has_record_type(grade_system_record_type)`` is
                ``false``
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()
class GradeEntryQuery(abc_grading_queries.GradeEntryQuery, osid_queries.OsidRelationshipQuery):
    """This is the query for searching grade entries.

    Each method match request produces an ``AND`` term while multiple
    invocations of a method produces a nested ``OR``.

    """
    def __init__(self, runtime):
        self._namespace = 'grading.GradeEntry'
        self._runtime = runtime
        # collect every supported record type registered for grade entries
        record_type_data_sets = get_registry('GRADE_ENTRY_RECORD_TYPES', runtime)
        self._all_supported_record_type_data_sets = record_type_data_sets
        self._all_supported_record_type_ids = []
        for data_set in record_type_data_sets:
            self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
        # NOTE(review): initializes via OsidObjectQuery.__init__ although the
        # class derives from OsidRelationshipQuery -- confirm this is intended.
        osid_queries.OsidObjectQuery.__init__(self, runtime)

    @utilities.arguments_not_none
    def match_gradebook_column_id(self, gradebook_column_id, match):
        """Sets the gradebook column ``Id`` for this query.

        arg:    gradebook_column_id (osid.id.Id): a gradebook column
                ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``gradebook_column_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        self._add_match('gradebookColumnId',
                        str(gradebook_column_id),
                        match)

    def clear_gradebook_column_id_terms(self):
        """Clears the gradebook column ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('gradebookColumnId')

    # Delete-only property clearing the gradebook-column-Id terms.
    gradebook_column_id_terms = property(fdel=clear_gradebook_column_id_terms)

    def supports_gradebook_column_query(self):
        """Tests if a ``GradebookColumnQuery`` is available for querying creators.

        return: (boolean) - ``true`` if a gradebook column query is
                available, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*

        """
        raise errors.Unimplemented()

    def get_gradebook_column_query(self):
        """Gets the query for a gradebook column.

        Multiple retrievals produce a nested ``OR`` term.

        return: (osid.grading.GradebookColumnQuery) - the gradebook
                column query
        raise:  Unimplemented - ``supports_gradebook_column_query()`` is
                ``false``
        *compliance: optional -- This method must be implemented if
                ``supports_gradebook_column_query()`` is ``true``.*

        """
        raise errors.Unimplemented()

    gradebook_column_query = property(fget=get_gradebook_column_query)

    def clear_gradebook_column_terms(self):
        """Clears the gradebook column terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_group_terms
        self._clear_terms('gradebookColumn')

    gradebook_column_terms = property(fdel=clear_gradebook_column_terms)

    @utilities.arguments_not_none
    def match_key_resource_id(self, resource_id, match):
        """Sets the key resource ``Id`` for this query.

        arg:    resource_id (osid.id.Id): a resource ``Id``
        arg:    match (boolean): ``true`` for a positive match,
                ``false`` for a negative match
        raise:  NullArgument - ``resource_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.match_avatar_id
        self._add_match('keyResourceId', str(resource_id), match)

    def clear_key_resource_id_terms(self):
        """Clears the key resource ``Id`` terms.

        *compliance: mandatory -- This method must be implemented.*

        """
        # Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
        self._clear_terms('keyResourceId')

    # Delete-only property clearing the key-resource-Id terms.
    key_resource_id_terms = property(fdel=clear_key_resource_id_terms)
| |
from __future__ import annotations
from typing import Union, Tuple, List
import warnings
import numpy as np
class Question:
    """A threshold/matching rule used to split a node of the decision tree.

    Args:
        column_index (int): Column index to be chosen from the array passed at
            the matching time.
        value (Union[int, str, float, np.int64, np.float64]): Threshold value
            (numeric features) or matching value (categorical features).
        header (str): column/header name.
    """

    def __init__(self, column_index: int, value: Union[int, str, float, np.int64, np.float64], header: str):
        """Constructor"""
        self.column_index = column_index
        self.value = value
        self.header = header

    def match(self, example: Union[list, np.ndarray]) -> bool:
        """Decide whether ``example`` satisfies this question.

        Numeric values match when ``value >= threshold``; categorical values
        match on string equality.

        Args:
            example (Union[list, np.ndarray]): a single sample (feature row).

        Returns:
            bool: True when the example passes the threshold/equality test.
        """
        if isinstance(example, list):
            example = np.array(example, dtype="O")
        feature_value = example[self.column_index]
        # numpy int/float scalars behave like their Python counterparts here
        if isinstance(feature_value, (int, float, np.int64, np.float64)):
            return float(feature_value) >= float(self.value)
        return str(feature_value) == str(self.value)  # categorical data comparison

    def __repr__(self):
        is_numeric = isinstance(self.value, (int, float, np.int64, np.float64))
        condition = ">=" if is_numeric else "=="
        return f"Is {self.header} {condition} {self.value} ?"
class Node:
    """A tree node: either a decision node (question plus two branches) or a
    leaf node (final value).

    Args:
        question (Question, optional): splitting question. Defaults to None.
        true_branch (Node, optional): subtree for samples matching the question. Defaults to None.
        false_branch (Node, optional): subtree for non-matching samples. Defaults to None.
        uncertainty (float, optional): impurity value (gini, entropy, variance, ...). Defaults to None.
        leaf_value (Union[dict,int,float], optional): value stored at a leaf node. Defaults to None.
    """

    def __init__(self, question: Question = None, true_branch: Node = None, false_branch: Node = None, uncertainty: float = None, *, leaf_value: Union[dict, int, float] = None):
        """Constructor"""
        self.question = question
        self.true_branch = true_branch
        self.false_branch = false_branch
        self.uncertainty = uncertainty
        self.leaf_value = leaf_value

    @property
    def _is_leaf_node(self) -> bool:
        """bool: True when this node carries a leaf value."""
        return self.leaf_value is not None
class DecisionTreeClassifier:
    """Decision Tree Based Classification Model.

    Grows a binary tree of ``Node`` objects by repeatedly choosing the
    (feature, value) split with maximal information gain.

    Args:
        max_depth (int, optional): max depth of the tree. Defaults to 100.
        min_samples_split (int, optional): min size of the sample at the time of split. Defaults to 2.
        criteria (str, optional): what criteria to use for information. Defaults to 'gini'. available 'gini','entropy'.
    """
def __init__(self, max_depth: int = 100, min_samples_split: int = 2, criteria: str = 'gini'):
"""Constructor
"""
self._X = None
self._y = None
self._feature_names = None
self._target_name = None
self._tree = None
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.criteria = criteria
def _count_dict(self, a: np.ndarray) -> dict:
"""Count class frequecies and get a dictionary from it
Args:
a (np.ndarray): input array. shape should be (m,1) for m samples.
Returns:
dict: categories/classes freq dictionary.
"""
unique_values = np.unique(a, return_counts=True)
zipped = zip(*unique_values)
dict_obj = dict(zipped)
return dict_obj
def _gini_impurity(self, arr: np.ndarray) -> float:
"""Calculate Gini Impurity
Args:
arr (np.ndarray): input array.
Returns:
float: gini impurity value.
"""
classes, counts = np.unique(arr, return_counts=True)
gini_score = 1 - np.square(counts / arr.shape[0]).sum(axis=0)
return gini_score
def _entropy(self, arr: np.ndarray) -> float:
"""Calculate Entropy
Args:
arr (np.ndarray): input array.
Returns:
float: entropy result.
"""
classes, counts = np.unique(arr, return_counts=True)
p = counts / arr.shape[0]
entropy_score = (-p * np.log2(p)).sum(axis=0)
return entropy_score
def _uncertainty(self, a: np.ndarray) -> float:
"""calcualte uncertainty
Args:
a (np.ndarray): input array
Returns:
float: uncertainty value
"""
if self.criteria == "entropy":
value = self._entropy(a)
elif self.criteria == "gini":
value = self._gini_impurity(a)
else:
warnings.warn(f"{self.criteria} is not coded yet. returning to gini.")
value = self._gini_impurity(a)
return value
def _partition(self, rows: np.ndarray, question: Union[Question, None]) -> Tuple[list, list]:
"""partition the rows based on the question
Args:
rows (np.ndarray): input array to split.
question (Question): question object containing spltting concept.
Returns:
Tuple[list,list]: true idxs and false idxs.
"""
true_idx, false_idx = [], []
for idx, row in enumerate(rows):
if question.match(row):
true_idx.append(idx)
else:
false_idx.append(idx)
return true_idx, false_idx
def _info_gain(self, left: np.ndarray, right: np.ndarray, parent_uncertainty: float) -> float:
"""Calculate information gain after splitting
Args:
left (np.ndarray): left side array.
right (np.ndarray): right side array.
parent_uncertainty (float): parent node Uncertainity.
Returns:
flaot: information gain value.
"""
# calculating portion/ partition/ weightage
pr = left.shape[0] / (left.shape[0] + right.shape[0])
# calcualte child uncertainity
child_uncertainty = pr * \
self._uncertainty(left) - (1 - pr) * self._uncertainty(right)
# calculate information gain
info_gain_value = parent_uncertainty - child_uncertainty
return info_gain_value
def _find_best_split(self, X: np.ndarray, y: np.ndarray) -> Tuple[float, Union[Question, None], float]:
"""method to find best split possible for the sample
Args:
X (np.ndarray): Feature matrix.
y (np.ndarray): target matrix.
Returns:
Tuple[float,Union[Question,None],float]: maximum gain from the split, best question of it, and parent node uncertainty.
"""
max_gain = -1
best_split_question = None
parent_uncertainty = self._uncertainty(y)
m_samples, n_labels = X.shape
for col_index in range(n_labels): # iterate over feature columns
# get unique values from the feature
unique_values = np.unique(X[:, col_index])
for val in unique_values: # check for every value and find maximum info gain
ques = Question(
column_index=col_index,
value=val,
header=self._feature_names[col_index]
)
t_idx, f_idx = self._partition(X, ques)
# if it does not split the data
# skip it
if len(t_idx) == 0 or len(f_idx) == 0:
continue
true_y = y[t_idx, :]
false_y = y[f_idx, :]
# get information gain
gain = self._info_gain(true_y, false_y, parent_uncertainty)
if gain > max_gain:
max_gain, best_split_question = gain, ques
return max_gain, best_split_question, parent_uncertainty
def _build_tree(self, X: np.ndarray, y: np.ndarray, depth: int = 0) -> Node:
"""Recursive funtion to build tree.
Args:
X (np.ndarray): input features matrix.
y (np.ndarray): target matrix.
depth (int, optional): depth count of the recursion. Defaults to 0.
Returns:
Node: either leaf node or decision node
"""
m_samples, n_labels = X.shape
# if depth is greater than max depth defined or labels/features are left to 1
# or number of samples are less than the minimum size of samples to split then
# stop recursion and return a node
if (depth > self.max_depth or n_labels == 1 or m_samples < self.min_samples_split):
return Node(leaf_value=self._count_dict(y))
gain, ques, uncertainty = self._find_best_split(X, y)
# if gain is zero
# then no point grinding further here
if gain < 0:
return Node(leaf_value=self._count_dict(y))
t_idx, f_idx = self._partition(X, ques) # get partition idxs
true_branch = self._build_tree(
X[t_idx, :], y[t_idx, :], depth + 1) # recog true branch samples
false_branch = self._build_tree(
X[f_idx, :], y[f_idx, :], depth + 1) # recog false branch samples
return Node(
question=ques,
true_branch=true_branch,
false_branch=false_branch,
uncertainty=uncertainty
)
def train(self, X: Union[np.ndarray, list], y: Union[np.ndarray, list], feature_name: list = None, target_name: list = None) -> None:
"""Train the model
Args:
X (Union[np.ndarray,list]): feature matrix.
y (Union[np.ndarray,list]): target matrix.
feature_name (list, optional): feature names list. Defaults to None.
target_name (list, optional): target name list. Defaults to None.
"""
X = np.array(X, dtype='O') if not isinstance(
X, (np.ndarray)) else X # converting to numpy array
y = np.array(y, dtype='O') if not isinstance(
y, (np.ndarray)) else y # converting to numpy array
# reshaping to vectors
self._X = X.reshape(-1, 1) if len(X.shape) == 1 else X
self._y = y.reshape(-1, 1) if len(y.shape) == 1 else y
# creating feature names if not mentioned
self._feature_names = feature_name or [
f"C_{i}" for i in range(self._X.shape[1])]
# creating target name if not mentioned
self._target_name = target_name or ['target']
# BOOOM
# building the tree
self._tree = self._build_tree(
X=self._X,
y=self._y
)
def print_tree(self, node: Union[Node, None] = None, spacing: str = "|-") -> None:
"""print the tree
Args:
node (Union[Node,None], optional): starting node. Defaults to None. then it will go to the root node of the tree.
spacing (str, optional): printing separater. Defaults to "|-".
"""
node = node or self._tree
if node._is_leaf_node:
print(spacing, " Predict :", node.leaf_value)
return
# Print the question at this node
print(spacing + str(node.question) +
" | " + self.criteria + " :" + str(node.uncertainty))
# Call | |
> 0 and Day_1N.iloc[c,7] > 0 and Day_4N.iloc[c,0] == Day_1N.iloc[c,0]:
if Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_1N_24hr.iloc[c, 6] > 0 and Kit_PM_4N_24hr.iloc[c, 0] == \
Kit_PM_1N_24hr.iloc[c, 0]:
Kit_per_day_4N_1N.append(Kit_PM_per_day_4N[c]/Kit_PM_per_day_1N[c])
K_PM_D_4N_1N.append(Day_1N.iloc[c,0])
        # 3N/2N ratio: only when index c exists in both phase lists and both
        # households have a positive 24-hr PM reading with matching ids
        if (len(Kit_PM_per_day_3N)-1) >= c and (len(Kit_PM_per_day_2N)-1) >= c:
            #if Day_3N.iloc[c,7] > 0 and Day_2N.iloc[c,7] > 0 and Day_3N.iloc[c,0] == Day_2N.iloc[c,0]:
            if Kit_PM_3N_24hr.iloc[c, 6] > 0 and Kit_PM_2N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 0] == \
                Kit_PM_2N_24hr.iloc[c, 0]:
                Kit_per_day_3N_2N.append(Kit_PM_per_day_3N[c]/Kit_PM_per_day_2N[c])
                K_PM_D_3N_2N.append(Day_2N.iloc[c,0])
        # 4N/3N ratio: same gating as above for the 4N and 3N phases
        if (len(Kit_PM_per_day_4N)-1) >= c and (len(Kit_PM_per_day_3N)-1) >= c:
            #if Day_4N.iloc[c,7] > 0 and Day_3N.iloc[c,7] > 0 and Day_4N.iloc[c,0] == Day_3N.iloc[c,0]:
            if Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 6] > 0 and Kit_PM_3N_24hr.iloc[c, 0] == \
                Kit_PM_4N_24hr.iloc[c, 0]:
                Kit_per_day_4N_3N.append(Kit_PM_per_day_4N[c]/Kit_PM_per_day_3N[c])
                K_PM_D_4N_3N.append(Day_3N.iloc[c,0])
if (len(Kit_PM_per_day_4N)-1) >= c and (len(Kit_PM_per_day_2N)-1) >= c:
#if Day_4N.iloc[c,7] > 0 and Day_2N.iloc[c,7] > 0 and Day_4N.iloc[c,0] == Day_2N.iloc[c,0]:
if Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_4N_24hr.iloc[c, 6] > 0 and Kit_PM_4N_24hr.iloc[c, 0] == \
Kit_PM_2N_24hr.iloc[c, 0]:
Kit_per_day_4N_2N.append(Kit_PM_per_day_4N[c]/Kit_PM_per_day_2N[c])
K_PM_D_4N_2N.append(Day_4N.iloc[c,0])
# now for box plotting for Kitchen PM per day percent changes
#2N to 1N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_2N_1N, ax=ax_box, color='g')
sns.distplot(Kit_per_day_2N_1N, ax=ax_hist, color='g')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 2N/1N (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#3N to 1N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_3N_1N, ax=ax_box, color='r')
sns.distplot(Kit_per_day_3N_1N, ax=ax_hist, color='r')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 3N/1N (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#4N to 1N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_4N_1N, ax=ax_box, color='y')
sns.distplot(Kit_per_day_4N_1N, ax=ax_hist, color='y')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 4N/1N (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#3N to 2N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_3N_2N, ax=ax_box, color='m')
sns.distplot(Kit_per_day_3N_2N, ax=ax_hist, color='m')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 3N/2N (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#4N to 3N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_4N_3N, ax=ax_box, color='k')
sns.distplot(Kit_per_day_4N_3N, ax=ax_hist, color='k')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 4N/3N (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
#4N to 2N
sns.set(style="ticks")
f, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw={"height_ratios": (0.15, 0.85)})
sns.boxplot(Kit_per_day_4N_2N, ax=ax_box, color='tab:orange')
sns.distplot(Kit_per_day_4N_2N, ax=ax_hist, color='tab:orange')
ax_box.set(yticks=[])
sns.despine(ax=ax_hist)
sns.despine(ax=ax_box, left=True)
plt.title('% 4N/2N (Kitchen PM per Day)')
plt.ylim(top=2)
plt.ylim(bottom = 0)
    #Plotting on the same graph
    # One shared figure: a boxplot per phase (positions 1-4); values outside
    # the 1.5*IQR whisker range are collected as outliers and labelled with
    # their household id from the matching K_PM_D_* list.
    fig, ax = plt.subplots()
    plt.title('No-Hood Kitchen PM per day')
    #plt.hold()
    #1N
    quant_1_1N = np.percentile(Kit_PM_per_day_1N, [25,50,75])
    Top_lim_1_1N = quant_1_1N[2] + 1.5*(quant_1_1N[2] - quant_1_1N[0])
    Low_lim_1_1N = quant_1_1N[0] - 1.5*(quant_1_1N[2] - quant_1_1N[0])
    bp_1 = plt.boxplot(Kit_PM_per_day_1N, positions = [1], widths = 0.6)
    kitchen_pm_1N_outlier = []
    for v,a in enumerate(Kit_PM_per_day_1N):
        if a > Top_lim_1_1N or a < Low_lim_1_1N:
            kitchen_pm_1N_outlier.append(K_PM_D_1N[v])
            plt.text(1,a,K_PM_D_1N[v])
    plt.text(1,0.1,'1N',color='b')
    #2N
    quant_1_2N = np.percentile(Kit_PM_per_day_2N, [25,50,75])
    Top_lim_1_2N = quant_1_2N[2] + 1.5*(quant_1_2N[2] - quant_1_2N[0])
    Low_lim_1_2N = quant_1_2N[0] - 1.5*(quant_1_2N[2] - quant_1_2N[0])
    bp_1 = plt.boxplot(Kit_PM_per_day_2N,positions = [2], widths = 0.6)
    kitchen_pm_2N_outlier = []
    for v,a in enumerate(Kit_PM_per_day_2N):
        if a > Top_lim_1_2N or a < Low_lim_1_2N:
            kitchen_pm_2N_outlier.append(K_PM_D_2N[v])
            plt.text(2,a,K_PM_D_2N[v])
    plt.text(2,0.1,'2N', color= 'g')
    #3N
    quant_1_3N = np.percentile(Kit_PM_per_day_3N, [25,50,75])
    Top_lim_1_3N = quant_1_3N[2] + 1.5*(quant_1_3N[2] - quant_1_3N[0])
    Low_lim_1_3N = quant_1_3N[0] - 1.5*(quant_1_3N[2] - quant_1_3N[0])
    kitchen_pm_3N_outlier = []
    bp_1 = plt.boxplot(Kit_PM_per_day_3N,positions = [3], widths = 0.6)
    count = 0
    for v,a in enumerate(Kit_PM_per_day_3N):
        if a > Top_lim_1_3N or a < Low_lim_1_3N:
            kitchen_pm_3N_outlier.append(K_PM_D_3N[v])
            count = count + 1
            # NOTE(review): the 'else' below pairs with 'if count == (1)',
            # not with 'if count == (3)', so a 3rd outlier is labelled twice
            # (once left/bottom, once right/bottom). An elif chain was
            # probably intended -- confirm before changing the figure.
            if count == (3):
                plt.text(3,a,K_PM_D_3N[v],ha='left', va='bottom')
            if count == (1):
                plt.text(3,a,K_PM_D_3N[v],ha='left', va='top')
            else:
                plt.text(3,a,K_PM_D_3N[v],ha='right', va='bottom')
    plt.text(3,0.1,'3N', color='r')
    #4N
    quant_1_4N = np.percentile(Kit_PM_per_day_4N, [25,50,75])
    Top_lim_1_4N = quant_1_4N[2] + 1.5*(quant_1_4N[2] - quant_1_4N[0])
    Low_lim_1_4N = quant_1_4N[0] - 1.5*(quant_1_4N[2] - quant_1_4N[0])
    bp_1 = plt.boxplot(Kit_PM_per_day_4N,positions = [4], widths = 0.6)
    kitchen_pm_4N_outlier = []
    for v,a in enumerate(Kit_PM_per_day_4N):
        if a > Top_lim_1_4N or a < Low_lim_1_4N:
            kitchen_pm_4N_outlier.append(K_PM_D_4N[v])
            plt.text(4,a,K_PM_D_4N[v])
    plt.text(4,0.1,'4N', color='y')
    plt.xlim(0,5)
    plt.ylim(0,1200)
    print('Kitchen PM 1N had these values as outliers ', kitchen_pm_1N_outlier)
    print('Kitchen PM 2N had these values as outliers ', kitchen_pm_2N_outlier)
    print('Kitchen PM 3N had these values as outliers ', kitchen_pm_3N_outlier)
    print('Kitchen PM 4N had these values as outliers ', kitchen_pm_4N_outlier)
    plt.show()
    # % change of PM per day
    # Same 1.5*IQR outlier treatment as above, applied to the six phase-ratio
    # lists, then the quartiles (+ average) are assembled into two summary
    # DataFrames and printed.
    fig_2, ax2 = plt.subplots()
    plt.title('% No_hood PM per Day Change' )
    #plt.hold(True)
    #2N to 1N
    quant_1_2N_1N = np.percentile(Kit_per_day_2N_1N, [25,50,75])
    Top_lim_1_2N_1N = quant_1_2N_1N[2] + 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])
    Low_lim_1_2N_1N = quant_1_2N_1N[0] - 1.5*(quant_1_2N_1N[2]-quant_1_2N_1N[0])
    bp_1_1 = plt.boxplot(Kit_per_day_2N_1N, positions=[1], widths= 0.6)
    kitchen_pm_2N_1N_outlier = []
    for v,a in enumerate(Kit_per_day_2N_1N):
        if a > Top_lim_1_2N_1N or a < Low_lim_1_2N_1N:
            kitchen_pm_2N_1N_outlier.append(K_PM_D_2N_1N[v])
            plt.text(1, a, K_PM_D_2N_1N[v])
    plt.text(0.5, -0.25, '2N / 1N', color= 'g')
    #3N to 1N
    quant_1_3N_1N = np.percentile(Kit_per_day_3N_1N, [25,50,75])
    Top_lim_1_3N_1N = quant_1_3N_1N[2] + 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])
    Low_lim_1_3N_1N = quant_1_3N_1N[0] - 1.5*(quant_1_3N_1N[2]-quant_1_3N_1N[0])
    bp_1_1 = plt.boxplot(Kit_per_day_3N_1N, positions=[2], widths= 0.6)
    kitchen_pm_3N_1N_outlier = []
    for v,a in enumerate(Kit_per_day_3N_1N):
        if a > Top_lim_1_3N_1N or a < Low_lim_1_3N_1N:
            kitchen_pm_3N_1N_outlier.append(K_PM_D_3N_1N[v])
            plt.text(2, a, K_PM_D_3N_1N[v])
    plt.text(1.5, -0.25, '3N / 1N', color= 'r')
    #4N to 1N
    quant_1_4N_1N = np.percentile(Kit_per_day_4N_1N, [25,50,75])
    Top_lim_1_4N_1N = quant_1_4N_1N[2] + 1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])
    Low_lim_1_4N_1N = quant_1_4N_1N[0] - 1.5*(quant_1_4N_1N[2]-quant_1_4N_1N[0])
    bp_1_1 = plt.boxplot(Kit_per_day_4N_1N, positions=[3], widths= 0.6)
    kitchen_pm_4N_1N_outlier = []
    for v,a in enumerate(Kit_per_day_4N_1N):
        if a > Top_lim_1_4N_1N or a < Low_lim_1_4N_1N:
            kitchen_pm_4N_1N_outlier.append(K_PM_D_4N_1N[v])
            plt.text(3, a, K_PM_D_4N_1N[v])
    plt.text(2.5, -0.25, '4N / 1N', color= 'y')
    #3N to 2N
    quant_1_3N_2N = np.percentile(Kit_per_day_3N_2N, [25,50,75])
    Top_lim_1_3N_2N = quant_1_3N_2N[2] + 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])
    Low_lim_1_3N_2N = quant_1_3N_2N[0] - 1.5*(quant_1_3N_2N[2]-quant_1_3N_2N[0])
    bp_1_1 = plt.boxplot(Kit_per_day_3N_2N, positions=[4], widths= 0.6)
    kitchen_pm_3N_2N_outlier = []
    for v,a in enumerate(Kit_per_day_3N_2N):
        if a > Top_lim_1_3N_2N or a < Low_lim_1_3N_2N:
            kitchen_pm_3N_2N_outlier.append(K_PM_D_3N_2N[v])
            plt.text(4, a, K_PM_D_3N_2N[v])
    plt.text(3.5, -0.25, '3N / 2N', color= 'm')
    #4N to 3N
    quant_1_4N_3N = np.percentile(Kit_per_day_4N_3N, [25,50,75])
    Top_lim_1_4N_3N = quant_1_4N_3N[2] + 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])
    Low_lim_1_4N_3N = quant_1_4N_3N[0] - 1.5*(quant_1_4N_3N[2]-quant_1_4N_3N[0])
    bp_1_1 = plt.boxplot(Kit_per_day_4N_3N, positions=[5], widths= 0.6)
    kitchen_pm_4N_3N_outlier = []
    for v,a in enumerate(Kit_per_day_4N_3N):
        if a > Top_lim_1_4N_3N or a < Low_lim_1_4N_3N:
            kitchen_pm_4N_3N_outlier.append(K_PM_D_4N_3N[v])
            plt.text(5, a, K_PM_D_4N_3N[v])
    plt.text(4.5, -0.25, '4N / 3N', color= 'k')
    #4N to 2N
    quant_1_4N_2N = np.percentile(Kit_per_day_4N_2N, [25,50,75])
    Top_lim_1_4N_2N = quant_1_4N_2N[2] + 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])
    Low_lim_1_4N_2N = quant_1_4N_2N[0] - 1.5*(quant_1_4N_2N[2]-quant_1_4N_2N[0])
    bp_1_1 = plt.boxplot(Kit_per_day_4N_2N, positions=[6], widths= 0.6)
    kitchen_pm_4N_2N_outlier = []
    for v,a in enumerate(Kit_per_day_4N_2N):
        if a > Top_lim_1_4N_2N or a < Low_lim_1_4N_2N:
            kitchen_pm_4N_2N_outlier.append(K_PM_D_4N_2N[v])
            plt.text(6, a, K_PM_D_4N_2N[v])
    plt.text(5.5, -0.25, '4N / 2N', color= 'tab:orange')
    plt.xlim(0,7)
    plt.ylim(-0.5,5)
    print('Kitchen PM 2N/1N had these values as outliers ', kitchen_pm_2N_1N_outlier)
    print('Kitchen PM 3N/1N had these values as outliers ', kitchen_pm_3N_1N_outlier)
    print('Kitchen PM 4N/1N had these values as outliers ', kitchen_pm_4N_1N_outlier)
    print('Kitchen PM 3N/2N had these values as outliers ', kitchen_pm_3N_2N_outlier)
    print('Kitchen PM 4N/3N had these values as outliers ', kitchen_pm_4N_3N_outlier)
    print('Kitchen PM 4N/2N had these values as outliers ', kitchen_pm_4N_2N_outlier)
    plt.show()
    #adding averages to the tables
    # extend each quartile triple with the mean, giving rows 25/50/75/Avg
    quant_1_1N = np.append(quant_1_1N, np.average(Kit_PM_per_day_1N))
    quant_1_2N = np.append(quant_1_2N, np.average(Kit_PM_per_day_2N))
    quant_1_3N = np.append(quant_1_3N, np.average(Kit_PM_per_day_3N))
    quant_1_4N = np.append(quant_1_4N, np.average(Kit_PM_per_day_4N))
    D_50_quant_phase_PM_d = {'Percentile %': ['25','50','75', 'Avg'], '1N': quant_1_1N, '2N': quant_1_2N,'3N' : quant_1_3N,'4N': quant_1_4N}
    PM_D_50_phase_no_hood = pd.DataFrame(data=D_50_quant_phase_PM_d,columns=['Percentile %','1N', '2N', '3N','4N'])
    quant_1_2N_1N = np.append(quant_1_2N_1N , np.average(Kit_per_day_2N_1N))
    quant_1_3N_1N = np.append(quant_1_3N_1N , np.average(Kit_per_day_3N_1N))
    quant_1_4N_1N = np.append(quant_1_4N_1N , np.average(Kit_per_day_4N_1N))
    quant_1_3N_2N = np.append(quant_1_3N_2N , np.average(Kit_per_day_3N_2N))
    quant_1_4N_3N = np.append(quant_1_4N_3N , np.average(Kit_per_day_4N_3N))
    quant_1_4N_2N = np.append(quant_1_4N_2N , np.average(Kit_per_day_4N_2N))
    D_50_quant_percent_PM_d ={'Percentile %': ['25','50','75', 'Avg'],'2N / 1N': quant_1_2N_1N,'3N / 1N': quant_1_3N_1N,'4N / 1N': quant_1_4N_1N,
                              '3N / 2N': quant_1_3N_2N,'4N / 3N': quant_1_4N_3N,'4N / 2N': quant_1_4N_2N}
    PM_D_50_percent_change_no_hood = pd.DataFrame(data=D_50_quant_percent_PM_d, columns=['Percentile %','2N / 1N','3N / 1N', '4N / 1N'
                                                  ,'3N / 2N','4N / 3N','4N / 2N'])
    print(PM_D_50_phase_no_hood)
    print(PM_D_50_percent_change_no_hood)
# hood Pm per day
if Hood_or_no == 'hood':
Kit_PM_per_day_1H = []
K_PM_D_1H = []
Kit_PM_per_day_2H = []
K_PM_D_2H = []
Kit_PM_per_day_3H = []
K_PM_D_3H = []
count_t = 0
count_pm = 0
for c in hood_counter:
if c == (Household_removal[count_t] - C_Place_holder):
count_t = count_t + 1
if count_t == len(Household_removal):
count_t = 0
continue
if c == (Household_removal_Hood_PM[count_pm] - C_Place_holder):
count_pm = count_pm + 1
if count_pm == len(Household_removal_Hood_PM):
count_pm = 0
continue
# if Day_1H.iloc[c,7] != -1.00:
# Kit_PM_per_day_1H.append(Day_1H.iloc[c,7]/Day_1H.iloc[c,1])
# K_PM_D_1H.append(Day_1H.iloc[c,0])
if Kit_PM_1H_24hr.iloc[c, 6] != -1.00:
Kit_PM_per_day_1H.append(Kit_PM_1H_24hr.iloc[c,6])
K_PM_D_1H.append(Kit_PM_1H_24hr.iloc[c,0])
# if Day_2H.iloc[c, 7] != -1.00:
# Kit_PM_per_day_2H.append(Day_2H.iloc[c,7]/Day_2H.iloc[c,1])
# K_PM_D_2H.append(Day_2H.iloc[c,0])
if Kit_PM_2H_24hr.iloc[c, 6] != -1.00:
Kit_PM_per_day_2H.append(Kit_PM_2H_24hr.iloc[c,6])
K_PM_D_2H.append(Kit_PM_2H_24hr.iloc[c,0])
# if Day_3H.iloc[c, 7] != -1.00:
# Kit_PM_per_day_3H.append(Day_3H.iloc[c,7]/Day_3H.iloc[c,1])
# K_PM_D_3H.append(Day_3H.iloc[c, 0])
if Kit_PM_3H_24hr.iloc[c, 6] != -1.00:
Kit_PM_per_day_3H.append(Kit_PM_3H_24hr.iloc[c,6])
K_PM_D_3H.append(Kit_PM_3H_24hr.iloc[c,0])
# percentages Between Phases of kitchen PM per day
Kit_per_day_2H_1H = | |
# main_superpixels_graph_classification_ipynb.py
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
class DotDict(dict):
    """A dict whose entries are also reachable as attributes (d.key == d['key'])."""

    def __init__(self, **kwds):
        dict.__init__(self)
        self.update(kwds)
        # alias __dict__ to the mapping itself so attribute access and item
        # access share one storage
        self.__dict__ = self
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.superpixels_graph_classification.load_net import gnn_model # import all GNNS
from data.data import LoadData # import dataset
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Configure CUDA visibility and return the torch device to train on.

    Args:
        use_gpu: whether the caller wants to run on GPU.
        gpu_id: id exported via CUDA_VISIBLE_DEVICES.

    Returns:
        torch.device: "cuda" when requested and available, else "cpu".
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    if torch.cuda.is_available() and use_gpu:
        # after the mask above, device 0 is the selected card
        print('cuda available with GPU:', torch.cuda.get_device_name(0))
        return torch.device("cuda")
    print('cuda not available')
    return torch.device("cpu")
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Instantiate the model once and report its total parameter count.

    Args:
        MODEL_NAME: key understood by ``gnn_model``.
        net_params: network hyper-parameter dict passed to ``gnn_model``.

    Returns:
        Total number of scalar parameters in the model.
    """
    net = gnn_model(MODEL_NAME, net_params)
    total_param = 0
    print("MODEL DETAILS:\n")
    #print(net)
    for param in net.parameters():
        # print(param.data.size())
        total_param += np.prod(list(param.data.size()))
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    return total_param
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Run the full train/validate/test loop for one model on one dataset.

    Args:
        MODEL_NAME: key understood by ``gnn_model`` selecting the architecture.
        dataset: loaded dataset exposing ``.train``/``.val``/``.test`` splits,
            collate functions and ``.name``.
        params: optimization hyper-parameters (seed, epochs, lr schedule,
            batch_size, min_lr, max_time, ...).
        net_params: network hyper-parameters; must contain 'device',
            'n_classes', 'total_param' (and 'self_loop' for GCN/GAT).
        dirs: tuple (root_log_dir, root_ckpt_dir, write_file_name,
            write_config_file) of output locations.
    """
    t0 = time.time()
    per_epoch_time = []
    DATASET_NAME = dataset.name
    if MODEL_NAME in ['GCN', 'GAT']:
        if net_params['self_loop']:
            print("[!] Adding graph self-loops for GCN/GAT models (central node trick).")
            dataset._add_self_loops()
    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']
    # Write the network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n""" .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)
    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device.type == 'cuda':
        torch.cuda.manual_seed(params['seed'])
    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))
    print("Number of Classes: ", net_params['n_classes'])
    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
    # halve-ish the lr when validation loss plateaus
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_accs, epoch_val_accs = [], []
    # batching exception for Diffpool
    drop_last = True if MODEL_NAME == 'DiffPool' else False
    if MODEL_NAME in ['RingGNN', '3WLGNN']:
        # import train functions specific for WL-GNNs
        from train.train_superpixels_graph_classification import train_epoch_dense as train_epoch, evaluate_network_dense as evaluate_network
        train_loader = DataLoader(trainset, shuffle=True, collate_fn=dataset.collate_dense_gnn)
        val_loader = DataLoader(valset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
        test_loader = DataLoader(testset, shuffle=False, collate_fn=dataset.collate_dense_gnn)
    else:
        # import train functions for all other GCNs
        from train.train_superpixels_graph_classification import train_epoch_sparse as train_epoch, evaluate_network_sparse as evaluate_network
        train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, drop_last=drop_last, collate_fn=dataset.collate)
        val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
        test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, drop_last=drop_last, collate_fn=dataset.collate)
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs'])) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()
                if MODEL_NAME in ['RingGNN', '3WLGNN']: # since different batch training function for dense GNNs
                    epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch, params['batch_size'])
                else: # for all other models common train function
                    epoch_train_loss, epoch_train_acc, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
                epoch_val_loss, epoch_val_acc = evaluate_network(model, device, val_loader, epoch)
                _, epoch_test_acc = evaluate_network(model, device, test_loader, epoch)
                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_accs.append(epoch_train_acc)
                epoch_val_accs.append(epoch_val_acc)
                # tensorboard curves
                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_acc', epoch_train_acc, epoch)
                writer.add_scalar('val/_acc', epoch_val_acc, epoch)
                writer.add_scalar('test/_acc', epoch_test_acc, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_acc=epoch_train_acc, val_acc=epoch_val_acc,
                              test_acc=epoch_test_acc)
                per_epoch_time.append(time.time()-start)
                # Saving checkpoint
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                # keep only the most recent checkpoints on disk
                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch-1:
                        os.remove(file)
                scheduler.step(epoch_val_loss)
                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR EQUAL TO MIN LR SET.")
                    break
                # Stop training after params['max_time'] hours
                if time.time()-t0 > params['max_time']*3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
                    break
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    # NOTE(review): 'epoch' (and the loaders on interrupt) only exist if at
    # least one epoch started; an interrupt before that raises NameError here.
    _, test_acc = evaluate_network(model, device, test_loader, epoch)
    _, train_acc = evaluate_network(model, device, train_loader, epoch)
    print("Test Accuracy: {:.4f}".format(test_acc))
    print("Train Accuracy: {:.4f}".format(train_acc))
    print("Convergence Time (Epochs): {:.4f}".format(epoch))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-t0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST ACCURACY: {:.4f}\nTRAIN ACCURACY: {:.4f}\n\n
    Convergence Time (Epochs): {:.4f}\nTotal Time Taken: {:.4f} hrs\nAverage Time Per Epoch: {:.4f} s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  np.mean(np.array(test_acc))*100, np.mean(np.array(train_acc))*100, epoch, (time.time()-t0)/3600, np.mean(per_epoch_time)))
def main(config):
    """Wire a config dict into the training pipeline.

    Reads dataset/model/device/output settings from ``config``, derives the
    data-dependent network parameters (input dims, class count, model-specific
    sizes), prepares the output directories and calls ``train_val_pipeline``.
    """
    # parameters
    params = config['params']
    # dataset
    DATASET_NAME = config['dataset']
    dataset = LoadData(DATASET_NAME)
    # device
    device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
    out_dir = config['out_dir']
    # GNN model
    MODEL_NAME = config['model']
    # network parameters
    net_params = config['net_params']
    net_params['device'] = device
    net_params['gpu_id'] = config['gpu']['id']
    net_params['batch_size'] = params['batch_size']
    # Superpixels
    # node/edge feature sizes are read off the first training graph
    net_params['in_dim'] = dataset.train[0][0].ndata['feat'][0].size(0)
    net_params['in_dim_edge'] = dataset.train[0][0].edata['feat'][0].size(0)
    num_classes = len(np.unique(np.array(dataset.train[:][1])))
    net_params['n_classes'] = num_classes
    if MODEL_NAME == 'DiffPool':
        # calculate assignment dimension: pool_ratio * largest graph's maximum
        # number of nodes in the dataset
        max_num_nodes_train = max([dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))])
        max_num_nodes_test = max([dataset.test[i][0].number_of_nodes() for i in range(len(dataset.test))])
        max_num_node = max(max_num_nodes_train, max_num_nodes_test)
        net_params['assign_dim'] = int(max_num_node * net_params['pool_ratio']) * net_params['batch_size']
    if MODEL_NAME == 'RingGNN':
        # RingGNN needs the average node count across train+test graphs
        num_nodes_train = [dataset.train[i][0].number_of_nodes() for i in range(len(dataset.train))]
        num_nodes_test = [dataset.test[i][0].number_of_nodes() for i in range(len(dataset.test))]
        num_nodes = num_nodes_train + num_nodes_test
        net_params['avg_node_num'] = int(np.ceil(np.mean(num_nodes)))
    # timestamped output locations for logs/checkpoints/results/configs
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
# ==================== setup parameters ==============
# select GPU or CPU
#use_gpu = True; gpu_id = 0; device = None # default GPU
use_gpu = False; gpu_id = -1; device = None # CPU
# Pick exactly one model by leaving its line uncommented.
# MODEL_NAME = '3WLGNN'
# MODEL_NAME = 'RingGNN'
MODEL_NAME = 'GatedGCN'
# MODEL_NAME = 'MoNet'
# MODEL_NAME = 'GCN'
# MODEL_NAME = 'GAT'
# MODEL_NAME = 'GraphSage'
# MODEL_NAME = 'DiffPool'
# MODEL_NAME = 'MLP'
# MODEL_NAME = 'GIN'
DATASET_NAME = 'MNIST'
# DATASET_NAME = 'CIFAR10'
out_dir = 'out/superpixels_graph_classification/'
# timestamped log/checkpoint locations for this notebook run
root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
print("[I] Loading data (notebook) ...")
dataset = LoadData(DATASET_NAME)
trainset, valset, testset = dataset.train, dataset.val, dataset.test
print("[I] Finished loading.")
# Defaults for model-specific knobs; -1/False mean "not used by this model".
n_heads = -1
edge_feat = False
pseudo_dim_MoNet = -1
kernel = -1
gnn_per_block = -1
embedding_dim = -1
pool_ratio = -1
n_mlp_GIN = -1
gated = False
self_loop = False
# self_loop = True
max_time = 12  # hours (compared against elapsed time * 3600 in the pipeline)
# Per-model hyper-parameter presets; only the block matching MODEL_NAME
# above takes effect.
if MODEL_NAME == 'GatedGCN':
    seed = 41;
    epochs = 1000;
    batch_size = 5;
    init_lr = 5e-5;
    lr_reduce_factor = 0.5;
    lr_schedule_patience = 25;
    min_lr = 1e-6;
    weight_decay = 0
    L = 4;
    hidden_dim = 70;
    out_dim = hidden_dim;
    dropout = 0.0;
    readout = 'mean'
if MODEL_NAME == 'GCN':
    seed = 41;
    epochs = 1000;
    batch_size = 5;
    init_lr = 5e-5;
    lr_reduce_factor = 0.5;
    lr_schedule_patience = 25;
    min_lr = 1e-6;
    weight_decay = 0
    L = 4;
    hidden_dim = 146;
    out_dim = hidden_dim;
    dropout = 0.0;
    readout = 'mean'
if MODEL_NAME == 'GAT':
    seed = 41;
    epochs = 1000;
    batch_size = 50;
    init_lr = 5e-5;
    lr_reduce_factor = 0.5;
    lr_schedule_patience = 25;
    min_lr = 1e-6;
    weight_decay = 0
    L = 4;
    n_heads = 8;
    hidden_dim = 19;
    # effective width is heads * per-head width
    out_dim = n_heads * hidden_dim;
    dropout = 0.0;
    readout = 'mean'
    print('True hidden dim:', out_dim)
if MODEL_NAME == 'GraphSage':
    seed = 41;
    epochs = 1000;
    batch_size = 50;
    init_lr = 5e-5;
    lr_reduce_factor = 0.5;
    lr_schedule_patience = 25;
    min_lr = 1e-6;
    weight_decay = 0
    L = 4;
    hidden_dim = 108;
    out_dim = hidden_dim;
    dropout = 0.0;
    readout = 'mean'
if MODEL_NAME == 'MLP':
    seed = 41;
    epochs = 1000;
    batch_size = 50;
    init_lr = 5e-4;
    lr_reduce_factor = 0.5;
    lr_schedule_patience = 25;
    min_lr = 1e-6;
    weight_decay = 0
    gated = False; # MEAN
    L = 4;
    hidden_dim = 168;
    out_dim = hidden_dim;
    dropout = 0.0;
    readout = 'mean'
    # NOTE(review): the GATED settings below unconditionally overwrite the
    # MEAN settings above -- comment one variant out to select it.
    gated = True; # GATED
    L = 4;
    hidden_dim = 150;
    out_dim = hidden_dim;
    dropout = 0.0;
    readout = 'mean'
if MODEL_NAME | |
# coralie/coraliepipe.py
# Module setup: make the CERES utility packages importable, then pull in
# third-party dependencies.  This script is Python 2 (print statements).
import sys
base = '../'
# Relative locations of the shared CERES utility packages
sys.path.append(base+"utils/Continuum")
sys.path.append(base+"utils/Correlation")
sys.path.append(base+"utils/GLOBALutils")
sys.path.append(base+"utils/OptExtract")
baryc_dir= base+'utils/SSEphem/'
sys.path.append(baryc_dir)
ephemeris='DEc403'
import matplotlib
# Non-interactive backend so the pipeline can run headless
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from pylab import *
# ceres modules
import coralieutils
import continuum
import correlation
import GLOBALutils
import Marsh
# other useful modules
import argparse
import ephem
import jplephem
from math import radians as rad
from astropy.io import fits as pyfits
import pickle
import os
import scipy
import scipy.interpolate
from scipy import interpolate
import statsmodels.api as sm
# Shorthand for the LOWESS smoother used later in the pipeline
lowess = sm.nonparametric.lowess
# Receive input parameters from the command line
parser = argparse.ArgumentParser()
parser.add_argument('directorio')    # raw-data input directory (positional)
parser.add_argument('-avoid_plot', action="store_true", default=False)
parser.add_argument('-dirout',default='default')
parser.add_argument('-do_class', action="store_true", default=False)
parser.add_argument('-just_extract', action="store_true", default=False)
parser.add_argument('-npools', default=1)
parser.add_argument('-o2do',default='all')
parser.add_argument('-reffile',default='default')
args = parser.parse_args()
dirin = args.directorio
avoid_plot = args.avoid_plot
dirout = args.dirout
DoClass = args.do_class
JustExtract = args.just_extract
npools = int(args.npools)
object2do = args.o2do
reffile = args.reffile
# Normalise the input path and derive the default output directory
if dirin[-1] != '/':
    dirin = dirin + '/'
if dirout == 'default':
    dirout = dirin[:-1]+'_red/'
if not os.access(dirout,os.F_OK):
    os.system('mkdir '+dirout)
# Start each run with a clean 'proc' products directory
if os.access(dirout+'proc',os.F_OK):
    os.system('rm -r '+dirout+'proc')
os.system('mkdir '+dirout+'proc')
f_res = open(dirout+'proc/'+'results.txt','w')
# Default reference file (target info) is expected in the input directory
if reffile == 'default':
    reffile = dirin+'reffile.txt'
####### GLOBAL VARIABLES #####
# Flags to force re-running individual pipeline stages even when their
# products already exist on disk.
force_pre_process = False
force_flat_extract = False
force_thar_extract = False
force_thfp_extract = False
force_tharxc = False
force_thar_wavcal = False
force_thfp_wavcal = False
force_sci_extract = False
force_spectral_file_build = True
force_stellar_pars = False
dumpargon = False
# Minimum identified ThAr lines required per fibre for a global solution
minlines_glob_ob = 700
minlines_glob_co = 500
Inverse_m = True
use_cheby = True
MRMS = 100 # max rms in m/s, global wav solution
# Order tracing / Marsh optimal-extraction parameters
trace_degree = 4
Marsh_alg = 0
ext_aperture = 3
NSigma_Marsh = 5
NCosmic_Marsh = 10
S_Marsh = 0.4
N_Marsh = 3 # polynomial degree
min_extract_col = 50
max_extract_col = 2000
n_useful = 70 # up to which order do we care?
# Number of coefficients for the global wavelength solution
ncoef_x = 4
ncoef_m = 6
# NOTE(review): '/' here is Python-2 integer division; under Python 3 this
# would become a float -- confirm if ever ported.
npar_wsol = (min(ncoef_x,ncoef_m) + 1) * (2*max(ncoef_x,ncoef_m) - min(ncoef_x,ncoef_m) + 2) / 2
models_path = base+"data/COELHO_MODELS/R_40000b/" # path to the synthetic models
order_dir = base+"coralie/wavcals/" # path to reference files for the wavelength solution
#############################
# file containing the log
log = dirout+'night.log'
print "\n\n\tCoralie Euler1.2m PIPELINE\n"
print "\tRAW data is in ",dirin
print "\tProducts of reduction will be in",dirout
print '\n'
# classification of input images according to header info
biases, ob_flats, co_flats, ob_loc, co_loc, ThAr_ref, ThFP_ref,\
    simThAr_sci,sim_FP_sci,ThAr_ref_dates,ThFP_ref_dates,obnames,\
    obnames_FP,exptimes, exptimes_FP, flats = coralieutils.FileClassify(dirin,log)
# Pre-process
# Re-create the master calibrations only when any product is missing on
# disk (or when explicitly forced); otherwise skip straight to extraction.
if ( (( len(ob_flats) > 0) and (os.access(dirout+'FlatOb.fits',os.F_OK) == False)) or \
     (( len(co_flats) > 0) and (os.access(dirout+'FlatCo.fits',os.F_OK) == False)) or \
     (( len(flats) > 0) and (os.access(dirout+'Flat.fits',os.F_OK) == False)) or \
     (os.access(dirout+'trace.pkl',os.F_OK) == False) or \
     (os.access(dirout+'MasterBias.fits',os.F_OK) == False) or \
     (force_pre_process) ):
    print "\tNo previous pre-processing files or found"
    pre_process = 1
else:
    print "\tPre-processing files found, going straight to extraction"
    pre_process = 0
if (pre_process == 1):
    print "\tGenerating Master calibration frames..."
    # median combine Biases
    MasterBias, RO_bias, GA_bias = coralieutils.MedianCombine(biases,ZF=0.)
    hdu = pyfits.PrimaryHDU( MasterBias )
    if (os.access(dirout+'MasterBias.fits',os.F_OK)):
        os.remove(dirout+'MasterBias.fits')
    hdu.writeto(dirout+'MasterBias.fits')
    print "\t\t-> Masterbias: done!"
    if len(flats) > 0:
        # median combine list of co flats2
        Flat,RO_flat,GA_flat = coralieutils.MedianCombine(flats,ZF=MasterBias)
        hdu = pyfits.PrimaryHDU(Flat)
        if (os.access(dirout+'Flat.fits',os.F_OK)):
            os.remove(dirout+'Flat.fits')
        hdu.writeto(dirout+'Flat.fits')
    if len(ob_flats) > 0:
        # median combine list of ob flats
        Flat_ob, RO_ob, GA_ob = coralieutils.MedianCombine(ob_flats,ZF=MasterBias)
        # save this file for later reference
        hdu = pyfits.PrimaryHDU( Flat_ob )
        if (os.access(dirout+'FlatOb.fits',os.F_OK)):
            os.remove(dirout+'FlatOb.fits')
        hdu.writeto(dirout+'FlatOb.fits')
    else:
        # no fibre-specific flats: fall back to the combined flat
        Flat_ob = Flat
    if len(co_flats) > 0:
        # median combine list of co flats
        Flat_co,RO_co,GA_co = coralieutils.MedianCombine(co_flats,ZF=MasterBias)
        hdu = pyfits.PrimaryHDU(Flat_co)
        if (os.access(dirout+'FlatCo.fits',os.F_OK)):
            os.remove(dirout+'FlatCo.fits')
        hdu.writeto(dirout+'FlatCo.fits')
    else:
        Flat_co = Flat
    print "\t\t-> Masterflats: done!"
    # Find orders & traces
    print "\tTracing echelle orders..."
    if len(ob_flats)>0 and len(co_flats)>0:
        # trace object and comparison fibres on their own flats
        c_ob, nord_ob = GLOBALutils.get_them(Flat_ob, 8, trace_degree,maxords=-1,mode=1)
        c_co, nord_co = GLOBALutils.get_them(Flat_co, 8, trace_degree,maxords=-1,startfrom=300,mode=1)
    else:
        # single combined flat: trace everything, then de-interleave the
        # alternating object/comparison orders
        c_all, nord_all = GLOBALutils.get_them(Flat, 5, trace_degree,maxords=-1,mode=1,nsigmas=3)
        GA_co,GA_ob = GA_flat, GA_flat
        RO_co,RO_ob = RO_flat, RO_flat
        c_ob = c_all[:22]
        c_co = c_all[22]
        i = 23
        while i < len(c_all)-1:
            c_ob = np.vstack((c_ob,c_all[i]))
            c_co = np.vstack((c_co,c_all[i+1]))
            i+=2
        nord_co, nord_ob = len(c_co),len(c_ob)
    print '\t', nord_ob, 'object orders found...'
    print '\t', nord_co, 'comparison orders found...'
    # persist the traces so later runs can skip this stage
    if len(ob_flats)>0 and len(co_flats)>0:
        trace_dict = {'c_ob':c_ob,'c_co':c_co,
                      'nord_ob':nord_ob, 'nord_co':nord_co,
                      'GA_ob': GA_ob, 'RO_ob': RO_ob,
                      'GA_co': GA_co, 'RO_co': RO_co}
    else:
        trace_dict = {'c_all':c_all,'c_ob':c_ob,'c_co':c_co,
                      'nord_ob':nord_ob, 'nord_co':nord_co,'nord_all':nord_all,
                      'GA_ob': GA_ob, 'RO_ob': RO_ob,
                      'GA_co': GA_co, 'RO_co': RO_co}
    pickle.dump( trace_dict, open( dirout+"trace.pkl", 'w' ) )
else:
    # reload the calibration products generated by a previous run
    trace_dict = pickle.load( open( dirout+"trace.pkl", 'r' ) )
    c_co = trace_dict['c_co']
    c_ob = trace_dict['c_ob']
    nord_ob = trace_dict['nord_ob']
    nord_co = trace_dict['nord_co']
    if 'c_all' in trace_dict.keys():
        c_all = trace_dict['c_all']
        nord_all = trace_dict['nord_all']
    # recover GA*, RO*
    GA_ob = trace_dict['GA_ob']
    RO_ob = trace_dict['RO_ob']
    GA_co = trace_dict['GA_co']
    RO_co = trace_dict['RO_co']
    # recover flats & master bias
    if len(ob_flats)>0:
        h = pyfits.open(dirout+'FlatOb.fits')
        Flat_ob = h[0].data
    else:
        h = pyfits.open(dirout+'Flat.fits')
        Flat_ob = h[0].data
    # NOTE(review): both branches below read Flat.fits; the first was
    # presumably meant to read FlatCo.fits -- confirm against history.
    if len(co_flats)>0:
        h = pyfits.open(dirout+'Flat.fits')
        Flat_co = h[0].data
    else:
        h = pyfits.open(dirout+'Flat.fits')
        Flat_co = h[0].data
    h = pyfits.open(dirout+'MasterBias.fits')
    MasterBias = h[0].data
# With separate fibre traces, build the merged order list
if len(ob_flats)>0 and len(co_flats)>0:
    c_all = GLOBALutils.Mesh(c_ob,c_co)
print '\n\tExtraction of Flat calibration frames:'
# Extract flat spectra, object
P_ob_fits = dirout + 'P_ob.fits'
S_flat_ob_fits = dirout +'S_flat_ob.fits'
P_ob = np.zeros( Flat_ob.shape )
S_flat_ob = np.zeros((nord_ob, 3, Flat_ob.shape[1]) )
if ( os.access(P_ob_fits,os.F_OK) == False ) or ( os.access(S_flat_ob_fits,os.F_OK) == False ) or \
   (force_flat_extract):
    print "\t\tNo extracted flat object spectra found or extraction forced, extracting and saving..."
    print "\t\t\tWill extract",nord_ob,"orders for object fibre..."
    # Build the Marsh optimal-extraction profile, then extract the flat
    P_ob = GLOBALutils.obtain_P(Flat_ob,c_ob,ext_aperture,RO_ob,\
                    GA_ob,NSigma_Marsh, S_Marsh, \
                    N_Marsh, Marsh_alg, min_extract_col,\
                    max_extract_col, npools)
    S_flat_ob = GLOBALutils.optimal_extraction(Flat_ob,P_ob,c_ob,ext_aperture,\
                                               RO_ob,GA_ob,S_Marsh,NCosmic_Marsh,\
                                               min_extract_col,max_extract_col,npools)
    # write P_on and S_flat_ob as fits files
    if (os.access(P_ob_fits,os.F_OK)):
        os.remove( P_ob_fits )
    if (os.access(S_flat_ob_fits,os.F_OK)):
        os.remove( S_flat_ob_fits )
    hdu = pyfits.PrimaryHDU( P_ob )
    hdu.writeto( P_ob_fits )
    hdu = pyfits.PrimaryHDU( S_flat_ob )
    hdu.writeto( S_flat_ob_fits )
else:
    print "\t\tExtracted flat object spectra found, loading..."
    P_ob = pyfits.getdata( P_ob_fits )
    S_flat_ob = pyfits.getdata( S_flat_ob_fits )
# Extract flat spectra, comparison
P_co_fits = dirout + 'P_co.fits'
S_flat_co_fits = dirout +'S_flat_co.fits'
P_co = np.zeros( Flat_co.shape )
S_flat_co = np.zeros((nord_co, 3, Flat_co.shape[1]) )
if ( os.access(P_co_fits,os.F_OK) == False ) or ( os.access(S_flat_co_fits,os.F_OK) == False ) or (force_flat_extract):
    print "\t\tNo extracted flat comparison spectra found or extraction forced, extracting and saving..."
    print "\t\t\tWill extract",nord_co,"orders for comparison fibre"
    P_co = GLOBALutils.obtain_P(Flat_co,c_co,ext_aperture,RO_co,\
                    GA_co,NSigma_Marsh, S_Marsh, \
                    N_Marsh, Marsh_alg, min_extract_col,\
                    max_extract_col, npools)
    S_flat_co = GLOBALutils.optimal_extraction(Flat_co,P_co,c_co,ext_aperture,RO_co,GA_co,\
                                               S_Marsh,NCosmic_Marsh,min_extract_col,\
                                               max_extract_col,npools)
    # write P_on and S_flat_co as fits files
    if (os.access(P_co_fits,os.F_OK)):
        os.remove( P_co_fits )
    if (os.access(S_flat_co_fits,os.F_OK)):
        os.remove( S_flat_co_fits )
    hdu = pyfits.PrimaryHDU( P_co )
    hdu.writeto( P_co_fits )
    hdu = pyfits.PrimaryHDU( S_flat_co )
    hdu.writeto( S_flat_co_fits )
else:
    print "\t\tExtracted flat comparison spectra found, loading..."
    P_co = pyfits.getdata( P_co_fits )
    S_flat_co = pyfits.getdata( S_flat_co_fits )
# Normalize flat field spectra.
S_flat_ob_n, maxvals_ob = GLOBALutils.FlatNormalize_single( S_flat_ob, mid=int(0.5*S_flat_ob.shape[2]))
S_flat_co_n, maxvals_co = GLOBALutils.FlatNormalize_single( S_flat_co, mid=int(0.5*S_flat_co.shape[2]))
print '\n\tExtraction of ThAr calibration frames:'
# Extract all ThAr files
for fsim in ThAr_ref:
    hthar = pyfits.open( fsim )
    dthar = coralieutils.OverscanTrim( pyfits.getdata( fsim ) )
    # detector read-out noise and gain from the CORALIE header
    ron = hthar[0].header['HIERARCH ESO CORA CCD RON']
    gain = hthar[0].header['HIERARCH ESO CORA CCD GAIN']
    thar_fits_ob = dirout + fsim.split('/')[-1][:-8]+'spec.ob.fits.S'
    thar_fits_co = dirout + fsim.split('/')[-1][:-8]+'spec.co.fits.S'
    if ( os.access(thar_fits_ob,os.F_OK) == False ) or \
       ( os.access(thar_fits_co,os.F_OK) == False ) or \
       (force_thar_extract):
        print "\t\tNo previous extraction or extraction forced for ThAr file", fsim, "extracting..."
        # extract both fibres with the flat-derived profiles
        thar_S_ob = GLOBALutils.optimal_extraction(dthar,P_ob,c_ob,ext_aperture,ron,gain,\
                                                   S_Marsh,100.,min_extract_col,max_extract_col,npools)
        thar_S_co = GLOBALutils.optimal_extraction(dthar,P_co,c_co,ext_aperture,ron,gain,\
                                                   S_Marsh,100.,min_extract_col,max_extract_col,npools)
        # save as fits file
        if (os.access(thar_fits_ob,os.F_OK)):
            os.remove( thar_fits_ob )
        if (os.access(thar_fits_co,os.F_OK)):
            os.remove( thar_fits_co )
        hdu = pyfits.PrimaryHDU( thar_S_ob )
        hdu.writeto( thar_fits_ob )
        hdu = pyfits.PrimaryHDU( thar_S_co )
        hdu.writeto( thar_fits_co )
    else:
        print "\t\tThAr file", fsim, "all ready extracted, loading..."
print "\n\tWavelength solution of ThAr calibration spectra:"
# compute wavelength calibration files
# Process the ThAr exposures in chronological order; p0_array will hold one
# set of global wavelength-solution parameters per exposure.
sorted_ThAr_dates = np.argsort( ThAr_ref_dates )
p0_array = np.zeros( (len(ThAr_ref_dates), npar_wsol) )
for i in range(len(sorted_ThAr_dates)):
index = sorted_ThAr_dates[i]
wavsol_pkl = dirout + ThAr_ref[index].split('/')[-1][:-8]+'wavsolpars.pkl'
thar_fits_ob = dirout + ThAr_ref[index].split('/')[-1][:-8]+'spec.ob.fits.S'
thar_fits_co = dirout + ThAr_ref[index].split('/')[-1][:-8]+'spec.co.fits.S'
if ( os.access(wavsol_pkl,os.F_OK) == False ) or (force_thar_wavcal):
print "\t\tComputing wavelength solution of ThAr file", ThAr_ref[index]
hthar = pyfits.open( ThAr_ref[index] )
mjd, mjd0 = coralieutils.mjd_fromheader( hthar )
thar_S_ob = pyfits.getdata( thar_fits_ob )
thar_S_co = pyfits.getdata( thar_fits_co )
lines_thar_ob = thar_S_ob[:,1,:]
iv_thar_ob = thar_S_ob[:,2,:]
lines_thar_co = thar_S_co[:,1,:]
iv_thar_co = thar_S_co[:,2,:]
All_Pixel_Centers = np.array([])
All_Wavelengths = np.array([])
All_Orders = np.array([])
All_Centroids = np.array([])
All_Sigmas = np.array([])
All_Intensities = np.array([])
for order in range(n_useful):
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_ob[order,:]
IV = iv_thar_ob[order,:]
wei = np.sqrt( IV )
bkg = GLOBALutils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths,\
rms_ms, residuals, centroids, sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration(order_dir+'order_'+order_s+'o.iwdat',\
thar_order,order,wei,rmsmax=5000000,\
minlines=10,FixEnds=True,Dump_Argon=dumpargon,\
Dump_AllLines=True, Cheby=use_cheby)
if (order == 35):
if (use_cheby):
Global_ZP = GLOBALutils.Cheby_eval( coeffs_pix2wav, 1023, len(thar_order) )
else:
Global_ZP = scipy.polyval( coeffs_pix2wav, 0.0 )
All_Pixel_Centers = np.append( All_Pixel_Centers, pixel_centers )
All_Wavelengths = np.append( All_Wavelengths, wavelengths )
All_Orders = np.append( All_Orders, np.zeros( len(pixel_centers) ) + order )
All_Centroids = np.append( All_Centroids, centroids)
All_Sigmas = np.append( All_Sigmas, sigmas)
All_Intensities = np.append( All_Intensities, intensities )
p0 = np.zeros( npar_wsol )
p0[0] = (35+89) * Global_ZP
p1, G_pix, G_ord, G_wav, II, rms_ms, G_res = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers, All_Wavelengths, All_Orders,\
np.ones(All_Intensities.shape), p0, Cheby=use_cheby,\
maxrms=MRMS, | |
#!/usr/bin/env python3
# vim: set expandtab tabstop=4 shiftwidth=4:
#
# Python Starbound Mapper (pystarboundmap)
# Copyright (C) 2018 <NAME>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the development team nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import io
import re
import json
import mmap
import struct
import starbound
from PIL import Image
from PyQt5 import QtGui
def read_config(config_data):
    """
    Parse a Starbound ``.config`` blob (bytes) into a Python object.

    These files are very nearly JSON, but additionally allow ``//`` line
    comments and ``/* ... */`` block comments (block delimiters on their
    own lines, as seen in e.g. /objects/generic/statuspod/statuspod.object
    and /objects/ancient/hologramgalaxy/hologramgalaxy.object), which the
    stdlib JSON parser rejects.  So we strip comments first and then hand
    the cleaned text to ``json.load``.  https://pypi.org/project/json5/
    could probably parse these directly but, as its README mentions, it is
    SUPER slow -- far slower than this single pass.  (An earlier version
    matched a long hand-maintained list of ``//``-comment special cases;
    blanket-stripping everything after ``//`` is both simpler and faster.)
    """
    cleaned = io.StringIO()
    inside_block = False
    for raw in io.StringIO(config_data.decode('utf-8')).readlines():
        lead = raw.lstrip()[:2]
        if lead == '/*':
            # Opening a block comment; a one-line "/* ... */" needs no
            # state change since the line is dropped either way.
            if raw.rstrip()[-2:] != '*/':
                inside_block = True
        elif inside_block:
            # Inside a block comment: drop lines until the closer.
            if lead == '*/':
                inside_block = False
        else:
            # Strip any trailing // comment and keep the rest.
            cut = raw.find('//')
            if cut == -1:
                print(raw, file=cleaned)
            else:
                print(raw[0:cut], file=cleaned)
    cleaned.seek(0)
    return json.load(cleaned)
class Material(object):
    """
    Holds info about a material. Right now we're ignoring all the
    fancy rendering options and pretending that everything is the
    very first (top left) tile, and we're not drawing edges or the
    like.
    """

    def __init__(self, info, path, full_path, pakdata, crop_parameters):
        # `info`: parsed material definition; `path`/`full_path`: location
        # inside the pak; `pakdata`: used for lazy texture loads;
        # `crop_parameters`: PIL crop box selecting the single tile drawn.
        self.info = info
        self.name = info['materialName']
        self.path = path
        self.full_path = full_path
        self.pakdata = pakdata
        self.crop_parameters = crop_parameters
        # Pixmap caches, populated lazily by the properties below.
        self._image = None
        self._bgimage = None
        self._midimage = None

    @property
    def image(self):
        """
        Loads the image dynamically on-demand.

        Raises an Exception if the cropped PNG data cannot be loaded
        into a QPixmap.
        """
        if not self._image:
            df = io.BytesIO(self.pakdata.get(
                '{}/{}'.format(self.path, self.info['renderParameters']['texture'])
                ))
            full_image = Image.open(df)
            cropped = full_image.crop(self.crop_parameters)
            df = io.BytesIO()
            cropped.save(df, format='png')
            self._image = QtGui.QPixmap()
            # Bugfix: load into the freshly created pixmap directly.  The
            # original called `self.image.loadFromData(...)`, re-entering
            # this property; Matmod.image already does it this way.
            if not self._image.loadFromData(df.getvalue()):
                self._image = None
                # TODO: handle these properly
                raise Exception('Could not load material {}'.format(self.name))
        return self._image

    @property
    def bgimage(self):
        """
        Loads the background version dynamically on-demand.
        """
        if not self._bgimage:
            self._bgimage = StarboundData.highlight_pixmap(
                    self.image.copy(), 0, 0, 0, 192,
                    )
        return self._bgimage

    @property
    def midimage(self):
        """
        Loads the midrange version dynamically on-demand.
        """
        if not self._midimage:
            self._midimage = StarboundData.highlight_pixmap(
                    self.image.copy(), 0, 0, 0, 96,
                    )
        return self._midimage
class Matmod(object):
    """
    Holds info about a matmod.  Fancy rendering options are ignored: we
    render the whole shebang but take only the very first (top left) tile
    of the mod texture.
    """

    def __init__(self, info, full_path, pakdata):
        self.info = info
        self.name = info['modName']
        self.full_path = full_path
        self.pakdata = pakdata
        # Pixmaps are built lazily on first property access.
        self._image = None
        self._bgimage = None
        self._midimage = None

    @property
    def image(self):
        """
        Loads the image dynamically on-demand.
        """
        if not self._image:
            texture = self.info['renderParameters']['texture']
            raw = io.BytesIO(self.pakdata.get('/tiles/mods/{}'.format(texture)))
            tile = Image.open(raw).crop((0, 8, 16, 24))
            buf = io.BytesIO()
            tile.save(buf, format='png')
            pixmap = QtGui.QPixmap()
            if pixmap.loadFromData(buf.getvalue()):
                self._image = pixmap
            else:
                self._image = None
                # TODO: Handle this
                raise Exception('Could not load material {}'.format(self.name))
        return self._image

    @property
    def bgimage(self):
        """
        Loads the background version dynamically on-demand.
        """
        if self._bgimage is None:
            self._bgimage = StarboundData.highlight_pixmap(
                    self.image.copy(), 0, 0, 0, 90,
                    )
        return self._bgimage

    @property
    def midimage(self):
        """
        Loads the midrange version dynamically on-demand.
        """
        if self._midimage is None:
            self._midimage = StarboundData.highlight_pixmap(
                    self.image.copy(), 0, 0, 0, 45,
                    )
        return self._midimage
class Plant(object):
    """
    Class to hold plant info.  More bare-bones than our other wrapper
    objects because map plant entities seem to only ever reference the
    PNG directly.
    """

    def __init__(self, pathname, pakdata):
        self.pathname = pathname
        self.pakdata = pakdata
        # Lazily-built pixmap caches.
        self._image = None
        self._hi_image = None

    @property
    def image(self):
        """
        Loads the image dynamically on-demand.
        """
        if self._image is None:
            pixmap = QtGui.QPixmap()
            pixmap.loadFromData(self.pakdata.get(self.pathname))
            self._image = pixmap
        return self._image

    @property
    def hi_image(self):
        """
        Loads the highlighted version dynamically on-demand.
        """
        if self._hi_image is None:
            self._hi_image = StarboundData.highlight_pixmap(
                    self.image.copy(), 255, 255, 255, 100,
                    )
        return self._hi_image
class SBObjectOrientation(object):
"""
Info about a specific orientation. Note that we're ignoring
color variations - just grabbing the top right image for now.
"""
    def __init__(self, info, frames, path, pakdata):
        """
        `info` is one orientation dict from the object definition;
        `frames` is a shared cache of parsed .frames files; `path` is the
        object's directory inside the pak; `pakdata` reads pak contents.
        """
        self.info = info
        self.offset = (0, 0)
        self.anchor = (0, 0)
        self.pakdata = pakdata
        self._image = None
        self._hi_image = None
        # Grab offset, if we can
        if 'imagePosition' in info:
            self.offset = tuple(info['imagePosition'])
        # Figure out what property holds the image filename
        if 'dualImage' in info:
            file_string = info['dualImage']
        elif 'image' in info:
            file_string = info['image']
        elif 'imageLayers' in info:
            # TODO: not actually sure what the Right thing to do here is.
            # Just taking the first one in the list.
            file_string = info['imageLayers'][0]['image']
        elif 'leftImage' in info:
            # TODO: Not sure here either - there'll also be a rightImage.
            # I assume that the direction is specified somehow by the map
            # data. Just taking the left version for now
            file_string = info['leftImage']
        else:
            raise Exception('Not sure what to do with {}'.format(path))
        # Grab the actual image filename and frame info file
        # (split drops any ":variant" suffix from the image reference)
        image_file = file_string.split(':')[0]
        self.info_frames = self.get_frame(path, image_file, frames, pakdata)
        if image_file[0] == '/':
            self.full_image_file = image_file
        else:
            self.full_image_file = '{}/{}'.format(path, image_file)
def get_frame(self, path, image_file, frames, pakdata):
"""
Given a path and image filename, read in frames if possible
"""
base_filename = image_file.rsplit('.', 1)[0]
if base_filename not in frames:
full_filename = '{}/{}.frames'.format(path, base_filename)
try:
frames[base_filename] = read_config(pakdata.get(full_filename))
except KeyError:
if 'default' | |
"Colombia"},
"zho": {"official": "\u54E5\u4F26\u6BD4\u4E9A\u5171\u548C\u56FD", "common": "\u54E5\u4F26\u6BD4\u4E9A"}
},
"latlng": [4, -72],
"demonym": "Colombian",
"landlocked": False,
"borders": ["BRA", "ECU", "PAN", "PER", "VEN"],
"area": 1141748,
"flag": "\ud83c\udde8\ud83c\uddf4"
},
{
"name": {
"common": "Comoros",
"official": "Union of the Comoros",
"native": {
"ara": {
"official": "\u0627\u0644\u0627\u062a\u062d\u0627\u062f \u0627\u0644\u0642\u0645\u0631\u064a",
"common": "\u0627\u0644\u0642\u0645\u0631\u200e"
},
"fra": {
"official": "Union des Comores",
"common": "Comores"
},
"zdj": {
"official": "Udzima wa Komori",
"common": "Komori"
}
}
},
"tld": [".km"],
"cca2": "KM",
"ccn3": "174",
"cca3": "COM",
"cioc": "COM",
"independent": True,
"status": "officially-assigned",
"currency": ["KMF"],
"callingCode": ["269"],
"capital": ["Moroni"],
"altSpellings": ["KM", "Union of the Comoros", "Union des Comores", "Udzima wa Komori", "al-Itti\u1e25\u0101d al-Qumur\u012b"],
"region": "Africa",
"subregion": "Eastern Africa",
"languages": {
"ara": "Arabic",
"fra": "French",
"zdj": "Comorian"
},
"translations": {
"cym": {"official": "Undeb y Comoros", "common": "Y Comoros"},
"deu": {"official": "Union der Komoren", "common": "Union der Komoren"},
"fra": {"official": "Union des Comores", "common": "Comores"},
"hrv": {"official": "Savez Komori", "common": "Komori"},
"ita": {"official": "Unione delle Comore", "common": "Comore"},
"jpn": {"official": "\u30b3\u30e2\u30ed\u9023\u5408", "common": "\u30b3\u30e2\u30ed"},
"nld": {"official": "Unie van de Comoren", "common": "Comoren"},
"por": {"official": "Uni\u00e3o das Comores", "common": "Comores"},
"rus": {"official": "\u0421\u043e\u044e\u0437 \u041a\u043e\u043c\u043e\u0440\u0441\u043a\u0438\u0445 \u041e\u0441\u0442\u0440\u043e\u0432\u043e\u0432", "common": "\u041a\u043e\u043c\u043e\u0440\u044b"},
"slk": {"official": "Komorsk\u00e1 \u00fania", "common": "Komory"},
"spa": {"official": "Uni\u00f3n de las Comoras", "common": "Comoras"},
"fin": {"official": "Komorien liitto", "common": "Komorit"},
"est": {"official": "Komoori Liit", "common": "Komoorid"},
"zho": {"official": "\u79D1\u6469\u7F57\u8054\u76DF", "common": "\u79D1\u6469\u7F57"}
},
"latlng": [-12.16666666, 44.25],
"demonym": "Comoran",
"landlocked": False,
"borders": [],
"area": 1862,
"flag": "\ud83c\uddf0\ud83c\uddf2"
},
{
"name": {
"common": "Cape Verde",
"official": "Republic of Cabo Verde",
"native": {
"por": {
"official": "Rep\u00fablica de Cabo Verde",
"common": "Cabo Verde"
}
}
},
"tld": [".cv"],
"cca2": "CV",
"ccn3": "132",
"cca3": "CPV",
"cioc": "CPV",
"independent": True,
"status": "officially-assigned",
"currency": ["CVE"],
"callingCode": ["238"],
"capital": ["Praia"],
"altSpellings": ["CV", "Republic of Cabo Verde", "Rep\u00fablica de Cabo Verde"],
"region": "Africa",
"subregion": "Western Africa",
"languages": {
"por": "Portuguese"
},
"translations": {
"cym": {"official": "Gweriniaeth Cabo Verde", "common": "Penrhyn Verde"},
"deu": {"official": "Republik Cabo Verde", "common": "Kap Verde"},
"fra": {"official": "R\u00e9publique du Cap-Vert", "common": "\u00celes du Cap-Vert"},
"hrv": {"official": "Republika Cabo Verde", "common": "Zelenortska Republika"},
"ita": {"official": "Repubblica di Capo Verde", "common": "Capo Verde"},
"jpn": {"official": "\u30ab\u30fc\u30dc\u30d9\u30eb\u30c7\u5171\u548c\u56fd", "common": "\u30ab\u30fc\u30dc\u30d9\u30eb\u30c7"},
"nld": {"official": "Republiek van Cabo Verde", "common": "Kaapverdi\u00eb"},
"por": {"official": "Rep\u00fablica de Cabo Verde", "common": "Cabo Verde"},
"rus": {"official": "\u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u041a\u0430\u0431\u043e -\u0412\u0435\u0440\u0434\u0435", "common": "\u041a\u0430\u0431\u043e-\u0412\u0435\u0440\u0434\u0435"},
"slk": {"official": "Kapverdsk\u00e1 republika", "common": "Kapverdy"},
"spa": {"official": "Rep\u00fablica de Cabo Verde", "common": "<NAME>"},
"fin": {"official": "Kap Verden tasavalta", "common": "Kap Verde"},
"est": {"official": "<NAME>", "common": "Roheneemesaared"},
"zho": {"official": "\u4F5B\u5F97\u89D2\u5171\u548C\u56FD", "common": "\u4F5B\u5F97\u89D2"}
},
"latlng": [16, -24],
"demonym": "Cape Verdian",
"landlocked": False,
"borders": [],
"area": 4033,
"flag": "\ud83c\udde8\ud83c\uddfb"
},
{
"name": {
"common": "Costa Rica",
"official": "Republic of Costa Rica",
"native": {
"spa": {
"official": "Rep\u00fablica de Costa Rica",
"common": "Costa Rica"
}
}
},
"tld": [".cr"],
"cca2": "CR",
"ccn3": "188",
"cca3": "CRI",
"cioc": "CRC",
"independent": True,
"status": "officially-assigned",
"currency": ["CRC"],
"callingCode": ["506"],
"capital": ["San Jos\u00e9"],
"altSpellings": ["CR", "Republic of Costa Rica", "Rep\u00fablica de Costa Rica"],
"region": "Americas",
"subregion": "Central America",
"languages": {
"spa": "Spanish"
},
"translations": {
"cym": {"official": "Gweriniaeth Costa Rica", "common": "Costa Rica"},
"deu": {"official": "Republik Costa Rica", "common": "Costa Rica"},
"fra": {"official": "R\u00e9publique du Costa Rica", "common": "Costa Rica"},
"hrv": {"official": "Republika Kostarika", "common": "Kostarika"},
"ita": {"official": "Repubblica di Costa Rica", "common": "Costa Rica"},
"jpn": {"official": "\u30b3\u30b9\u30bf\u30ea\u30ab\u5171\u548c\u56fd", "common": "\u30b3\u30b9\u30bf\u30ea\u30ab"},
"nld": {"official": "Republiek Costa Rica", "common": "Costa Rica"},
"por": {"official": "Rep\u00fablica da Costa Rica", "common": "Costa Rica"},
"rus": {"official": "\u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u041a\u043e\u0441\u0442\u0430-\u0420\u0438\u043a\u0430", "common": "\u041a\u043e\u0441\u0442\u0430-\u0420\u0438\u043a\u0430"},
"slk": {"official": "Kostarick\u00e1 republika", "common": "Kostarika"},
"spa": {"official": "Rep\u00fablica de Costa Rica", "common": "Costa Rica"},
"fin": {"official": "Costa Rican tasavalta", "common": "Costa Rica"},
"est": {"official": "Costa Rica Vabariik", "common": "Costa Rica"},
"zho": {"official": "\u54E5\u65AF\u8FBE\u9ECE\u52A0\u5171\u548C\u56FD", "common": "\u54E5\u65AF\u8FBE\u9ECE\u52A0"}
},
"latlng": [10, -84],
"demonym": "Costa Rican",
"landlocked": False,
"borders": ["NIC", "PAN"],
"area": 51100,
"flag": "\ud83c\udde8\ud83c\uddf7"
},
{
"name": {
"common": "Cuba",
"official": "Republic of Cuba",
"native": {
"spa": {
"official": "Rep\u00fablica de Cuba",
"common": "Cuba"
}
}
},
"tld": [".cu"],
"cca2": "CU",
"ccn3": "192",
"cca3": "CUB",
"cioc": "CUB",
"independent": True,
"status": "officially-assigned",
"currency": ["CUC", "CUP"],
"callingCode": ["53"],
"capital": ["Havana"],
"altSpellings": ["CU", "Republic of Cuba", "Rep\u00fablica de Cuba"],
"region": "Americas",
"subregion": "Caribbean",
"languages": {
"spa": "Spanish"
},
"translations": {
"cym": {"official": "Gweriniaeth Ciwba", "common": "Ciwba"},
"deu": {"official": "Republik Kuba", "common": "Kuba"},
"fra": {"official": "R\u00e9publique de Cuba", "common": "Cuba"},
"hrv": {"official": "Republika Kuba", "common": "Kuba"},
"ita": {"official": "Repubblica di Cuba", "common": "Cuba"},
"jpn": {"official": "\u30ad\u30e5\u30fc\u30d0\u5171\u548c\u56fd", "common": "\u30ad\u30e5\u30fc\u30d0"},
"nld": {"official": "Republiek Cuba", "common": "Cuba"},
"por": {"official": "Rep\u00fablica de Cuba", "common": "Cuba"},
"rus": {"official": "\u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u041a\u0443\u0431\u0430", "common": "\u041a\u0443\u0431\u0430"},
"slk": {"official": "Kub\u00e1nska republika", "common": "Kuba"},
"spa": {"official": "Rep\u00fablica de Cuba", "common": "Cuba"},
"fin": {"official": "Kuuban tasavalta", "common": "Kuuba"},
"est": {"official": "Kuuba Vabariik", "common": "Kuuba"},
"zho": {"official": "\u53E4\u5DF4\u5171\u548C\u56FD", "common": "\u53E4\u5DF4"}
},
"latlng": [21.5, -80],
"demonym": "Cuban",
"landlocked": False,
"borders": [],
"area": 109884,
"flag": "\ud83c\udde8\ud83c\uddfa"
},
{
"name": {
"common": "Cura\u00e7ao",
"official": "Country of Cura\u00e7ao",
"native": {
"eng": {
"official": "Country of Cura\u00e7ao",
"common": "Cura\u00e7ao"
},
"nld": {
"official": "Land Cura\u00e7ao",
"common": "Cura\u00e7ao"
},
"pap": {
"official": "Pais K\u00f2rsou",
"common": "Pais K\u00f2rsou"
}
}
},
"tld": [".cw"],
"cca2": "CW",
"ccn3": "531",
"cca3": "CUW",
"cioc": "",
"independent": False,
"status": "officially-assigned",
"currency": ["ANG"],
"callingCode": ["5999"],
"capital": ["Willemstad"],
"altSpellings": ["CW", "Curacao", "K\u00f2rsou", "Country of Cura\u00e7ao", "Land Cura\u00e7ao", "Pais K\u00f2rsou"],
"region": "Americas",
"subregion": "Caribbean",
"languages": {
"eng": "English",
"nld": "Dutch",
"pap": "Papiamento"
},
"translations": {
"deu": {"official": "Land Cura\u00e7ao", "common": "Cura\u00e7ao"},
"fra": {"official": "Cura\u00e7ao", "common": "Cura\u00e7ao"},
"nld": {"official": "Land Cura\u00e7ao", "common": "Cura\u00e7ao"},
"por": {"official": "Pa\u00eds de Cura\u00e7ao", "common": "ilha da Cura\u00e7\u00e3o"},
"rus": {"official": "\u0421\u0442\u0440\u0430\u043d\u0430 \u041a\u044e\u0440\u0430\u0441\u0430\u043e", "common": "\u041a\u044e\u0440\u0430\u0441\u0430\u043e"},
"slk": {"official": "Curacao", "common": "Curacao"},
"spa": {"official": "Pa\u00eds de Curazao", "common": "Curazao"},
"fin": {"official": "Cura\u00e7ao", "common": "Cura\u00e7ao"},
"est": {"official": "Cura\u00E7ao", "common": "Cura\u00E7ao"},
"zho": {"official": "\u5E93\u62C9\u7D22", "common": "\u5E93\u62C9\u7D22"}
},
"latlng": [12.116667, -68.933333],
"demonym": "Dutch",
"landlocked": False,
"borders": [],
"area": 444,
"flag": "\ud83c\udde8\ud83c\uddfc"
},
{
"name": {
"common": "Christmas Island",
"official": "Territory of Christmas Island",
"native": {
"eng": {
"official": "Territory of Christmas Island",
"common": "Christmas Island"
}
}
},
"tld": [".cx"],
"cca2": "CX",
"ccn3": "162",
"cca3": "CXR",
"cioc": "",
"independent": False,
"status": "officially-assigned",
"currency": ["AUD"],
"callingCode": ["61"],
"capital": ["Flying Fish Cove"],
"altSpellings": ["CX", "Territory of Christmas Island"],
"region": "Oceania",
"subregion": "Australia and New Zealand",
"languages": {
"eng": "English"
},
"translations": {
"cym": {"official": "Tiriogaeth yr Ynys y Nadolig", "common": "Ynys y Nadolig"},
"deu": {"official": "Gebiet der Weihnachtsinsel", "common": "Weihnachtsinsel"},
"fra": {"official": "Territoire de l'\u00eele Christmas", "common": "\u00cele Christmas"},
"hrv": {"official": "Teritorij Bo\u017ei\u0107ni otok", "common": "Bo\u017ei\u0107ni otok"},
"ita": {"official": "Territorio di Christmas Island", "common": "Isola di Natale"},
"jpn": {"official": "\u30af\u30ea\u30b9\u30de\u30b9\u5cf6\u306e\u9818\u571f", "common": "\u30af\u30ea\u30b9\u30de\u30b9\u5cf6"},
"nld": {"official": "Grondgebied van Christmas Island", "common": "Christmaseiland"},
"por": {"official": "Territ\u00f3<NAME>", "common": "Ilha do Natal"},
"rus": {"official": "\u0422\u0435\u0440\u0440\u0438\u0442\u043e\u0440\u0438\u044f \u043e\u0441\u0442\u0440\u043e\u0432\u0430 \u0420\u043e\u0436\u0434\u0435\u0441\u0442\u0432\u0430", "common": "\u041e\u0441\u0442\u0440\u043e\u0432 \u0420\u043e\u0436\u0434\u0435\u0441\u0442\u0432\u0430"},
"slk": {"official": "Terit\u00f3rium Viano\u010dn\u00e9ho ostrova", "common": "Viano\u010dn\u00fa ostrov"},
"spa": {"official": "Territorio de la Isla de Navidad", "common": "Isla de Navidad"},
"fin": {"official": "Joulusaaren alue", "common": "Joulusaari"},
"est": {"official": "J\u00F5ulusaare ala", "common": "J\u00F5ulusaar"},
"zho": {"official": "\u5723\u8BDE\u5C9B", "common": "\u5723\u8BDE\u5C9B"}
},
"latlng": [-10.5, 105.66666666],
"demonym": "Christmas Islander",
"landlocked": False,
"borders": [],
"area": 135,
"flag": "\ud83c\udde8\ud83c\uddfd"
},
{
"name": {
"common": "Cayman Islands",
"official": "Cayman Islands",
"native": {
"eng": {
"official": "Cayman Islands",
"common": "Cayman Islands"
}
}
},
"tld": [".ky"],
"cca2": "KY",
"ccn3": "136",
"cca3": "CYM",
"cioc": "CAY",
"independent": False,
"status": "officially-assigned",
"currency": ["KYD"],
"callingCode": ["1345"],
"capital": ["George Town"],
"altSpellings": ["KY"],
"region": "Americas",
"subregion": "Caribbean",
"languages": {
"eng": "English"
},
"translations": {
"cym": {"official": "Ynysoedd Cayman", "common": "Ynysoedd Cayman"},
"deu": {"official": "Cayman-Inseln", "common": "Kaimaninseln"},
"fra": {"official": "\u00celes Ca\u00efmans", "common": "\u00celes Ca\u00efmans"},
"hrv": {"official": "Kajmanski otoci", "common": "Kajmanski otoci"},
"ita": {"official": "I<NAME>", "common": "Isole Cayman"},
"jpn": {"official": "\u30b1\u30a4\u30de\u30f3\u8af8\u5cf6", "common": "\u30b1\u30a4\u30de\u30f3\u8af8\u5cf6"},
"nld": {"official": "Caymaneilanden", "common": "Caymaneilanden"},
"por": {"official": "<NAME>", "common": "Ilhas Caim\u00e3o"},
"rus": {"official": "\u041a\u0430\u0439\u043c\u0430\u043d\u043e\u0432\u044b \u043e\u0441\u0442\u0440\u043e\u0432\u0430", "common": "\u041a\u0430\u0439\u043c\u0430\u043d\u043e\u0432\u044b \u043e\u0441\u0442\u0440\u043e\u0432\u0430"},
"slk": {"official": "Kajmanie ostrovy", "common": "Kajmanie ostrovy"},
"spa": {"official": "Is<NAME>aim\u00e1n", "common": "Islas Caim\u00e1n"},
"fin": {"official": "Caymansaaret", "common": "Caymansaaret"},
"est": {"official": "Kaimanisaared", "common": "Kaimanisaared"},
"zho": {"official": "\u5F00\u66FC\u7FA4\u5C9B", "common": "\u5F00\u66FC\u7FA4\u5C9B"}
},
"latlng": [19.5, -80.5],
"demonym": "Caymanian",
"landlocked": False,
"borders": [],
"area": 264,
"flag": "\ud83c\uddf0\ud83c\uddfe"
},
{
"name": {
"common": "Cyprus",
"official": "Republic of Cyprus",
"native": {
"ell": {
"official": "\u0394\u03b7\u03bc\u03bf\u03ba\u03c1\u03b1\u03c4\u03af\u03b1 \u03c4\u03b7\u03c2 \u039a\u03cd\u03c0\u03c1\u03bf\u03c2",
"common": "\u039a\u03cd\u03c0\u03c1\u03bf\u03c2"
},
"tur": {
"official": "K\u0131br\u0131s Cumhuriyeti",
"common": "K\u0131br\u0131s"
}
}
},
"tld": [".cy"],
"cca2": "CY",
"ccn3": "196",
"cca3": "CYP",
"cioc": "CYP",
"independent": True,
"status": "officially-assigned",
"currency": ["EUR"],
"callingCode": ["357"],
"capital": ["Nicosia"],
"altSpellings": ["CY", "K\u00fdpros", "K\u0131br\u0131s", "Republic of Cyprus", "\u039a\u03c5\u03c0\u03c1\u03b9\u03b1\u03ba\u03ae \u0394\u03b7\u03bc\u03bf\u03ba\u03c1\u03b1\u03c4\u03af\u03b1", "K\u0131br\u0131s Cumhuriyeti"],
"region": "Europe",
"subregion": "Eastern Europe",
"languages": {
"ell": "Greek",
"tur": "Turkish"
},
"translations": {
"cym": {"official": "Gweriniaeth Cyprus", "common": "Cyprus"},
"deu": {"official": "Republik Zypern", "common": "Zypern"},
"fra": {"official": "R\u00e9publique de Chypre", "common": "Chypre"},
"hrv": {"official": "Republika Cipar", "common": "Cipar"},
"ita": {"official": "Repubblica di Cipro", "common": "Cipro"},
"jpn": {"official": "\u30ad\u30d7\u30ed\u30b9\u5171\u548c\u56fd", "common": "\u30ad\u30d7\u30ed\u30b9"},
"nld": {"official": "Republiek Cyprus", "common": "Cyprus"},
"por": {"official": "Rep\u00fablica de Chipre", "common": "Chipre"},
"rus": {"official": "\u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u041a\u0438\u043f\u0440", "common": "\u041a\u0438\u043f\u0440"},
"slk": {"official": "Cypersk\u00e1 republika", "common": "Cyprus"},
"spa": {"official": "Rep\u00fablica de Chipre", "common": "Chipre"},
"fin": {"official": "Kyproksen tasavalta", "common": "Kypros"},
"est": {"official": "K\u00FCprose Vabariik", "common": "K\u00FCpros"},
"zho": {"official": "\u585E\u6D66\u8DEF\u65AF\u5171\u548C\u56FD", "common": "\u585E\u6D66\u8DEF\u65AF"}
},
"latlng": [35, 33],
"demonym": "Cypriot",
"landlocked": False,
"borders": [],
"area": 9251,
"flag": "\ud83c\udde8\ud83c\uddfe"
},
{
"name": {
"common": "Czechia",
"official": "Czech Republic",
"native": {
"ces": {
"official": "\u010desk\u00e1 republika",
"common": "\u010cesko"
},
"slk": {
"official": "\u010cesk\u00e1 republika",
"common": "\u010cesko"
}
}
},
"tld": [".cz"],
"cca2": "CZ",
"ccn3": "203",
"cca3": "CZE",
"cioc": "CZE",
"independent": True,
"status": "officially-assigned",
"currency": ["CZK"],
"callingCode": ["420"],
"capital": ["Prague"],
"altSpellings": ["CZ", "\u010cesk\u00e1 republika", "\u010cesko"],
"region": "Europe",
"subregion": "Eastern Europe",
"languages": {
"ces": "Czech",
"slk": "Slovak"
},
"translations": {
"cym": {"official": "Y Weriniaeth Tsiec", "common": "Y Weriniaeth Tsiec"},
"deu": {"official": "Tschechische Republik", "common": "Tschechien"},
"fra": {"official": "R\u00e9publique tch\u00e8que", "common": "Tch\u00e9quie"},
"hrv": {"official": "\u010ce\u0161ka", "common": "\u010ce\u0161ka"},
"ita": {"official": "Repubblica Ceca", "common": "Cechia"},
"jpn": {"official": "\u30c1\u30a7\u30b3\u5171\u548c\u56fd", "common": "\u30c1\u30a7\u30b3"},
"nld": {"official": "Tsjechische Republiek", "common": "Tsjechi\u00eb"},
"por": {"official": "Rep\u00fablica Checa", "common": "Ch\u00e9quia"},
"rus": {"official": "\u0427\u0435\u0448\u0441\u043a\u0430\u044f \u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430", "common": "\u0427\u0435\u0445\u0438\u044f"},
"slk": {"official": "\u010cesk\u00e1 republika", "common": "\u010cesko"},
"spa": {"official": "Rep\u00fablica Checa", "common": "Chequia"},
"fin": {"official": "T\u0161ekin tasavalta", "common": "T\u0161ekki"},
"est": {"official": "T\u0161ehhi Vabariik", "common": "T\u0161ehhi"},
"zho": {"official": "\u6377\u514B\u5171\u548C\u56FD", "common": "\u6377\u514B"}
},
"latlng": [49.75, 15.5],
"demonym": "Czech",
"landlocked": True,
"borders": ["AUT", "DEU", "POL", "SVK"],
"area": 78865,
"flag": "\ud83c\udde8\ud83c\uddff"
},
{
"name": {
"common": "Germany",
"official": "Federal Republic of Germany",
"native": {
"deu": {
"official": "Bundesrepublik Deutschland",
"common": "Deutschland"
}
}
},
"tld": [".de"],
"cca2": "DE",
"ccn3": "276",
"cca3": "DEU",
"cioc": "GER",
"independent": True,
"status": "officially-assigned",
"currency": ["EUR"],
"callingCode": ["49"],
"capital": ["Berlin"],
"altSpellings": ["DE", "Federal Republic of Germany", "Bundesrepublik Deutschland"],
"region": "Europe",
"subregion": "Western Europe",
"languages": {
"deu": "German"
},
"translations": {
"deu": {"official": "Bundesrepublik Deutschland", "common": "Deutschland"},
"fra": {"official": "R\u00e9publique f\u00e9d\u00e9rale d'Allemagne", "common": "Allemagne"},
"hrv": {"official": "Njema\u010dka Federativna Republika", "common": "Njema\u010dka"},
"ita": {"official": "Repubblica federale di Germania", "common": "Germania"},
"jpn": {"official": "\u30c9\u30a4\u30c4\u9023\u90a6\u5171\u548c\u56fd", "common": "\u30c9\u30a4\u30c4"},
"nld": {"official": "Bondsrepubliek Duitsland", "common": "Duitsland"},
"por": {"official": "Rep\u00fablica Federal da Alemanha", "common": "Alemanha"},
"rus": {"official": "\u0424\u0435\u0434\u0435\u0440\u0430\u0442\u0438\u0432\u043d\u0430\u044f \u0420\u0435\u0441\u043f\u0443\u0431\u043b\u0438\u043a\u0430 \u0413\u0435\u0440\u043c\u0430\u043d\u0438\u044f", "common": "\u0413\u0435\u0440\u043c\u0430\u043d\u0438\u044f"},
"slk": {"official": "Nemeck\u00e1 spolkov\u00e1 republika", "common": "Nemecko"},
"spa": {"official": "Rep\u00fablica Federal de Alemania", "common": "Alemania"},
"fin": {"official": "Saksan liittotasavalta", "common": "Saksa"},
"est": {"official": "Saksamaa Liitvabariik", "common": "Saksamaa"},
"zho": {"official": "\u5FB7\u610F\u5FD7\u8054\u90A6\u5171\u548C\u56FD", "common": "\u5FB7\u56FD"}
},
"latlng": [51, 9],
"demonym": "German",
"landlocked": False,
"borders": ["AUT", "BEL", "CZE", "DNK", "FRA", "LUX", "NLD", "POL", "CHE"],
"area": 357114,
"flag": "\ud83c\udde9\ud83c\uddea"
},
{
"name": {
"common": "Djibouti",
"official": "Republic of Djibouti",
"native": {
"ara": {
"official": "\u062c\u0645\u0647\u0648\u0631\u064a\u0629 \u062c\u064a\u0628\u0648\u062a\u064a",
"common": "\u062c\u064a\u0628\u0648\u062a\u064a\u200e"
},
"fra": {
"official": "R\u00e9publique de Djibouti",
"common": "Djibouti"
}
}
},
"tld": [".dj"],
"cca2": "DJ",
"ccn3": "262",
"cca3": "DJI",
"cioc": "DJI",
"independent": True,
"status": "officially-assigned",
"currency": ["DJF"],
"callingCode": ["253"],
"capital": ["Djibouti"],
"altSpellings": ["DJ", | |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013--2014, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the {organization} nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
'''
This module contains some convenience functions for building simple geometric
objects with Gmsh.
'''
import numpy
from .basic import Point, Line, LineLoop, PlaneSurface, Comment, Circle, \
CompoundLine, RuledSurface, Volume, PhysicalVolume, SurfaceLoop, Array, \
Extrude, CompoundVolume
def rotation_matrix(u, theta):
    '''Return the matrix that rotates by the angle :math:`\\theta` about the
    axis vector :math:`u` (Rodrigues' rotation formula), cf.
    https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle.

    :param u: rotation axis vector (assumed to be unit length -- TODO confirm
        with callers)
    :param theta: rotation angle in radians
    '''
    ux, uy, uz = u[0], u[1], u[2]
    # Skew-symmetric cross-product matrix of u.
    skew = numpy.array([
        [0.0, -uz, uy],
        [uz, 0.0, -ux],
        [-uy, ux, 0.0],
    ])
    cos_t = numpy.cos(theta)
    sin_t = numpy.sin(theta)
    return cos_t * numpy.eye(3) + sin_t * skew \
        + (1.0 - cos_t) * numpy.outer(u, u)
def add_rectangle(xmin, xmax, ymin, ymax, z, lcar):
    '''Create a plane surface for the axis-aligned rectangle
    [xmin, xmax] x [ymin, ymax] at height z.

    :param lcar: characteristic mesh length at the corner points
    :returns: the created PlaneSurface
    '''
    corners = [[xmin, ymin, z],
               [xmax, ymin, z],
               [xmax, ymax, z],
               [xmin, ymax, z]]
    # Create points.
    pts = [Point(corner, lcar) for corner in corners]
    # One edge per corner, wrapping around to close the rectangle.
    edges = [Line(pts[k], pts[(k + 1) % len(pts)]) for k in range(len(pts))]
    loop = LineLoop(edges)
    return PlaneSurface(loop)
def add_polygon(X, lcar):
    '''Create a plane surface bounded by the closed polygon through the
    points X.

    :param X: list of polygon vertex coordinates
    :param lcar: characteristic mesh length at the created points
    :returns: the created PlaneSurface
    '''
    pts = [Point(x, lcar) for x in X]
    # One edge per vertex, wrapping around to close the polygon.
    edges = [Line(pts[k], pts[(k + 1) % len(pts)]) for k in range(len(pts))]
    return PlaneSurface(LineLoop(edges))
def add_circle(radius, lcar,
               R=numpy.eye(3),
               x0=numpy.array([0.0, 0.0, 0.0]),
               compound=False,
               num_sections=3
               ):
    '''Add a circle in the :math:`y`-:math:`z`-plane, transformed by the
    rotation R and shifted by x0.

    :param radius: radius of the circle
    :param lcar: characteristic mesh length at the created points
    :param R: rotation applied to the circle points
    :param x0: translation applied after the rotation
    :param compound: if True, wrap all arcs into a single CompoundLine
    :param num_sections: number of arcs making up the circle
    :returns: list of the created circle arcs (a one-element list containing
        a CompoundLine when compound is True)
    '''
    # Midpoint first, then the perimeter points.
    if num_sections == 4:
        # For accuracy, the four cardinal points are written out explicitly.
        X = [[0.0, 0.0, 0.0],
             [0.0, radius, 0.0],
             [0.0, 0.0, radius],
             [0.0, -radius, 0.0],
             [0.0, 0.0, -radius]]
    else:
        X = [[0.0, 0.0, 0.0]]
        X += [[0.0,
               radius * numpy.cos(2 * numpy.pi * k / num_sections),
               radius * numpy.sin(2 * numpy.pi * k / num_sections)]
              for k in range(num_sections)]
    # Apply the transformation.
    # TODO assert that the transformation preserves circles
    X = [numpy.dot(R, x) + x0 for x in X]
    # Add Gmsh Points.
    Comment('Points')
    p = [Point(x, lcar) for x in X]
    # Define the circle arcs; p[0] is the center point of every arc.
    Comment('Circle arcs')
    c = [Circle([p[k], p[0], p[k + 1]]) for k in range(1, len(p) - 1)]
    # Don't forget the closing arc.
    c.append(Circle([p[-1], p[0], p[1]]))
    if compound:
        c = [CompoundLine(c)]
    return c
def add_ball(x0, radius, lcar,
             with_volume=True,
             holes=None,
             label=None
             ):
    '''Creates a ball with a given radius around a given midpoint :math:`x_0`.

    :param x0: midpoint of the ball
    :param radius: radius of the ball
    :param lcar: characteristic mesh length at the created points
    :param with_volume: if True, also create a Gmsh Volume bounded by the ball
    :param holes: optional list of surface loops carved out of the volume
    :param label: optional physical-volume label (used only with with_volume)
    :returns: tuple (volume, surface_loop, surfaces); volume is None when
        with_volume is False
    '''
    if holes is None:
        holes = []
    # Add points: the midpoint p[0], then the six axis-aligned poles
    # (+x, +y, +z, -x, -y, -z).
    p = [Point(x0, lcar=lcar),
         Point([x0[0]+radius, x0[1], x0[2]], lcar=lcar),
         Point([x0[0], x0[1]+radius, x0[2]], lcar=lcar),
         Point([x0[0], x0[1], x0[2]+radius], lcar=lcar),
         Point([x0[0]-radius, x0[1], x0[2]], lcar=lcar),
         Point([x0[0], x0[1]-radius, x0[2]], lcar=lcar),
         Point([x0[0], x0[1], x0[2]-radius], lcar=lcar)
         ]
    # Add ball skeleton: twelve quarter-circle arcs between neighboring
    # poles, all centered at the midpoint p[0].
    c = [Circle([p[1], p[0], p[6]]),
         Circle([p[6], p[0], p[4]]),
         Circle([p[4], p[0], p[3]]),
         Circle([p[3], p[0], p[1]]),
         Circle([p[1], p[0], p[2]]),
         Circle([p[2], p[0], p[4]]),
         Circle([p[4], p[0], p[5]]),
         Circle([p[5], p[0], p[1]]),
         Circle([p[6], p[0], p[2]]),
         Circle([p[2], p[0], p[3]]),
         Circle([p[3], p[0], p[5]]),
         Circle([p[5], p[0], p[6]])
         ]
    # Add surfaces (1/8th of the ball surface).
    # The '-' prefix flips an arc's direction (Gmsh sign convention) so that
    # every loop is consistently oriented.
    ll = [LineLoop([c[4], c[9], c[3]]),
          LineLoop([c[8], '-'+c[4], c[0]]),
          LineLoop([c[11], '-'+c[7], '-'+c[0]]),
          LineLoop([c[7], '-'+c[3], c[10]]),
          LineLoop(['-'+c[9], c[5], c[2]]),
          LineLoop(['-'+c[10], '-'+c[2], c[6]]),
          LineLoop(['-'+c[1], '-'+c[6], '-'+c[11]]),
          LineLoop(['-'+c[5], '-'+c[8], c[1]])
          ]
    # Create a surface for each line loop.
    s = [RuledSurface(l) for l in ll]
    # Create the surface loop.
    surface_loop = SurfaceLoop(s)
    if holes:
        # Create an array of surface loops; the first entry is the outer
        # surface loop, the following ones are holes.
        surface_loop = Array([surface_loop] + holes)
    # Create volume.
    if with_volume:
        volume = Volume(surface_loop)
        if label:
            PhysicalVolume(volume, label)
    else:
        volume = None
    return volume, surface_loop, s
def add_box(x0, x1, y0, y1, z0, z1,
            lcar,
            with_volume=True,
            holes=None,
            label=None
            ):
    '''Create the axis-aligned box [x0, x1] x [y0, y1] x [z0, z1].

    :param lcar: characteristic mesh length at the corner points
    :param with_volume: if True, also create a Gmsh Volume for the box
    :param holes: optional list of surface loops carved out of the volume
    :param label: optional physical-volume label (used only with with_volume)
    :returns: tuple (volume, surface_loop); volume is None when with_volume
        is False
    '''
    if holes is None:
        holes = []
    # Define corner points.
    p = [Point([x1, y1, z1], lcar=lcar),
         Point([x1, y1, z0], lcar=lcar),
         Point([x1, y0, z1], lcar=lcar),
         Point([x1, y0, z0], lcar=lcar),
         Point([x0, y1, z1], lcar=lcar),
         Point([x0, y1, z0], lcar=lcar),
         Point([x0, y0, z1], lcar=lcar),
         Point([x0, y0, z0], lcar=lcar)
         ]
    # Define the twelve edges connecting the corners.
    e = [Line(p[0], p[1]),
         Line(p[0], p[2]),
         Line(p[0], p[4]),
         Line(p[1], p[3]),
         Line(p[1], p[5]),
         Line(p[2], p[3]),
         Line(p[2], p[6]),
         Line(p[3], p[7]),
         Line(p[4], p[5]),
         Line(p[4], p[6]),
         Line(p[5], p[7]),
         Line(p[6], p[7])
         ]
    # Define the six line loops (one per face).
    # The '-' prefix flips an edge's direction (Gmsh sign convention) so
    # each face loop closes consistently.
    ll = [LineLoop([e[0], e[3], '-'+e[5], '-'+e[1]]),
          LineLoop([e[0], e[4], '-'+e[8], '-'+e[2]]),
          LineLoop([e[1], e[6], '-'+e[9], '-'+e[2]]),
          LineLoop([e[3], e[7], '-'+e[10], '-'+e[4]]),
          LineLoop([e[5], e[7], '-'+e[11], '-'+e[6]]),
          LineLoop([e[8], e[10], '-'+e[11], '-'+e[9]])
          ]
    # Create a surface for each line loop.
    s = [RuledSurface(l) for l in ll]
    # Create the surface loop.
    surface_loop = SurfaceLoop(s)
    if holes:
        # Create an array of surface loops; the first entry is the outer
        # surface loop, the following ones are holes.
        surface_loop = Array([surface_loop] + holes)
    if with_volume:
        # Create volume
        vol = Volume(surface_loop)
        if label:
            PhysicalVolume(vol, label)
    else:
        vol = None
    return vol, surface_loop
def add_torus(irad, orad,
              lcar,
              R=numpy.eye(3),
              x0=numpy.array([0.0, 0.0, 0.0]),
              label=None
              ):
    r'''Create Gmsh code for the torus under the coordinate transformation

    .. math::
        \hat{x} = R x + x_0.

    :param irad: inner radius of the torus
    :param orad: outer radius of the torus
    :param lcar: characteristic mesh length at the created points
    :param R: rotation applied to the torus
    :param x0: translation applied after the rotation
    :param label: optional physical-volume label
    '''
    Comment(76 * '-')
    Comment('Torus')
    # Add circle: the tube cross-section, centered at distance orad from the
    # torus axis.
    x0t = numpy.dot(R, numpy.array([0.0, orad, 0.0]))
    c = add_circle(irad, lcar, R=R, x0=x0+x0t)
    rot_axis = [0.0, 0.0, 1.0]
    rot_axis = numpy.dot(R, rot_axis)
    point_on_rot_axis = [0.0, 0.0, 0.0]
    point_on_rot_axis = numpy.dot(R, point_on_rot_axis) + x0
    # Form the torus by extruding the circle three times by 2/3*pi.
    # This works around the inability of Gmsh to extrude by pi or more. The
    # Extrude() macro returns an array; the first [0] entry in the array is
    # the entity that has been extruded at the far end. This can be used for
    # the following Extrude() step. The second [1] entry of the array is the
    # surface that was created by the extrusion.
    previous = c
    angle = '2*Pi/3'
    all_names = []
    for i in range(3):
        Comment('Round no. %s' % (i+1))
        for k in range(len(previous)):
            # ts1[] = Extrude {{0,0,1}, {0,0,0}, 2*Pi/3}{Line{tc1};};
            # ...
            tmp_name = Extrude(
                'Line{%s}' % previous[k],
                rotation_axis=rot_axis,
                point_on_axis=point_on_rot_axis,
                angle=angle
                )
            all_names.append(tmp_name)
            # NOTE: `previous` aliases `c`, so this also rewrites the arc
            # list created above; '[0]' picks the far-end copy of the
            # extruded line as input for the next round.
            previous[k] = tmp_name + '[0]'
    # Now build surface loop and volume from the '[1]' (surface) entries.
    all_surfaces = [name + '[1]' for name in all_names]
    #compound_surface = CompoundSurface(all_surfaces)
    surface_loop = SurfaceLoop(all_surfaces)
    vol = Volume(surface_loop)
    if label:
        PhysicalVolume(vol, label)
    Comment(76*'-')
    # NOTE(review): unlike add_ball/add_box, nothing is returned here, so
    # callers cannot reference the created volume -- confirm intentional.
    return
def add_torus2(irad, orad,
lcar,
R=numpy.eye(3),
x0=numpy.array([0.0, 0.0, 0.0]),
label=None
):
'''Create Gmsh code for the torus under the coordinate transformation
.. math::
\hat{x} = R x + x_0.
:param irad: inner radius of the torus
:param orad: outer radius of the torus
'''
Comment(76*'-')
Comment('Torus')
# Add circle
x0t = numpy.dot(R, numpy.array([0.0, orad, 0.0]))
c = add_circle(irad, lcar, R=R, x0=x0+x0t)
ll = LineLoop(c)
s = PlaneSurface(ll)
rot_axis = [0.0, 0.0, 1.0]
rot_axis = numpy.dot(R, rot_axis)
point_on_rot_axis = [0.0, 0.0, 0.0]
point_on_rot_axis = numpy.dot(R, point_on_rot_axis) + x0
# Form | |
# Source repo: psneo/Pesidious -- file: extract_features.py
import argparse
import glob
from logging import basicConfig, exception, debug, error, info, warning, getLogger
import os
import pickle
import re
import sys
import time
import traceback
# from handlers import TimedRotatingFileHandler
from pathlib import Path
from random import shuffle
from datetime import date
from pyfiglet import Figlet
from sklearn.model_selection import train_test_split
import lief
import torch
from torch.utils.data import DataLoader, Dataset
# Installing rich modules for pretty printing
from rich.logging import RichHandler
from rich.progress import track
from rich.traceback import install
from rich import print
from rich.panel import Panel
from rich.text import Text
from rich.table import Table
# Enable rich's pretty tracebacks for any uncaught exception.
install()

# Running count of extracted import features.
# NOTE(review): features_mapping_index assigns a local of the same name
# without a `global` statement, so this module-level value may never be
# updated -- confirm intended.
SECTION_INDEX = 0
def parse_args():
    '''Build and evaluate the command-line interface.

    :returns: argparse.Namespace with malware_path, benign_path, output_dir,
        logfile and log attributes
    '''
    # Grammar fix: "is extract" -> "is to extract".
    parser = argparse.ArgumentParser(description='PE File Feature Extraction. \nThe purpose of this application is to extract the feature vectors from PE files for the purpose of malware analysis and malware mutation.')
    parser.add_argument('-m', "--malware-path", help="The filepath of the malicious PE files whose features are to be extracted.", type=Path, default=Path("Data/malware"))
    parser.add_argument('-b', "--benign-path", help="The filepath of the benign PE files whose features are to be extracted.", type=Path, default=Path("Data/benign"))
    parser.add_argument('-o', "--output-dir", help="The filepath to where the feature vectors will be extracted. If this location does not exist, it will be created.", type=Path, default=Path("feature_vector_directory"))
    parser.add_argument('-f', "--logfile", help="The file path to store the logs.", type=Path, default=Path("extract_features_logs_" + str(date.today()) + ".log"))
    logging_level = ["debug", "info", "warning", "error", "critical"]
    parser.add_argument(
        "-l",
        "--log",
        dest="log",
        metavar="LOGGING_LEVEL",
        choices=logging_level,
        default="info",
        help=f"Select the logging level. Keep in mind increasing verbosity might affect performance. Available choices include : {logging_level}",
    )
    return parser.parse_args()
def logging_setup(logfile: str, log_level: str):
    '''Configure root logging: append records to Logs/<logfile> and mirror
    them to a rich console handler.

    :param logfile: base name of the log file (placed inside the Logs dir)
    :param log_level: logging level name, e.g. "info" or "debug"
    '''
    log_dir = "Logs"
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    target = os.path.join(log_dir, logfile)
    basicConfig(
        level=log_level.upper(),
        filemode='a',  # append so repeated runs share one log file
        format="%(message)s",
        filename=target,
    )
    # Pretty console output alongside the file log.
    getLogger().addHandler(RichHandler())
def features_mapping_index(malware_path: str, benign_path: str, output_path: str):
    '''Extract import and section features from every PE file under
    malware_path and benign_path, build a single feature->index mapping over
    the whole corpus, and pickle both the mapping and the per-file feature
    sets into output_path.

    WARNING: files that fail parsing (or are not win32 PEs) are DELETED from
    disk and skipped for the remainder of the run.

    :param malware_path: directory containing the malicious PE files
    :param benign_path: directory containing the benign PE files
    :param output_path: directory the pickled artifacts are written to
    '''
    malware_feature_vector_directory, benign_feature_vector_directory = setup_directories(malware_path, benign_path, output_path)
    malware_pe_files = [os.path.join(malware_path, files) for files in os.listdir(malware_path)]
    benign_pe_files = [os.path.join(benign_path, files) for files in os.listdir(benign_path)]
    # Reading Imports filter files
    debug("[*] Reading filtered Imports file ...")
    filtered_imports_file = Path("manipulation_content/imports_content.txt")
    with open(str(filtered_imports_file), 'r') as file:
        filtered_imports = file.read()
    # Name reuse: from here on this is the list of allowed imports, one per
    # line of the filter file (no longer a Path).
    filtered_imports_file = filtered_imports.split('\n')
    debug(f"Number of malware files : {(len(malware_pe_files))}")
    debug(f"Number of benign files : {(len(benign_pe_files))}")
    debug(f"Number of total files : {(len(malware_pe_files) + len(benign_pe_files))}")
    debug(f"Output directory : {str(output_path)}")
    info("[*] Creating import features mapping ... \n")
    # Shared mapping for both passes: feature string -> vector index.
    feature_vector_mapping = {}
    # import_feature_vector_mapping = {}
    # section_feature_vector_mapping = {}
    index = 0
    error_files = []
    # index_section = 0
    # index_import = 0
    files = malware_pe_files + benign_pe_files
    info("\t[*] Starting import extraction ...")
    # for i, file in enumerate(malware_pe_files + benign_pe_files):
    for i in track(range(len(files)), description="Extracting imports ... ", transient=True):
        file = files[i]
        debug(f'\t[+] Num: {i} - Name: {file} - Number of import features: {len(feature_vector_mapping)}')
        # input("begining of the loop")
        try:
            win32, feature_vector_mapping, index = extract_imports(file, feature_vector_mapping, filtered_imports_file, index)
            # win32, import_feature_vector_mapping, index_import = extract_imports(file, import_feature_vector_mapping, filtered_imports_file, index_import)
            if not win32:
                # Non-win32 binaries are unusable: delete from disk and
                # remember them so the section pass skips them too.
                exception(f"\t[*] Deleting PE file : [bold red]{file}", extra={"markup":True})
                os.remove(file)
                # files.remove(file)
                error_files.append(file)
                exception(f"\t[-] {file} has been deleted ...")
                pass
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt.
            traceback.print_exc()
            exception(f"\t[*] Deleting PE file : [bold red]{file}", extra={"markup":True})
            os.remove(file)
            # files.remove(file)
            error_files.append(file)
            exception(f"\t[-] {file} has been deleted ...")
    # debug(f"\t[+] Index Import : {index_import}")
    # NOTE(review): this assigns a LOCAL that shadows the module-level
    # SECTION_INDEX (no `global` statement) -- confirm whether the global
    # was meant to be updated.
    SECTION_INDEX = index
    info(f"\t[+] Import extraction completed with {SECTION_INDEX} imports... \n")
    info("\t[*] Starting section extraction ...")
    debug(f"[-] Number of files skipped and deleted: {len(error_files)}")
    # input("TEsT")
    # for i, file in enumerate(malware_pe_files + benign_pe_files):
    for i in track(range(len(files)), description="Extracting sections ...: ", transient=True):
        file = files[i]
        debug(f'\t[+] Num: {i} - Name: {file} - Number of section features: {len(feature_vector_mapping)}')
        # Check if the file threw an error before, and if it does, skip it.
        if file in error_files:
            warning(f"[-] This file is in the error_files list and will be skipped!")
            # input("stop here")
            continue
        try:
            win32, feature_vector_mapping, index = extract_sections(file, feature_vector_mapping, index)
            # win32, section_feature_vector_mapping, index_section = extract_sections(file, section_feature_vector_mapping, index_section)
            if not win32:
                exception(f"\t[*] Deleting PE file : {file}")
                os.remove(file)
                # files.remove(file)
                error_files.append(file)
                exception(f"\t[-] {file} has been deleted ...")
                pass
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt.
            traceback.print_exc()
            exception(f"\t[*] Deleting PE file : {file}")
            os.remove(file)
            # files.remove(file)
            error_files.append(file)
            exception(f"\t[-] {file} has been deleted ...")
            pass
    # debug("\t[+] Index Section : {index_section}")
    # info(f"\t[+] Section extraction completed with {index_section} sections ... \n")
    info("[+] Features mapping to index is complete ... \n")
    debug(f"Total size of feature vector mapping : {len(feature_vector_mapping)} \n")
    info("[*] Pickling Feature vector mapping ...")
    for i, import_lib in enumerate(feature_vector_mapping):
        debug(f"\t[+] feature vector value at [{i}] : {str(import_lib)}")
    # for i, import_lib in enumerate(section_feature_vector_mapping):
    #     debug(f"\t[+] feature vector value at [{i}] : {str(import_lib)}")
    # for i, import_lib in enumerate(import_feature_vector_mapping):
    #     debug(f"\t[+] feature vector value at [{i}] : {str(import_lib)}")
    pickle.dump(feature_vector_mapping,
                open(os.path.join(output_path,"feature_vector_mapping.pk"), 'wb'))
    # pickle.dump(import_feature_vector_mapping,
    #             open(os.path.join(output_path,"import_feature_vector_mapping.pk"), 'wb'))
    # pickle.dump(section_feature_vector_mapping,
    #             open(os.path.join(output_path,"section_feature_vector_mapping.pk"), 'wb'))
    info(f"[+] Pickling feature vector mapping complete. You can find them at logs : [bold green]{output_path}\n", extra={"markup":True})
    debug(f"\t -> Feature Vector mapping - {str(os.path.join(output_path,'feature_vector_mapping.pk'))} ", extra={"markup":True})
    # debug(f"\t -> Import Feature Vector mapping - {str(os.path.join(output_path,'import_feature_vector_mapping.pk'))} ", extra={"markup":True})
    # debug(f"\t -> Section Feature Vector mapping - {str(os.path.join(output_path,'section_feature_vector_mapping.pk'))} ", extra={"markup":True})
    # For feature vector with imports and sections:
    info("[*] Creating feature vector with imports and sections for [bold red] malware set...", extra={"markup":True})
    malware_pe_files_feature_set = torch.Tensor(feature_generation(malware_pe_files, feature_vector_mapping))
    info("[*] Creating feature vector with imports and sections for [bold green] benign set...", extra={"markup":True})
    benign_pe_files_feature_set = torch.Tensor(feature_generation(benign_pe_files, feature_vector_mapping))
    pickle.dump(malware_pe_files_feature_set, open(os.path.join(malware_feature_vector_directory, "malware_feature_set.pk"), 'wb'))
    pickle.dump(benign_pe_files_feature_set, open(os.path.join(benign_feature_vector_directory, "benign_feature_set.pk"), 'wb'))
    # ---------------------------------#
    # For feature vector with imports: #
    # ---------------------------------#
    # debug(f"[*] Creating feature vector with imports for malware set ...")
    # malware_pe_files_import_feature_set = torch.Tensor(feature_generation(malware_pe_files, import_feature_vector_mapping))
    # debug("[*] Creating feature vector with imports for benign set ...")
    # benign_pe_files_import_feature_set = torch.Tensor(feature_generation(benign_pe_files, import_feature_vector_mapping))
    # debug(f"[+] malware_pe_files_import_feature_set type : [bold green] {str(malware_pe_files_import_feature_set)}", extra={"markup":True})
    # debug(f"[+] malware_pe_files_import_feature_set size : [bold green]{str(malware_pe_files_import_feature_set.shape)}")
    # pickle.dump(malware_pe_files_import_feature_set, open(os.path.join(malware_feature_vector_directory, "malware_pe_files_import_feature_set.pk"), 'wb'))
    # pickle.dump(benign_pe_files_import_feature_set, open(os.path.join(benign_feature_vector_directory, "benign_pe_files_import_feature_set.pk"), 'wb'))
    # ---------------------------------#
    # For feature vector with sections:#
    # ---------------------------------#
    # debug("[*] Creating feature vector with sections for malware set...")
    # malware_pe_files_section_feature_set = torch.Tensor(feature_generation(malware_pe_files, section_feature_vector_mapping))
    # debug("[*] Creating feature vector with sections for benign set...")
    # benign_pe_files_section_feature_set = torch.Tensor(feature_generation(benign_pe_files, section_feature_vector_mapping))
    # debug(f"[+] malware_pe_files_section_feature_set type : {str(malware_pe_files_section_feature_set)}", extra={"markup":True})
    # debug(f"[+] malware_pe_files_section_feature_set size : {str(malware_pe_files_section_feature_set.shape)}")
    # pickle.dump(malware_pe_files_section_feature_set, open(os.path.join(malware_feature_vector_directory, "malware_pe_files_section_feature_set.pk"), 'wb'))
    # pickle.dump(benign_pe_files_section_feature_set, open(os.path.join(benign_feature_vector_directory, "benign_pe_files_section_feature_set.pk"), 'wb'))
    pass
# From ALFA Adv-mlaware-viz
def filter_imported_functions(func_string_with_library):
    """
    Keep only imported functions whose name part (before the ':library'
    suffix) consists purely of ASCII letters; names with special characters
    are mostly noise.

    Update: The limitation for the upper case in the preprocessing step has
    been removed.
    """
    name = func_string_with_library.split(":")[0]
    return bool(re.match("^[a-zA-Z]*$", name))
# From ALFA Adv-mlaware-viz
def remove_encoding_indicator(func_string):
    """
    Strip a trailing "A" (ANSI) or "W" (unicode) encoding indicator from a
    Windows API function name, e.g. "CreateFileA" -> "CreateFile".

    The preceding character must be lower case so that all-caps suffixes are
    left untouched.

    :param func_string: imported function name
    :returns: the name without the encoding suffix, or the name unchanged
    """
    # Length guard fixes an IndexError: the original indexed func_string[-2]
    # unconditionally, which crashed on names shorter than two characters.
    if len(func_string) >= 2 and func_string[-1] in ('A', 'W') and func_string[-2].islower():
        return func_string[:-1]
    return func_string
# From ALFA Adv-mlaware-viz
def process_imported_functions_output(imports):
    '''Drop imported-function strings rejected by filter_imported_functions
    (i.e. those whose function name contains non-alphabetic characters).
    '''
    return [entry for entry in imports if filter_imported_functions(entry)]
def feature_generation(pe_files: list, feature_vector_mapping: dict):
    """
    Build one binary feature vector per PE file.

    Each file is parsed with lief; for every "function:library" import and
    every section name that appears in ``feature_vector_mapping``, the
    corresponding vector index is set to 1.

    Parameters
    ----------
    pe_files : list
        Paths of the PE files to process.
    feature_vector_mapping : dict
        Maps a feature (import or section name) to its index in the vector.

    Returns
    -------
    list[list[int]]
        One 0/1 vector of length ``len(feature_vector_mapping)`` per file.

    Raises
    ------
    Exception
        If a file cannot be parsed by lief.
    """
    pe_files_feature_set = []
    for i in track(range(len(pe_files)), description="Generating feature vectors ... ", transient=True):
        file = pe_files[i]
        debug(f'\t[+] Num: {i} - Name: [bold green]{file} ', extra={"markup":True})
        feature_vector = [0] * len(feature_vector_mapping)
        try:
            binary = lief.parse(file)
            imports = [e.name + ':' + lib.name.lower() for lib in binary.imports for e in lib.entries]
            imports = process_imported_functions_output(imports)
            sections = [section.name for section in binary.sections]
            # flag every known import feature
            for lib_import in imports:
                if lib_import in feature_vector_mapping:
                    feature_vector[feature_vector_mapping[lib_import]] = 1
            # flag every known section feature
            for section in sections:
                if section in feature_vector_mapping:
                    feature_vector[feature_vector_mapping[section]] = 1
        except Exception as err:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # still propagate; chain the original parse error for debugging.
            exception(f"\t[-] {file} is not parseable!")
            raise Exception(f"\t[-] {file} is not parseable!") from err
        pe_files_feature_set.append(feature_vector)
        debug(f"\t[+] Vectors Type : {str(type(pe_files_feature_set))}")
    debug("[+] Feature Generation complete ... \n")
    return pe_files_feature_set
def extract_imports(file, feature_vector_mapping: dict, filtered_import_list: list,index: int = 0, win32: bool = True):
binary = lief.parse(file)
debug(f"\t[+] [bold green]{file}[/bold green] File Type : [bold red]{str(binary.optional_header.magic)}", extra={"markup":True})
if str(binary.optional_header.magic) != | |
#!/usr/bin/env python
# coding: utf-8
"""
Utilities and functions for working with ALC, other datasets. Includes functions
for reading .Align xml scaling files saved during LA-ICP-MS analysis and
calculating scale factors for images from .Align data.
"""
import os
import operator
import json
from urllib.request import urlopen
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
# Public names exported via ``from <module> import *``.
# NOTE: read_json_url was defined (and used by json_from_path_or_url) but
# missing from this list; it is added for consistency with the other
# public json helpers.
__all__ = ['check_url',
           'read_json',
           'read_json_url',
           'save_json',
           'json_from_path_or_url',
           'save_csv',
           'list_if_endswith',
           'list_if_in',
           'check_any_str',
           'join_1st_match',
           'round_to_even',
           'list_of_val',
           'check_mos_csv_keys',
           'prediction_to_np',
           'mask_list_to_np',
           'rescale_2d_arr',
           'crop_nd_arr',
           'mask_to_3D_arr_size',
           'scancsv_to_dict',
           'get_Align_center_size',
           'calc_scale_factor',
           'load_data_dict',
           'alc_calc_scans_n']
### Various functions and classes for file processing and code simplification
### in other modules below.
def check_url(input_str):
    """Check if a string is a 'https' url.

    Parameters
    ----------
    input_str : str
        An input string or path (anything convertible to str).

    Returns
    -------
    bool
        True if input_str contains 'https', else False.
    """
    return str(input_str).find('https') != -1
def read_json(json_path):
    """Load and return the contents of a .json file.

    Parameters
    ----------
    json_path : str
        Path to the json file to read.

    Returns
    -------
    any
        Parsed file contents; a list or dict for purposes of this package.
    """
    with open(json_path, 'r') as handle:
        return json.load(handle)
def read_json_url(json_url):
    """Fetch and parse a .json file from a url.

    Parameters
    ----------
    json_url : str
        url to the .json file; if hosted on Github this should be the
        'raw' link.

    Returns
    -------
    any
        Whatever data the url's .json payload holds (probably a list or
        dict for purposes of this package).
    """
    response = urlopen(json_url)
    payload = response.read()
    return json.loads(payload)
def json_from_path_or_url(path_or_url_str):
    """Load a .json from a url when the input looks like one, else from a path.

    Parameters
    ----------
    path_or_url_str : str
        Either an 'https' url or a filesystem path to a .json file.

    Returns
    -------
    any
        Parsed json contents; likely a dict or list when called in this
        project.
    """
    # pick the right loader; anything that is not a url is assumed a path
    loader = read_json_url if check_url(path_or_url_str) else read_json
    return loader(path_or_url_str)
def save_json(json_path, item_for_save):
    """Write *item_for_save* to a json file, replacing any existing file.

    Parameters
    ----------
    json_path : str
        Destination path for the json file.
    item_for_save : any
        Object to serialize; a list or dict for purposes of this package.

    Returns
    -------
    None.
    """
    # drop any pre-existing file so the dump starts from a clean slate
    if os.path.isfile(json_path):
        os.remove(json_path)
    with open(json_path, 'w') as handle:
        json.dump(item_for_save, handle)
def save_csv(path, pandas_table):
    """Write a pandas DataFrame to a .csv file.

    Parameters
    ----------
    path : str
        Full destination path (including '.csv') for the table.
    pandas_table : pandas DataFrame
        A pandas DataFrame with headers.

    Returns
    -------
    None.
    """
    # utf-8-sig prefixes a BOM (commonly for Excel compatibility);
    # headers are kept, the index column is dropped
    pandas_table.to_csv(path, header=True, index=False, encoding='utf-8-sig')
def list_if_endswith(input_list, end_string):
    """Return only the strings from *input_list* ending with *end_string*.

    Parameters
    ----------
    input_list : list[str]
        A list of strings.
    end_string : str
        Required suffix for items to be kept.

    Returns
    -------
    list
        A copy of input_list without strings that do not end with end_string.
    """
    kept = []
    for entry in input_list:
        if entry.endswith(end_string):
            kept.append(entry)
    return kept
def list_if_in(input_list, string_in):
    """Return only the strings from *input_list* that contain *string_in*.

    Parameters
    ----------
    input_list : list[str]
        A list of strings.
    string_in : str
        Substring that kept items must contain.

    Returns
    -------
    list
        A copy of input_list without strings not containing string_in.
    """
    return list(filter(lambda entry: string_in in entry, input_list))
def check_any_str(list_to_check, input_string):
    """Check whether any item in a list contains a given substring.

    Parameters
    ----------
    list_to_check : list[str]
        A list of strings.
    input_string : str
        Substring to look for.

    Returns
    -------
    bool
        True if input_string occurs in at least one list item, else False.
    """
    for entry in list_to_check:
        if input_string in entry:
            return True
    return False
def join_1st_match(input_list, input_string, input_join_path):
    """Join the first list item containing *input_string* onto a directory.

    Parameters
    ----------
    input_list : list[str]
        A list of strings (ideally filenames).
    input_string : str
        A string (ideally a filename) to match.
    input_join_path : str
        Directory to join the first match to.

    Returns
    -------
    output_file_pth : str
        Path *input_join_path*/first_match, or '' when nothing matches.
    """
    output_file_pth = ''
    # single scan instead of the check-then-filter helper combination
    matches = [entry for entry in input_list if input_string in entry]
    if matches:
        output_file_pth = os.path.join(input_join_path, matches[0])
    return output_file_pth
def round_to_even(number):
    """Round a number to the nearest even integer.

    Note: uses Python's banker's rounding of the halved value, so e.g.
    5 rounds to 4 (round(2.5) == 2).

    Parameters
    ----------
    number : float or int
        A number for rounding.

    Returns
    -------
    int
        Even integer rounded from *number*.
    """
    half = float(number) / 2
    return round(half) * 2
def list_of_val(val_for_list, list_len, num_lists = 1):
    """Generate a list or list of lists containing a single value.

    Parameters
    ----------
    val_for_list : any
        Value that will be repeated in the list or list of lists.
    list_len : int
        Length of the output list, or of each inner list if multiple.
    num_lists : int, optional
        If > 1, number of inner lists in a list-of-lists output.
        The default is 1.

    Returns
    -------
    list or list of lists
        A list [val_for_list, val_for_list, ...] or a list of num_lists
        independent such lists.
    """
    inner = [val_for_list] * int(list_len)
    if num_lists <= 1:
        return inner
    # Build an independent copy per row. The previous implementation
    # appended the *same* list object num_lists times, so mutating one
    # row silently mutated all of them.
    return [list(inner) for _ in range(int(num_lists))]
def check_mos_csv_keys(input_mos_csv_dict):
    """Check whether a dict has all keys required of a mosaic_info.csv.

    Extra keys are allowed; only the presence of the required headers
    is verified.

    Parameters
    ----------
    input_mos_csv_dict : dict
        A dict with (or without) mosaic_info.csv headers as keys.

    Returns
    -------
    bool
        True if every required header is present, else False.
    """
    required = {'Sample', 'Scanlist', 'Mosaic',
                'Max_zircon_size', 'X_offset', 'Y_offset'}
    return required.issubset(input_mos_csv_dict.keys())
def prediction_to_np(input_results):
    """Stack Detectron2 predicted masks into a single np array.

    Parameters
    ----------
    input_results : Detectron2 Prediction
        Prediction results from a Detectron2 predictor.

    Returns
    -------
    arr : np array
        All 'pred_masks' instances stacked along axis 2; an empty list
        when the prediction holds no instances.
    """
    instances = input_results['instances']
    if len(instances) == 0:
        return []
    masks = instances.get('pred_masks')
    # move each mask off the GPU before stacking
    layers = [mask.cpu().numpy() for mask in masks]
    return np.stack(layers, 2)
def mask_list_to_np(input_mask_list):
    """Stack a list of mask arrays (e.g., from Otsu segmentation) into one
    larger array.

    Parameters
    ----------
    input_mask_list : list[array]
        Binary mask arrays (np.stack requires identical shapes).

    Returns
    -------
    arr : np array
        Masks stacked along axis 2; an empty list for empty input.
    """
    if not input_mask_list:
        return []
    return np.stack(input_mask_list, 2)
# from https://stackoverflow.com/a/58567022
def rescale_2d_arr(im, nR, nC):
    """Rescale a 2d array to (nR x nC) via nearest-neighbour sampling.

    Based on https://stackoverflow.com/a/58567022.

    Parameters
    ----------
    im : array
        Input 2D array (likely a mask image).
    nR : int
        Rows for the rescaled array/image.
    nC : int
        Columns for the rescaled array/image.

    Returns
    -------
    array
        Input array im resized to nR rows, nC columns.
    """
    src_rows = len(im)      # source number of rows
    src_cols = len(im[0])   # source number of columns
    out_rows = []
    for r in range(nR):
        src_r = int(src_rows * r / nR)
        out_rows.append([im[src_r][int(src_cols * c / nC)] for c in range(nC)])
    return np.asarray(out_rows)
# from https://stackoverflow.com/a/50322574
def crop_nd_arr(img, bounding):
    """Crop the central portion of an array to the shape 'bounding'.

    Based on https://stackoverflow.com/a/50322574.

    Parameters
    ----------
    img : array
        An nd array that needs to be cropped.
    bounding : tuple
        Shape tuple, smaller than img.shape, to crop img to.

    Returns
    -------
    array
        A view of img with its edges uniformly cropped to match the
        'bounding' shape.
    """
    slices = []
    for dim, target in zip(img.shape, bounding):
        # center the crop window in this dimension
        lo = dim // 2 - target // 2
        slices.append(slice(lo, lo + target))
    return img[tuple(slices)]
def mask_to_3D_arr_size(input_mask, input_arr):
    """Match a 2D mask's shape to the (x, y) of a 3D image, by cropping or
    rescaling it if necessary.

    Parameters
    ----------
    input_mask : array
        A 2D array (presumably a mask).
    input_arr : array
        A 3D array (presumably a 3-channel image).

    Returns
    -------
    output_arr : array
        input_mask unchanged when shapes already match, otherwise the mask
        cropped or rescaled to input_arr's first two dimensions.
    """
    target = input_arr.shape[:2]
    if input_mask.shape == target:
        return input_mask
    # crop only when the mask is strictly larger in both dimensions
    if input_mask.shape[0] > target[0] and input_mask.shape[1] > target[1]:
        return crop_nd_arr(input_mask, target)
    # otherwise rescale (mask smaller, or larger in only one dimension)
    return rescale_2d_arr(input_mask, *target)
def scancsv_to_dict(scancsv_path):
"""Convert a .scancsv | |
from collections import OrderedDict
import dolfin as df
from pathlib import Path
import numpy as np
from fenics_helpers import boundary
from fenics_helpers.timestepping import TimeStepper
import constitutive as c
# When True (e.g. for CI), the examples below shorten the simulated time
# span and skip all interactive plotting.
TEST = True
class MechanicsSpaces:
    """Bundle a mesh with its constraint type and FE discretization degrees."""

    def __init__(self, mesh, constraint, mesh_function=None):
        self.mesh = mesh
        self.constraint = constraint
        self.mesh_function = mesh_function
        # polynomial degree of the displacement field and of the
        # quadrature rule
        self.deg_d = 2
        self.deg_q = 2

    def eps(self, u):
        """Return the symmetric strain of *u* in vector (Voigt) notation.

        Shear components carry the conventional engineering factor 2.
        """
        strain = df.sym(df.grad(u))
        dim = self.mesh.geometric_dimension()
        if dim == 1:
            return df.as_vector([strain[0, 0]])
        if dim == 2:
            return df.as_vector(
                [strain[0, 0], strain[1, 1], 2 * strain[0, 1]]
            )
        if dim == 3:
            return df.as_vector(
                [
                    strain[0, 0],
                    strain[1, 1],
                    strain[2, 2],
                    2 * strain[1, 2],
                    2 * strain[0, 2],
                    2 * strain[0, 1],
                ]
            )
class GDMSpaces(MechanicsSpaces):
    """Function/quadrature spaces for a gradient-damage (GDM) problem.

    Extends :class:`MechanicsSpaces` with a mixed displacement / nonlocal
    equivalent strain space plus the quadrature-space bookkeeping needed
    to exchange data with the constitutive law.
    """
    def __init__(self, mesh, constraint, mesh_function=None):
        super().__init__(mesh, constraint, mesh_function)
        # interpolation degree of the nonlocal equivalent strain field
        self.deg_e = self.deg_d
    def create(self):
        """Build all function spaces, fields and local projectors."""
        self.metadata = {
            "quadrature_degree": self.deg_q,
            "quadrature_scheme": "default",
        }
        self.dxm = df.dx(metadata=self.metadata, subdomain_data=self.mesh_function)
        # solution field
        Ed = df.VectorElement("CG", self.mesh.ufl_cell(), degree=self.deg_d)
        Ee = df.FiniteElement("CG", self.mesh.ufl_cell(), degree=self.deg_d)
        self.V = df.FunctionSpace(self.mesh, Ed * Ee)
        self.Vd, self.Ve = self.V.split()
        self.dd, self.de = df.TrialFunctions(self.V)
        self.d_, self.e_ = df.TestFunctions(self.V)
        self.u = df.Function(self.V, name="d-e mixed space")
        self.d, self.e = df.split(self.u)
        # generic quadrature function spaces
        VQF, VQV, VQT = c.helper.spaces(self.mesh, self.deg_q, c.q_dim(self.constraint))
        # quadrature functions
        Q = c.Q
        # inputs to the model
        self.q_in = OrderedDict()
        self.q_in[Q.EPS] = df.Function(VQV, name="current strains")
        self.q_in[Q.E] = df.Function(VQF, name="current nonlocal equivalent strains")
        # projectors that evaluate the current strain / nonlocal strain
        # at the integration points of the quadrature spaces above
        self.q_in_calc = {}
        self.q_in_calc[Q.EPS] = c.helper.LocalProjector(self.eps(self.d), VQV, self.dxm)
        self.q_in_calc[Q.E] = c.helper.LocalProjector(self.e, VQF, self.dxm)
        # outputs of the model
        self.q = {}
        self.q[Q.SIGMA] = df.Function(VQV, name="current stresses")
        self.q[Q.DSIGMA_DEPS] = df.Function(VQT, name="stress-strain tangent")
        self.q[Q.DSIGMA_DE] = df.Function(VQV, name="stress-nonlocal-strain tangent")
        self.q[Q.EEQ] = df.Function(VQF, name="current (local) equivalent strain")
        self.q[Q.DEEQ] = df.Function(VQV, name="equivalent-strain-strain tangent")
        self.q_history = {
            Q.KAPPA: df.Function(VQF, name="current history variable kappa")
        }
        # total number of integration points ...
        self.n = len(self.q[Q.SIGMA].vector().get_local()) // c.q_dim(self.constraint)
        # ... and integration points per cell
        self.nq = self.n // self.mesh.num_cells()
        # per-integration-point subdomain flags (from the mesh function),
        # used to assign different constitutive laws to different regions
        self.ip_flags = None
        if self.mesh_function is not None:
            self.ip_flags = np.repeat(self.mesh_function.array(), self.nq)
class Problem(df.NonlinearProblem):
    """Nonlinear problem coupling FE spaces with an integration-point loop."""

    def __init__(self, spaces):
        super().__init__()
        self.spaces = spaces
        self.spaces.create()
        # one constitutive evaluation slot per integration point
        self.loop = c.IpLoop()
        self.loop.resize(self.spaces.n)

    def evaluate(self):
        """Project inputs to quadrature points, run the laws, copy outputs back."""
        spaces = self.spaces
        # project the current fields into their quadrature functions
        for name, quad_fn in spaces.q_in.items():
            spaces.q_in_calc[name](quad_fn)
        # evaluate the constitutive law(s) on the raw quadrature data
        raw_inputs = [fn.vector().get_local() for fn in spaces.q_in.values()]
        self.loop.evaluate(*raw_inputs)
        # write the computed outputs back into their quadrature functions
        for name, quad_fn in spaces.q.items():
            c.helper.set_q(quad_fn, self.loop.get(name))
class GDMProblem(c.MechanicsProblem):
    """Gradient-damage mechanics problem (displacement + nonlocal strain).

    Assembles the coupled residual and tangent forms of the implicit
    gradient damage model and exchanges strain/stress data with the
    constitutive law at the integration points.
    """
    def __init__(self, mesh, prm, law, loop=None):
        df.NonlinearProblem.__init__(self)
        self.mesh = mesh
        self.prm = prm
        if mesh.geometric_dimension() != c.g_dim(prm.constraint):
            raise RuntimeError(
                f"The geometric dimension of the mesh does not match the {prm.constraint} constraint."
            )
        metadata = {"quadrature_degree": prm.deg_q, "quadrature_scheme": "default"}
        self.dxm = df.dx(metadata=metadata)
        # solution field
        Ed = df.VectorElement("CG", mesh.ufl_cell(), degree=prm.deg_d)
        Ee = df.FiniteElement("CG", mesh.ufl_cell(), degree=prm.deg_d)
        self.V = df.FunctionSpace(mesh, Ed * Ee)
        self._u = df.Function(self.V, name="d-e mixed space")
        # generic quadrature function spaces
        VQF, VQV, VQT = c.helper.spaces(mesh, prm.deg_q, c.q_dim(prm.constraint))
        # quadrature function
        Q = c.Q
        # inputs to the model
        self.q_in = {}
        self.q_in[Q.EPS] = df.Function(VQV, name="current strains")
        self.q_in[Q.E] = df.Function(VQF, name="current nonlocal equivalent strains")
        # outputs of the model
        self.q = {}
        self.q[Q.SIGMA] = df.Function(VQV, name="current stresses")
        self.q[Q.DSIGMA_DEPS] = df.Function(VQT, name="stress-strain tangent")
        self.q[Q.DSIGMA_DE] = df.Function(VQV, name="stress-nonlocal-strain tangent")
        self.q[Q.EEQ] = df.Function(VQF, name="current (local) equivalent strain")
        self.q[Q.DEEQ] = df.Function(VQV, name="equivalent-strain-strain tangent")
        self.q_history = {
            Q.KAPPA: df.Function(VQF, name="current history variable kappa")
        }
        n_gauss_points = len(self.q[Q.SIGMA].vector().get_local()) // c.q_dim(
            prm.constraint
        )
        self.loop = loop or c.IpLoop()
        self.loop.add_law(law)
        self.loop.resize(n_gauss_points)
        dd, de = df.TrialFunctions(self.V)
        d_, e_ = df.TestFunctions(self.V)
        d, e = df.split(self._u)
        self.d = d
        eps = self.eps
        f_d = 1.0
        # residual R: equilibrium term plus the averaging equation that
        # couples e to the local equivalent strain EEQ via the length
        # scale prm.l
        self.R = f_d * df.inner(eps(d_), self.q[Q.SIGMA]) * self.dxm
        self.R += e_ * (e - self.q[Q.EEQ]) * self.dxm
        self.R += df.dot(df.grad(e_), prm.l ** 2 * df.grad(e)) * self.dxm
        # dR: consistent tangent of R, built from the constitutive
        # derivative outputs above
        self.dR = f_d * df.inner(eps(dd), self.q[Q.DSIGMA_DEPS] * eps(d_)) * self.dxm
        self.dR += f_d * de * df.dot(self.q[Q.DSIGMA_DE], eps(d_)) * self.dxm
        self.dR += df.inner(eps(dd), -self.q[Q.DEEQ] * e_) * self.dxm
        self.dR += (
            de * e_ * self.dxm
            + df.dot(df.grad(de), prm.l ** 2 * df.grad(e_)) * self.dxm
        )
        # projectors of the current strain / nonlocal strain onto their
        # quadrature spaces
        self.calculate_eps = c.helper.LocalProjector(eps(self.d), VQV, self.dxm)
        self.calculate_e = c.helper.LocalProjector(e, VQF, self.dxm)
        self._assembler = None
        self._bcs = None
    @property
    def u(self):
        # full mixed solution function (displacement + nonlocal strain)
        return self._u
    @property
    def Vd(self):
        # displacement subspace
        return self.V.split()[0]
    @property
    def Ve(self):
        # nonlocal equivalent strain subspace
        return self.V.split()[1]
    def evaluate_material(self):
        """Evaluate the constitutive law at all integration points."""
        # project the strain and the nonlocal equivalent strains onto
        # their quadrature spaces and ...
        self.calculate_eps(self.q_in[c.Q.EPS])
        self.calculate_e(self.q_in[c.Q.E])
        self.loop.evaluate(
            self.q_in[c.Q.EPS].vector().get_local(),
            self.q_in[c.Q.E].vector().get_local(),
        )
        # ... and write the calculated values into their quadrature spaces.
        for name, q_space in self.q.items():
            c.helper.set_q(q_space, self.loop.get(name))
    def update(self):
        """Commit the current state in the constitutive loop (history update)."""
        self.calculate_eps(self.q_in[c.Q.EPS])
        self.calculate_e(self.q_in[c.Q.E])
        self.loop.update(
            self.q_in[c.Q.EPS].vector().get_local(),
            self.q_in[c.Q.E].vector().get_local(),
        )
def test_tensile_meso():
    """Displacement-controlled tensile test on a meso-scale specimen.

    Reads a mesh with subdomain flags (1 = matrix, 2 = aggregate,
    3 = interface, per the law assignment below), builds region-wise
    variational forms, assigns a different constitutive law per region
    and runs adaptive load stepping.
    """
    mesh = df.Mesh()
    mvc = df.MeshValueCollection("size_t", mesh, 1)
    LX, LY = 80.0, 80.0  # magic!
    mesh_file = Path(__file__).parent / "mesh.xdmf"
    with df.XDMFFile(str(mesh_file)) as f:
        f.read(mesh)
        f.read(mvc, "gmsh:physical")
    subdomains = df.MeshFunction("size_t", mesh, mvc)
    if not TEST:
        import matplotlib.pyplot as plt
        df.plot(subdomains)
        plt.show()
    mat_l = 2.0
    Q = c.Q
    s = GDMSpaces(mesh, c.Constraint.PLANE_STRAIN, subdomains)
    s.create()
    # subdomain 1 (matrix): full gradient-damage residual and tangent
    R = df.inner(s.eps(s.d_), s.q[Q.SIGMA]) * s.dxm(1)
    dR = df.inner(s.eps(s.dd), s.q[Q.DSIGMA_DEPS] * s.eps(s.d_)) * s.dxm(1)
    R += s.e_ * (s.e - s.q[Q.EEQ]) * s.dxm(1)
    R += df.dot(df.grad(s.e_), mat_l ** 2 * df.grad(s.e)) * s.dxm(1)
    dR += s.de * df.dot(s.q[Q.DSIGMA_DE], s.eps(s.d_)) * s.dxm(1)
    dR += df.inner(s.eps(s.dd), -s.q[Q.DEEQ] * s.e_) * s.dxm(1)
    dR += s.de * s.e_ * s.dxm(1)
    dR += df.dot(df.grad(s.de), mat_l ** 2 * df.grad(s.e_)) * s.dxm(1)
    # subdomain 2 (aggregates): mechanical terms only; the de*e_ mass
    # term keeps the e-block of the system regular there
    R += df.inner(s.eps(s.d_), s.q[Q.SIGMA]) * s.dxm(2)
    dR += df.inner(s.eps(s.dd), s.q[Q.DSIGMA_DEPS] * s.eps(s.d_)) * s.dxm(2)
    dR += s.de * s.e_ * s.dxm(2)
    # subdomain 3 (interface): same structure as subdomain 2
    R += df.inner(s.eps(s.d_), s.q[Q.SIGMA]) * s.dxm(3)
    dR += df.inner(s.eps(s.dd), s.q[Q.DSIGMA_DEPS] * s.eps(s.d_)) * s.dxm(3)
    dR += s.de * s.e_ * s.dxm(3)
    VQF, VQV, VQT = c.helper.spaces(s.mesh, s.deg_q, c.q_dim(s.constraint))
    calculate_eps = c.helper.LocalProjector(s.eps(s.d), VQV, s.dxm)
    calculate_e = c.helper.LocalProjector(s.e, VQF, s.dxm(1))
    F = 0.75  # interface reduction
    t = 0.5  # interface thickness
    lawAggreg = c.LinearElastic(2 * 26738, 0.18, s.constraint)
    lawInterf = c.LocalDamage(
        26738,
        0.18,
        s.constraint,
        c.DamageLawExponential(
            k0=F * 3.4 / 26738.0, alpha=0.99, beta=3.4 / 26738.0 / (0.12 * F / t)
        ),
        c.ModMisesEeq(k=10, nu=0.18, constraint=s.constraint),
    )
    lawMatrix = c.GradientDamage(
        26738.0,
        0.18,
        s.constraint,
        c.DamageLawExponential(
            k0=3.4 / 26738.0, alpha=0.99, beta=3.4 / 26738.0 / 0.0216
        ),
        c.ModMisesEeq(k=10, nu=0.18, constraint=s.constraint),
    )
    # one law per subdomain, dispatched via the per-integration-point flags
    loop = c.IpLoop()
    loop.add_law(lawMatrix, np.where(s.ip_flags == 1)[0])
    loop.add_law(lawAggreg, np.where(s.ip_flags == 2)[0])
    loop.add_law(lawInterf, np.where(s.ip_flags == 3)[0])
    loop.resize(s.n)
    bot = boundary.plane_at(0, "y")
    top = boundary.plane_at(LY, "y")
    bc_expr = df.Expression("u", degree=0, u=0)
    bcs = []
    # prescribed vertical displacement on top, fixed vertical on bottom,
    # one pinned point to remove the horizontal rigid-body mode
    bcs.append(df.DirichletBC(s.Vd.sub(1), bc_expr, top))
    bcs.append(df.DirichletBC(s.Vd.sub(1), 0.0, bot))
    bcs.append(
        df.DirichletBC(s.Vd.sub(0), 0.0, boundary.point_at((0, 0)), method="pointwise")
    )
    # return
    assembler = df.SystemAssembler(dR, R, bcs)
    class SolveMe(df.NonlinearProblem):
        # Bridges dolfin's Newton solver and the constitutive loop:
        # every residual evaluation first refreshes the material state.
        def F(self, b, x):
            calculate_eps(s.q_in[Q.EPS])
            calculate_e(s.q_in[Q.E])
            loop.evaluate(s.q_in[Q.EPS].vector().get_local(), s.q_in[Q.E].vector().get_local())
            # ... and write the calculated values into their quadrature spaces.
            c.helper.set_q(s.q[Q.SIGMA], loop.get(c.Q.SIGMA))
            c.helper.set_q(s.q[Q.DSIGMA_DEPS], loop.get(c.Q.DSIGMA_DEPS))
            c.helper.set_q(s.q[Q.DEEQ], loop.get(c.Q.DEEQ))
            c.helper.set_q(s.q[Q.DSIGMA_DE], loop.get(c.Q.DSIGMA_DE))
            c.helper.set_q(s.q[Q.EEQ], loop.get(c.Q.EEQ))
            assembler.assemble(b, x)
        def J(self, A, x):
            assembler.assemble(A)
    linear_solver = df.LUSolver("mumps")
    solver = df.NewtonSolver(
        df.MPI.comm_world, linear_solver, df.PETScFactory.instance()
    )
    solver.parameters["linear_solver"] = "mumps"
    solver.parameters["maximum_iterations"] = 10
    solver.parameters["error_on_nonconvergence"] = False
    problem = SolveMe()
    def solve(t, dt):
        # single displacement-controlled step at pseudo-time t
        print(t, dt)
        bc_expr.u = 0.1 * t
        # try:
        return solver.solve(problem, s.u.vector())
        # except:
        #     return -1, False
    ld = c.helper.LoadDisplacementCurve(bcs[0])
    if not TEST:
        ld.show()
    if not ld.is_root:
        df.set_log_level(df.LogLevel.ERROR)
    fff = df.XDMFFile("output.xdmf")
    fff.parameters["functions_share_mesh"] = True
    fff.parameters["flush_output"] = True
    plot_space = df.FunctionSpace(s.mesh, "DG", 0)
    k = df.Function(plot_space, name="kappa")
    def pp(t):
        # post-processing after each converged step: commit history
        # variables and write the fields to the XDMF output
        calculate_eps(s.q_in[Q.EPS])
        calculate_e(s.q_in[Q.E])
        loop.update(s.q_in[Q.EPS].vector().get_local(), s.q_in[Q.E].vector().get_local())
        # this fixes XDMF time stamps
        import locale
        locale.setlocale(locale.LC_NUMERIC, "en_US.UTF-8")
        d, e = s.u.split(0)
        d.rename("disp", "disp")
        e.rename("e", "e")
        all_kappa = lawInterf.kappa() + lawMatrix.kappa()
        k.vector().set_local(all_kappa[:: s.nq])
        fff.write(d, t)
        fff.write(e, t)
        fff.write(k, t)
        ld(t, df.assemble(R))
    t_end = 1.
    if TEST:
        t_end = 0.02
    TimeStepper(solve, pp, s.u).adaptive(t_end, dt=0.02)
def test_bending():
LX = 2000
LY = 300
LX_load = 100
mesh = df.RectangleMesh(df.Point(0, 0), df.Point(LX, LY), 100, 15)
spaces = GDMSpaces(mesh, c.Constraint.PLANE_STRAIN)
law = c.GradientDamage(
20000,
0.2,
spaces.constraint,
c.DamageLawExponential(k0=2 / 20000.0, alpha=0.99, beta=100.0),
c.ModMisesEeq(k=10, nu=0.2, constraint=spaces.constraint),
)
problem = Problem(spaces)
problem.loop.add_law(law)
problem.evaluate()
prm = c.Parameters(c.Constraint.PLANE_STRAIN)
prm.E = 20000.0
prm.nu = 0.2
prm.l = 200 ** 0.5
prm.ft = 2.0
prm.k = 10.0
prm.alpha = 0.99
prm.beta = 100.0
prm.deg_d = 2
law = c.GradientDamage(
prm.E,
prm.nu,
prm.constraint,
c.DamageLawExponential(prm.ft / prm.E, prm.alpha, prm.beta),
c.ModMisesEeq(prm.k, prm.nu, prm.constraint),
)
problem = GDMProblem(mesh, prm, law)
left = boundary.point_at((0.0, 0.0), eps=0.1)
right = boundary.point_at((LX, 0.0), eps=0.1)
top = boundary.within_range(
[(LX - LX_load) / 2.0, LY], [(LX + LX_load) / 2, LY], eps=0.1
)
bc_expr = df.Expression("d*t", degree=0, t=0, d=-3)
bcs = []
bcs.append(df.DirichletBC(problem.Vd.sub(1), bc_expr, top))
bcs.append(df.DirichletBC(problem.Vd.sub(0), 0.0, left, method="pointwise"))
bcs.append(df.DirichletBC(problem.Vd.sub(1), 0.0, left, method="pointwise"))
bcs.append(df.DirichletBC(problem.Vd.sub(1), 0.0, right, method="pointwise"))
# everywhere = boundary.point_at((0,0), eps=1e6)
# bcs.append(df.DirichletBC(problem.Ve, 0.0, everywhere, method="pointwise"))
problem.set_bcs(bcs)
linear_solver = df.LUSolver("mumps")
solver = df.NewtonSolver(
df.MPI.comm_world, linear_solver, df.PETScFactory.instance()
| |
north < 0 or north > 360:
srfCornerPtsLL = srfCentroidL = srfCentroid = contextMeshJoined = northRad = northVec = scale = outputGeometryIndex = workingSubFolderPath = horizonFileType = horizonFileTypeLabel = unitConversionFactor = None
validInputData = False
printMsg = "Please input north angle value from 0 to 360."
return srfCornerPtsLL, srfCentroidL, srfCentroid, contextMeshJoined, northRad, northVec, scale, outputGeometryIndex, workingSubFolderPath, horizonFileType, horizonFileTypeLabel, unitConversionFactor, validInputData, printMsg
except Exception, e: # check if it's a vector
north.Unitize()
northRad, northVec = gismo_preparation.angle2northClockwise(north)
northVec.Unitize()
if (scale == None) or (scale <= 0):
scale = 1 # default
if workingFolderPath == None:
# nothing inputted to "workingFolder_" input, use default Gismo folder instead (C:\gismo)
gismoFolder = sc.sticky["gismo_gismoFolder"] # "gismoFolder_" input of Gismo_Gismo component
workingSubFolderPath = os.path.join(gismoFolder, "horizon_files")
else:
# something inputted to "workingFolder_" input
workingSubFolderPath = os.path.join(workingFolderPath, "horizon_files")
folderCreated = gismo_preparation.createFolder(workingSubFolderPath)
if folderCreated == False:
srfCornerPtsLL = srfCentroidL = srfCentroid = contextMeshJoined = northRad = northVec = scale = outputGeometryIndex = workingSubFolderPath = horizonFileType = horizonFileTypeLabel = unitConversionFactor = None
validInputData = False
printMsg = "workingFolder_ input is invalid.\n" + \
"Input the string in the following format (example): C:\someFolder.\n" + \
"Or do not input anything, in which case a default Gismo folder will be used instead."
return srfCornerPtsLL, srfCentroidL, srfCentroid, contextMeshJoined, northRad, northVec, scale, outputGeometryIndex, workingSubFolderPath, horizonFileType, horizonFileTypeLabel, unitConversionFactor, validInputData, printMsg
if (horizonFileType == None) or (horizonFileType == 0): # .hor file with no heading (Meteonorm 6 and Meteonorm 7)
horizonFileType = 0 # default
horizonFileTypeLabel = ""
elif (horizonFileType == 1): # PV*SOL .hor file
horizonFileType = 1
horizonFileTypeLabel = "_PVSOL"
elif (horizonFileType == 2): # PVsyst 5 and PVsyst 6 .hor file
horizonFileType = 2
horizonFileTypeLabel = "_PVsyst5_6"
elif (horizonFileType == 3): # PVsyst 4 .hor file
horizonFileType = 3
horizonFileTypeLabel = "_PVsyst4"
elif (horizonFileType < 0) or (horizonFileType > 3):
horizonFileType = 0 # default Meteonorm
horizonFileTypeLabel = ""
print "horizonFileType_ input only supports the following values:\n" + \
"0 - Meteonorm .hor file.\n" + \
"1 - PVSOL .hor file,\n" + \
"2 - PVsyst 5 and PVsyst 6 .hor file,\n" + \
"3 - PVsyst4 .hor file.\n" + \
" \n" + \
"horizonFileType_ input set to 0 (Meteonorm) by default."
if (outputGeometryIndex == None) or (outputGeometryIndex < 0):
outputGeometryIndex = 0 # default
else:
if (outputGeometryIndex + 1) > len(pathsAnalysisGeometry):
srfCornerPtsLL = srfCentroidL = srfCentroid = contextMeshJoined = northRad = northVec = scale = outputGeometryIndex = workingSubFolderPath = horizonFileType = horizonFileTypeLabel = unitConversionFactor = None
validInputData = False
printMsg = "The index number inputted into \"outputGeometryIndex_\" is higher than number of inputted objects into \"_analysisGeometry\". Please choose an input for \"outputGeometryIndex_\" from 0 to %s." % str(len(analysisGeometryBranchesLists)-1)
return srfCornerPtsLL, srfCentroidL, srfCentroid, contextMeshJoined, northRad, northVec, scale, outputGeometryIndex, workingSubFolderPath, horizonFileType, horizonFileTypeLabel, unitConversionFactor, validInputData, printMsg
elif srfCentroidL[outputGeometryIndex] == None:
srfCornerPtsLL = srfCentroidL = srfCentroid = contextMeshJoined = northRad = northVec = scale = outputGeometryIndex = workingSubFolderPath = horizonFileType = horizonFileTypeLabel = unitConversionFactor = None
validInputData = False
printMsg = "The %s supplied to the \"outputGeometryIndex_\" input, points to the %s. item in the \"_analysisGeometry\" input. This item is neither a surface, nor a point, therefor it's invalid.\n" % (outputGeometryIndex, outputGeometryIndex) + \
"Remove that item from your \"_analysisGeometry\" input, or change the value supplied to the \"outputGeometryIndex_\" so that it points to some other valid \"_analysisGeometry\" item."
outputGeometryIndex = None # set bellow the "printMsg" variable, so that it does not confront with it
return srfCornerPtsLL, srfCentroidL, srfCentroid, contextMeshJoined, northRad, northVec, scale, outputGeometryIndex, workingSubFolderPath, horizonFileType, horizonFileTypeLabel, unitConversionFactor, validInputData, printMsg
srfCentroid = srfCentroidL[outputGeometryIndex]
unitConversionFactor, unitSystemLabel = gismo_preparation.checkUnits() # factor to convert Rhino document units to meters.
validInputData = True
printMsg = "ok"
return srfCornerPtsLL, srfCentroidL, srfCentroid, contextMeshJoined, northRad, northVec, scale, outputGeometryIndex, workingSubFolderPath, horizonFileType, horizonFileTypeLabel, unitConversionFactor, validInputData, printMsg
def calculateHorizonAngles(contextMeshJoined, origin, northRad, unitConversionFactor):
    """
    Ray-cast against the context mesh to find the horizon angle per azimuth.

    A half sky dome is sampled column by column (azimuth) and row by row
    (altitude); in each column the last (highest) ray that still hits the
    context mesh defines the horizon (obstruction) angle for that azimuth.

    Returns a 6-tuple:
    azimuthsD (integer azimuths 0-359, clockwise), horizonAnglesD (integer
    degrees), originLifted, horizonAnglesRoseMeshPts,
    horizonAnglesD_for_colors, contextShadingMaskUnscaledUnrotated
    (currently always None).
    """
    originLifted = Rhino.Geometry.Point3d(origin.X, origin.Y, origin.Z + 0.01) # fix for rays intersection, if user inputted a ground surface to "_contex" input
    # create skyDome
    skyDomeRadius = 200 / unitConversionFactor # in meters
    skyDomeSphere = Rhino.Geometry.Sphere(originLifted, skyDomeRadius)
    skyDomeSrf = skyDomeSphere.ToBrep().Faces[0]
    # small number of rays (for example: precisionU = 30, precisionV = 10) can result in rays missing the contextMeshJoined, thererfor the shadingMaskSrf will not be created
    precisionU = 3600 # rays shot per 0.1 degrees (10th of a degree)
    precisionV = 1200 # rays shot per 0.075 degrees - more denser than precisionU
    #precisionU = 140
    #precisionV = 40
    halvedSkyDomeSrf = skyDomeSrf.Trim(Rhino.Geometry.Interval(skyDomeSrf.Domain(0)[0], skyDomeSrf.Domain(0)[1]), Rhino.Geometry.Interval(0, skyDomeSrf.Domain(1)[1])) # split the skyDome sphere in half
    halvedSkyDomeSrf.SetDomain(1, Rhino.Geometry.Interval(0, halvedSkyDomeSrf.Domain(1)[1])) # shrink the halvedSkyDomeSrf V start domain
    clockwise = True
    if clockwise == True:
        # reverse the U domain (from counter-clockwise to clockwise)
        uStart,uEnd = halvedSkyDomeSrf.Domain(0)
        interval0 = Rhino.Geometry.Interval(-uEnd, -uStart)
        halvedSkyDomeSrf.SetDomain(0,interval0)
        halvedSkyDomeSrf = halvedSkyDomeSrf.Reverse(0)
    else:
        pass
    # rotate the halvedSkyDomeSrf by 90 degrees so that it starts at 0 degrees (+Y axis)
    transformMatrixRotate1 = Rhino.Geometry.Transform.Rotation(math.radians(90), Rhino.Geometry.Vector3d(0,0,1), originLifted)
    halvedSkyDomeSrf.Transform(transformMatrixRotate1)
    # rotation due to north angle position
    #transformMatrixRotate2 = Rhino.Geometry.Transform.Rotation(-northRad, Rhino.Geometry.Vector3d(0,0,1), originLifted) # counter-clockwise
    transformMatrixRotate2 = Rhino.Geometry.Transform.Rotation(northRad, Rhino.Geometry.Vector3d(0,0,1), originLifted) # clockwise
    halvedSkyDomeSrf.Transform(transformMatrixRotate2)
    skyDomeDomainUmin, skyDomeDomainUmax = halvedSkyDomeSrf.Domain(0)
    skyDomeDomainVmin, skyDomeDomainVmax = halvedSkyDomeSrf.Domain(1)
    stepU = (skyDomeDomainUmax - skyDomeDomainUmin)/precisionU
    stepV = (skyDomeDomainVmax - skyDomeDomainVmin)/precisionV
    # check for intersection between the contextMeshJoined and rays
    horizonAnglesRoseMeshPts = []
    lastRowPoints = []
    hitted = False # initial switch
    # one column of rays per azimuth step (U); every 10th column also
    # contributes points to the horizon-angles rose mesh
    for i in xrange(0,precisionU):
        u = skyDomeDomainUmin + stepU*i
        if (i % 10 == 0):
            horizonAnglesRoseMeshPts.append(originLifted)
            firstRowPt = halvedSkyDomeSrf.PointAt(u,0)
            horizonAnglesRoseMeshPts.append(firstRowPt)
        for k in xrange(0,precisionV):
            v = skyDomeDomainVmin + stepV*k
            skyDomePt = halvedSkyDomeSrf.PointAt(u,v)
            rayVector = skyDomePt-originLifted
            ray = Rhino.Geometry.Ray3d(originLifted, rayVector)
            rayIntersectParam = Rhino.Geometry.Intersect.Intersection.MeshRay(contextMeshJoined,ray)
            if rayIntersectParam >= 0:
                # ray hitted something in that column
                hitted = True
                lastRowPt = skyDomePt
                continue
            else:
                # ray did not hit anything in that column
                pass
        if hitted == False:
            lastRowPt = halvedSkyDomeSrf.PointAt(u,0)
        lastRowPoints.append(lastRowPt)
        hitted = False # reset the hitted switch
    # calculate the horizonAngles from lastRowPoints:
    azimuthsD = [] # depends on precisionU and precisionV
    horizonAnglesD = []
    horizonAnglesD_for_colors = [] # made of horizonAnglesD duplicates to account for the origin point of the horizonAnglesRoseMeshPts
    for azimuth,lastRowPt in enumerate(lastRowPoints):
        if (azimuth % 10 == 0): # only take azimuths 0,10,20,30... 3580,3590
            # horizon angle = atan(rise / horizontal run) from the lifted
            # origin to the last hit point of this azimuth column
            projectedLastRowPt = Rhino.Geometry.Point3d(lastRowPt.X, lastRowPt.Y, originLifted.Z)
            tangent_horizonAngleR = (lastRowPt.Z - originLifted.Z)/originLifted.DistanceTo(projectedLastRowPt)
            if tangent_horizonAngleR < 0.001: # fix if horizonAngle = 0
                tangent_horizonAngleR = 0
            horizonAngleR = math.atan(tangent_horizonAngleR)
            horizonAngleD = math.degrees(horizonAngleR) # .hor files have integer values for horizon angles
            horizonAnglesD.append(int(horizonAngleD))
            azimuthsD.append(int(azimuth/10)) # convert the azimuths from 0,10,20,30... 3580,3590 to 0,1,2,3... 358,359
            horizonAnglesD_for_colors.append(horizonAngleD)
            horizonAnglesD_for_colors.append(horizonAngleD)
    # possible future creation of contextShadingMask (more precisely contextShadingMaskUnscaledUnrotated), the same as from "Terrain shading mask" component by its code starting from " if maskStyle == 0: # spherical terrain shading mask" (line ?)
    contextShadingMaskUnscaledUnrotated = None
    return azimuthsD, horizonAnglesD, originLifted, horizonAnglesRoseMeshPts, horizonAnglesD_for_colors, contextShadingMaskUnscaledUnrotated
def main(contextMeshJoined, srfCentroidL, northRad, outputGeometryIndex, unitConversionFactor):
    """Compute horizon angles per analysis centroid and pack them into data trees.

    Args:
        contextMeshJoined: joined context mesh that rays are intersected with.
        srfCentroidL: list of centroid points; items may be None when the
            corresponding _analysisGeometry branch is a point or a single
            faced brep.
        northRad: north direction angle in radians.
        outputGeometryIndex: branch index whose results are additionally
            returned for the geometry outputs and the horizon (.hor) file.
        unitConversionFactor: factor for converting model units.

    Returns:
        Tuple of (azimuthsD_dataTree, horizonAnglesD_dataTree,
        azimuthsD_for_horizonFile, horizonAnglesD_for_horizonFile,
        maximalAzimuthD_dataTree, maximalHorizonAngleD_dataTree,
        maximalAzimuthD_for_title, maximalHorizonAngleD_for_title,
        horizonAnglesRoseMeshPts, horizonAnglesD_for_colors,
        contextShadingMaskUnscaledUnrotated). The "picked" single-branch
        values are None when outputGeometryIndex never matched a valid
        (non-None) centroid.
    """
    azimuthsD_dataTree = Grasshopper.DataTree[object]()
    horizonAnglesD_dataTree = Grasshopper.DataTree[object]()
    maximalAzimuthD_dataTree = Grasshopper.DataTree[object]()
    maximalHorizonAngleD_dataTree = Grasshopper.DataTree[object]()

    # FIX: pre-initialize the picked-branch outputs. Previously these names
    # were only bound inside "if index == outputGeometryIndex", so the
    # return statement raised NameError whenever that branch never ran
    # (e.g. outputGeometryIndex pointing at a None item of srfCentroidL).
    azimuthsD_for_horizonFile = None
    horizonAnglesD_for_horizonFile = None
    maximalAzimuthD_for_title = None
    maximalHorizonAngleD_for_title = None
    horizonAnglesRoseMeshPts = None
    horizonAnglesD_for_colors = None
    contextShadingMaskUnscaledUnrotated = None

    # NOTE(review): _analysisGeometry is read from the enclosing component
    # scope, exactly as in the original code -- confirm it is always set
    # before main() is called.
    paths = _analysisGeometry.Paths
    for index, srfCentroid in enumerate(srfCentroidL):
        if srfCentroid is not None:
            # This branch of _analysisGeometry is neither a point nor a
            # single faced brep, so horizon angles can be computed for it.
            azimuthsD, horizonAnglesD, originLifted, horizonAnglesRoseMeshPts_notPicked, horizonAnglesD_for_colors_notPicked, contextShadingMaskUnscaledUnrotated_notPicked = calculateHorizonAngles(contextMeshJoined, srfCentroid, northRad, unitConversionFactor)

            # Find the maximal horizon angle and its azimuth: sort the
            # (angle, azimuth) pairs so the largest ends up last.
            anglesWithAzimuths = sorted(zip(horizonAnglesD, azimuthsD))
            maximalHorizonAngleD, maximalAzimuthD = anglesWithAzimuths[-1]

            # Extract this branch's azimuthsD and horizonAnglesD for the
            # geometry outputs (horizonAnglesRose, compassCrvs, title,
            # legend) and for the horizon file (.hor).
            if index == outputGeometryIndex:
                azimuthsD_for_horizonFile = azimuthsD
                horizonAnglesD_for_horizonFile = horizonAnglesD
                maximalAzimuthD_for_title = maximalAzimuthD
                maximalHorizonAngleD_for_title = maximalHorizonAngleD
                horizonAnglesRoseMeshPts = horizonAnglesRoseMeshPts_notPicked
                horizonAnglesD_for_colors = horizonAnglesD_for_colors_notPicked
                # Always None, unless in the future the mask is created
                # inside the "calculateHorizonAngles" function.
                contextShadingMaskUnscaledUnrotated = contextShadingMaskUnscaledUnrotated_notPicked

            # Add the per-branch results to the output data trees.
            azimuthsD_dataTree.AddRange(azimuthsD, paths[index])
            horizonAnglesD_dataTree.AddRange(horizonAnglesD, paths[index])
            maximalAzimuthD_dataTree.AddRange([maximalAzimuthD], paths[index])
            maximalHorizonAngleD_dataTree.AddRange([maximalHorizonAngleD], paths[index])

    return azimuthsD_dataTree, horizonAnglesD_dataTree, azimuthsD_for_horizonFile, horizonAnglesD_for_horizonFile, maximalAzimuthD_dataTree, maximalHorizonAngleD_dataTree, maximalAzimuthD_for_title, maximalHorizonAngleD_for_title, horizonAnglesRoseMeshPts, horizonAnglesD_for_colors, contextShadingMaskUnscaledUnrotated
def | |
import random
import constants as C
import exceptions as e
class Booster:
    """A round booster tile.

    Carries up to two income rows, an optional special action marker and
    an optional pass-VP rule. ``used`` tracks whether the tile's special
    action has been taken this round.
    """

    def __init__(self, img, income1=False, income2=False,
                 special=False, vp=False):
        self.img = img
        self.income1 = income1
        self.income2 = income2
        self.special = special
        self.vp = vp
        # Only relevant on boosters that carry a special action.
        self.used = False

    def resolve_effect(self, player):
        """Award the pass-VP of this booster to ``player``.

        Args:
            player: Player object of the player that passed.
        """
        reason = "Because of your old booster"
        amount = None
        if self.vp == "mine1":
            amount = player.faction.mine_built
            # The lost planet counts as an extra mine.
            if player.lost_planet:
                amount += 1
        elif self.vp == "trade2":
            amount = player.faction.trading_station_built * 2
        elif self.vp == "researchlab3":
            amount = player.faction.research_lab_built * 3
        elif self.vp == "planetaryacademy4":
            amount = (
                player.faction.academy_built
                + player.faction.planetary_institute_built
            ) * 4
        elif self.vp == "gaia1":
            # Gaiaformers occupy a planet without settling it, so they
            # do not count as owned Gaia planets.
            amount = sum(
                1 for planet in player.empire
                if planet.type == "Gaia"
                and planet.structure != "Gaiaformer"
            )
        if amount is not None:
            player.resolve_gain(f"vp{amount}", reason)

    def __str__(self):
        left = self.income1 or self.special or self.vp
        return f"Booster: {left} | {self.income2}"
class Terraform(Booster):
    """Booster granting the "gain 1 terraforming step" special action."""

    def resolve_effect(self, player, gp, rnd):
        """Apply the free terraforming step and force a mine build.

        Args:
            player: Player object of the player that acquired the tile.
            gp: GaiaProject main game object.
            rnd: Active Round object.
        """
        print(
            "You have gained 1 terraforming step. You must now build a mine."
        )
        # The free step is passed to the mine action as a discount of 1.
        player.mine(gp, rnd, 1, action="boost_terraform")
        # Flag the special action as spent for this round.
        self.used = True
class ExtraRange(Booster):
    """Booster granting the "gain 3 extra range" special action."""

    def resolve_effect(self, player, gp, rnd):
        """Let ``player`` spend the extra range on a mine or gaia project.

        Args:
            player: Player object of the player that acquired the tile.
            gp: GaiaProject main game object.
            rnd: Active Round object.
        """
        print(
            "You have gained 3 extra range. You must now build a Mine or "
            "start a Gaia Project."
        )
        action = "boost_range"
        done = False
        while not done:
            candidate = player.choose_planet(gp.universe, action)
            try:
                if candidate.type in C.MINE_TYPES:
                    # The chosen planet can receive a mine directly.
                    player.mine(
                        gp,
                        rnd,
                        extra_range=3,
                        p_chosen=candidate,
                        action=action
                    )
                else:
                    # Otherwise a Gaia Project is started on it.
                    player.gaia(
                        gp.universe,
                        p_chosen=candidate,
                        action=action,
                        extra_range=3
                    )
            except e.ExtraRangeError:
                # Planet was out of reach even with the bonus; pick again.
                continue
            done = True
        self.used = True
class RoundScoring:
    """One of the six per-round scoring tiles."""

    def __init__(self, img, vp, goal, first_half, second_half):
        self.img = img    # Tile image filename.
        self.vp = vp      # Victory points awarded per scored action.
        self.goal = goal  # Action that scores this round.
        # presumably the Automa scoring values for rounds 1-3 / 4-6
        # -- TODO confirm against the caller.
        self.first_half = first_half
        self.second_half = second_half

    def __str__(self):
        return self.goal
class EndScoring:
    """One of the two end-of-game scoring tiles."""

    def __init__(self, img, goal, neutral):
        self.img = img    # Tile image filename.
        self.goal = goal  # The quantity scored at game end.
        # Score of the neutral player in a 2 player game.
        self.neutral = neutral

    def __str__(self):
        return self.goal
class Scoring:
"""Scoring board."""
def __init__(self):
# These lists are filled in the randomise functions.
# Boosters
self.boosters = []
# Round scoring
self.rounds = [] # Filled in the randomise_scoring function.
# End scoring
self.end_scoring = [] # Filled in the randomise_scoring function.
def randomise_boosters(self, players):
boosters = [
Booster("BOOknw.png", income1="ore1", income2="knowledge1"),
Booster("BOOpwt.png", income1="powertoken2", income2="ore1"),
Booster("BOOqic.png", income1="credits2", income2="qic1"),
Terraform(
"BOOter.png", special="terraforming1", income2="credits2"
),
ExtraRange("BOOnav.png", special="range3", income2="power2"),
Booster("BOOmin.png", vp="mine1", income2="ore1"),
Booster("BOOtrs.png", vp="trade2", income2="ore1"),
Booster("BOOlab.png", vp="researchlab3", income2="knowledge1"),
Booster("BOOpia.png", vp="planetaryacademy4", income2="power4"),
Booster("BOOgai.png", vp="gaia1", income2="credits4"),
]
for _ in range(players + 3):
self.boosters.append(
boosters.pop(random.randrange(len(boosters)))
)
def randomise_scoring(self):
round_tiles = [
RoundScoring("RNDter.png", 2, "terraforming", 4, 6),
RoundScoring("RNDstp.png", 2, "research", 2, 4),
RoundScoring("RNDmin.png", 2, "mine", 4, 6),
RoundScoring("RNDfed.png", 5, "fedtoken", 0, 5),
RoundScoring("RNDtrs3.png", 3, "trade", 3, 6),
RoundScoring("RNDtrs4.png", 4, "trade", 3, 6),
RoundScoring("RNDgai3.png", 3, "gaiamine", 2, 2),
RoundScoring("RNDgai4.png", 4, "gaiamine", 2, 2),
RoundScoring("RNDpia.png", 5, "planetaryacademy", 0, 5),
RoundScoring("RNDpia.png", 5, "planetaryacademy", 0, 5)
]
for _ in range(6):
self.rounds.append(
round_tiles.pop(random.randrange(len(round_tiles)))
)
end_scoring_tiles = [
EndScoring("FINfed.png", "structures_federation", 10),
EndScoring("FINbld.png", "structures", 11),
EndScoring("FINtyp.png", "planet_types", 5),
EndScoring("FINgai.png", "gaia_planets", 4),
EndScoring("FINsec.png", "sectors", 6),
EndScoring("FINsat.png", "satellites", 8)
]
self.end_scoring = [
end_scoring_tiles.pop(random.randrange(len(end_scoring_tiles))),
end_scoring_tiles.pop(random.randrange(len(end_scoring_tiles)))
]
def end_game_scoring(self, gp):
# TODO MORE PLAYERS CRITICAL Only works while playing against Automa.
# TODO MINOR pretty print end scoring tile names.
print("\nEnd Scoring.")
# Score points from end scoring tiles.
for end_tile in self.end_scoring:
print(f"Now scoring {end_tile.goal}:")
# Used for determining the winner and for shared places.
scores = []
for player in gp.players:
if end_tile.goal == "structures_federation":
if type(player).__name__ == "Automa":
end_tile_score = len(player.empire) - 1
scores.append([player, end_tile_score])
else:
print(
f"{player.faction.name}, how many structures that "
"are part of a Federation do you have?"
)
while True:
end_tile_score = input("--> ")
try:
end_tile_score = int(end_tile_score)
except ValueError:
print("! Please only type a number.")
continue
else:
break
scores.append([player, end_tile_score])
elif end_tile.goal == "structures":
end_tile_score = len(player.empire)
scores.append([player, end_tile_score])
elif end_tile.goal == "planet_types":
end_tile_score = len(
{planet.type for planet in player.empire}
)
scores.append([player, end_tile_score])
elif end_tile.goal == "gaia_planets":
end_tile_score = len(
[
planet for planet in player.empire
if planet.type == "Gaia"
and planet.strucure != "Gaiaformer"
]
)
scores.append([player, end_tile_score])
elif end_tile.goal == "sectors":
end_tile_score = len(
{planet.sector for planet in player.empire}
)
scores.append([player, end_tile_score])
elif end_tile.goal == "satellites":
if type(player).__name__ == "Automa":
message = (
"How many satellites does the Automa have?"
)
else:
message = (
f"{player.faction.name}, how many satellites do "
"you have?"
)
print(message)
while True:
end_tile_score = input("--> ")
try:
end_tile_score = int(end_tile_score)
except ValueError:
print("! Please only type a number.")
continue
else:
break
scores.append([player, end_tile_score])
place123 = False
place12 = False
place1 = False
place23 = False
place2 = False
place3 = False
neutral_score = end_tile.neutral
scores.append(["Neutral", neutral_score])
# Sort by score in reverse for highest to lowest.
scores.sort(key=lambda score: score[1], reverse=True)
# TODO MEDIUM print out a summary of what each player got. So don't
# just print out the end result, but print out how many different
# planet types everyone got and how many gaia planets etc.
# All players are tied.
if scores[0][1] == scores[1][1] == scores[2][1]:
place123 = True
elif scores[0][1] == scores[1][1]:
place12 = True
place3 = True
else:
place1 = True
# Players 2 and 3 are tied
if scores[1][1] == scores[2][1]:
place23 = True
else:
place2 = True
place3 = True
i = 0
while i < 3:
player = scores[i][0]
if player == "Neutral":
i += 1
continue
if type(player).__name__ == "Automa":
description = "Automa"
else:
description = player.faction.name
if place123:
player.vp += 12
print(
f"+ {description} has gained 12 Victory "
f"Points."
)
elif place12 and i != 2:
player.vp += 15
print(
f"+ {description} has gained 15 Victory "
f"Points."
)
elif place1 and i == 0:
player.vp += 18
print(
f"+ {description} has gained 18 Victory "
f"Points."
)
if place23 and i > 0:
player.vp += 9
print(
f"+ {description} has gained 9 Victory "
f"Points."
)
elif place2 and i == 1:
player.vp += 12
print(
f"+ {description} has gained 12 Victory "
f"Points."
)
elif place3 and i == 2:
player.vp += 6
print(
f"+ {description} has gained 6 Victory "
f"Points."
)
i += 1
# Just an empty print for white space between end scoring tiles.
print()
# Score points for research track progress and resources.
print(
"Now scoring Research track progress and resources. For every "
"step past level 2 on a research track you get 4 Victory Points.\n"
"For every 3 Credits, Knowledge, and Ore in any combination, you "
"get 1 Victory Point."
)
# Just an empty print for white space between player scoring.
print()
for player in gp.players:
print(
"Research track progress and resource victory points for "
f"{player.faction.name}:"
)
# If this flag is never set to True, a message telling the player
# that no points were gained this way will be displayed.
research_progress = False
if int(player.terraforming.name[-1]) > 2:
score = (int(player.terraforming.name[-1]) - 2) * 4
player.vp += score
print(f"+ {score} Victory Points from the Terraforming track.")
research_progress = True
if int(player.navigation.name[-1]) > 2:
score = (int(player.navigation.name[-1]) - 2) * 4
player.vp += score
print(f"+ {score} Victory Points from the Navigation track.")
research_progress = True
if int(player.a_i.name[-1]) > 2:
score = (int(player.a_i.name[-1]) - 2) * 4
player.vp += score
print(
f"+ {score} Victory Points from the Artificial "
"Intelligence | |
approximated value
if N_sma >= 5:
step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
else:
step_size = 1.0 / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
else:
p_data_fp32.add_(-step_size * group['lr'], exp_avg)
p.data.copy_(p_data_fp32)
return loss
class PlainRAdam(Optimizer):
    """Rectified Adam without the per-step-size cache used by RAdam.

    Implements Liu et al. 2019, "On the Variance of the Adaptive Learning
    Rate and Beyond": while the variance of the adaptive learning rate is
    still intractable (N_sma < 5), the update degenerates to a
    bias-corrected momentum step; afterwards the rectified Adam step is
    used.

    Args:
        params: iterable of parameters or dicts defining parameter groups.
        lr (float): learning rate.
        betas (Tuple[float, float]): coefficients for the moving averages
            of the gradient and its square.
        eps (float): term added to the denominator for numerical stability.
        weight_decay (float): L2 penalty factor, applied directly to the
            weights and scaled by ``lr``.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))

        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(PlainRAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(PlainRAdam, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (callable, optional): re-evaluates the model and
                returns the loss.

        Returns:
            The loss returned by ``closure``, or None.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')

                # Compute in fp32 even for half-precision params.
                p_data_fp32 = p.data.float()

                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # FIX: keyword overloads (value=/alpha=) replace the
                # positional forms deprecated since PyTorch 1.5 and removed
                # in later releases; numerics are unchanged.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state['step'] += 1
                beta2_t = beta2 ** state['step']
                # Length of the approximated simple moving average (SMA).
                N_sma_max = 2 / (1 - beta2) - 1
                N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)

                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])

                # more conservative since it's an approximated value
                if N_sma >= 5:
                    # Variance is tractable: rectified adaptive step.
                    step_size = group['lr'] * math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)
                else:
                    # Fall back to an un-adapted (momentum-only) update.
                    step_size = group['lr'] / (1 - beta1 ** state['step'])
                    p_data_fp32.add_(exp_avg, alpha=-step_size)

                p.data.copy_(p_data_fp32)

        return loss
class AdamW(Optimizer):
    """Adam with decoupled weight decay and optional linear LR warmup.

    Weight decay is applied directly to the weights (decoupled from the
    gradient-based update), following Loshchilov & Hutter, "Decoupled
    Weight Decay Regularization".

    Args:
        params: iterable of parameters or dicts defining parameter groups.
        lr (float): learning rate.
        betas (Tuple[float, float]): coefficients for the moving averages
            of the gradient and its square.
        eps (float): term added to the denominator for numerical stability.
        weight_decay (float): decoupled weight decay factor.
        warmup (int): number of steps over which the learning rate ramps
            linearly from ~0 to ``lr`` (0 disables warmup).
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, warmup=0):
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))

        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, warmup=warmup)
        super(AdamW, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdamW, self).__setstate__(state)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (callable, optional): re-evaluates the model and
                returns the loss.

        Returns:
            The loss returned by ``closure``, or None.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')

                # Compute in fp32 even for half-precision params.
                p_data_fp32 = p.data.float()

                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # FIX: keyword overloads (value=/alpha=) replace the
                # positional forms deprecated since PyTorch 1.5 and removed
                # in later releases; numerics are unchanged.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                if group['warmup'] > state['step']:
                    # Linear warmup from ~0 up to the configured lr.
                    scheduled_lr = 1e-8 + state['step'] * group['lr'] / group['warmup']
                else:
                    scheduled_lr = group['lr']

                step_size = scheduled_lr * math.sqrt(bias_correction2) / bias_correction1

                if group['weight_decay'] != 0:
                    # Decoupled weight decay, scaled by the scheduled lr.
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * scheduled_lr)

                p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size)

                p.data.copy_(p_data_fp32)

        return loss
class Ranger(Optimizer):
    """RAdam combined with LookAhead ("Ranger").

    Based on
    https://github.com/lessw2020/Ranger-Deep-Learning-Optimizer/blob/master/ranger.py

    Every parameter keeps a slow-weights copy; every ``k`` steps the fast
    (RAdam-updated) weights are interpolated towards the slow weights by
    factor ``alpha`` (LookAhead, Zhang et al. 2019).

    Args:
        params: iterable of parameters or dicts defining parameter groups.
        lr (float): learning rate.
        alpha (float): LookAhead interpolation factor in ``[0, 1]``.
        k (int): LookAhead synchronization period (>= 1).
        N_sma_threshhold (int): SMA length above which the rectified
            adaptive step is used (5 recommended).
        betas (Tuple[float, float]): coefficients for the moving averages
            of the gradient and its square.
        eps (float): term added to the denominator for numerical stability.
        weight_decay (float): L2 penalty factor, scaled by ``lr``.
    """

    def __init__(self, params, lr=1e-3, alpha=0.5, k=6,
                 N_sma_threshhold=5, betas=(.95, 0.999), eps=1e-5, weight_decay=0):
        # parameter checks
        if not 0.0 <= alpha <= 1.0:
            raise ValueError(f'Invalid slow update rate: {alpha}')
        if not 1 <= k:
            raise ValueError(f'Invalid lookahead steps: {k}')
        if not lr > 0:
            raise ValueError(f'Invalid Learning Rate: {lr}')
        if not eps > 0:
            raise ValueError(f'Invalid eps: {eps}')

        # parameter comments:
        # beta1 (momentum) of .95 seems to work better than .90...
        # N_sma_threshold of 5 seems better in testing than 4.
        # In both cases, worth testing on your dataset (.90 vs .95, 4 vs 5)
        # to make sure which works best for you.

        # prep defaults and init torch.optim base
        defaults = dict(lr=lr, alpha=alpha, k=k, step_counter=0, betas=betas, N_sma_threshhold=N_sma_threshhold, eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)

        # adjustable threshold
        self.N_sma_threshhold = N_sma_threshhold

        # look ahead params
        self.alpha = alpha
        self.k = k

        # radam buffer for state
        self.radam_buffer = [[None, None, None] for ind in range(10)]

    def __setstate__(self, state):
        print("set state called")
        super(Ranger, self).__setstate__(state)

    def step(self, closure=None):
        """Perform one RAdam step plus (every ``k`` steps) a LookAhead sync.

        Args:
            closure (callable, optional): re-evaluates the model and
                returns the loss.

        Returns:
            The loss returned by ``closure``, or None.
        """
        loss = None
        # FIX: the upstream version ignored ``closure`` entirely; evaluate
        # it like every other torch optimizer so callers get their loss.
        if closure is not None:
            loss = closure()

        # Evaluate averages and grad, update param tensors
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('Ranger optimizer does not support sparse gradients')

                # Compute in fp32 even for half-precision params.
                p_data_fp32 = p.data.float()

                state = self.state[p]  # get state dict for this param

                if len(state) == 0:  # first run: init state with desired entries
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)

                    # look ahead weight storage now in state dict
                    state['slow_buffer'] = torch.empty_like(p.data)
                    state['slow_buffer'].copy_(p.data)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                # begin computations
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # FIX: keyword overloads (value=/alpha=) replace the
                # positional forms deprecated since PyTorch 1.5 and removed
                # in later releases; numerics are unchanged.
                # compute variance mov avg
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # compute mean moving avg
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)

                state['step'] += 1

                # Cache the expensive rectification term per step count;
                # params on the same step share one buffer slot.
                buffered = self.radam_buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = 2 / (1 - beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma
                    if N_sma > self.N_sma_threshhold:
                        step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
                    else:
                        step_size = 1.0 / (1 - beta1 ** state['step'])
                    buffered[2] = step_size

                if group['weight_decay'] != 0:
                    p_data_fp32.add_(p_data_fp32, alpha=-group['weight_decay'] * group['lr'])

                if N_sma > self.N_sma_threshhold:
                    # Variance is tractable: rectified adaptive step.
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=-step_size * group['lr'])
                else:
                    # Fall back to an un-adapted (momentum-only) update.
                    p_data_fp32.add_(exp_avg, alpha=-step_size * group['lr'])

                p.data.copy_(p_data_fp32)

                # integrated look ahead...
                # applied at the param level instead of group level
                if state['step'] % group['k'] == 0:
                    slow_p = state['slow_buffer']  # get access to slow param tensor
                    # slow += alpha * (fast weights - slow weights)
                    slow_p.add_(p.data - slow_p, alpha=self.alpha)
                    # copy interpolated weights to RAdam param tensor
                    p.data.copy_(slow_p)

        return loss
# class RAdam(Optimizer):
# def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
# defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
# self.buffer = [[None, None, None] for ind in range(10)]
# super(RAdam, self).__init__(params, defaults)
# def __setstate__(self, state):
# super(RAdam, self).__setstate__(state)
# def step(self, closure=None):
# loss = None
# if closure is not None:
# loss = closure()
# for group in self.param_groups:
# for p in group['params']:
# if p.grad is None:
# continue
# grad = p.grad.data.float()
# if grad.is_sparse:
# raise RuntimeError('RAdam does not support sparse gradients')
# p_data_fp32 = p.data.float()
# state = self.state[p]
# if len(state) == 0:
# state['step'] = 0
# state['exp_avg'] = torch.zeros_like(p_data_fp32)
# state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
# else:
# state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
# state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
# exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
# beta1, beta2 = group['betas']
# exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# exp_avg.mul_(beta1).add_(1 - beta1, grad)
# state['step'] += 1
# buffered = self.buffer[int(state['step'] % 10)]
# if state['step'] == buffered[0]:
# N_sma, step_size = buffered[1], buffered[2]
# else:
# buffered[0] = state['step']
# beta2_t = beta2 ** state['step']
# N_sma_max = 2 / (1 - beta2) - 1
# N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
# buffered[1] = N_sma
# if N_sma >= 5:
# step_size = math.sqrt((1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step'])
# else:
# step_size = 1.0 / (1 - beta1 ** state['step'])
# buffered[2] = step_size
# if group['weight_decay'] != 0:
# p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# # more conservative since it's an approximated value
# if | |
numpy.random.SeedSequence or numpy.random.RandomState, optional
Old name for parameter `seed`.
Its usage will not yet cause a deprecation warning,
but it is still recommended to use `seed` now.
Outdated since 0.4.0.
deterministic : bool, optional
Deprecated since 0.4.0.
See method ``to_deterministic()`` for an alternative and for
details about what the "deterministic mode" actually does.
Examples
--------
>>> import imgaug.augmenters as iaa
>>> aug = iaa.Resize(32)
Resize all images to ``32x32`` pixels.
>>> aug = iaa.Resize(0.5)
Resize all images to ``50`` percent of their original size.
>>> aug = iaa.Resize((16, 22))
Resize all images to a random height and width within the discrete
interval ``[16..22]`` (uniformly sampled per image).
>>> aug = iaa.Resize((0.5, 0.75))
    Resize any input image so that its height (``H``) and width (``W``)
    become ``H*v`` and ``W*v``, where ``v`` is uniformly sampled from the
    interval ``[0.5, 0.75]``.
>>> aug = iaa.Resize([16, 32, 64])
Resize all images either to ``16x16``, ``32x32`` or ``64x64`` pixels.
>>> aug = iaa.Resize({"height": 32})
Resize all images to a height of ``32`` pixels and keeps the original
width.
>>> aug = iaa.Resize({"height": 32, "width": 48})
Resize all images to a height of ``32`` pixels and a width of ``48``.
>>> aug = iaa.Resize({"height": 32, "width": "keep-aspect-ratio"})
Resize all images to a height of ``32`` pixels and resizes the
x-axis (width) so that the aspect ratio is maintained.
>>> aug = iaa.Resize(
>>> {"shorter-side": 224, "longer-side": "keep-aspect-ratio"})
Resize all images to a height/width of ``224`` pixels, depending on which
axis is shorter and resize the other axis so that the aspect ratio is
maintained.
>>> aug = iaa.Resize({"height": (0.5, 0.75), "width": [16, 32, 64]})
Resize all images to a height of ``H*v``, where ``H`` is the original
height and ``v`` is a random value sampled from the interval
``[0.5, 0.75]``. The width/x-axis of each image is resized to either
``16`` or ``32`` or ``64`` pixels.
>>> aug = iaa.Resize(32, interpolation=["linear", "cubic"])
Resize all images to ``32x32`` pixels. Randomly use either ``linear``
or ``cubic`` interpolation.
"""
def __init__(self, size, interpolation="cubic",
seed=None, name=None,
random_state="deprecated", deterministic="deprecated"):
super(Resize, self).__init__(
seed=seed, name=name,
random_state=random_state, deterministic=deterministic)
self.size, self.size_order = self._handle_size_arg(size, False)
self.interpolation = self._handle_interpolation_arg(interpolation)
    @classmethod
    def _handle_size_arg(cls, size, subcall):
        """Normalize the ``size`` argument to StochasticParameter form.

        Returns only the parameter when ``subcall`` is True (recursive
        invocation for a single axis of a dict argument), otherwise a tuple
        ``(parameter(s), size_order)`` where ``size_order`` is ``"HW"``
        (height/width) or ``"SL"`` (shorter/longer side).
        """
        def _dict_to_size_tuple(val1, val2):
            # Convert the two per-axis values into a pair of parameters;
            # at most one of them may be "keep-aspect-ratio".
            kaa = "keep-aspect-ratio"
            not_both_kaa = (val1 != kaa or val2 != kaa)
            assert not_both_kaa, (
                "Expected at least one value to not be \"keep-aspect-ratio\", "
                "but got it two times.")

            size_tuple = []
            for k in [val1, val2]:
                if k in ["keep-aspect-ratio", "keep"]:
                    # Marker strings stay as deterministic string parameters.
                    entry = iap.Deterministic(k)
                else:
                    entry = cls._handle_size_arg(k, True)
                size_tuple.append(entry)
            return tuple(size_tuple)

        def _contains_any_key(dict_, keys):
            return any([key in dict_ for key in keys])

        # HW = height, width
        # SL = shorter, longer
        size_order = "HW"
        if size == "keep":
            result = iap.Deterministic("keep")
        elif ia.is_single_number(size):
            # A single number applies to both axes.
            assert size > 0, "Expected only values > 0, got %s" % (size,)
            result = iap.Deterministic(size)
        elif not subcall and isinstance(size, dict):
            if len(size.keys()) == 0:
                result = iap.Deterministic("keep")
            elif _contains_any_key(size, ["height", "width"]):
                # Missing axes default to "keep" (retain original size).
                height = size.get("height", "keep")
                width = size.get("width", "keep")
                result = _dict_to_size_tuple(height, width)
            elif _contains_any_key(size, ["shorter-side", "longer-side"]):
                shorter = size.get("shorter-side", "keep")
                longer = size.get("longer-side", "keep")
                result = _dict_to_size_tuple(shorter, longer)
                size_order = "SL"
            else:
                raise ValueError(
                    "Expected dictionary containing no keys, "
                    "the keys \"height\" and/or \"width\", "
                    "or the keys \"shorter-side\" and/or \"longer-side\". "
                    "Got keys: %s." % (str(size.keys()),))
        elif isinstance(size, tuple):
            # A (a, b) tuple denotes a uniform range; float entries sample
            # continuously, pure-int entries sample discrete sizes.
            assert len(size) == 2, (
                "Expected size tuple to contain exactly 2 values, "
                "got %d." % (len(size),))
            assert size[0] > 0 and size[1] > 0, (
                "Expected size tuple to only contain values >0, "
                "got %d and %d." % (size[0], size[1]))
            if ia.is_single_float(size[0]) or ia.is_single_float(size[1]):
                result = iap.Uniform(size[0], size[1])
            else:
                result = iap.DiscreteUniform(size[0], size[1])
        elif isinstance(size, list):
            # A list denotes a discrete set of choices (all int or all float).
            if len(size) == 0:
                result = iap.Deterministic("keep")
            else:
                all_int = all([ia.is_single_integer(v) for v in size])
                all_float = all([ia.is_single_float(v) for v in size])
                assert all_int or all_float, (
                    "Expected to get only integers or floats.")
                assert all([v > 0 for v in size]), (
                    "Expected all values to be >0.")
                result = iap.Choice(size)
        elif isinstance(size, iap.StochasticParameter):
            result = size
        else:
            raise ValueError(
                "Expected number, tuple of two numbers, list of numbers, "
                "dictionary of form "
                "{'height': number/tuple/list/'keep-aspect-ratio'/'keep', "
                "'width': <analogous>}, dictionary of form "
                "{'shorter-side': number/tuple/list/'keep-aspect-ratio'/"
                "'keep', 'longer-side': <analogous>} "
                "or StochasticParameter, got %s." % (type(size),)
            )

        if subcall:
            return result
        return result, size_order
@classmethod
def _handle_interpolation_arg(cls, interpolation):
if interpolation == ia.ALL:
interpolation = iap.Choice(
["nearest", "linear", "area", "cubic"])
elif ia.is_single_integer(interpolation):
interpolation = iap.Deterministic(interpolation)
elif ia.is_string(interpolation):
interpolation = iap.Deterministic(interpolation)
elif ia.is_iterable(interpolation):
interpolation = iap.Choice(interpolation)
elif isinstance(interpolation, iap.StochasticParameter):
pass
else:
raise Exception(
"Expected int or string or iterable or StochasticParameter, "
"got %s." % (type(interpolation),))
return interpolation
    # Added in 0.4.0.
    def _augment_batch_(self, batch, random_state, parents, hooks):
        """Resize all augmentables in ``batch`` in place and return it."""
        nb_rows = batch.nb_rows
        # One (height, width, interpolation) sample triple per row so that
        # all augmentables belonging to the same row are resized alike.
        samples = self._draw_samples(nb_rows, random_state)

        if batch.images is not None:
            batch.images = self._augment_images_by_samples(batch.images,
                                                           samples)

        if batch.heatmaps is not None:
            # TODO this uses the same interpolation as for images for heatmaps
            # while other augmenters resort to cubic
            batch.heatmaps = self._augment_maps_by_samples(
                batch.heatmaps, "arr_0to1", samples)

        if batch.segmentation_maps is not None:
            # Segmentation maps get None instead of an interpolation sample,
            # which makes the map use its own default resize behaviour.
            batch.segmentation_maps = self._augment_maps_by_samples(
                batch.segmentation_maps, "arr",
                (samples[0], samples[1], [None] * nb_rows))

        # All coordinate-based augmentables are resized through the
        # keypoint-based path.
        for augm_name in ["keypoints", "bounding_boxes", "polygons",
                          "line_strings"]:
            augm_value = getattr(batch, augm_name)
            if augm_value is not None:
                func = functools.partial(
                    self._augment_keypoints_by_samples,
                    samples=samples)
                cbaois = self._apply_to_cbaois_as_keypoints(augm_value, func)
                setattr(batch, augm_name, cbaois)
        return batch
    # Added in 0.4.0.
    def _augment_images_by_samples(self, images, samples):
        """Resize each image according to the per-row ``samples``."""
        input_was_array = False
        input_dtype = None
        if ia.is_np_array(images):
            # Remember array-ness and dtype so the output can mirror the
            # input type where possible.
            input_was_array = True
            input_dtype = images.dtype

        samples_a, samples_b, samples_ip = samples
        result = []
        for i, image in enumerate(images):
            h, w = self._compute_height_width(image.shape, samples_a[i],
                                              samples_b[i], self.size_order)
            image_rs = ia.imresize_single_image(image, (h, w),
                                                interpolation=samples_ip[i])
            result.append(image_rs)

        if input_was_array:
            # Only re-pack into an ndarray if all resized images still share
            # one shape; otherwise a list must be returned.
            all_same_size = (len({image.shape for image in result}) == 1)
            if all_same_size:
                result = np.array(result, dtype=input_dtype)

        return result
    # Added in 0.4.0.
    def _augment_maps_by_samples(self, augmentables, arr_attr_name, samples):
        """Resize heatmaps/segmaps while keeping their array-to-image scale."""
        result = []
        samples_h, samples_w, samples_ip = samples

        for i, augmentable in enumerate(augmentables):
            arr = getattr(augmentable, arr_attr_name)
            arr_shape = arr.shape
            img_shape = augmentable.shape
            # Height/width the corresponding image is resized to.
            h_img, w_img = self._compute_height_width(
                img_shape, samples_h[i], samples_w[i], self.size_order)
            # Scale the map array proportionally to the image resize, as the
            # array may have a different (lower) resolution than the image.
            h = int(np.round(h_img * (arr_shape[0] / img_shape[0])))
            w = int(np.round(w_img * (arr_shape[1] / img_shape[1])))
            # Never collapse the array to zero size.
            h = max(h, 1)
            w = max(w, 1)
            # NOTE(review): only the first interpolation sample is checked
            # here; assumes the caller passes all-None (segmaps) or all
            # non-None (heatmaps) interpolations -- confirm.
            if samples_ip[0] is not None:
                # TODO change this for heatmaps to always have cubic or
                # automatic interpolation?
                augmentable_resize = augmentable.resize(
                    (h, w), interpolation=samples_ip[i])
            else:
                augmentable_resize = augmentable.resize((h, w))
            # The map must record the new shape of the underlying image.
            augmentable_resize.shape = (h_img, w_img) + img_shape[2:]
            result.append(augmentable_resize)

        return result
# Added in 0.4.0.
def _augment_keypoints_by_samples(self, kpsois, samples):
result = []
samples_a, samples_b, _samples_ip = samples
for i, kpsoi in enumerate(kpsois):
h, w = self._compute_height_width(
kpsoi.shape, samples_a[i], samples_b[i], self.size_order)
new_shape = (h, w) + kpsoi.shape[2:]
keypoints_on_image_rs = kpsoi.on_(new_shape)
result.append(keypoints_on_image_rs)
return result
def _draw_samples(self, nb_images, random_state):
rngs = random_state.duplicate(3)
if isinstance(self.size, tuple):
samples_h = self.size[0].draw_samples(nb_images,
random_state=rngs[0])
samples_w = self.size[1].draw_samples(nb_images,
random_state=rngs[1])
else:
samples_h = self.size.draw_samples(nb_images, random_state=rngs[0])
samples_w = samples_h
samples_ip = self.interpolation.draw_samples(nb_images,
random_state=rngs[2])
return samples_h, samples_w, samples_ip
@classmethod
def _compute_height_width(cls, image_shape, sample_a, sample_b, size_order):
imh, imw = image_shape[0:2]
if size_order == 'SL':
# size order: short, long
if imh < imw:
h, w = sample_a, sample_b
else:
w, h = sample_a, sample_b
else:
# size order: height, width
h, w = sample_a, sample_b
if ia.is_single_float(h):
assert h > 0, "Expected 'h' to be >0, got %.4f" % (h,)
h = int(np.round(imh * h))
h = h if h > 0 else 1
elif h == "keep":
h = imh
if ia.is_single_float(w):
assert w > 0, "Expected 'w' to be >0, got %.4f" % (w,)
w = int(np.round(imw * w))
w = w if w > 0 else 1
elif w == "keep":
w = imw
# at least the checks for keep-aspect-ratio must come after
# the float checks, as they are dependent on the results
# this is also why these are not written as elifs
if h == "keep-aspect-ratio":
h_per_w_orig = imh / imw
h = int(np.round(w * h_per_w_orig))
if w == "keep-aspect-ratio":
w_per_h_orig = imw / imh
w = int(np.round(h * w_per_h_orig))
return | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C): 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Requirements
# - pyvmomi >= 6.0.0.2016.4
# TODO:
# * more jq examples
# * optional folder hierarchy
"""
$ jq '._meta.hostvars[].config' data.json | head
{
"alternateguestname": "",
"instanceuuid": "5035a5cd-b8e8-d717-e133-2d383eb0d675",
"memoryhotaddenabled": false,
"guestfullname": "Red Hat Enterprise Linux 7 (64-bit)",
"changeversion": "2016-05-16T18:43:14.977925Z",
"uuid": "4235fc97-5ddb-7a17-193b-9a3ac97dc7b4",
"cpuhotremoveenabled": false,
"vpmcenabled": false,
"firmware": "bios",
"""
from __future__ import print_function
import atexit
import datetime
import itertools
import json
import os
import re
import ssl
import sys
import uuid
from time import time
from jinja2 import Environment
from six import integer_types, PY3
from six.moves import configparser
try:
import argparse
except ImportError:
sys.exit('Error: This inventory script required "argparse" python module. Please install it or upgrade to python-2.7')
try:
from pyVmomi import vim, vmodl
from pyVim.connect import SmartConnect, Disconnect
except ImportError:
sys.exit("ERROR: This inventory script required 'pyVmomi' Python module, it was not able to load it")
def regex_match(s, pattern):
    """Jinja2 filter: True when *pattern* matches at the start of *s*."""
    # re.match compiles (and caches) the pattern internally.
    return bool(re.match(pattern, s))
def select_chain_match(inlist, key, pattern):
    """Jinja2 filter: flatten ``d[key]`` across *inlist*, keep regex matches."""
    flattened = itertools.chain.from_iterable(entry[key] for entry in inlist)
    return [value for value in flattened if regex_match(value, pattern)]
class VMwareMissingHostException(Exception):
    """Raised when a requested host cannot be found in the inventory."""
    pass
class VMWareInventory(object):
__name__ = 'VMWareInventory'
guest_props = False
instances = []
debug = False
load_dumpfile = None
write_dumpfile = None
maxlevel = 1
lowerkeys = True
config = None
cache_max_age = None
cache_path_cache = None
cache_path_index = None
cache_dir = None
server = None
port = None
username = None
password = <PASSWORD>
validate_certs = True
host_filters = []
skip_keys = []
groupby_patterns = []
groupby_custom_field_excludes = []
safe_types = [bool, str, float, None] + list(integer_types)
iter_types = [dict, list]
bad_types = ['Array', 'disabledMethod', 'declaredAlarmState']
vimTableMaxDepth = {
"vim.HostSystem": 2,
"vim.VirtualMachine": 2,
}
custom_fields = {}
# use jinja environments to allow for custom filters
env = Environment()
env.filters['regex_match'] = regex_match
env.filters['select_chain_match'] = select_chain_match
# translation table for attributes to fetch for known vim types
vimTable = {
vim.Datastore: ['_moId', 'name'],
vim.ResourcePool: ['_moId', 'name'],
vim.HostSystem: ['_moId', 'name'],
}
@staticmethod
def _empty_inventory():
return {"_meta": {"hostvars": {}}}
    def __init__(self, load=True):
        """Build the inventory: from cache when valid, else via the API.

        :param load: when False, construct an empty object without touching
            CLI arguments, settings or the cache.
        """
        self.inventory = VMWareInventory._empty_inventory()
        if load:
            # Read settings and parse CLI arguments
            self.parse_cli_args()
            self.read_settings()
            # Check the cache
            cache_valid = self.is_cache_valid()
            # Handle Cache
            if self.args.refresh_cache or not cache_valid:
                self.do_api_calls_update_cache()
            else:
                self.debugl('loading inventory from cache')
                self.inventory = self.get_inventory_from_cache()
def debugl(self, text):
if self.args.debug:
try:
text = str(text)
except UnicodeEncodeError:
text = text.encode('ascii', 'ignore')
print('%s %s' % (datetime.datetime.now(), text))
def show(self):
# Data to print
self.debugl('dumping results')
data_to_print = None
if self.args.host:
data_to_print = self.get_host_info(self.args.host)
elif self.args.list:
# Display list of instances for inventory
data_to_print = self.inventory
return json.dumps(data_to_print, indent=2)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
valid = False
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
valid = True
return valid
    def do_api_calls_update_cache(self):
        ''' Get instances and cache the data '''
        # Full refresh: query vSphere, convert to inventory, persist to disk.
        self.inventory = self.instances_to_inventory(self.get_instances())
        self.write_to_cache(self.inventory)
def write_to_cache(self, data):
''' Dump inventory to json file '''
with open(self.cache_path_cache, 'wb') as f:
f.write(json.dumps(data))
def get_inventory_from_cache(self):
''' Read in jsonified inventory '''
jdata = None
with open(self.cache_path_cache, 'rb') as f:
jdata = f.read()
return json.loads(jdata)
    def read_settings(self):
        ''' Reads the settings from the vmware_inventory.ini file

        Precedence: environment variables (VMWARE_*) override ini values,
        which override the hard-coded defaults below. Results are stored as
        attributes on self (server, port, cache paths, filters, ...).
        '''
        # Derive the default ini filename from this script's own name.
        scriptbasename = __file__
        scriptbasename = os.path.basename(scriptbasename)
        scriptbasename = scriptbasename.replace('.py', '')
        defaults = {'vmware': {
            'server': '',
            'port': 443,
            'username': '',
            'password': '',
            'validate_certs': True,
            'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename),
            'cache_name': 'ansible-vmware',
            'cache_path': '~/.ansible/tmp',
            'cache_max_age': 3600,
            'max_object_level': 1,
            # Adjacent string literals concatenate into one comma-joined value.
            'skip_keys': 'declaredalarmstate,'
                         'disabledmethod,'
                         'dynamicproperty,'
                         'dynamictype,'
                         'environmentbrowser,'
                         'managedby,'
                         'parent,'
                         'childtype,'
                         'resourceconfig',
            'alias_pattern': '{{ config.name + "_" + config.uuid }}',
            'host_pattern': '{{ guest.ipaddress }}',
            'host_filters': '{{ runtime.powerstate == "poweredOn" }}',
            'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}',
            'lower_var_keys': True,
            'custom_field_group_prefix': 'vmware_tag_',
            'groupby_custom_field_excludes': '',
            'groupby_custom_field': False}
        }
        # SafeConfigParser was renamed/removed in Python 3.
        if PY3:
            config = configparser.ConfigParser()
        else:
            config = configparser.SafeConfigParser()
        # where is the config?
        vmware_ini_path = os.environ.get('VMWARE_INI_PATH', defaults['vmware']['ini_path'])
        vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path))
        config.read(vmware_ini_path)
        if 'vmware' not in config.sections():
            config.add_section('vmware')
        # apply defaults
        for k, v in defaults['vmware'].items():
            if not config.has_option('vmware', k):
                config.set('vmware', k, str(v))
        # where is the cache?
        self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path'))
        if self.cache_dir and not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)
        # set the cache filename and max age
        cache_name = config.get('vmware', 'cache_name')
        self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name
        self.debugl('cache path is %s' % self.cache_path_cache)
        self.cache_max_age = int(config.getint('vmware', 'cache_max_age'))
        # mark the connection info
        self.server = os.environ.get('VMWARE_SERVER', config.get('vmware', 'server'))
        self.debugl('server is %s' % self.server)
        self.port = int(os.environ.get('VMWARE_PORT', config.get('vmware', 'port')))
        self.username = os.environ.get('VMWARE_USERNAME', config.get('vmware', 'username'))
        self.debugl('username is %s' % self.username)
        # raw=True so '%' characters in passwords survive interpolation.
        self.password = os.environ.get('VMWARE_PASSWORD', config.get('vmware', 'password', raw=True))
        self.validate_certs = os.environ.get('VMWARE_VALIDATE_CERTS', config.get('vmware', 'validate_certs'))
        if self.validate_certs in ['no', 'false', 'False', False]:
            self.validate_certs = False
        self.debugl('cert validation is %s' % self.validate_certs)
        # behavior control
        self.maxlevel = int(config.get('vmware', 'max_object_level'))
        self.debugl('max object level is %s' % self.maxlevel)
        # Config values arrive as strings; coerce truthy spellings to bool.
        self.lowerkeys = config.get('vmware', 'lower_var_keys')
        if type(self.lowerkeys) != bool:
            if str(self.lowerkeys).lower() in ['yes', 'true', '1']:
                self.lowerkeys = True
            else:
                self.lowerkeys = False
        self.debugl('lower keys is %s' % self.lowerkeys)
        self.skip_keys = list(config.get('vmware', 'skip_keys').split(','))
        self.debugl('skip keys is %s' % self.skip_keys)
        # Filters are jinja expressions; split on '}},' and restore the
        # closing braces consumed by the split.
        temp_host_filters = list(config.get('vmware', 'host_filters').split('}},'))
        for host_filter in temp_host_filters:
            host_filter = host_filter.rstrip()
            if host_filter != "":
                if not host_filter.endswith("}}"):
                    host_filter += "}}"
                self.host_filters.append(host_filter)
        self.debugl('host filters are %s' % self.host_filters)
        temp_groupby_patterns = list(config.get('vmware', 'groupby_patterns').split('}},'))
        for groupby_pattern in temp_groupby_patterns:
            groupby_pattern = groupby_pattern.rstrip()
            if groupby_pattern != "":
                if not groupby_pattern.endswith("}}"):
                    groupby_pattern += "}}"
                self.groupby_patterns.append(groupby_pattern)
        self.debugl('groupby patterns are %s' % self.groupby_patterns)
        temp_groupby_custom_field_excludes = config.get('vmware', 'groupby_custom_field_excludes')
        self.groupby_custom_field_excludes = [x.strip('"') for x in [y.strip("'") for y in temp_groupby_custom_field_excludes.split(",")]]
        self.debugl('groupby exclude strings are %s' % self.groupby_custom_field_excludes)
        # Special feature to disable the brute force serialization of the
        # virtual machine objects. The key name for these properties does not
        # matter because the values are just items for a larger list.
        if config.has_section('properties'):
            self.guest_props = []
            for prop in config.items('properties'):
                self.guest_props.append(prop[1])
        # save the config
        self.config = config
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on PyVmomi')
parser.add_argument('--debug', action='store_true', default=False,
help='show debug info')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to VSphere (default: False - use cache files)')
parser.add_argument('--max-instances', default=None, type=int,
help='maximum number of instances to retrieve')
self.args = parser.parse_args()
def get_instances(self):
''' Get a list of vm instances with pyvmomi '''
kwargs = {'host': self.server,
'user': self.username,
'pwd': <PASSWORD>,
'port': int(self.port)}
if hasattr(ssl, 'SSLContext') and not self.validate_certs:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
kwargs['sslContext'] = context
return self._get_instances(kwargs)
def _get_instances(self, inkwargs):
''' Make API calls '''
instances = []
try:
si = SmartConnect(**inkwargs)
except ssl.SSLError as connection_error:
if '[SSL: CERTIFICATE_VERIFY_FAILED]' in str(connection_error) and self.validate_certs:
sys.exit("Unable to connect to ESXi server due to %s, "
"please specify validate_certs=False and try again" % connection_error)
except Exception as exc:
self.debugl("Unable to connect to ESXi server due to %s" % exc)
sys.exit("Unable to connect to ESXi server due to %s" % exc)
self.debugl('retrieving all instances')
if not si:
sys.exit("Could not connect to the specified host using specified "
"username and password")
atexit.register(Disconnect, si)
content = si.RetrieveContent()
# Create a search container for virtualmachines
self.debugl('creating containerview for virtualmachines')
container = content.rootFolder
viewType = [vim.VirtualMachine]
recursive = True
containerView = content.viewManager.CreateContainerView(container, viewType, recursive)
children = containerView.view
for child in children:
# If requested, limit the total number of instances
if self.args.max_instances:
if len(instances) >= self.args.max_instances:
break
instances.append(child)
self.debugl("%s total instances in container view" % len(instances))
if self.args.host:
instances = [x for x in instances if x.name == self.args.host]
instance_tuples = []
for instance in sorted(instances):
if self.guest_props:
ifacts = self.facts_from_proplist(instance)
else:
ifacts = self.facts_from_vobj(instance)
instance_tuples.append((instance, ifacts))
self.debugl('facts collected for all instances')
try:
cfm = content.customFieldsManager
if cfm is not None and cfm.field:
for f in cfm.field:
if not f.managedObjectType or f.managedObjectType == vim.VirtualMachine:
self.custom_fields[f.key] = f.name
self.debugl('%d custom fields collected' % len(self.custom_fields))
except vmodl.RuntimeFault as exc:
self.debugl("Unable to gather custom fields due to %s" % exc.msg)
except | |
<gh_stars>0
# Copyright (c) 2013, Preferred Infrastructure, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
A core of maf - an environment for computational experimentations on waf.
This module contains the core functionality of maf that handles parameterized
tasks and metanodes.
"""
import collections
import copy
import os
import os.path
import types
import inspect
try:
import cPickle as pickle
except ImportError:
import pickle
import waflib.Build
import waflib.Utils
from waflib.TaskGen import before_method, feature
def options(opt):
    """waf options hook: maf registers no command-line options."""
def configure(conf):
    """waf configure hook: maf needs no configuration step."""
class ExperimentContext(waflib.Build.BuildContext):
    """Context class of waf experiment (a.k.a. maf)."""

    cmd = 'experiment'
    fun = 'experiment'
    variant = 'experiment'

    def __init__(self, **kw):
        super(ExperimentContext, self).__init__(**kw)
        # Collects CallObjects while wscripts run; processed afterwards.
        self._experiment_graph = ExperimentGraph()
        # Callback registered by BuildContext.add_pre_fun is called right after
        # all wscripts are executed.
        super(ExperimentContext, self).add_pre_fun(
            ExperimentContext._process_call_objects)

    def __call__(self, **kw):
        """Main method to generate tasks."""
        call_object = CallObject(**kw)
        self._experiment_graph.add_call_object(call_object)

    def _process_call_objects(self):
        """Callback function called right after all wscripts are executed.

        This function virtually generates all task generators under
        ExperimentContext.
        """
        # Run topological sort on dependency graph.
        call_objects = self._experiment_graph.get_sorted_call_objects()
        # TODO(beam2d): Remove this stub file name.
        self._parameter_id_generator = ParameterIdGenerator(
            'build/experiment/.maf_id_table',
            'build/experiment/.maf_id_table.tsv')
        # Meta node name -> set of Parameters observed at that node.
        self._nodes = collections.defaultdict(set)
        try:
            for call_object in call_objects:
                self._process_call_object(call_object)
        finally:
            # Persist the parameter-id table even if generation fails.
            self._parameter_id_generator.save()

    def _process_call_object(self, call_object):
        """Generate tasks for one call, dispatching on aggregation mode."""
        self._set_rule_and_dependson(call_object)
        if hasattr(call_object, 'for_each'):
            self._generate_aggregation_tasks(call_object, 'for_each')
        elif hasattr(call_object, 'aggregate_by'):
            self._generate_aggregation_tasks(call_object, 'aggregate_by')
        else:
            self._generate_tasks(call_object)

    def _set_rule_and_dependson(self, call_object):
        # dependson attribute is a variable or a function, changes of which
        # will be automatically traced; this is set by two ways:
        # 1) write dependson attribute in wscript
        # 2) give rule in Rule object having non-empty dependson
        # NOTE(review): assumes call_object always carries a 'rule' attribute;
        # an AttributeError is raised here otherwise -- confirm with callers.
        rule = call_object.rule
        if 'rule' in call_object.__dict__ and not isinstance(rule, str):
            if not isinstance(rule, Rule):
                rule = Rule(rule)
            rule.add_dependson(getattr(call_object, 'dependson', []))
            # Wrap so waf sees a plain callable instead of a Rule instance.
            call_object.rule = lambda task: rule.fun(task)
            call_object.dependson = rule.stred_dependson()
        else:
            call_object.dependson = []

    def _generate_tasks(self, call_object):
        """Expand one call into concrete tasks over parameter combinations."""
        # Source-less taskgens: one task per declared parameter. With no
        # sources the loops below are no-ops, so falling through is harmless.
        if not call_object.source:
            for parameter in call_object.parameters:
                self._generate_task(call_object, [], parameter)
        parameter_lists = []
        # Generate all valid list of parameters corresponding to source nodes.
        for node in call_object.source:
            node_params = self._nodes[node]
            if not node_params:
                # node is physical. We use empty parameter as a dummy.
                node_params = {Parameter()}
            if not parameter_lists:
                for node_param in node_params:
                    parameter_lists.append([node_param])
                continue
            # Extend each partial combination with every non-conflicting
            # parameter of this node (pruned cartesian product).
            new_lists = []
            for node_param in node_params:
                for parameter_list in parameter_lists:
                    if any(p.conflict_with(node_param) for p in parameter_list):
                        continue
                    new_list = list(parameter_list)
                    new_list.append(node_param)
                    new_lists.append(new_list)
            parameter_lists = new_lists
        for parameter_list in parameter_lists:
            for parameter in call_object.parameters:
                if any(p.conflict_with(parameter) for p in parameter_list):
                    continue
                self._generate_task(call_object, parameter_list, parameter)

    def _generate_task(self, call_object, source_parameter, parameter):
        """Generate one physical waf task for a fixed parameter assignment."""
        # Create target parameter by merging source parameter and task-gen
        # parameter.
        target_parameter = Parameter()
        for p in source_parameter:
            target_parameter.update(p)
        target_parameter.update(parameter)
        for node in call_object.target:
            self._nodes[node].add(target_parameter)
        # Convert source/target meta nodes to physical nodes.
        physical_source = self._resolve_meta_nodes(
            call_object.source, source_parameter)
        physical_target = self._resolve_meta_nodes(
            call_object.target, target_parameter)
        # Create arguments of BuildContext.__call__.
        physical_call_object = copy.deepcopy(call_object)
        physical_call_object.source = physical_source
        physical_call_object.target = physical_target
        del physical_call_object.parameters
        self._call_super(
            physical_call_object, source_parameter, target_parameter)

    def _generate_aggregation_tasks(self, call_object, key_type):
        """Generate tasks that fan many source parameters into one target."""
        # In aggregation tasks, source and target must be only one (meta) node.
        # Source node must be meta node. Whether target node is meta or not is
        # automatically decided by source parameters and for_each/aggregate_by
        # keys.
        if not call_object.source or len(call_object.source) > 1:
            raise InvalidMafArgumentException(
                "'source' in aggregation must include only one meta node")
        if not call_object.target or len(call_object.target) > 1:
            raise InvalidMafArgumentException(
                "'target' in aggregation must include only one meta node")
        source_node = call_object.source[0]
        target_node = call_object.target[0]
        source_parameters = self._nodes[source_node]
        # Mapping from target parameter to list of source parameter.
        target_to_source = collections.defaultdict(set)
        for source_parameter in source_parameters:
            target_parameter = Parameter()
            if key_type == 'for_each':
                # Group by the listed keys: they survive into the target.
                for key in call_object.for_each:
                    target_parameter[key] = source_parameter[key]
            elif key_type == 'aggregate_by':
                # Inverse: every key except the aggregated ones survives.
                for key in source_parameter:
                    if key not in call_object.aggregate_by:
                        target_parameter[key] = source_parameter[key]
            target_to_source[target_parameter].add(source_parameter)
        for target_parameter in target_to_source:
            source_parameter = target_to_source[target_parameter]
            source = [self._resolve_meta_node(source_node, parameter)
                      for parameter in source_parameter]
            target = self._resolve_meta_node(target_node, target_parameter)
            self._nodes[target_node].add(target_parameter)
            # Create arguments of BuildContext.__call__.
            physical_call_object = copy.deepcopy(call_object)
            physical_call_object.source = source
            physical_call_object.target = target
            if key_type == 'for_each':
                del physical_call_object.for_each
            else:
                del physical_call_object.aggregate_by
            self._call_super(
                physical_call_object, source_parameter, target_parameter)

    def _call_super(self, call_object, source_parameter, target_parameter):
        """Delegate actual taskgen creation to waf's BuildContext."""
        taskgen = super(ExperimentContext, self).__call__(
            **call_object.__dict__)
        taskgen.env.source_parameter = source_parameter
        taskgen.env.update(target_parameter.to_str_valued_dict())
        # Store stringized dependson values in env so waf's signature
        # machinery notices when they change.
        depkeys = [('dependson%d' % i) for i in range(len(call_object.dependson))]
        taskgen.env.update(dict(zip(depkeys, call_object.dependson)))
        taskgen.parameter = target_parameter

    def _resolve_meta_nodes(self, nodes, parameters):
        """Resolve several meta nodes; accepts one parameter or one per node."""
        if not isinstance(parameters, list):
            parameters = [parameters] * len(nodes)
        physical_nodes = []
        for node, parameter in zip(nodes, parameters):
            physical_nodes.append(self._resolve_meta_node(node, parameter))
        return physical_nodes

    def _resolve_meta_node(self, node, parameter):
        """Map a (node, parameter) pair to a physical waf node."""
        # A parameterized node becomes '<node>/<param-id>-<basename>'.
        if parameter:
            parameter_id = self._parameter_id_generator.get_id(parameter)
            node = os.path.join(
                node, '-'.join([parameter_id, os.path.basename(node)]))
        if node[0] == '/':
            # Absolute path: resolve from the filesystem root.
            return self.root.find_resource(node)
        return self.path.find_or_declare(node)
class CyclicDependencyException(Exception):
    """Raised when the experiment graph contains a dependency cycle."""
    pass
class InvalidMafArgumentException(Exception):
    """Raised when the arguments of ``ExperimentContext.__call__`` are wrong.
    """
    pass
class Parameter(dict):
    """Parameter of maf task.

    This is a dict with hash(). Be careful to use it with set(); parameter has
    hash(), but is mutable.
    """

    def __hash__(self):
        # dict.iteritems() does not exist on Python 3 and raised an
        # AttributeError here; items() behaves equivalently on both major
        # versions (frozenset accepts the Python 2 list of pairs just as well).
        # TODO(beam2d): Should we cache this value?
        return hash(frozenset(self.items()))

    def conflict_with(self, parameter):
        """Checks whether the parameter conflicts with given other parameter.

        :return: True if self conflicts with parameter, i.e. contains different
            values corresponding to same key.
        :rtype: bool
        """
        common_keys = set(self) & set(parameter)
        return any(self[key] != parameter[key] for key in common_keys)

    def to_str_valued_dict(self):
        """Gets dictionary with stringized values.

        :return: A dictionary with same key and stringized values.
        :rtype: dict of str key and str value
        """
        return dict([(k, str(self[k])) for k in self])
class Rule(object):
    """A wrapper object of a rule function with associated values whose
    changes are tracked on the experiment.

    :param fun: target function of the task.
    :param dependson: list of variables or functions to track. All of these
        are later converted to string values, so a variable of a user-defined
        class must provide a meaningful ``__str__`` method.
    """

    def __init__(self, fun, dependson=None):
        self.fun = fun
        # Copy instead of aliasing: the previous signature (dependson=[])
        # used a shared mutable default, so every construction appended its
        # function to the dependson list of all previously created Rules.
        self.dependson = list(dependson) if dependson is not None else []
        self.dependson.append(self.fun)

    def add_dependson(self, dependson):
        """Append additional tracked values."""
        self.dependson += dependson

    def stred_dependson(self):
        """Return the tracked values as strings (callables via their source)."""
        def to_str(d):
            # Callable object is converted to its source code as str.
            if _is_callable(d):
                return inspect.getsource(d)
            else:
                return str(d)
        # list() so len() and repeated iteration also work on Python 3,
        # matching the list that Python 2's map() returned.
        return list(map(to_str, self.dependson))
class CallObject(object):
    """Represents a single call of ``ExperimentContext.__call__()``."""

    def __init__(self, **kw):
        """Initialize from the keyword arguments of the taskgen call.

        ``source``, ``target`` and ``features`` (and, when present,
        ``for_each`` / ``aggregate_by``) are normalized into lists of
        strings.

        :param **kw: Arguments of ``ExperimentContext.__call__``.
        """
        self.__dict__.update(kw)
        # Mandatory list-valued attributes.
        for key in ('source', 'target', 'features'):
            _let_element_to_be_list(self.__dict__, key)
        # Optional list-valued attributes.
        for key in ('for_each', 'aggregate_by'):
            if hasattr(self, key):
                _let_element_to_be_list(self.__dict__, key)
        # Every maf taskgen carries the 'experiment' feature.
        self.__dict__['features'].append('experiment')
        # Normalize declared parameters into Parameter instances, defaulting
        # to a single empty Parameter when none were given.
        if 'parameters' in self.__dict__:
            self.parameters = [Parameter(p) for p in self.parameters]
        else:
            self.parameters = [Parameter()]

    def __eq__(self, other):
        return self.__dict__ == other.__dict__
class ExperimentGraph(object):
"""Bipartite graph consisting of meta node and call object node."""
def __init__(self):
self._edges = collections.defaultdict(set)
self._call_objects = []
def add_call_object(self, call_object):
"""Adds call object node, related meta nodes and edges.
:param call_object: Call object be added.
:type call_object: :py:class:`CallObject`
"""
index = len(self._call_objects)
self._call_objects.append(call_object)
for in_node in call_object.source:
self._edges[in_node].add(index)
for out_node in call_object.target:
self._edges[index].add(out_node)
def get_sorted_call_objects(self):
"""Runs topological sort on the experiment graph.
:return: List of call objects that topologically sorted.
:rtype: list of :py:class:`CallObject`
"""
nodes = self._collect_independent_nodes()
edges = copy.deepcopy(self._edges)
reverse_edges = collections.defaultdict(set)
for node |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.