code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import os
import sys
import warnings
import glob
import cv2
import numpy as np
from io_utils import PostprocessUtils
from io_utils import LabelFilter
class AnnotationWriter():
    """Collects rendered sample files per sensor/channel, applies the
    configured label filters and writes the result to ``self.out_path``.

    Subclasses must implement :meth:`_write_data`.
    """

    def __init__(self, cfg):
        """
        :param cfg: configuration object providing ``BASE_PATH``,
            ``DATA_DEF``, ``BATCH_GLOB``, ``SENSORS``, ``OUT_FILE``,
            ``BACKGROUND`` and ``FILTER``.
        """
        self.base_path = cfg.BASE_PATH
        self.data_def = cfg.DATA_DEF[0]
        self.batch_glob = cfg.BATCH_GLOB
        self.sensors = cfg.SENSORS
        self.channel_def = self._generate_channel_def()
        self.out_path = os.path.join(self.base_path, cfg.OUT_FILE)
        self.background = cfg.BACKGROUND
        self.filters = cfg.FILTER[0]
        self.data = {}          # sensor -> {channel -> sorted file list}
        self.data_size = None   # number of samples; set on first channel scan
        self.n_remaining_labels = 0
        self.n_filtered_labels = 0

    def run(self):
        """Gather all sample files, write them out and report statistics."""
        self._gather_sample_info()
        print("Write Data ...")
        self._write_data()
        print(f"In total {self.data_size} samples processed. {self.n_remaining_labels} Labels remain while {self.n_filtered_labels} labels are filtered")

    def _generate_channel_def(self):
        """Merge the sensor-independent ("common") channel definitions with
        each sensor's own; returns ``{sensor: {channel: definition}}``."""
        channel_def = {}
        for s in self.sensors:
            _def = {**self.data_def["common"], **self.data_def[s]}
            channel_def[s] = _def
        return channel_def

    def _gather_sample_info(self):
        """ gathers all filenames of rendered images together """
        print("searching for relevant files...")
        b_dir = os.path.join(self.base_path, self.batch_glob)
        batches = glob.glob(b_dir)
        for sensor, s_def in self.channel_def.items():
            sen_dict = {}
            for channel, d in s_def.items():
                ch_files = []
                for b_path in batches:
                    # define path
                    g_str = d['glob']
                    # assumption of path structure: base_path/[sensor]/[channel_glob]
                    in_dir = os.path.join(b_path, sensor, g_str)
                    _files = glob.glob(in_dir)
                    assert len(_files) != 0, "no files found here: " + in_dir
                    ch_files.extend(_files)
                if not self.data_size:
                    self.data_size = len(ch_files)
                else:
                    assert len(ch_files) == self.data_size, "different number of samples for: " + g_str
                ch_files.sort()  # ensure same ordering for all modalities
                sen_dict[channel] = ch_files
            self.data[sensor] = sen_dict
            print(f"For sensor {sensor}, {self.data_size} data samples with following modalities found: {s_def.keys()}")
        return

    def _filter_data(self, data, label_key="inst_label"):
        """Apply the configured label filters on ``data`` in place.

        :param data: dict holding (at least) an integer label map under
            ``label_key``.
        :returns: ``(data, n_remaining, n_filtered)`` where the counts are
            numbers of unique label ids.
        """
        labels_orig = data[label_key]
        # for statistical reasons
        n_fil = 0
        n_orig = np.unique(labels_orig).shape[0]
        for _l in np.unique(labels_orig):
            if _l != self.background:
                # NOTE: ``np.bool`` was removed in NumPy 1.24; the builtin
                # ``bool`` is the correct dtype here.
                binary_mask = np.where(labels_orig == _l, 1, 0).astype(bool)
                for _method_key, filter_cfg in self.filters.items():
                    # Resolve the filter method by name; getattr avoids the
                    # arbitrary-code-execution risk of the previous eval().
                    filter_method = getattr(LabelFilter, _method_key)
                    is_filtered = filter_method(binary_mask, filter_cfg, data=data)
                    if is_filtered:
                        # if one method filters the current binary mask,
                        # no need to run further filters
                        labels_orig[binary_mask] = self.background
                        n_fil += 1
                        break
        n_rem = np.unique(data[label_key]).shape[0]
        assert n_orig == n_rem + n_fil
        return data, n_rem, n_fil

    def _write_data(self):
        """ writes dictionary of data channels into file """
        raise NotImplementedError("Please Implement this method")

    def _load_image(self, image_path):
        """Load one image from disk.

        :param image_path: path to the image file
        :returns: image as :class:`numpy.ndarray`, read with any colour
            layout / bit depth (``None`` if cv2 cannot read the file).
        """
        image = cv2.imread(image_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)
        return image
| [
"numpy.unique",
"numpy.where",
"os.path.join",
"cv2.imread",
"glob.glob"
] | [((449, 491), 'os.path.join', 'os.path.join', (['self.base_path', 'cfg.OUT_FILE'], {}), '(self.base_path, cfg.OUT_FILE)\n', (461, 491), False, 'import os\n'), ((1370, 1415), 'os.path.join', 'os.path.join', (['self.base_path', 'self.batch_glob'], {}), '(self.base_path, self.batch_glob)\n', (1382, 1415), False, 'import os\n'), ((1434, 1450), 'glob.glob', 'glob.glob', (['b_dir'], {}), '(b_dir)\n', (1443, 1450), False, 'import glob\n'), ((2812, 2834), 'numpy.unique', 'np.unique', (['labels_orig'], {}), '(labels_orig)\n', (2821, 2834), True, 'import numpy as np\n'), ((4087, 4152), 'cv2.imread', 'cv2.imread', (['image_path', '(cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)'], {}), '(image_path, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)\n', (4097, 4152), False, 'import cv2\n'), ((2761, 2783), 'numpy.unique', 'np.unique', (['labels_orig'], {}), '(labels_orig)\n', (2770, 2783), True, 'import numpy as np\n'), ((3460, 3486), 'numpy.unique', 'np.unique', (['data[label_key]'], {}), '(data[label_key])\n', (3469, 3486), True, 'import numpy as np\n'), ((1763, 1798), 'os.path.join', 'os.path.join', (['b_path', 'sensor', 'g_str'], {}), '(b_path, sensor, g_str)\n', (1775, 1798), False, 'import os\n'), ((1895, 1912), 'glob.glob', 'glob.glob', (['in_dir'], {}), '(in_dir)\n', (1904, 1912), False, 'import glob\n'), ((2904, 2937), 'numpy.where', 'np.where', (['(labels_orig == _l)', '(1)', '(0)'], {}), '(labels_orig == _l, 1, 0)\n', (2912, 2937), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import argparse
import cv2
import time
import numpy as np
BLUE = 'blue'
GREEN = 'green'
RED = 'red'
ORANGE = 'orange'
YELLOW = 'yellow'
BGR = 'bgr'
HSV = 'hsv'
HSV_MIN = 'hsv_min'
HSV_MAX = 'hsv_max'
COLOUR_MAP = {
BLUE : {
HSV_MIN : (110, 50, 50),
HSV_MAX : (130, 255, 255),
BGR : (255, 0, 0),
},
GREEN : {
HSV_MIN : (42, 62, 63),
HSV_MAX : (92, 255, 235),
BGR : (0, 255, 0),
},
RED : {
HSV_MIN : (0, 131, 126),
HSV_MAX : (179, 255, 255),
BGR : (0, 0, 255),
},
ORANGE : {
HSV_MIN : (0, 150, 210),
HSV_MAX : (44, 291, 286),
BGR : (0, 165, 255),
},
YELLOW : {
HSV_MIN : (10, 100, 100),
HSV_MAX : (45, 255, 255),
BGR : (0, 255, 255),
}
}
# Minimum area for detection
MIN_AREA = 50
DEF_WIDTH = 160
DEF_HEIGHT = 120
def parse_args():
    """Parse command-line options for the colour tracker.

    :returns: :class:`argparse.Namespace` with ``verbose``, ``flip``,
        ``width``, ``height`` and ``colour`` attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--verbose",
                        action="store_true", default=False,
                        help="Increase output verbosity")
    parser.add_argument("-f", "--flip",
                        action="store_true", default=False,
                        help="Flip video to mirror the view")
    # type=int so user-supplied values match the integer defaults;
    # previously "--width 320" arrived as the string "320".
    parser.add_argument("--width",
                        type=int, default=DEF_WIDTH,
                        help="Video width")
    parser.add_argument("--height",
                        type=int, default=DEF_HEIGHT,
                        help="Video height")
    parser.add_argument("-c", "--colour",
                        default=GREEN,
                        help="The colour to track")
    return parser.parse_args()
def main():
    """Track a coloured object in the webcam feed.

    Opens the default camera, thresholds each frame in HSV space for the
    colour selected on the command line, erodes the mask, and marks the
    blob centroid (computed from image moments) on the input frame.
    Press ESC to quit.
    """
    args = parse_args()
    colour = args.colour
    range_min = COLOUR_MAP[colour][HSV_MIN]
    range_max = COLOUR_MAP[colour][HSV_MAX]
    dot_colour = COLOUR_MAP[colour][BGR]
    cv2.namedWindow("Input")
    cv2.namedWindow("HSV")
    cv2.namedWindow("Mask")
    cv2.namedWindow("Erosion")
    capture = cv2.VideoCapture(0)
    # Coerce to int defensively: argparse delivers command-line values as
    # strings unless a type is configured.
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, int(args.width))
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, int(args.height))
    try:
        while True:
            grabbed, frame = capture.read()
            if not grabbed or frame is None:
                # NOTE: a permanently absent camera will busy-spin here.
                continue
            if args.flip:
                frame = np.fliplr(frame).copy()
            img_HSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            img_mask = cv2.inRange(img_HSV, range_min, range_max)
            img_erode = cv2.erode(img_mask, None, iterations=3)
            # m00 is the mask area; m10/m00 and m01/m00 give the centroid.
            moments = cv2.moments(img_erode, True)
            if moments['m00'] >= MIN_AREA:
                x = moments['m10'] / moments['m00']
                y = moments['m01'] / moments['m00']
                print(x, ", ", y)
                cv2.circle(frame, (int(x), int(y)), 5, dot_colour, -1)
            cv2.imshow("Input",frame)
            cv2.imshow("HSV", img_HSV)
            cv2.imshow("Mask", img_mask)
            cv2.imshow("Erosion", img_erode)
            if cv2.waitKey(10) == 27:  # ESC
                break
    finally:
        # Release the camera handle; previously it leaked on exit.
        capture.release()
        cv2.destroyAllWindows()
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"cv2.inRange",
"cv2.erode",
"numpy.fliplr",
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"cv2.moments",
"cv2.waitKey",
"cv2.namedWindow"
] | [((935, 960), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (958, 960), False, 'import argparse\n'), ((1733, 1757), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Input"""'], {}), "('Input')\n", (1748, 1757), False, 'import cv2\n'), ((1762, 1784), 'cv2.namedWindow', 'cv2.namedWindow', (['"""HSV"""'], {}), "('HSV')\n", (1777, 1784), False, 'import cv2\n'), ((1789, 1812), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Mask"""'], {}), "('Mask')\n", (1804, 1812), False, 'import cv2\n'), ((1817, 1843), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Erosion"""'], {}), "('Erosion')\n", (1832, 1843), False, 'import cv2\n'), ((1859, 1878), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1875, 1878), False, 'import cv2\n'), ((2837, 2860), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2858, 2860), False, 'import cv2\n'), ((2192, 2230), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2HSV'], {}), '(frame, cv2.COLOR_BGR2HSV)\n', (2204, 2230), False, 'import cv2\n'), ((2250, 2292), 'cv2.inRange', 'cv2.inRange', (['img_HSV', 'range_min', 'range_max'], {}), '(img_HSV, range_min, range_max)\n', (2261, 2292), False, 'import cv2\n'), ((2313, 2352), 'cv2.erode', 'cv2.erode', (['img_mask', 'None'], {'iterations': '(3)'}), '(img_mask, None, iterations=3)\n', (2322, 2352), False, 'import cv2\n'), ((2371, 2399), 'cv2.moments', 'cv2.moments', (['img_erode', '(True)'], {}), '(img_erode, True)\n', (2382, 2399), False, 'import cv2\n'), ((2641, 2667), 'cv2.imshow', 'cv2.imshow', (['"""Input"""', 'frame'], {}), "('Input', frame)\n", (2651, 2667), False, 'import cv2\n'), ((2675, 2701), 'cv2.imshow', 'cv2.imshow', (['"""HSV"""', 'img_HSV'], {}), "('HSV', img_HSV)\n", (2685, 2701), False, 'import cv2\n'), ((2710, 2738), 'cv2.imshow', 'cv2.imshow', (['"""Mask"""', 'img_mask'], {}), "('Mask', img_mask)\n", (2720, 2738), False, 'import cv2\n'), ((2747, 2779), 'cv2.imshow', 'cv2.imshow', (['"""Erosion"""', 'img_erode'], {}), 
"('Erosion', img_erode)\n", (2757, 2779), False, 'import cv2\n'), ((2792, 2807), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (2803, 2807), False, 'import cv2\n'), ((2150, 2166), 'numpy.fliplr', 'np.fliplr', (['frame'], {}), '(frame)\n', (2159, 2166), True, 'import numpy as np\n')] |
__copyright__ = """
Copyright (C) 2020 <NAME>
Copyright (C) 2020 <NAME>
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from scipy.stats import poisson
import numpy as np
__doc__ = """
.. currentmodule:: pydemic
.. autoclass:: Simulation
.. autoclass:: SimulationState
.. autoclass:: StateLogger
"""
class SimulationState:
    """
    Manages the state for :class:`Simulation`'s.
    User-specified compartments are accessed as attributes, e.g.::

        >>> state = SimulationState(0., {'a': np.array([1., 0.])}, {}, ())
        >>> state.a
        array([1., 0.])
        >>> state.t
        0.0

    (The previous docstring example omitted the required
    ``passive_compartments`` argument and could not run.)

    Note that :class:`Simulation` initializes state with an extra axis
    relative to the input data, corresponding to the number of requested
    stochastic samples (see :meth:`Simulation.__call__`).
    Any user-implemented axes occupy all but the first axis of the state arrays.

    .. automethod:: __init__
    .. automethod:: sum
    """

    def __init__(self, t, compartments, hidden_compartments, passive_compartments):
        """
        :arg t: The current time.
        :arg compartments: A :class:`dict` of current values
            (as :class:`numpy.ndarray`'s) of all canonical compartments (the keys).
        :arg hidden_compartments: A :class:`dict` of current values
            (as :class:`numpy.ndarray`'s) of all compartments (the keys) not present
            in ``compartments`` (i.e., those used to implement
            and :class:`ErlangProcess`.)
        :arg passive_compartments: A :class:`tuple` of compartment keys which are
            computed for :class:`PassiveReaction`'s
            (i.e., those which do not count toward the total population).
        """
        self.t = t
        self.y = {**compartments, **hidden_compartments}
        self.compartments = list(compartments.keys())
        self.hidden_compartments = list(hidden_compartments.keys())
        self.passive_compartments = passive_compartments
        # Map each canonical compartment to itself plus any hidden
        # compartments named "<compartment>:<suffix>" so attribute access
        # sums over all of them.
        self.sum_compartments = {}
        for item in self.compartments:
            self.sum_compartments[item] = [item]
            for full_key in self.hidden_compartments:
                if item == full_key.split(':')[0]:
                    self.sum_compartments[item].append(full_key)

    def __getattr__(self, item):
        # Attribute access on a compartment name returns the sum over its
        # canonical and hidden parts.
        return sum(self.y[key] for key in self.sum_compartments[item])

    def sum(self):
        """
        :returns: The total population across all summed compartments.
        """
        return sum(val.sum() for key, val in self.y.items()
                   if key not in self.passive_compartments)
class StateLogger:
    """
    Records the trajectory of a simulation as produced by
    :meth:`Simulation.__call__`.

    .. attribute:: t

        A :class:`numpy.ndarray` of output times.

    .. attribute:: y

        A :class:`dict` mapping compartment names to
        :class:`numpy.ndarray` timeseries; time runs along the first axis.
    """

    def __init__(self, chunk_length=1000):
        # Storage grows lazily in chunks of this many timesteps.
        self.chunk_length = chunk_length
        self.t = np.zeros(shape=(self.chunk_length,))
        self.slice = 0
        self.quantile_data = None

    def initialize_with_state(self, state):
        """Allocate the buffers and record *state* as the first sample."""
        self.t[0] = state.t
        self.compartments = state.compartments.copy()
        self.y = {}
        for comp in state.compartments:
            first = state.y[comp]
            buf = np.zeros(shape=(self.chunk_length,) + first.shape)
            buf[0] = first
            self.y[comp] = buf
        self.slice = 1

    def __call__(self, state):
        """Append *state* to the log, growing storage when full."""
        if self.slice == self.t.shape[0]:
            self.add_chunk()
        cursor = self.slice
        self.t[cursor] = state.t
        for comp in state.compartments:
            self.y[comp][cursor] = state.__getattr__(comp)
        self.slice = cursor + 1

    def add_chunk(self):
        """Extend every buffer by another ``chunk_length`` timesteps."""
        self.t = np.concatenate([self.t, np.zeros(shape=(self.chunk_length,))])
        for comp, buf in self.y.items():
            pad = np.zeros(shape=(self.chunk_length,) + buf.shape[1:])
            self.y[comp] = np.concatenate([buf, pad])

    def cleanup(self, flatten_first_axis_if_unit=True):
        self.trim(flatten_first_axis_if_unit=flatten_first_axis_if_unit)

    def trim(self, flatten_first_axis_if_unit=True):
        """Drop the unused buffer tail (and a unit sample axis, if requested)."""
        used = self.slice
        self.t = self.t[:used]
        for comp in self.y.keys():
            buf = self.y[comp]
            if buf.ndim > 1 and flatten_first_axis_if_unit and buf.shape[1] == 1:
                self.y[comp] = buf[:used, 0, ...]
            elif buf.ndim > 1:
                self.y[comp] = buf[:used, ...]
            else:
                self.y[comp] = buf[:used]

    def __repr__(self):
        lines = ["{0:s} with".format(str(type(self)))]
        lines.append(" - t from {0:g} to {1:g}".format(self.t[0], self.t[-1]))
        for comp in self.compartments:
            lines.append(" - {0:s} {1:s}".format(comp, str(self.y[comp].shape)))
        return "\n".join(lines)
_default_quantiles = (0.0455, 0.3173, 0.5, 0.6827, 0.9545)


class QuantileLogger:
    """
    Used to log simulation results returned by
    :meth:`Simulation.__call__`, reducing the per-sample data to quantiles.

    .. attribute:: t

        A :class:`numpy.ndarray` of output times.

    .. attribute:: quantile_data

        After :meth:`cleanup`, a :class:`dict` mapping compartment names to
        arrays whose first axis indexes the requested quantiles (taken over
        the sample axis of the raw data).
    """

    def __init__(self, chunk_length=1000, quantiles=_default_quantiles):
        self.quantiles = quantiles
        self.chunk_length = chunk_length
        self.t = np.zeros(shape=(self.chunk_length,))
        self.slice = 0

    def initialize_with_state(self, state):
        """Allocate the buffers and record *state* as the first sample."""
        self.y_samples = {}
        self.t[0] = state.t
        self.compartments = state.compartments.copy()
        for key in self.compartments:
            val = state.y[key]
            ary = np.zeros(shape=(self.chunk_length,)+val.shape)
            ary[0] = val
            self.y_samples[key] = ary
        # Slot 0 is now occupied (mirrors StateLogger).
        self.slice = 1

    def __call__(self, state):
        # Write first, then advance the cursor, so that ``trim`` keeps
        # every recorded sample.  (Previously the cursor advanced before
        # writing, and ``trim``'s ``[:self.slice]`` silently discarded the
        # final sample — inconsistent with StateLogger.)
        if self.slice == self.t.shape[0]:
            self.add_chunk()
        self.t[self.slice] = state.t
        for key in self.compartments:
            self.y_samples[key][self.slice] = state.__getattr__(key)
        self.slice += 1

    def cleanup(self, flatten_first_axis_if_unit=True):
        self.trim(flatten_first_axis_if_unit=flatten_first_axis_if_unit)
        self.quantile_data = {}
        for key in self.y_samples:
            # FIXME: this will not work for Gillespie direct
            self.quantile_data[key] = np.array(
                [np.quantile(self.y_samples[key], quantile, axis=1)
                 for quantile in self.quantiles]
            )

    def add_chunk(self):
        """Extend every buffer by another ``chunk_length`` timesteps."""
        self.t = np.concatenate([self.t, np.zeros(shape=(self.chunk_length,))])
        for key, val in self.y_samples.items():
            shape = (self.chunk_length,)+val.shape[1:]
            self.y_samples[key] = np.concatenate([val, np.zeros(shape=shape)])

    def trim(self, flatten_first_axis_if_unit=True):
        self.t = self.t[:self.slice]
        for key in self.y_samples.keys():
            if flatten_first_axis_if_unit and self.y_samples[key].shape[1] == 1:
                self.y_samples[key] = self.y_samples[key][:self.slice, 0, ...]
            else:
                self.y_samples[key] = self.y_samples[key][:self.slice, ...]
class Simulation:
    """
    Main driver for compartmental model simulations.

    .. automethod:: __init__
    .. automethod:: __call__

    .. attribute:: compartments

        The compartment names comprising the simulation state, inferred as the set of
        all :attr:`Reaction.lhs`'s and :attr:`Reaction.rhs`'s from the input list
        of :class:`Reaction`'s.
    """

    def __init__(self, reactions):
        """
        :arg reactions: A :class:`list` of :class:`Reaction`'s
            (or subclasses thereof) used to specify the dynamics of the
            compartmental model.
        """
        def flatten(items):
            # Yield the leaves of an arbitrarily nested list/tuple
            # structure (lhs/rhs entries may themselves be tuples).
            for i in items:
                if isinstance(i, (list, tuple)):
                    for j in flatten(i):
                        yield j
                else:
                    yield i

        rhs_keys = []
        lhs_keys = []
        passive_compartments = []
        for reaction in reactions:
            from pydemic.reactions import PassiveReaction
            if not isinstance(reaction, PassiveReaction):
                lhs_keys.append(reaction.lhs)
                rhs_keys.append(reaction.rhs)
            else:
                # Passive reactions are tracked separately and excluded
                # from the population count.
                passive_compartments.extend([reaction.lhs, reaction.rhs])
        lhs_keys = set(flatten(lhs_keys))
        rhs_keys = set(flatten(rhs_keys))
        self.compartments = list((lhs_keys | rhs_keys) - set([None]))
        self.passive_compartments = list(set(passive_compartments) - set([None]))
        self.evolved_compartments = self.compartments + self.passive_compartments
        # Expand each user-facing reaction into its elementary reactions.
        self._network = tuple(react for reaction in reactions
                              for react in reaction.get_reactions())
        all_lhs = set(x.lhs for x in self._network) - set([None])
        all_rhs = set(x.rhs for x in self._network) - set([None])
        # Compartments introduced only by the expansion (e.g. Erlang
        # stages) that the user did not declare directly.
        self.hidden_compartments = list((all_lhs | all_rhs) - set(self.compartments))

    def print_network(self):
        """Print every elementary reaction of the expanded network."""
        for reaction in self._network:
            print(reaction)

    def step_gillespie_direct(self, time, state, dt):
        """Advance *state* by one Gillespie direct-method event.

        Mutates ``state.y`` in place (one unit moved from the chosen
        reaction's lhs to its rhs) and returns the sampled waiting time.
        """
        increments = {}
        # FIXME: fix this for split reactions
        for reaction in self._network:
            reaction_rate = reaction.evaluator(time, state)
            dY = reaction_rate
            if (reaction.lhs, reaction.rhs) in increments:
                increments[reaction.lhs, reaction.rhs] += dY
            else:
                increments[reaction.lhs, reaction.rhs] = dY
        # WARNING: need to be sure we're pulling from the right
        # reaction here! I might have solved an XY problem ...
        reactions = list(increments.keys())
        # r1 samples the waiting time, r2 selects the reaction.
        r1, r2 = np.random.rand(2)
        flattened_array = np.hstack([increments[k].reshape(-1) for k in reactions])
        cumulative_rates = np.cumsum(flattened_array)
        if cumulative_rates[-1] == 0.:
            # No reaction can fire; leave the state unchanged.
            return dt
        dt = - np.log(r1) / cumulative_rates[-1]
        r2 *= cumulative_rates[-1]
        reaction_index = np.searchsorted(cumulative_rates, r2)
        full_shape = (len(reactions),) + (increments[reactions[0]].shape)
        full_index = np.unravel_index(reaction_index, full_shape)
        # WARNING: It's also not entirely clear that this produces
        # the right rate distributions for processes that have two
        # different lhs <--> rhs reactions ...
        lhs, rhs = reactions[full_index[0]]  # pylint: disable=E1126
        state.y[lhs][full_index[1:]] -= 1.
        state.y[rhs][full_index[1:]] += 1.
        # FIXME: fix this for split reactions
        if state.y[lhs][full_index[1:]] < 0:
            # Undo the move if it would drive the source negative.
            state.y[lhs][full_index[1:]] += 1.
            state.y[rhs][full_index[1:]] -= 1.
        return dt

    def step(self, time, state, dt, stochastic_method=None):
        """Advance *state* by one (deterministic or tau-leap) timestep.

        Mutates ``state.y`` in place and returns ``dt`` unchanged.
        """
        increments = {}
        for reaction in self._network:
            from pydemic.reactions import PassiveReaction
            if not isinstance(reaction, PassiveReaction):  # FIXME
                dY = np.empty_like(state.y[reaction.lhs])
                dY[...] = dt * reaction.evaluator(time, state)
                if stochastic_method == "tau_leap":
                    # Poisson-distributed event counts for tau-leaping.
                    dY[...] = poisson.rvs(dY)
                # Clamp the increment so previously accumulated outflows
                # from the same lhs cannot drive it negative.
                dY_max = state.y[reaction.lhs].copy()
                for (_lhs, _rhs), incr in increments.items():
                    if reaction.lhs == _lhs:
                        dY_max -= incr
                dY = np.minimum(dY_max, dY)
                if (reaction.lhs, reaction.rhs) in increments:
                    increments[reaction.lhs, reaction.rhs] += dY
                else:
                    increments[reaction.lhs, reaction.rhs] = dY
        for (lhs, rhs), dY in increments.items():
            state.y[lhs] -= dY
            state.y[rhs] += dY
        return dt

    def initialize_full_state(self, time, y0, samples):
        """Build the initial :class:`SimulationState`.

        Each compartment value in *y0* is broadcast along a new leading
        axis of length *samples*; hidden compartments start at zero.
        """
        compartment_vals = {}
        for key, ary in y0.items():
            ary = np.array(ary)
            shape = (samples,) + ary.shape
            compartment_vals[key] = np.empty(shape, dtype='float64')
            compartment_vals[key][...] = ary[None, ...]
        hidden_compartment_vals = {}
        template = compartment_vals[self.compartments[0]]
        for key in self.hidden_compartments:
            hidden_compartment_vals[key] = np.zeros_like(template)
        state = SimulationState(time, compartment_vals, hidden_compartment_vals,
                                self.passive_compartments)
        return state

    def __call__(self, tspan, y0, dt, stochastic_method=None, samples=1, seed=None,
                 logger=None):
        """
        :arg tspan: A :class:`tuple` specifying the initial and final times.
        :arg y0: A :class:`dict` with the initial values
            (as :class:`numpy.ndarray`'s) for each of :attr:`compartments`.
        :arg dt: The (initial) timestep to use.
        :arg stochastic_method: A :class:`string` specifying whether to use
            direct Gillespie stepping (`'direct'`) or :math:`\\tau`-leaping
            (`'tau_leap'`).
            Defaults to *None*, i.e., a deterministic evolution.
        :arg samples: The number of stochastic samples to simulate simultaneously.
            Defaults to ``1``.
        :arg seed: The value with which to seed :mod:`numpy`'s random number.
            Defaults to *None*, in which case no seed is passed.
        :returns: A :class:`~pydemic.simulation.StateLogger`.
        """
        if seed is not None:
            np.random.seed(seed)
        else:
            np.random.seed()
        start_time, end_time = tspan
        state = self.initialize_full_state(start_time, y0, samples)
        if logger is None:
            result = StateLogger()
        else:
            result = logger
        result.initialize_with_state(state)
        time = start_time
        while time < end_time:
            if stochastic_method in [None, "tau_leap"]:
                dt = self.step(time, state, dt, stochastic_method=stochastic_method)
            elif stochastic_method in ["direct"]:
                dt = self.step_gillespie_direct(time, state, dt)
            time += dt
            state.t = time
            result(state)
        result.cleanup()
        return result

    def step_deterministic(self, t, y):
        """ODE right-hand side: return d(y)/dt as a flat array.

        *y* is the flattened state as produced by :meth:`state_to_array`.
        """
        dy = np.zeros_like(y)
        state = self.array_to_state(t, y)
        dy_state = self.array_to_state(t, dy)
        for reaction in self._network:
            rate = reaction.evaluator(t, state)
            if type(reaction.rhs) == tuple:
                # Split reactions deposit the same rate into several
                # destination compartments.
                for rhs in reaction.rhs:
                    dy_state.y[rhs] += rate
            else:
                dy_state.y[reaction.rhs] += rate
            if reaction.lhs is not None:
                dy_state.y[reaction.lhs] -= rate
        return self.state_to_array(dy_state)

    def array_to_state(self, time, array):
        """Inverse of :meth:`state_to_array`: rebuild a SimulationState."""
        n_evolved = len(self.evolved_compartments)
        array = array.reshape(n_evolved, *self.compartment_shape)
        y = {comp: array[i] for i, comp in enumerate(self.evolved_compartments)}
        return SimulationState(time, y, {}, self.passive_compartments)

    def state_to_array(self, state):
        """Flatten a SimulationState into a 1-D array for the ODE solver."""
        array = np.empty((len(self.evolved_compartments),)+self.compartment_shape)
        for i, comp in enumerate(self.evolved_compartments):
            array[i] = state.y[comp]
        return array.reshape(-1)

    def solve_deterministic(self, t_span, y0, rtol=1e-6):
        """
        Integrate the deterministic ODE system with :func:`solve_ivp`.

        :arg t_span: A :class:`tuple` specifying the initial and final times.
        :arg y0: A :class:`dict` with the initial values
            (as :class:`numpy.ndarray`'s) for each of :attr:`compartments`.
        :returns: the :func:`scipy.integrate.solve_ivp` result (with a
            dense interpolant; see :meth:`dense_to_logger`).
        """
        template = y0[self.compartments[0]]
        self.compartment_shape = template.shape
        state = SimulationState(t_span[0], y0, {}, self.passive_compartments)
        y0_array = self.state_to_array(state)
        from scipy.integrate import solve_ivp
        result = solve_ivp(self.step_deterministic, t_span, y0_array,
                           first_step=.1, dense_output=True, rtol=rtol, atol=1e-20,
                           method='DOP853')
        return result

    def dense_to_logger(self, solve_ivp_result, times):
        """Sample a dense ODE solution at *times* into a :class:`StateLogger`.

        :raises ValueError: if any requested time lies outside the solved
            timespan (no extrapolation).
        """
        lower = (1 - 1e-10) * solve_ivp_result.t[0]
        upper = (1 + 1e-10) * solve_ivp_result.t[-1]
        all_within_t = all(lower <= t <= upper for t in times)
        if not all_within_t:
            raise ValueError(
                'Extrapolation outside of simulation timespan not allowed.'
            )
        logger = StateLogger()
        shape = (len(self.evolved_compartments),)+self.compartment_shape

        def get_state_at_t(t):
            # Evaluate the dense interpolant and rebuild a state object.
            array = solve_ivp_result.sol(t).reshape(*shape)
            comps = {comp: array[i]
                     for i, comp in enumerate(self.evolved_compartments)}
            return SimulationState(t, comps, {}, self.passive_compartments)

        logger.initialize_with_state(get_state_at_t(times[0]))
        for t in times[1:]:
            logger(get_state_at_t(t))
        logger.cleanup()
        return logger
| [
"numpy.random.rand",
"numpy.minimum",
"numpy.searchsorted",
"numpy.log",
"scipy.integrate.solve_ivp",
"scipy.stats.poisson.rvs",
"numpy.array",
"numpy.zeros",
"numpy.quantile",
"numpy.empty",
"numpy.unravel_index",
"numpy.random.seed",
"numpy.empty_like",
"numpy.cumsum",
"numpy.zeros_lik... | [((4129, 4165), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.chunk_length,)'}), '(shape=(self.chunk_length,))\n', (4137, 4165), True, 'import numpy as np\n'), ((6745, 6781), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.chunk_length,)'}), '(shape=(self.chunk_length,))\n', (6753, 6781), True, 'import numpy as np\n'), ((11205, 11222), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (11219, 11222), True, 'import numpy as np\n'), ((11334, 11360), 'numpy.cumsum', 'np.cumsum', (['flattened_array'], {}), '(flattened_array)\n', (11343, 11360), True, 'import numpy as np\n'), ((11533, 11570), 'numpy.searchsorted', 'np.searchsorted', (['cumulative_rates', 'r2'], {}), '(cumulative_rates, r2)\n', (11548, 11570), True, 'import numpy as np\n'), ((11666, 11710), 'numpy.unravel_index', 'np.unravel_index', (['reaction_index', 'full_shape'], {}), '(reaction_index, full_shape)\n', (11682, 11710), True, 'import numpy as np\n'), ((15832, 15848), 'numpy.zeros_like', 'np.zeros_like', (['y'], {}), '(y)\n', (15845, 15848), True, 'import numpy as np\n'), ((17500, 17631), 'scipy.integrate.solve_ivp', 'solve_ivp', (['self.step_deterministic', 't_span', 'y0_array'], {'first_step': '(0.1)', 'dense_output': '(True)', 'rtol': 'rtol', 'atol': '(1e-20)', 'method': '"""DOP853"""'}), "(self.step_deterministic, t_span, y0_array, first_step=0.1,\n dense_output=True, rtol=rtol, atol=1e-20, method='DOP853')\n", (17509, 17631), False, 'from scipy.integrate import solve_ivp\n'), ((4458, 4506), 'numpy.zeros', 'np.zeros', ([], {'shape': '((self.chunk_length,) + val.shape)'}), '(shape=(self.chunk_length,) + val.shape)\n', (4466, 4506), True, 'import numpy as np\n'), ((7047, 7095), 'numpy.zeros', 'np.zeros', ([], {'shape': '((self.chunk_length,) + val.shape)'}), '(shape=(self.chunk_length,) + val.shape)\n', (7055, 7095), True, 'import numpy as np\n'), ((13457, 13470), 'numpy.array', 'np.array', (['ary'], {}), '(ary)\n', (13465, 13470), True, 'import numpy as np\n'), 
((13550, 13582), 'numpy.empty', 'np.empty', (['shape'], {'dtype': '"""float64"""'}), "(shape, dtype='float64')\n", (13558, 13582), True, 'import numpy as np\n'), ((13823, 13846), 'numpy.zeros_like', 'np.zeros_like', (['template'], {}), '(template)\n', (13836, 13846), True, 'import numpy as np\n'), ((15019, 15039), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (15033, 15039), True, 'import numpy as np\n'), ((15066, 15082), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (15080, 15082), True, 'import numpy as np\n'), ((4915, 4951), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.chunk_length,)'}), '(shape=(self.chunk_length,))\n', (4923, 4951), True, 'import numpy as np\n'), ((7933, 7969), 'numpy.zeros', 'np.zeros', ([], {'shape': '(self.chunk_length,)'}), '(shape=(self.chunk_length,))\n', (7941, 7969), True, 'import numpy as np\n'), ((11439, 11449), 'numpy.log', 'np.log', (['r1'], {}), '(r1)\n', (11445, 11449), True, 'import numpy as np\n'), ((12526, 12562), 'numpy.empty_like', 'np.empty_like', (['state.y[reaction.lhs]'], {}), '(state.y[reaction.lhs])\n', (12539, 12562), True, 'import numpy as np\n'), ((12947, 12969), 'numpy.minimum', 'np.minimum', (['dY_max', 'dY'], {}), '(dY_max, dY)\n', (12957, 12969), True, 'import numpy as np\n'), ((5096, 5117), 'numpy.zeros', 'np.zeros', ([], {'shape': 'shape'}), '(shape=shape)\n', (5104, 5117), True, 'import numpy as np\n'), ((7752, 7802), 'numpy.quantile', 'np.quantile', (['self.y_samples[key]', 'quantile'], {'axis': '(1)'}), '(self.y_samples[key], quantile, axis=1)\n', (7763, 7802), True, 'import numpy as np\n'), ((8130, 8151), 'numpy.zeros', 'np.zeros', ([], {'shape': 'shape'}), '(shape=shape)\n', (8138, 8151), True, 'import numpy as np\n'), ((12709, 12724), 'scipy.stats.poisson.rvs', 'poisson.rvs', (['dY'], {}), '(dY)\n', (12720, 12724), False, 'from scipy.stats import poisson\n')] |
# Author: <NAME>, <<EMAIL>>
#
# License: BSD (3-clause)
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_raises, assert_true, assert_equal
from mne.utils import requires_version
from mne.decoding.search_light import SlidingEstimator, GeneralizingEstimator
from mne.decoding.transformer import Vectorizer
def make_data():
    """Build a toy binary-classification dataset.

    Returns ``(X, y)`` where ``X`` has shape ``(50, 32, 10)``
    (epochs x channels x times) and ``y`` alternates between the two
    classes; at every timepoint one random channel pattern is added to
    class-0 epochs and subtracted from class-1 epochs.
    """
    n_epochs, n_chan, n_time = 50, 32, 10
    signal = np.random.rand(n_epochs, n_chan, n_time)
    labels = np.arange(n_epochs) % 2
    pos, neg = labels == 0, labels == 1
    for tidx in range(n_time):
        pattern = np.random.randn(n_chan)
        signal[pos, :, tidx] += pattern
        signal[neg, :, tidx] -= pattern
    return signal, labels
@requires_version('sklearn', '0.17')
def test_search_light():
    """Test SlidingEstimator: construction, fitting, prediction, scoring,
    custom scorers, parallelism, and pipeline/bagging integration."""
    from sklearn.linear_model import Ridge, LogisticRegression
    from sklearn.pipeline import make_pipeline
    from sklearn.metrics import roc_auc_score, make_scorer
    from sklearn.ensemble import BaggingClassifier
    X, y = make_data()
    n_epochs, _, n_time = X.shape
    # init: a non-estimator argument must be rejected
    assert_raises(ValueError, SlidingEstimator, 'foo')
    sl = SlidingEstimator(Ridge())
    sl = SlidingEstimator(LogisticRegression())
    # fit: one estimator per timepoint, repr reflects the fitted state
    assert_equal(sl.__repr__()[:18], '<SlidingEstimator(')
    sl.fit(X, y)
    assert_equal(sl.__repr__()[-28:], ', fitted with 10 estimators>')
    assert_raises(ValueError, sl.fit, X[1:], y)
    assert_raises(ValueError, sl.fit, X[:, :, 0], y)
    sl.fit(X, y, sample_weight=np.ones_like(y))
    # transforms: prediction must match the fitted number of timepoints
    assert_raises(ValueError, sl.predict, X[:, :, :2])
    y_pred = sl.predict(X)
    assert_true(y_pred.dtype == int)
    assert_array_equal(y_pred.shape, [n_epochs, n_time])
    y_proba = sl.predict_proba(X)
    assert_true(y_proba.dtype == float)
    assert_array_equal(y_proba.shape, [n_epochs, n_time, 2])
    # score: one score per timepoint
    score = sl.score(X, y)
    assert_array_equal(score.shape, [n_time])
    assert_true(np.sum(np.abs(score)) != 0)
    assert_true(score.dtype == float)
    sl = SlidingEstimator(LogisticRegression())
    assert_equal(sl.scoring, None)
    # Scoring method: invalid scoring values must raise at score time
    for scoring in ['foo', 999]:
        sl = SlidingEstimator(LogisticRegression(), scoring=scoring)
        sl.fit(X, y)
        assert_raises((ValueError, TypeError), sl.score, X, y)
    # Check sklearn's roc_auc fix: scikit-learn/scikit-learn#6874
    # -- 3 class problem (roc_auc is undefined for multiclass here)
    sl = SlidingEstimator(LogisticRegression(random_state=0),
                          scoring='roc_auc')
    y = np.arange(len(X)) % 3
    sl.fit(X, y)
    assert_raises(ValueError, sl.score, X, y)
    # -- 2 class problem not in [0, 1]
    y = np.arange(len(X)) % 2 + 1
    sl.fit(X, y)
    score = sl.score(X, y)
    assert_array_equal(score, [roc_auc_score(y - 1, _y_pred - 1)
                               for _y_pred in sl.decision_function(X).T])
    y = np.arange(len(X)) % 2
    # Cannot pass a metric as a scoring parameter
    sl1 = SlidingEstimator(LogisticRegression(), scoring=roc_auc_score)
    sl1.fit(X, y)
    assert_raises(ValueError, sl1.score, X, y)
    # Now use string as scoring
    sl1 = SlidingEstimator(LogisticRegression(), scoring='roc_auc')
    sl1.fit(X, y)
    rng = np.random.RandomState(0)
    X = rng.randn(*X.shape)  # randomize X to avoid AUCs in [0, 1]
    score_sl = sl1.score(X, y)
    assert_array_equal(score_sl.shape, [n_time])
    assert_true(score_sl.dtype == float)
    # Check that scoring was applied adequately
    scoring = make_scorer(roc_auc_score, needs_threshold=True)
    score_manual = [scoring(est, x, y) for est, x in zip(
        sl1.estimators_, X.transpose(2, 0, 1))]
    assert_array_equal(score_manual, score_sl)
    # n_jobs: parallel fitting/scoring must agree with the serial result
    sl = SlidingEstimator(LogisticRegression(random_state=0), n_jobs=1,
                          scoring='roc_auc')
    score_1job = sl.fit(X, y).score(X, y)
    sl.n_jobs = 2
    score_njobs = sl.fit(X, y).score(X, y)
    assert_array_equal(score_1job, score_njobs)
    sl.predict(X)
    # n_jobs > n_estimators
    sl.fit(X[..., [0]], y)
    sl.predict(X[..., [0]])
    # pipeline
    class _LogRegTransformer(LogisticRegression):
        # XXX needs transformer in pipeline to get first proba only
        def transform(self, X):
            return super(_LogRegTransformer, self).predict_proba(X)[..., 1]

    pipe = make_pipeline(SlidingEstimator(_LogRegTransformer()),
                         LogisticRegression())
    pipe.fit(X, y)
    pipe.predict(X)
    # n-dimensional feature space
    X = np.random.rand(10, 3, 4, 2)
    y = np.arange(10) % 2
    y_preds = list()
    for n_jobs in [1, 2]:
        pipe = SlidingEstimator(
            make_pipeline(Vectorizer(), LogisticRegression()), n_jobs=n_jobs)
        y_preds.append(pipe.fit(X, y).predict(X))
        features_shape = pipe.estimators_[0].steps[0][1].features_shape_
        assert_array_equal(features_shape, [3, 4])
    assert_array_equal(y_preds[0], y_preds[1])
    # Bagging classifiers
    X = np.random.rand(10, 3, 4)
    for n_jobs in (1, 2):
        pipe = SlidingEstimator(BaggingClassifier(None, 2), n_jobs=n_jobs)
        pipe.fit(X, y)
        pipe.score(X, y)
        assert_true(isinstance(pipe.estimators_[0], BaggingClassifier))
@requires_version('sklearn', '0.17')
def test_generalization_light():
    """Test GeneralizingEstimator fit/predict/score on toy data."""
    from sklearn.pipeline import make_pipeline
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import roc_auc_score
    X, y = make_data()
    n_epochs, _, n_time = X.shape
    # fit
    gl = GeneralizingEstimator(LogisticRegression())
    assert_equal(repr(gl)[:23], '<GeneralizingEstimator(')
    gl.fit(X, y)
    # fitting must also accept per-sample weights
    gl.fit(X, y, sample_weight=np.ones_like(y))
    assert_equal(gl.__repr__()[-28:], ', fitted with 10 estimators>')
    # transforms
    y_pred = gl.predict(X)
    # generalization: one prediction per (train-time, test-time) pair
    assert_array_equal(y_pred.shape, [n_epochs, n_time, n_time])
    assert_true(y_pred.dtype == int)
    y_proba = gl.predict_proba(X)
    assert_true(y_proba.dtype == float)
    assert_array_equal(y_proba.shape, [n_epochs, n_time, n_time, 2])
    # transform to different datasize
    y_pred = gl.predict(X[:, :, :2])
    assert_array_equal(y_pred.shape, [n_epochs, n_time, 2])
    # score
    score = gl.score(X[:, :, :3], y)
    assert_array_equal(score.shape, [n_time, 3])
    assert_true(np.sum(np.abs(score)) != 0)
    assert_true(score.dtype == float)
    gl = GeneralizingEstimator(LogisticRegression(), scoring='roc_auc')
    gl.fit(X, y)
    score = gl.score(X, y)
    # diagonal entry (same train/test time) must match a manually computed AUC
    auc = roc_auc_score(y, gl.estimators_[0].predict_proba(X[..., 0])[..., 1])
    assert_equal(score[0, 0], auc)
    # invalid scoring parameters must raise when scoring
    for scoring in ['foo', 999]:
        gl = GeneralizingEstimator(LogisticRegression(), scoring=scoring)
        gl.fit(X, y)
        assert_raises((ValueError, TypeError), gl.score, X, y)
    # Check sklearn's roc_auc fix: scikit-learn/scikit-learn#6874
    # -- 3 class problem
    gl = GeneralizingEstimator(LogisticRegression(), scoring='roc_auc')
    y = np.arange(len(X)) % 3
    gl.fit(X, y)
    assert_raises(ValueError, gl.score, X, y)
    # -- 2 class problem not in [0, 1]
    y = np.arange(len(X)) % 2 + 1
    gl.fit(X, y)
    score = gl.score(X, y)
    manual_score = [[roc_auc_score(y - 1, _y_pred) for _y_pred in _y_preds]
                    for _y_preds in gl.decision_function(X).transpose(1, 2, 0)]
    assert_array_equal(score, manual_score)
    # n_jobs
    gl = GeneralizingEstimator(LogisticRegression(), n_jobs=2)
    gl.fit(X, y)
    y_pred = gl.predict(X)
    assert_array_equal(y_pred.shape, [n_epochs, n_time, n_time])
    score = gl.score(X, y)
    assert_array_equal(score.shape, [n_time, n_time])
    # n_jobs > n_estimators
    gl.fit(X[..., [0]], y)
    gl.predict(X[..., [0]])
    # n-dimensional feature space
    X = np.random.rand(10, 3, 4, 2)
    y = np.arange(10) % 2
    y_preds = list()
    for n_jobs in [1, 2]:
        pipe = GeneralizingEstimator(
            make_pipeline(Vectorizer(), LogisticRegression()), n_jobs=n_jobs)
        y_preds.append(pipe.fit(X, y).predict(X))
        features_shape = pipe.estimators_[0].steps[0][1].features_shape_
        assert_array_equal(features_shape, [3, 4])
    # predictions must not depend on the number of jobs
    assert_array_equal(y_preds[0], y_preds[1])
| [
"numpy.ones_like",
"numpy.abs",
"numpy.random.rand",
"numpy.arange",
"sklearn.linear_model.Ridge",
"sklearn.metrics.make_scorer",
"sklearn.linear_model.LogisticRegression",
"nose.tools.assert_raises",
"nose.tools.assert_true",
"sklearn.metrics.roc_auc_score",
"sklearn.ensemble.BaggingClassifier"... | [((646, 681), 'mne.utils.requires_version', 'requires_version', (['"""sklearn"""', '"""0.17"""'], {}), "('sklearn', '0.17')\n", (662, 681), False, 'from mne.utils import requires_version\n'), ((5184, 5219), 'mne.utils.requires_version', 'requires_version', (['"""sklearn"""', '"""0.17"""'], {}), "('sklearn', '0.17')\n", (5200, 5219), False, 'from mne.utils import requires_version\n'), ((420, 460), 'numpy.random.rand', 'np.random.rand', (['n_epochs', 'n_chan', 'n_time'], {}), '(n_epochs, n_chan, n_time)\n', (434, 460), True, 'import numpy as np\n'), ((1032, 1082), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'SlidingEstimator', '"""foo"""'], {}), "(ValueError, SlidingEstimator, 'foo')\n", (1045, 1082), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((1326, 1369), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'sl.fit', 'X[1:]', 'y'], {}), '(ValueError, sl.fit, X[1:], y)\n', (1339, 1369), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((1374, 1422), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'sl.fit', 'X[:, :, 0]', 'y'], {}), '(ValueError, sl.fit, X[:, :, 0], y)\n', (1387, 1422), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((1493, 1543), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'sl.predict', 'X[:, :, :2]'], {}), '(ValueError, sl.predict, X[:, :, :2])\n', (1506, 1543), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((1575, 1607), 'nose.tools.assert_true', 'assert_true', (['(y_pred.dtype == int)'], {}), '(y_pred.dtype == int)\n', (1586, 1607), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((1612, 1664), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['y_pred.shape', '[n_epochs, n_time]'], {}), '(y_pred.shape, [n_epochs, n_time])\n', (1630, 1664), False, 'from numpy.testing import 
assert_array_equal\n'), ((1703, 1738), 'nose.tools.assert_true', 'assert_true', (['(y_proba.dtype == float)'], {}), '(y_proba.dtype == float)\n', (1714, 1738), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((1743, 1799), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['y_proba.shape', '[n_epochs, n_time, 2]'], {}), '(y_proba.shape, [n_epochs, n_time, 2])\n', (1761, 1799), False, 'from numpy.testing import assert_array_equal\n'), ((1844, 1885), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['score.shape', '[n_time]'], {}), '(score.shape, [n_time])\n', (1862, 1885), False, 'from numpy.testing import assert_array_equal\n'), ((1934, 1967), 'nose.tools.assert_true', 'assert_true', (['(score.dtype == float)'], {}), '(score.dtype == float)\n', (1945, 1967), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((2021, 2051), 'nose.tools.assert_equal', 'assert_equal', (['sl.scoring', 'None'], {}), '(sl.scoring, None)\n', (2033, 2051), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((2510, 2551), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'sl.score', 'X', 'y'], {}), '(ValueError, sl.score, X, y)\n', (2523, 2551), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((2983, 3025), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'sl1.score', 'X', 'y'], {}), '(ValueError, sl1.score, X, y)\n', (2996, 3025), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((3155, 3179), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (3176, 3179), True, 'import numpy as np\n'), ((3282, 3326), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['score_sl.shape', '[n_time]'], {}), '(score_sl.shape, [n_time])\n', (3300, 3326), False, 'from numpy.testing import assert_array_equal\n'), ((3331, 3367), 'nose.tools.assert_true', 'assert_true', (['(score_sl.dtype == float)'], 
{}), '(score_sl.dtype == float)\n', (3342, 3367), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((3431, 3479), 'sklearn.metrics.make_scorer', 'make_scorer', (['roc_auc_score'], {'needs_threshold': '(True)'}), '(roc_auc_score, needs_threshold=True)\n', (3442, 3479), False, 'from sklearn.metrics import roc_auc_score, make_scorer\n'), ((3602, 3644), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['score_manual', 'score_sl'], {}), '(score_manual, score_sl)\n', (3620, 3644), False, 'from numpy.testing import assert_array_equal\n'), ((3883, 3926), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['score_1job', 'score_njobs'], {}), '(score_1job, score_njobs)\n', (3901, 3926), False, 'from numpy.testing import assert_array_equal\n'), ((4467, 4494), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(4)', '(2)'], {}), '(10, 3, 4, 2)\n', (4481, 4494), True, 'import numpy as np\n'), ((4857, 4899), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['y_preds[0]', 'y_preds[1]'], {}), '(y_preds[0], y_preds[1])\n', (4875, 4899), False, 'from numpy.testing import assert_array_equal\n'), ((4935, 4959), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(4)'], {}), '(10, 3, 4)\n', (4949, 4959), True, 'import numpy as np\n'), ((5803, 5863), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['y_pred.shape', '[n_epochs, n_time, n_time]'], {}), '(y_pred.shape, [n_epochs, n_time, n_time])\n', (5821, 5863), False, 'from numpy.testing import assert_array_equal\n'), ((5868, 5900), 'nose.tools.assert_true', 'assert_true', (['(y_pred.dtype == int)'], {}), '(y_pred.dtype == int)\n', (5879, 5900), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((5939, 5974), 'nose.tools.assert_true', 'assert_true', (['(y_proba.dtype == float)'], {}), '(y_proba.dtype == float)\n', (5950, 5974), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((5979, 6043), 
'numpy.testing.assert_array_equal', 'assert_array_equal', (['y_proba.shape', '[n_epochs, n_time, n_time, 2]'], {}), '(y_proba.shape, [n_epochs, n_time, n_time, 2])\n', (5997, 6043), False, 'from numpy.testing import assert_array_equal\n'), ((6124, 6179), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['y_pred.shape', '[n_epochs, n_time, 2]'], {}), '(y_pred.shape, [n_epochs, n_time, 2])\n', (6142, 6179), False, 'from numpy.testing import assert_array_equal\n'), ((6234, 6278), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['score.shape', '[n_time, 3]'], {}), '(score.shape, [n_time, 3])\n', (6252, 6278), False, 'from numpy.testing import assert_array_equal\n'), ((6327, 6360), 'nose.tools.assert_true', 'assert_true', (['(score.dtype == float)'], {}), '(score.dtype == float)\n', (6338, 6360), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((6561, 6591), 'nose.tools.assert_equal', 'assert_equal', (['score[0, 0]', 'auc'], {}), '(score[0, 0], auc)\n', (6573, 6591), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((6999, 7040), 'nose.tools.assert_raises', 'assert_raises', (['ValueError', 'gl.score', 'X', 'y'], {}), '(ValueError, gl.score, X, y)\n', (7012, 7040), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((7318, 7357), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['score', 'manual_score'], {}), '(score, manual_score)\n', (7336, 7357), False, 'from numpy.testing import assert_array_equal\n'), ((7483, 7543), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['y_pred.shape', '[n_epochs, n_time, n_time]'], {}), '(y_pred.shape, [n_epochs, n_time, n_time])\n', (7501, 7543), False, 'from numpy.testing import assert_array_equal\n'), ((7575, 7624), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['score.shape', '[n_time, n_time]'], {}), '(score.shape, [n_time, n_time])\n', (7593, 7624), False, 'from numpy.testing import 
assert_array_equal\n'), ((7752, 7779), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(4)', '(2)'], {}), '(10, 3, 4, 2)\n', (7766, 7779), True, 'import numpy as np\n'), ((8147, 8189), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['y_preds[0]', 'y_preds[1]'], {}), '(y_preds[0], y_preds[1])\n', (8165, 8189), False, 'from numpy.testing import assert_array_equal\n'), ((469, 488), 'numpy.arange', 'np.arange', (['n_epochs'], {}), '(n_epochs)\n', (478, 488), True, 'import numpy as np\n'), ((537, 560), 'numpy.random.randn', 'np.random.randn', (['n_chan'], {}), '(n_chan)\n', (552, 560), True, 'import numpy as np\n'), ((1109, 1116), 'sklearn.linear_model.Ridge', 'Ridge', ([], {}), '()\n', (1114, 1116), False, 'from sklearn.linear_model import Ridge, LogisticRegression\n'), ((1144, 1164), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1162, 1164), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1995, 2015), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2013, 2015), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2205, 2259), 'nose.tools.assert_raises', 'assert_raises', (['(ValueError, TypeError)', 'sl.score', 'X', 'y'], {}), '((ValueError, TypeError), sl.score, X, y)\n', (2218, 2259), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((2378, 2412), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)'}), '(random_state=0)\n', (2396, 2412), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2916, 2936), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2934, 2936), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3086, 3106), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (3104, 3106), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3685, 3719), 
'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(0)'}), '(random_state=0)\n', (3703, 3719), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4363, 4383), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (4381, 4383), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4503, 4516), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (4512, 4516), True, 'import numpy as np\n'), ((4810, 4852), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['features_shape', '[3, 4]'], {}), '(features_shape, [3, 4])\n', (4828, 4852), False, 'from numpy.testing import assert_array_equal\n'), ((5538, 5558), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (5556, 5558), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6393, 6413), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (6411, 6413), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6729, 6783), 'nose.tools.assert_raises', 'assert_raises', (['(ValueError, TypeError)', 'gl.score', 'X', 'y'], {}), '((ValueError, TypeError), gl.score, X, y)\n', (6742, 6783), False, 'from nose.tools import assert_raises, assert_true, assert_equal\n'), ((6907, 6927), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (6925, 6927), False, 'from sklearn.linear_model import LogisticRegression\n'), ((7403, 7423), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (7421, 7423), False, 'from sklearn.linear_model import LogisticRegression\n'), ((7788, 7801), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (7797, 7801), True, 'import numpy as np\n'), ((8100, 8142), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['features_shape', '[3, 4]'], {}), '(features_shape, [3, 4])\n', (8118, 8142), False, 'from numpy.testing import assert_array_equal\n'), ((1454, 
1469), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (1466, 1469), True, 'import numpy as np\n'), ((2137, 2157), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (2155, 2157), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2700, 2733), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['(y - 1)', '(_y_pred - 1)'], {}), '(y - 1, _y_pred - 1)\n', (2713, 2733), False, 'from sklearn.metrics import roc_auc_score\n'), ((5018, 5044), 'sklearn.ensemble.BaggingClassifier', 'BaggingClassifier', (['None', '(2)'], {}), '(None, 2)\n', (5035, 5044), False, 'from sklearn.ensemble import BaggingClassifier\n'), ((5667, 5682), 'numpy.ones_like', 'np.ones_like', (['y'], {}), '(y)\n', (5679, 5682), True, 'import numpy as np\n'), ((6661, 6681), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (6679, 6681), False, 'from sklearn.linear_model import LogisticRegression\n'), ((7179, 7208), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['(y - 1)', '_y_pred'], {}), '(y - 1, _y_pred)\n', (7192, 7208), False, 'from sklearn.metrics import roc_auc_score\n'), ((1909, 1922), 'numpy.abs', 'np.abs', (['score'], {}), '(score)\n', (1915, 1922), True, 'import numpy as np\n'), ((4627, 4639), 'mne.decoding.transformer.Vectorizer', 'Vectorizer', ([], {}), '()\n', (4637, 4639), False, 'from mne.decoding.transformer import Vectorizer\n'), ((4641, 4661), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (4659, 4661), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6302, 6315), 'numpy.abs', 'np.abs', (['score'], {}), '(score)\n', (6308, 6315), True, 'import numpy as np\n'), ((7917, 7929), 'mne.decoding.transformer.Vectorizer', 'Vectorizer', ([], {}), '()\n', (7927, 7929), False, 'from mne.decoding.transformer import Vectorizer\n'), ((7931, 7951), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (7949, 7951), False, 'from 
sklearn.linear_model import LogisticRegression\n')] |
# This script is going to add subspace to each direction.
import os
import pickle as pkl
import argparse
import numpy as np
import math
import torch
import torch.nn as nn
import torch.optim as optim
from derivable_models.derivable_generator import get_derivable_generator
from utils.file_utils import check_transformer_experiments_directory, get_generator_info, prepare_test_z, \
create_subspace_distill_directory, restore_saved_checkpoints, get_sorted_subspace_prediction, thrC, post_proC, \
get_dir_lists
from utils.image_precossing import _tanh_to_sigmoid, resize_images, _sigmoid_to_tanh, post_process_image
import torchvision
def forward_generator(generator, z, layer):
    """Full forward pass through the generator split at ``layer``.

    The latent ``z`` is first mapped to intermediate features by the
    pre-``layer`` part of the generator, then decoded by the post-``layer``
    part; the decoded output is returned.
    """
    features = generator([z], which_block=layer, pre_model=True)
    return generator([features], which_block=layer, post_model=True)
def easy_forward(generator, z, layer, pre_model=False, post_model=False, batch_size=4):
    """Run ``z`` through part of the generator in mini-batches to bound memory.

    Args:
        generator: callable invoked as
            ``generator([batch], which_block=layer, pre_model=..., post_model=...)``.
        z: input tensor, batched along dim 0. Generalized: any rank is
            accepted now — only ``z.shape[0]`` is used (the original
            unpacked ``b, c, w, h`` and therefore required 4-D input).
        layer: block index forwarded to the generator as ``which_block``.
        pre_model: run only the part of the generator before ``layer``.
        post_model: run only the part after ``layer``.
        batch_size: number of samples per forward pass.

    Returns:
        Detached outputs of all mini-batches concatenated along dim 0.
    """
    n_total = z.shape[0]
    results = []
    for start in range(0, n_total, batch_size):
        # Slicing clips at n_total automatically; no explicit min() needed.
        z_batch = z[start:start + batch_size]
        out = generator([z_batch], which_block=layer,
                        pre_model=pre_model, post_model=post_model).detach()
        results.append(out)
    return torch.cat(results, dim=0)
def orthogonalize(directions, eps=1e-16):
    """Apply unnormalized Gram-Schmidt to the rows of ``directions``, in place.

    Each row is made orthogonal to every earlier row by subtracting its
    projection onto that row; rows are NOT rescaled to unit norm. The array
    is modified in place and also returned for convenience. ``eps`` guards
    against division by zero for (near-)zero pivot rows.
    """
    n_vectors = directions.shape[0]
    for idx in range(n_vectors - 1):
        pivot = directions[None, idx]  # shape (1, dim), broadcasts over later rows
        numer = np.sum(pivot * directions[idx + 1:, :], axis=1, keepdims=True)
        denom = np.sum(pivot * pivot, axis=1, keepdims=True)
        coeff = numer / (denom + eps)
        directions[idx + 1:] = directions[idx + 1:, :] - coeff * pivot
    return directions
def main(args):
    """Train one self-expressive subspace layer per restored latent direction.

    For every direction ``d`` restored from the previous experiment's
    checkpoints, a channel-affinity matrix ``S0`` (fmap_ch x fmap_ch) is
    optimized so that intermediate generator features reconstruct themselves
    as ``S_k @ f`` (self-expressiveness), using reconstruction losses in both
    feature and image space plus a sparsity penalty on ``S_k``. Periodically
    the learned affinity is clustered into subspaces (spectral clustering via
    ``post_proC``) and visualized by exchanging the clustered feature
    channels between samples; subspace masks are pickled to disk.

    Fixes vs. the original: ``raise NotImplemented(...)`` (which itself
    raises ``TypeError`` because ``NotImplemented`` is not callable) is
    corrected to ``raise NotImplementedError(...)``.

    Args:
        args: parsed command-line namespace (see the ``__main__`` block).

    Raises:
        NotImplementedError: for unsupported ``args.optim`` or
            ``args.sparse_type`` values.
    """
    eps = 1e-16  # numerical guard for the same-density renormalization below
    dim_z = args.dim_z
    os.makedirs(args.outputs, exist_ok=True)
    out_dir, exp_name = check_transformer_experiments_directory(args, args.exp_id)
    out_dir2, exp_name2 = create_subspace_distill_directory(out_dir, args, args.exp_id2)
    print('Experiment name2: ', exp_name2)
    print('Output directory: ', out_dir2)
    generator = get_derivable_generator(args.gan_model, args.inversion_type, args)
    generator = torch.nn.DataParallel(generator)
    generator.cuda()
    fmap_size, fmap_ch, image_size, image_ch = get_generator_info(args, generator, which_layer=args.s_layer)
    # The layer to which the self-expressive layer attaches.
    layer = args.s_layer
    # The batch size used while training the self-expressive layer.
    batch_size = args.s_batch_size
    test_zs = prepare_test_z(args)
    iterations = args.total_iterations
    # D0 holds the previously discovered directions, one per row (n_dirs, dim_z).
    D0 = restore_saved_checkpoints(os.path.join(out_dir, 'checkpoints'), args.restore_which_step).cuda()
    print(D0.shape)
    n_dirs = D0.shape[0]
    if args.which_dirs == 'all':
        dir_lists = list(range(n_dirs))
    else:
        dir_lists = get_dir_lists(args.which_dirs)
    for dir_i in dir_lists:
        # Create the self-expressive layer, initialized near zero.
        S0 = torch.ones([fmap_ch, fmap_ch], dtype=torch.float32).cuda() * args.subspace_init_eps
        S0.requires_grad = True
        if args.optim == 'Adam':
            optimizer = optim.Adam(params=[S0], lr=args.lr)
        elif args.optim == 'SGD':
            optimizer = optim.SGD(params=[S0], lr=args.lr)
        else:
            raise NotImplementedError('We don\'t support \'%s\' type of optimizer, please check it out. ' % args.optim)
        out_dir3 = os.path.join(out_dir2, 'direction_%d' % dir_i)
        os.makedirs(out_dir3, exist_ok=True)
        d = D0[dir_i].view(1, dim_z, 1, 1)
        print('Note: calculating Subspace for direction %d: ' % dir_i)
        for step in range(iterations):
            # Sample latents and shift them along direction d by a random amount.
            z0 = torch.randn([batch_size, dim_z, 1, 1], dtype=torch.float32).cuda()
            alpha = _sigmoid_to_tanh(torch.rand(size=(batch_size, 1, 1, 1)).cuda()) * args.t_scale
            z = z0 + d * alpha
            f = generator([z], which_block=layer, pre_model=True)
            x = generator([f], which_block=layer, post_model=True)
            # Self-expressive reconstruction: f_rec = S_k @ f, with a zeroed
            # diagonal so channels cannot trivially reconstruct themselves.
            f_reshape = f.transpose(0, 1).reshape((fmap_ch, -1))
            S_k = S0 * (1.0 - torch.eye(n=fmap_ch, m=fmap_ch).cuda())
            f_rec = torch.matmul(S_k, f_reshape).reshape((fmap_ch, batch_size, fmap_size, fmap_size)).transpose(0, 1)
            x_rec = generator([f_rec], which_block=layer, post_model=True)
            optimizer.zero_grad()
            if args.sparse_type == 'L2':
                loss_sparse = torch.mean(torch.pow(S_k, 2.0))
            elif args.sparse_type == 'L1':
                loss_sparse = torch.mean(torch.abs(S_k))
            else:
                raise NotImplementedError('Type not implemented.')
            loss_feature = torch.mean(torch.pow(f - f_rec, 2.0))
            loss_data = torch.mean(torch.pow(x - x_rec, 2.0))
            loss = args.wgt_f * loss_feature + args.wgt_x * loss_data + args.wgt_spa * loss_sparse
            loss.backward()
            optimizer.step()
            if step % args.report_value == 0:
                procedure_remainder = 'direction (%d/%d) Iteration (%d/%d), ' % (dir_i, n_dirs, step, iterations)
                loss_remainder = 'loss=%.4f, loss_feature=%.4f, loss_data=%.4f, loss_sparse=%.4f.' % \
                                 (float(loss.item()), float(loss_feature.item()), float(loss_data.item()),
                                  float(loss_sparse.item()))
                print(procedure_remainder + loss_remainder)
            if (step + 1) % args.report_image == 0:
                n_samples = args.n_samples
                # Save reconstruction images (originals above, reconstructions below).
                x_show = resize_images(post_process_image(x).detach().cpu().numpy(), args.resize)
                x_show = torch.from_numpy(x_show)
                x_rec_show = resize_images(post_process_image(x_rec).detach().cpu().numpy(), args.resize)
                x_rec_show = torch.from_numpy(x_rec_show)
                x_show = torch.cat([x_show, x_rec_show], dim=0)
                rec_path = os.path.join(out_dir3, 'reconstruction_imgs_iter_%d_dir_%d.png' % (step, dir_i))
                torchvision.utils.save_image(x_show, rec_path, nrow=x_rec_show.shape[0])
                print('Save reconstruction images to \'%s\'. ' % rec_path)
                # Visualize the subspaces: cluster the (rectified) affinity
                # matrix into n_subspaces channel groups.
                test_z = test_zs[np.random.choice(test_zs.shape[0], n_samples, replace=False)].view(n_samples, dim_z, 1, 1)
                S0_abs = torch.relu(S0)
                S0_val = S0_abs.detach().cpu().numpy()
                S_val = thrC(S0_val.copy().T, args.alpha).T
                predict, L_val = post_proC(S_val, args.n_subspaces, args.subspace_dim, args.power)
                predict, p_sum = get_sorted_subspace_prediction(predict, args)
                features = generator([test_z], which_block=layer, pre_model=True).detach()
                n_interps = args.n_interps
                for cls_i in range(1, args.n_subspaces + 1, 1):
                    exchanging_images = []
                    # Only render classes whose p_sum exceeds the threshold
                    # (presumably a cluster-size measure — confirm in
                    # get_sorted_subspace_prediction).
                    if p_sum[cls_i-1] > args.show_threshold:
                        for img_i in range(n_samples):
                            alpha = torch.linspace(-args.t_scale, args.t_scale, n_interps).view(n_interps, 1, 1, 1).cuda()
                            test_zi = test_z[None, img_i]
                            test_zi_m = test_zi + d * alpha
                            if args.same_density:
                                # Rescale shifted codes back to the original
                                # norm so they stay on the same density shell.
                                test_zi_norm = torch.sqrt(eps + torch.sum(test_zi * test_zi, dim=1, keepdim=True))
                                test_zi_m_norm = torch.sqrt(eps + torch.sum(test_zi_m * test_zi_m, dim=1, keepdim=True))
                                test_zi_m = test_zi_m / test_zi_m_norm * test_zi_norm
                            test_fi_m = easy_forward(generator, test_zi_m, layer, pre_model=True, post_model=False).detach()
                            test_fi_ms = [test_fi_m]
                            test_fi_m = test_fi_m.transpose(0, 1).cpu().numpy()
                            # Graft the channels of subspace cls_i from the
                            # traversed sample into every other test sample.
                            for img_j in range(n_samples):
                                feature_j = features[img_j].view(1, fmap_ch, fmap_size, fmap_size)
                                feature_j = torch.repeat_interleave(feature_j, repeats=n_interps, dim=0).transpose(0, 1).cpu().numpy()
                                feature_j[predict == cls_i] = test_fi_m[predict == cls_i]
                                test_fi_ms.append(torch.from_numpy(feature_j).transpose(0, 1).cuda())
                            test_fi_ms = torch.cat(test_fi_ms, dim=0).detach()
                            test_fi_ms = easy_forward(generator, test_fi_ms, layer, pre_model=False, post_model=True,
                                                         batch_size=4).detach()
                            test_fi_ms = resize_images(post_process_image(test_fi_ms).cpu().numpy(), args.resize)
                            exchanging_images.append(torch.from_numpy(test_fi_ms))
                        exchanging_images = torch.cat(exchanging_images, dim=0)
                        save_path = os.path.join(out_dir3, 'exchange_images_dir_%d_layer_%d_class_%d_iter_%d.png' %
                                                 (dir_i, layer, cls_i, step))
                        torchvision.utils.save_image(exchanging_images, save_path, nrow=n_interps)
                        print('save image to %s. ' % out_dir3)
                        # Pickle the boolean channel mask of subspace cls_i.
                        subspace_i = predict == cls_i
                        save_path = os.path.join(out_dir3, 'saved_subspace_dir_%d_layer_%d_class_%d_iter_%d.pkl' %
                                                 (dir_i, layer, cls_i, step))
                        with open(save_path, 'wb') as file_out:
                            pkl.dump(subspace_i, file_out)
                        print('Save subspace images %s.' % save_path)
if __name__ == '__main__':
    # Command-line entry point: build the argument parser, clamp the
    # per-sheet direction count, and run the subspace-distillation training.
    print('Working on applying transformer on unsupervised GAN discovery.')
    parser = argparse.ArgumentParser(description='GAN Transformer discovery.')
    parser.add_argument('-o', '--outputs', type=str, default='./TRAIN', help='Directory to output corresponding images or loggings.')
    parser.add_argument('--exp_id', default='MaximumTraversingMask', type=str, help='experiment prefix for easy debugging. ')
    # Parameters for Multi-Code GAN Inversion
    parser.add_argument('--inversion_type', default='PGGAN-Layerwise',
                        help='Inversion type, "PGGAN-Multi-Z" for Multi-Code-GAN prior.')
    # Generator Setting, Check models/model_settings for available GAN models
    parser.add_argument('--gan_model', default='pggan_celebahq', help='The name of model used.', type=str)
    parser.add_argument('--which_class', default=239, type=int, help='The class of BigGAN.')
    parser.add_argument('--layer', default=3, type=int, help='which layer to plug transformer into.')
    parser.add_argument('--dim_z', default=512, type=int, help='The dimensionality of the latent code z. ')
    parser.add_argument('--report_value', default=10, type=int, help='The step of reporting value.')
    parser.add_argument('--report_model', default=1000, type=int, help='The step of reporting value.')
    parser.add_argument('--total_iterations', default=5000, type=int, help='The total number of iterations.')
    parser.add_argument('--optim', default='Adam', type=str, help='The optimizer used.')
    parser.add_argument('--lr', default=1e-4, type=float, help='The learning rate of the optimizer.')
    parser.add_argument('--t_scale', default=10.0, type=float, help='The scale of scaling.')
    parser.add_argument('--n_dirs', default=20, type=int, help='The number of directions.')
    parser.add_argument('--batch_size', default=16, type=int, help='The batch size of the input')
    # report images configuration.
    parser.add_argument('--report_image', default=500, type=int, help='The step of reporting value.')
    parser.add_argument('--n_interps', default=11, type=int, help='The number of interpolation of visualization. ')
    parser.add_argument('--n_samples', default=6, type=int, help='The number of samples of visualization. ')
    parser.add_argument('--n_dir_per_sheet', default=10, type=int, help='The number of directions per visualization sheet. ')
    parser.add_argument('--resize', default=128, type=int, help='The resolution of saved visualization images. ')
    parser.add_argument('--same_density', default=0, type=int, help='If nonzero, rescale shifted codes to the original norm. ')
    parser.add_argument('--reduce_mean', default=0, type=int, help='The number of samples of visualization. ')
    # NOTE(review): the three help texts below look copy-pasted from --lr;
    # these options are not read by main() here — confirm their meaning
    # against the direction-discovery stage before changing them.
    parser.add_argument('--wgt_pos', default=0.1, type=float, help='The learning rate of the optimizer.')
    parser.add_argument('--wgt_neg', default=10.0, type=float, help='The learning rate of the optimizer.')
    parser.add_argument('--wgt_orth', default=100.0, type=float, help='The learning rate of the optimizer.')
    # configurations for subspace experiments.
    parser.add_argument('--exp_id2', default='SubspaceDiscovery', type=str, help='Experiment prefix of the subspace-distillation stage. ')
    parser.add_argument('--n_subspaces', default=6, type=int, help='The number of subspaces to apply. ')
    parser.add_argument('--s_layer', default=5, type=int, help='The layer to which the self-expressive layer attach. ')
    parser.add_argument('--s_batch_size', default=4, type=int, help='The batch size of the subspace learning. ')
    # Fixed: was type=int with a float default (1e-4); int('0.0001') would
    # crash on any command-line value and silently never matched the default.
    parser.add_argument('--subspace_init_eps', default=1e-4, type=float, help='Initial scale of the self-expressive matrix. ')
    parser.add_argument('--restore_which_step', default=-1, type=int, help='The step to restore. ')
    parser.add_argument('--sparse_type', default='L2', type=str, help='The type of sparsity to use. ')
    parser.add_argument('--wgt_f', default=1.0, type=float, help='The weights for feature space reconstruction loss. ')
    parser.add_argument('--wgt_x', default=1.0, type=float, help='The weights for data space reconstruction loss')
    parser.add_argument('--wgt_spa', default=0.1, type=float, help='The weights for sparsity term. ')
    # configuration for spectral clustering.
    parser.add_argument('--subspace_dim', type=int, default=12, help='The number of subspace dimension.')
    parser.add_argument('--power', type=float, default=3.0, help='The power of the alpha.')
    parser.add_argument('--alpha', type=float, default=0.2, help='The power of the alpha.')
    parser.add_argument('--which_dirs', type=str, default='all', help='Which directions to process: "all" or a spec parsed by get_dir_lists. ')
    parser.add_argument('--show_threshold', type=int, default=40, help='Threshold on p_sum for a subspace class to be visualized. ')
    args = parser.parse_args()
    # A visualization sheet cannot hold more directions than exist.
    if args.n_dir_per_sheet > args.n_dirs:
        args.n_dir_per_sheet = args.n_dirs
    main(args)
| [
"utils.file_utils.get_dir_lists",
"torch.pow",
"torch.from_numpy",
"torch.sum",
"torch.repeat_interleave",
"torchvision.utils.save_image",
"utils.file_utils.post_proC",
"argparse.ArgumentParser",
"utils.file_utils.get_generator_info",
"torch.eye",
"torch.relu",
"utils.file_utils.prepare_test_z... | [((1172, 1197), 'torch.cat', 'torch.cat', (['results'], {'dim': '(0)'}), '(results, dim=0)\n', (1181, 1197), False, 'import torch\n'), ((1678, 1718), 'os.makedirs', 'os.makedirs', (['args.outputs'], {'exist_ok': '(True)'}), '(args.outputs, exist_ok=True)\n', (1689, 1718), False, 'import os\n'), ((1743, 1801), 'utils.file_utils.check_transformer_experiments_directory', 'check_transformer_experiments_directory', (['args', 'args.exp_id'], {}), '(args, args.exp_id)\n', (1782, 1801), False, 'from utils.file_utils import check_transformer_experiments_directory, get_generator_info, prepare_test_z, create_subspace_distill_directory, restore_saved_checkpoints, get_sorted_subspace_prediction, thrC, post_proC, get_dir_lists\n'), ((1828, 1890), 'utils.file_utils.create_subspace_distill_directory', 'create_subspace_distill_directory', (['out_dir', 'args', 'args.exp_id2'], {}), '(out_dir, args, args.exp_id2)\n', (1861, 1890), False, 'from utils.file_utils import check_transformer_experiments_directory, get_generator_info, prepare_test_z, create_subspace_distill_directory, restore_saved_checkpoints, get_sorted_subspace_prediction, thrC, post_proC, get_dir_lists\n'), ((1993, 2059), 'derivable_models.derivable_generator.get_derivable_generator', 'get_derivable_generator', (['args.gan_model', 'args.inversion_type', 'args'], {}), '(args.gan_model, args.inversion_type, args)\n', (2016, 2059), False, 'from derivable_models.derivable_generator import get_derivable_generator\n'), ((2076, 2108), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['generator'], {}), '(generator)\n', (2097, 2108), False, 'import torch\n'), ((2178, 2239), 'utils.file_utils.get_generator_info', 'get_generator_info', (['args', 'generator'], {'which_layer': 'args.s_layer'}), '(args, generator, which_layer=args.s_layer)\n', (2196, 2239), False, 'from utils.file_utils import check_transformer_experiments_directory, get_generator_info, prepare_test_z, 
create_subspace_distill_directory, restore_saved_checkpoints, get_sorted_subspace_prediction, thrC, post_proC, get_dir_lists\n'), ((2431, 2451), 'utils.file_utils.prepare_test_z', 'prepare_test_z', (['args'], {}), '(args)\n', (2445, 2451), False, 'from utils.file_utils import check_transformer_experiments_directory, get_generator_info, prepare_test_z, create_subspace_distill_directory, restore_saved_checkpoints, get_sorted_subspace_prediction, thrC, post_proC, get_dir_lists\n'), ((9982, 10047), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""GAN Transformer discovery."""'}), "(description='GAN Transformer discovery.')\n", (10005, 10047), False, 'import argparse\n'), ((1314, 1388), 'numpy.sum', 'np.sum', (['(directions[None, i] * directions[i + 1:, :])'], {'axis': '(1)', 'keepdims': '(True)'}), '(directions[None, i] * directions[i + 1:, :], axis=1, keepdims=True)\n', (1320, 1388), True, 'import numpy as np\n'), ((1406, 1478), 'numpy.sum', 'np.sum', (['(directions[None, i] * directions[None, i])'], {'axis': '(1)', 'keepdims': '(True)'}), '(directions[None, i] * directions[None, i], axis=1, keepdims=True)\n', (1412, 1478), True, 'import numpy as np\n'), ((2810, 2840), 'utils.file_utils.get_dir_lists', 'get_dir_lists', (['args.which_dirs'], {}), '(args.which_dirs)\n', (2823, 2840), False, 'from utils.file_utils import check_transformer_experiments_directory, get_generator_info, prepare_test_z, create_subspace_distill_directory, restore_saved_checkpoints, get_sorted_subspace_prediction, thrC, post_proC, get_dir_lists\n'), ((3379, 3425), 'os.path.join', 'os.path.join', (['out_dir2', "('direction_%d' % dir_i)"], {}), "(out_dir2, 'direction_%d' % dir_i)\n", (3391, 3425), False, 'import os\n'), ((3434, 3470), 'os.makedirs', 'os.makedirs', (['out_dir3'], {'exist_ok': '(True)'}), '(out_dir3, exist_ok=True)\n', (3445, 3470), False, 'import os\n'), ((3101, 3136), 'torch.optim.Adam', 'optim.Adam', ([], {'params': '[S0]', 'lr': 'args.lr'}), 
'(params=[S0], lr=args.lr)\n', (3111, 3136), True, 'import torch.optim as optim\n'), ((2527, 2563), 'os.path.join', 'os.path.join', (['out_dir', '"""checkpoints"""'], {}), "(out_dir, 'checkpoints')\n", (2539, 2563), False, 'import os\n'), ((3195, 3229), 'torch.optim.SGD', 'optim.SGD', ([], {'params': '[S0]', 'lr': 'args.lr'}), '(params=[S0], lr=args.lr)\n', (3204, 3229), True, 'import torch.optim as optim\n'), ((4658, 4683), 'torch.pow', 'torch.pow', (['(f - f_rec)', '(2.0)'], {}), '(f - f_rec, 2.0)\n', (4667, 4683), False, 'import torch\n'), ((4720, 4745), 'torch.pow', 'torch.pow', (['(x - x_rec)', '(2.0)'], {}), '(x - x_rec, 2.0)\n', (4729, 4745), False, 'import torch\n'), ((5661, 5685), 'torch.from_numpy', 'torch.from_numpy', (['x_show'], {}), '(x_show)\n', (5677, 5685), False, 'import torch\n'), ((5821, 5849), 'torch.from_numpy', 'torch.from_numpy', (['x_rec_show'], {}), '(x_rec_show)\n', (5837, 5849), False, 'import torch\n'), ((5875, 5913), 'torch.cat', 'torch.cat', (['[x_show, x_rec_show]'], {'dim': '(0)'}), '([x_show, x_rec_show], dim=0)\n', (5884, 5913), False, 'import torch\n'), ((5941, 6026), 'os.path.join', 'os.path.join', (['out_dir3', "('reconstruction_imgs_iter_%d_dir_%d.png' % (iter, dir_i))"], {}), "(out_dir3, 'reconstruction_imgs_iter_%d_dir_%d.png' % (iter, dir_i)\n )\n", (5953, 6026), False, 'import os\n'), ((6038, 6110), 'torchvision.utils.save_image', 'torchvision.utils.save_image', (['x_show', 'rec_path'], {'nrow': 'x_rec_show.shape[0]'}), '(x_show, rec_path, nrow=x_rec_show.shape[0])\n', (6066, 6110), False, 'import torchvision\n'), ((6421, 6435), 'torch.relu', 'torch.relu', (['S0'], {}), '(S0)\n', (6431, 6435), False, 'import torch\n'), ((6584, 6649), 'utils.file_utils.post_proC', 'post_proC', (['S_val', 'args.n_subspaces', 'args.subspace_dim', 'args.power'], {}), '(S_val, args.n_subspaces, args.subspace_dim, args.power)\n', (6593, 6649), False, 'from utils.file_utils import check_transformer_experiments_directory, get_generator_info, 
prepare_test_z, create_subspace_distill_directory, restore_saved_checkpoints, get_sorted_subspace_prediction, thrC, post_proC, get_dir_lists\n'), ((6684, 6729), 'utils.file_utils.get_sorted_subspace_prediction', 'get_sorted_subspace_prediction', (['predict', 'args'], {}), '(predict, args)\n', (6714, 6729), False, 'from utils.file_utils import check_transformer_experiments_directory, get_generator_info, prepare_test_z, create_subspace_distill_directory, restore_saved_checkpoints, get_sorted_subspace_prediction, thrC, post_proC, get_dir_lists\n'), ((2927, 2978), 'torch.ones', 'torch.ones', (['[fmap_ch, fmap_ch]'], {'dtype': 'torch.float32'}), '([fmap_ch, fmap_ch], dtype=torch.float32)\n', (2937, 2978), False, 'import torch\n'), ((3643, 3702), 'torch.randn', 'torch.randn', (['[batch_size, dim_z, 1, 1]'], {'dtype': 'torch.float32'}), '([batch_size, dim_z, 1, 1], dtype=torch.float32)\n', (3654, 3702), False, 'import torch\n'), ((4418, 4437), 'torch.pow', 'torch.pow', (['S_k', '(2.0)'], {}), '(S_k, 2.0)\n', (4427, 4437), False, 'import torch\n'), ((4523, 4537), 'torch.abs', 'torch.abs', (['S_k'], {}), '(S_k)\n', (4532, 4537), False, 'import torch\n'), ((8963, 8998), 'torch.cat', 'torch.cat', (['exchanging_images'], {'dim': '(0)'}), '(exchanging_images, dim=0)\n', (8972, 8998), False, 'import torch\n'), ((9035, 9152), 'os.path.join', 'os.path.join', (['out_dir3', "('exchange_images_dir_%d_layer_%d_class_%d_iter_%d.png' % (dir_i, layer,\n cls_i, iter))"], {}), "(out_dir3, \n 'exchange_images_dir_%d_layer_%d_class_%d_iter_%d.png' % (dir_i, layer,\n cls_i, iter))\n", (9047, 9152), False, 'import os\n'), ((9217, 9291), 'torchvision.utils.save_image', 'torchvision.utils.save_image', (['exchanging_images', 'save_path'], {'nrow': 'n_interps'}), '(exchanging_images, save_path, nrow=n_interps)\n', (9245, 9291), False, 'import torchvision\n'), ((9510, 9626), 'os.path.join', 'os.path.join', (['out_dir3', "('saved_subspace_dir_%d_layer_%d_class_%d_iter_%d.pkl' % (dir_i, layer,\n 
cls_i, iter))"], {}), "(out_dir3, \n 'saved_subspace_dir_%d_layer_%d_class_%d_iter_%d.pkl' % (dir_i, layer,\n cls_i, iter))\n", (9522, 9626), False, 'import os\n'), ((3747, 3785), 'torch.rand', 'torch.rand', ([], {'size': '(batch_size, 1, 1, 1)'}), '(size=(batch_size, 1, 1, 1))\n', (3757, 3785), False, 'import torch\n'), ((4068, 4099), 'torch.eye', 'torch.eye', ([], {'n': 'fmap_ch', 'm': 'fmap_ch'}), '(n=fmap_ch, m=fmap_ch)\n', (4077, 4099), False, 'import torch\n'), ((4128, 4156), 'torch.matmul', 'torch.matmul', (['S_k', 'f_reshape'], {}), '(S_k, f_reshape)\n', (4140, 4156), False, 'import torch\n'), ((6304, 6364), 'numpy.random.choice', 'np.random.choice', (['test_zs.shape[0]', 'n_samples'], {'replace': '(False)'}), '(test_zs.shape[0], n_samples, replace=False)\n', (6320, 6364), True, 'import numpy as np\n'), ((9759, 9789), 'pickle.dump', 'pkl.dump', (['subspace_i', 'file_out'], {}), '(subspace_i, file_out)\n', (9767, 9789), True, 'import pickle as pkl\n'), ((8888, 8916), 'torch.from_numpy', 'torch.from_numpy', (['test_fi_ms'], {}), '(test_fi_ms)\n', (8904, 8916), False, 'import torch\n'), ((8488, 8516), 'torch.cat', 'torch.cat', (['test_fi_ms'], {'dim': '(0)'}), '(test_fi_ms, dim=0)\n', (8497, 8516), False, 'import torch\n'), ((7445, 7494), 'torch.sum', 'torch.sum', (['(test_zi * test_zi)'], {'dim': '(1)', 'keepdim': '(True)'}), '(test_zi * test_zi, dim=1, keepdim=True)\n', (7454, 7494), False, 'import torch\n'), ((7562, 7615), 'torch.sum', 'torch.sum', (['(test_zi_m * test_zi_m)'], {'dim': '(1)', 'keepdim': '(True)'}), '(test_zi_m * test_zi_m, dim=1, keepdim=True)\n', (7571, 7615), False, 'import torch\n'), ((5577, 5598), 'utils.image_precossing.post_process_image', 'post_process_image', (['x'], {}), '(x)\n', (5595, 5598), False, 'from utils.image_precossing import _tanh_to_sigmoid, resize_images, _sigmoid_to_tanh, post_process_image\n'), ((5729, 5754), 'utils.image_precossing.post_process_image', 'post_process_image', (['x_rec'], {}), '(x_rec)\n', (5747, 
5754), False, 'from utils.image_precossing import _tanh_to_sigmoid, resize_images, _sigmoid_to_tanh, post_process_image\n'), ((7125, 7179), 'torch.linspace', 'torch.linspace', (['(-args.t_scale)', 'args.t_scale', 'n_interps'], {}), '(-args.t_scale, args.t_scale, n_interps)\n', (7139, 7179), False, 'import torch\n'), ((8776, 8806), 'utils.image_precossing.post_process_image', 'post_process_image', (['test_fi_ms'], {}), '(test_fi_ms)\n', (8794, 8806), False, 'from utils.image_precossing import _tanh_to_sigmoid, resize_images, _sigmoid_to_tanh, post_process_image\n'), ((8395, 8422), 'torch.from_numpy', 'torch.from_numpy', (['feature_j'], {}), '(feature_j)\n', (8411, 8422), False, 'import torch\n'), ((8164, 8224), 'torch.repeat_interleave', 'torch.repeat_interleave', (['feature_j'], {'repeats': 'n_interps', 'dim': '(0)'}), '(feature_j, repeats=n_interps, dim=0)\n', (8187, 8224), False, 'import torch\n')] |
"""Analysis tools."""
import ast
import json
import os
from typing import Any, Dict, Optional, Union
import matplotlib.dates as dates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
def analyze() -> None:
    """Write info messages as JSON, plot prize timelines and save both.

    Reads card URLs and grade lists from ``input/input.csv`` and, for each
    card/grade combination, prints and saves summary statistics and a
    scatter plot of the prize history.

    Raises:
        ValueError: If ``input/input.csv`` does not exist.

    Notes
    -----
    For some cards, the grade data is of type `str` instead of `int` or `float`.
    pd.DataFrames assign the data-types column-wise by using the most compatible
    type. For example, the grade "Authentic" for only one card causes pandas
    to transform the whole column with grades in {1.0, 1.5, ... 10.0} to be
    stored as `str`. Therefore, all grades are converted to `str` for a unified
    treatment. These strings are written like floats with one decimal place.
    """
    # Read grades. Delimiter has to be semicolon only.
    if not os.path.exists("input/input.csv"):
        raise ValueError("'input.csv' not found")
    df = pd.read_csv("input/input.csv", sep=";")
    # Convert list-strings in the input column to List[Union[float, int]].
    grades = df["grades (list)"].apply(ast.literal_eval).tolist()
    # Iterate over cards in the input DataFrame.
    for row in range(len(df)):
        # First column holds the card URL; derive the card name from it.
        card_name = _get_card_name(df.iloc[row, 0])
        # Important: convert the grade column to `str` as described in Notes.
        df_out = pd.read_csv(f"output/data/{card_name}.csv", parse_dates=["date"])
        df_out["grade"] = df_out["grade"].astype(str)
        # Iterate over the requested grades for this card.
        for g in grades[row]:
            # Normalize the grade to a float-like string, e.g. "9.0".
            g = str(float(g))
            # Info on grades outside {1.0, 1.5, ... 10.0}.
            msg_grades = _str_unusual_grades(df_out)
            # Info on the share of cards with this grade.
            msg_grades_n = _str_n_grade(df_out, g)
            # Drop rows with different grades.
            df_filt = _filter_grade_data(df_out, g)
            # Compute compound annual growth rate (CAGR).
            msg_return = _str_comp_ann_g(df_filt)
            # Store infos in a dictionary.
            card_dict: Dict[str, Optional[str]] = {
                "ident": f"{card_name}-{g}",
                "compound annual growth": msg_return,
                "info grades number": msg_grades_n,
            }
            # Only include the message if unusual grades exist.
            if msg_grades is not None:
                card_dict["info grades"] = msg_grades
            # Print info.
            for v in card_dict.values():
                print(v)
            # Save dictionary; exist_ok avoids a check-then-create race.
            os.makedirs("output/nmbrs", exist_ok=True)
            with open(
                f"output/nmbrs/{card_name}-grade-{g}.json", "w", encoding="utf-8"
            ) as fp:
                json.dump(card_dict, fp)
            # Plot and save prize trend.
            _scatter_prize_time(df_filt, f"{card_name}-grade-{g}")
def _str_unusual_grades(df: pd.DataFrame) -> Union[str, None]:
"""Print the number of unusual grades."""
grades = np.arange(0, 10.5, 0.5).astype(float)
catch_grades = []
for item in df["grade"]:
try:
if float(item) not in grades:
catch_grades.append(item)
except ValueError:
catch_grades.append(item)
if catch_grades == []:
return None
else:
return (
f"– Over all grades, {len(catch_grades)} of {len(df)} cards do not receive"
f" standard grades. These grades are in {set(catch_grades)}"
)
def _get_card_name(card_url: str) -> str: # noqa: D102
c_name = card_url.split("-cards/")[1].split("/values")[0].replace("/", "-")
return c_name
def _str_n_grade(df: pd.DataFrame, grade: str) -> str:
"""Print the number of cards with grade `grade`."""
n_cards = len(df)
n_grade = len(df[(df["grade"]) == grade])
perc = round((n_grade / n_cards) * 100, 2)
return (
f"– The number of cards with grade {grade} is {n_grade} "
f"of {n_cards} cards. That is {perc}%."
)
def _filter_grade_data(df: pd.DataFrame, grade: str) -> pd.DataFrame:
"""Reduce df to date and price data for cards with grade `grade`."""
df = df[(df["grade"]) == grade]
df = df[["date", "prize"]]
return df
def _str_comp_ann_g(df: pd.DataFrame) -> str:
"""Print the average annual prize growth."""
if df.empty is True:
return "There is no prize data for this grade."
else:
df["year"] = pd.DatetimeIndex(df["date"]).year
df["avg_prize_in_year"] = df.groupby("year")["prize"].transform("mean")
# Create pd.DataFrame for annual average prizes.
years = df["year"].drop_duplicates()
prizes = df["avg_prize_in_year"].drop_duplicates()
avg_df = pd.DataFrame({"year": years, "prize": prizes})
# cagr = (1 + R) ** (1 / n) - 1
t_0 = min(avg_df["year"])
t_T = max(avg_df["year"])
p_0 = avg_df[t_0 == avg_df["year"]]["prize"].iloc[0]
p_T = avg_df[t_T == avg_df["year"]]["prize"].iloc[0]
R = p_T / p_0
n = t_T - t_0
if n == 0:
return (
"– The compound annual growth rate cannot be computed because "
"there is no data from multiple years available."
)
else:
cagr = R ** (1 / n) - 1
# Compute compound annual growth rate in percent.
cagr = round(cagr * 100, 2)
return (
f"– The compound annual growth rate from {min(years)} "
f"to {max(years)} is {cagr}%."
)
def _scatter_prize_time(df: pd.DataFrame, title: str) -> Any:
    """Scatter-plot prize over time, save it to output/img and show it.

    Args:
        df: Prize data with "date" and "prize" columns.
        title: Plot title; also used as the output file name.

    Returns:
        The matplotlib figure, or None (implicitly) if `df` is empty.
    """
    if df.empty is True:
        print("No prize data, no plot.")
    else:
        # Convert dates to matplotlib's numeric representation for polyfit.
        x = dates.date2num(df["date"])
        y = df["prize"]
        fig, ax = plt.subplots(figsize=(12, 9))
        ax.scatter(df["date"], y, alpha=0.66)
        # Draw red trend line.
        # NOTE(review): a degree-20 polynomial fit will be ill-conditioned /
        # overfit for small samples — confirm this is intended.
        fit = np.polyfit(x, y, deg=20)
        p = np.poly1d(fit)
        ax.plot(x, p(x), "r--")
        # Rotate date labels.
        fig.autofmt_xdate()
        ax.grid(linestyle="-", color="black", alpha=0.25)
        ax.tick_params(length=6, width=2, labelsize=20)
        ax.set_title(title, fontsize=22)
        ax.set_xlabel(xlabel="Date", size=26, labelpad=14)
        ax.set_ylabel(ylabel="Prize in $", size=26, labelpad=14)
        fig.tight_layout()
        # Ensure the output directory exists before saving.
        if not os.path.exists("output/img"):
            os.makedirs("output/img")
        plt.savefig(f"output/img/{title}.png")
        plt.show()
        return fig
| [
"matplotlib.dates.date2num",
"os.path.exists",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"numpy.polyfit",
"pandas.DatetimeIndex",
"os.makedirs",
"json.dump",
"numpy.poly1d",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((966, 1005), 'pandas.read_csv', 'pd.read_csv', (['"""input/input.csv"""'], {'sep': '""";"""'}), "('input/input.csv', sep=';')\n", (977, 1005), True, 'import pandas as pd\n'), ((872, 905), 'os.path.exists', 'os.path.exists', (['"""input/input.csv"""'], {}), "('input/input.csv')\n", (886, 905), False, 'import os\n'), ((1392, 1457), 'pandas.read_csv', 'pd.read_csv', (['f"""output/data/{card_name}.csv"""'], {'parse_dates': "['date']"}), "(f'output/data/{card_name}.csv', parse_dates=['date'])\n", (1403, 1457), True, 'import pandas as pd\n'), ((4864, 4910), 'pandas.DataFrame', 'pd.DataFrame', (["{'year': years, 'prize': prizes}"], {}), "({'year': years, 'prize': prizes})\n", (4876, 4910), True, 'import pandas as pd\n'), ((5847, 5873), 'matplotlib.dates.date2num', 'dates.date2num', (["df['date']"], {}), "(df['date'])\n", (5861, 5873), True, 'import matplotlib.dates as dates\n'), ((5917, 5946), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12, 9)'}), '(figsize=(12, 9))\n', (5929, 5946), True, 'import matplotlib.pyplot as plt\n'), ((6040, 6064), 'numpy.polyfit', 'np.polyfit', (['x', 'y'], {'deg': '(20)'}), '(x, y, deg=20)\n', (6050, 6064), True, 'import numpy as np\n'), ((6077, 6091), 'numpy.poly1d', 'np.poly1d', (['fit'], {}), '(fit)\n', (6086, 6091), True, 'import numpy as np\n'), ((6584, 6622), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""output/img/{title}.png"""'], {}), "(f'output/img/{title}.png')\n", (6595, 6622), True, 'import matplotlib.pyplot as plt\n'), ((6631, 6641), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6639, 6641), True, 'import matplotlib.pyplot as plt\n'), ((3120, 3143), 'numpy.arange', 'np.arange', (['(0)', '(10.5)', '(0.5)'], {}), '(0, 10.5, 0.5)\n', (3129, 3143), True, 'import numpy as np\n'), ((4571, 4599), 'pandas.DatetimeIndex', 'pd.DatetimeIndex', (["df['date']"], {}), "(df['date'])\n", (4587, 4599), True, 'import pandas as pd\n'), ((6507, 6535), 'os.path.exists', 'os.path.exists', 
(['"""output/img"""'], {}), "('output/img')\n", (6521, 6535), False, 'import os\n'), ((6549, 6574), 'os.makedirs', 'os.makedirs', (['"""output/img"""'], {}), "('output/img')\n", (6560, 6574), False, 'import os\n'), ((2692, 2722), 'os.path.exists', 'os.path.exists', (['"""output/nmbrs"""'], {}), "('output/nmbrs')\n", (2706, 2722), False, 'import os\n'), ((2740, 2767), 'os.makedirs', 'os.makedirs', (['"""output/nmbrs"""'], {}), "('output/nmbrs')\n", (2751, 2767), False, 'import os\n'), ((2862, 2886), 'json.dump', 'json.dump', (['card_dict', 'fp'], {}), '(card_dict, fp)\n', (2871, 2886), False, 'import json\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This function simulates a variable speed pumping system. First, you enter in the pump and system curve information.
The function then calculates a curve relating rpm to flow and rpm to kW.
This is useful for estimating the flow of variable speed pumping systems
when no flow meter is available.
Example 1:
#Step 1: Create pump and system curves
PCurve_Flow = np.array([0, 164.14, 328.28, 492.61, 656.81, 761.88, 821.02, 952.51, 1039.33]) * 0.0864 #Convert MLD to L/s
PCurve_dkPa = np.array([16.03, 15.73, 15.51, 13.87, 11.82, 10.21, 9.26, 6.70, 4.42]) * 9.81 #Convert m to kPa
PCurve_kW = np.array([48.83, 55.83, 71.34, 81.34, 88.57, 91.63, 90.22, 80.55, 66.88])
PCurve_eff = np.array([0, 0.4528, 0.6989, 0.8225, 0.8583, 0.8313, 0.8254, 0.7758, 0.6722])
Sys_curve = np.full((9), dP_meas) #A flat system of 9 kPa
#Step 2.:Define a reasonable RPM operating range. Note that if there is no intersection with the VFD speed, pump curve, or
#system curve, the function below will not work.
RPM_Max = 670
RPM_Min = 515
rpm_range = np.arange(RPM_Min, RPM_Max, 1)
#Step 3. Generate Curves that relate rpm to flow and rpm to kW
Curve_rpm_to_flow, Curve_rpm_to_kW = pumpingSim(PCurve_Flow, PCurve_dkPa, PCurve_kW, Sys_curve, rpm_range)
Curve_flow_to_rpm = Flow_to_RPM(PCurve_MLD, PCurve_dkPa, Sys_curve, rpm_range)
@author: <NAME>
@date: Created on Fri Sep 11 15:41:46 2020
@license: MIT
"""
# Program to interpolate pump curve and system curve
from __future__ import division
import numpy as np
# The inperolation function is copied from:
#https://coderedirect.com/questions/501614/find-the-intersection-of-two-curves-given-by-x-y-data-with-high-precision-in
def interpolated_intercept(x, y1, y2):
    """Find the intercept of two curves, given by the same x data.

    Locates each sign change of (y1 - y2) and intersects the straight-line
    segments of the two curves across that interval.

    Returns: (xc, yc) arrays of crossing coordinates.
    """

    def _line_coeffs(p, q):
        # Coefficients (A, B, C) of the line A*x + B*y = C through p and q.
        return (p[1] - q[1]), (q[0] - p[0]), -(p[0] * q[1] - q[0] * p[1])

    def _crossing(p_a, p_b, p_c, p_d):
        # Intersection of line(p_a, p_b) and line(p_c, p_d) by Cramer's rule.
        a1, b1, c1 = _line_coeffs(p_a, p_b)
        a2, b2, c2 = _line_coeffs(p_c, p_d)
        det = a1 * b2 - b1 * a2
        return (c1 * b2 - b1 * c2) / det, (a1 * c2 - c1 * a2) / det

    # Index just before each sign change of (y1 - y2).
    idx = np.argwhere(np.diff(np.sign(y1 - y2)) != 0)
    xc, yc = _crossing(
        (x[idx], y1[idx]),
        (x[idx + 1], y1[idx + 1]),
        (x[idx], y2[idx]),
        (x[idx + 1], y2[idx + 1]),
    )
    return xc, yc
def pumpingSim(PCurve_Flow, PCurve_dkPa, PCurve_kW, Sys_curve, RPM_range):
    '''
    Simulate a variable speed pumping system with a static lift head curve.

    First, you enter in the pump and system curve information. The function
    then calculates a curve relating rpm to flow and rpm to kW. This is
    useful for estimating the flow of variable speed pumping systems when
    no flow meter is available.

    Args:
        PCurve_Flow -> Flow data points from pump curve
        PCurve_dkPa -> Diff. Pressure data points from pump curve
        PCurve_kW -> Power data points from pump curve
        Sys_curve -> Data points from system curve
        RPM_range -> RPM at which you want to calculate kW
    Returns:
        Polynomial curve of rpm and flow data (Curve_rpm_to_flow)
        Polynomial curve of rpm and kW data (Curve_rpm_to_kW)
    '''
    Max_RPM = np.max(RPM_range)
    vfd_range = RPM_range / Max_RPM
    # Flow as a function of pump RPM: scale the pump curve by the affinity
    # laws (flow ~ speed, head ~ speed^2) and intersect with the system
    # curve. Accumulate in a list instead of repeated np.append (O(n^2)).
    flows = []
    for vfd_spd in vfd_range:
        flow, _ = interpolated_intercept(
            PCurve_Flow * vfd_spd, PCurve_dkPa * vfd_spd ** 2, Sys_curve
        )
        flows.append(np.ravel(flow))
    flow_arr = np.concatenate(flows)
    Curve_rpm_to_flow = np.poly1d(np.polyfit(vfd_range * Max_RPM, flow_arr, 2))
    # Power as a function of pump RPM (power ~ speed^3 by the affinity laws).
    kW_list = []
    for vfd_spd in vfd_range:
        # Establish a new pump power curve at this VFD speed.
        Curve_Flow_to_kW = np.poly1d(
            np.polyfit(PCurve_Flow * vfd_spd, PCurve_kW * vfd_spd ** 3, 3)
        )
        flow = Curve_rpm_to_flow(vfd_spd * Max_RPM)
        kW_list.append(Curve_Flow_to_kW(flow))
    Curve_rpm_to_kW = np.poly1d(np.polyfit(vfd_range * Max_RPM, np.array(kW_list), 3))
    return Curve_rpm_to_flow, Curve_rpm_to_kW
def Flow_to_RPM(PCurve_Flow, PCurve_dkPa, Sys_curve, RPM_range):
    '''Build a polynomial mapping flow to pump RPM.

    Works like pumpingSim but fits the inverse relation: a curve that
    returns the pump RPM required to achieve a given flow.

    Args:
        PCurve_Flow -> Flow data points from pump curve
        PCurve_dkPa -> Diff. Pressure data points from pump curve
        Sys_curve -> Data points from system curve
        RPM_range -> RPM operating range of the pump
    Returns:
        Polynomial curve of flow to rpm data (Curve_flow_to_rpm)
    '''
    Max_RPM = np.max(RPM_range)
    vfd_range = RPM_range / Max_RPM
    # Flow at each RPM via the affinity-scaled pump curve / system curve
    # intersection. List accumulation avoids repeated O(n^2) np.append.
    flows = []
    for vfd_spd in vfd_range:
        flow, _ = interpolated_intercept(
            PCurve_Flow * vfd_spd, PCurve_dkPa * vfd_spd ** 2, Sys_curve
        )
        flows.append(np.ravel(flow))
    flow_arr = np.concatenate(flows)
    return np.poly1d(np.polyfit(flow_arr, RPM_range, 2))
| [
"numpy.polyfit",
"numpy.max",
"numpy.append",
"numpy.array",
"numpy.sign"
] | [((4096, 4113), 'numpy.max', 'np.max', (['RPM_range'], {}), '(RPM_range)\n', (4102, 4113), True, 'import numpy as np\n'), ((4219, 4231), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4227, 4231), True, 'import numpy as np\n'), ((4559, 4571), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4567, 4571), True, 'import numpy as np\n'), ((5251, 5268), 'numpy.max', 'np.max', (['RPM_range'], {}), '(RPM_range)\n', (5257, 5268), True, 'import numpy as np\n'), ((5374, 5386), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5382, 5386), True, 'import numpy as np\n'), ((4386, 4411), 'numpy.append', 'np.append', (['flow_arr', 'flow'], {}), '(flow_arr, flow)\n', (4395, 4411), True, 'import numpy as np\n'), ((4448, 4492), 'numpy.polyfit', 'np.polyfit', (['(vfd_range * Max_RPM)', 'flow_arr', '(2)'], {}), '(vfd_range * Max_RPM, flow_arr, 2)\n', (4458, 4492), True, 'import numpy as np\n'), ((4883, 4904), 'numpy.append', 'np.append', (['kW_arr', 'kW'], {}), '(kW_arr, kW)\n', (4892, 4904), True, 'import numpy as np\n'), ((4948, 4990), 'numpy.polyfit', 'np.polyfit', (['(vfd_range * Max_RPM)', 'kW_arr', '(3)'], {}), '(vfd_range * Max_RPM, kW_arr, 3)\n', (4958, 4990), True, 'import numpy as np\n'), ((5541, 5566), 'numpy.append', 'np.append', (['flow_arr', 'flow'], {}), '(flow_arr, flow)\n', (5550, 5566), True, 'import numpy as np\n'), ((5604, 5638), 'numpy.polyfit', 'np.polyfit', (['flow_arr', 'RPM_range', '(2)'], {}), '(flow_arr, RPM_range, 2)\n', (5614, 5638), True, 'import numpy as np\n'), ((4649, 4711), 'numpy.polyfit', 'np.polyfit', (['(PCurve_Flow * vfd_spd)', '(PCurve_kW * vfd_spd ** 3)', '(3)'], {}), '(PCurve_Flow * vfd_spd, PCurve_kW * vfd_spd ** 3, 3)\n', (4659, 4711), True, 'import numpy as np\n'), ((2989, 3005), 'numpy.sign', 'np.sign', (['(y1 - y2)'], {}), '(y1 - y2)\n', (2996, 3005), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
#
# Nineth exercice: non-Cartesian MR image reconstruction
# =============================================
# In this tutorial we will reconstruct an MRI image from radial undersampled kspace measurements. Let us denote $\Omega$ the undersampling mask, the under-sampled Fourier transform now reads $F_{\Omega}$.
#
# Import neuroimaging data
# --------------------------------------
# We use the toy datasets available in pysap, more specifically a 2D brain slice and the radial under-sampling scheme. We compare zero-order image reconstruction with Compressed sensing reconstructions (analysis vs synthesis formulation) using the FISTA algorithm for the synthesis formulation and the Condat-Vu algorithm for the analysis formulation.
#
# We remind that the synthesis formulation reads (minimization in the sparsifying domain):
# $$
# \widehat{z} = \text{arg}\,\min_{z\in C^n_\Psi} \frac{1}{2} \|y - F_\Omega \Psi^*z \|_2^2 + \lambda \|z\|_1
# $$
# and the image solution is given by $\widehat{x} = \Psi^*\widehat{z}$. For an orthonormal wavelet transform,
# we have $n_\Psi=n$ while for a frame we may have $n_\Psi > n$.
#
# while the analysis formulation consists in minimizing the following cost function (min. in the image domain):
# $$
# \widehat{x} = \text{arg}\,\min_{x\in C^n} \frac{1}{2} \|y - F_\Omega x\|_2^2 + \lambda \|\Psi x\|_1 \,.
# $$
#
# - Author: <NAME> & <NAME>
# - Date: 01/06/2021
# - Target: ATSI MSc students, Paris-Saclay University
# In[9]:
# Package import
#from mri.numerics.fourier import NFFT
#from mri.numerics.reconstruct import sparse_rec_fista
#from mri.numerics.utils import generate_operators
#from mri.numerics.utils import convert_locations_to_mask
#from mri.parallel_mri.extract_sensitivity_maps import \
# gridded_inverse_fourier_transform_nd
from mri.operators import NonCartesianFFT, WaveletN, WaveletUD2
from mri.operators.utils import convert_locations_to_mask, gridded_inverse_fourier_transform_nd
from mri.reconstructors import SingleChannelReconstructor
import pysap
from pysap.data import get_sample_data
# Third party import
from modopt.math.metrics import ssim
from modopt.opt.linear import Identity
from modopt.opt.proximity import SparseThreshold
import numpy as np
import matplotlib.pyplot as plt
# Loading input data
# ---------------------------
# In[16]:
# Load the bundled 2D brain slice and the radial k-space sample locations.
image = get_sample_data('2d-mri')
radial_mask = get_sample_data("mri-radial-samples")
kspace_loc = radial_mask.data
# Rasterize the sample locations onto the image grid for visualization.
mask = pysap.Image(data=convert_locations_to_mask(kspace_loc, image.shape))
# Show the reference image and the sampling mask side by side.
plt.figure()
plt.imshow(image, cmap='gray')
plt.figure()
plt.imshow(mask, cmap='gray')
plt.show()
# Generate the kspace
# -------------------
#
# From the 2D brain slice and the acquisition mask, we retrospectively
# undersample the k-space using a cartesian acquisition mask
# We then reconstruct the zero order solution as a baseline
# Get the locations of the kspace samples
# In[7]:
#fourier_op = NFFT(samples=kspace_loc, shape=image.shape)
#kspace_obs = fourier_op.op(image.data)
# Simulate the acquisition: apply the non-Cartesian Fourier operator
# (CPU implementation) at the radial sample locations.
fourier_op = NonCartesianFFT(
    samples=kspace_loc, shape=image.shape, implementation='cpu'
)
kspace_obs = fourier_op.op(image.data)

# Zero-order (baseline) reconstruction: regrid the non-Cartesian samples
# onto a Cartesian grid and invert.
grid_space = np.linspace(-0.5, 0.5, num=image.shape[0])
grid2D = np.meshgrid(grid_space, grid_space)
grid_soln = gridded_inverse_fourier_transform_nd(
    kspace_loc, kspace_obs, tuple(grid2D), 'linear'
)
plt.imshow(np.abs(grid_soln), cmap='gray')
# Quantify baseline quality with SSIM against the reference image.
base_ssim = ssim(grid_soln, image)
plt.title('Gridded Solution\nSSIM = ' + str(base_ssim))
plt.show()
# FISTA optimization
# ------------------
#
# We now want to refine the zero order solution using a FISTA optimization.
# The cost function is set to Proximity Cost + Gradient Cost
# In[11]:
# Sparsifying transform (Symlet-8 wavelet, 4 decomposition scales) and
# soft-thresholding proximity operator for the l1 penalty.
linear_op = WaveletN(wavelet_name="sym8", nb_scales=4)
regularizer_op = SparseThreshold(Identity(), 6 * 1e-7, thresh_type="soft")
# Assemble the synthesis-formulation reconstructor from the Fourier,
# wavelet and proximity operators.
reconstructor = SingleChannelReconstructor(
    fourier_op=fourier_op,
    linear_op=linear_op,
    regularizer_op=regularizer_op,
    gradient_formulation='synthesis',
    verbose=1,
)
# Synthesis formulation: FISTA optimization
# ------------------------------------------------------------
#
# We now want to refine the zero order solution using a FISTA optimization.
# The cost function is set to Proximity Cost + Gradient Cost
# In[ ]:
# Solve the synthesis formulation with FISTA.
x_final, costs, metrics = reconstructor.reconstruct(
    kspace_data=kspace_obs,
    optimization_alg='fista',
    num_iterations=200,
)
image_rec = pysap.Image(data=np.abs(x_final))
# Compute the SSIM once (it was previously computed twice) and display.
recon_ssim = ssim(image_rec, image)
plt.imshow(np.abs(image_rec), cmap='gray')
plt.title('FISTA Reconstruction\nSSIM = ' + str(recon_ssim))
plt.show()
# Analysis formulation: Condat-Vu reconstruction
# ---------------------------------------------------------------------
# In[14]:
# Undecimated wavelet transform used as the analysis sparsity operator.
linear_op = WaveletUD2(
    wavelet_id=24,
    nb_scale=4,
)
# Assemble the analysis-formulation reconstructor (same data-fidelity and
# proximity operators as the synthesis case).
reconstructor = SingleChannelReconstructor(
    fourier_op=fourier_op,
    linear_op=linear_op,
    regularizer_op=regularizer_op,
    gradient_formulation='analysis',
    verbose=1,
)
# Solve the analysis formulation with the Condat-Vu primal-dual algorithm.
x_final, costs, metrics = reconstructor.reconstruct(
    kspace_data=kspace_obs,
    optimization_alg='condatvu',
    num_iterations=200,
)
image_rec = pysap.Image(data=np.abs(x_final))
plt.imshow(np.abs(image_rec), cmap='gray')
# SSIM of the Condat-Vu reconstruction against the reference.
recon_ssim = ssim(image_rec, image)
plt.title('Condat-Vu Reconstruction\nSSIM = ' + str(recon_ssim))
plt.show()
| [
"matplotlib.pyplot.imshow",
"numpy.abs",
"mri.operators.WaveletUD2",
"mri.operators.NonCartesianFFT",
"mri.reconstructors.SingleChannelReconstructor",
"mri.operators.WaveletN",
"pysap.data.get_sample_data",
"modopt.math.metrics.ssim",
"matplotlib.pyplot.figure",
"numpy.linspace",
"mri.operators.... | [((2381, 2406), 'pysap.data.get_sample_data', 'get_sample_data', (['"""2d-mri"""'], {}), "('2d-mri')\n", (2396, 2406), False, 'from pysap.data import get_sample_data\n'), ((2421, 2458), 'pysap.data.get_sample_data', 'get_sample_data', (['"""mri-radial-samples"""'], {}), "('mri-radial-samples')\n", (2436, 2458), False, 'from pysap.data import get_sample_data\n'), ((2565, 2577), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2575, 2577), True, 'import matplotlib.pyplot as plt\n'), ((2578, 2608), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (2588, 2608), True, 'import matplotlib.pyplot as plt\n'), ((2609, 2621), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2619, 2621), True, 'import matplotlib.pyplot as plt\n'), ((2622, 2651), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mask'], {'cmap': '"""gray"""'}), "(mask, cmap='gray')\n", (2632, 2651), True, 'import matplotlib.pyplot as plt\n'), ((2652, 2662), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2660, 2662), True, 'import matplotlib.pyplot as plt\n'), ((3070, 3146), 'mri.operators.NonCartesianFFT', 'NonCartesianFFT', ([], {'samples': 'kspace_loc', 'shape': 'image.shape', 'implementation': '"""cpu"""'}), "(samples=kspace_loc, shape=image.shape, implementation='cpu')\n", (3085, 3146), False, 'from mri.operators import NonCartesianFFT, WaveletN, WaveletUD2\n'), ((3261, 3303), 'numpy.linspace', 'np.linspace', (['(-0.5)', '(0.5)'], {'num': 'image.shape[0]'}), '(-0.5, 0.5, num=image.shape[0])\n', (3272, 3303), True, 'import numpy as np\n'), ((3313, 3348), 'numpy.meshgrid', 'np.meshgrid', (['grid_space', 'grid_space'], {}), '(grid_space, grid_space)\n', (3324, 3348), True, 'import numpy as np\n'), ((3568, 3590), 'modopt.math.metrics.ssim', 'ssim', (['grid_soln', 'image'], {}), '(grid_soln, image)\n', (3572, 3590), False, 'from modopt.math.metrics import ssim\n'), ((3647, 3657), 'matplotlib.pyplot.show', 
'plt.show', ([], {}), '()\n', (3655, 3657), True, 'import matplotlib.pyplot as plt\n'), ((3867, 3909), 'mri.operators.WaveletN', 'WaveletN', ([], {'wavelet_name': '"""sym8"""', 'nb_scales': '(4)'}), "(wavelet_name='sym8', nb_scales=4)\n", (3875, 3909), False, 'from mri.operators import NonCartesianFFT, WaveletN, WaveletUD2\n'), ((4039, 4189), 'mri.reconstructors.SingleChannelReconstructor', 'SingleChannelReconstructor', ([], {'fourier_op': 'fourier_op', 'linear_op': 'linear_op', 'regularizer_op': 'regularizer_op', 'gradient_formulation': '"""synthesis"""', 'verbose': '(1)'}), "(fourier_op=fourier_op, linear_op=linear_op,\n regularizer_op=regularizer_op, gradient_formulation='synthesis', verbose=1)\n", (4065, 4189), False, 'from mri.reconstructors import SingleChannelReconstructor\n'), ((4666, 4688), 'modopt.math.metrics.ssim', 'ssim', (['image_rec', 'image'], {}), '(image_rec, image)\n', (4670, 4688), False, 'from modopt.math.metrics import ssim\n'), ((4745, 4767), 'modopt.math.metrics.ssim', 'ssim', (['image_rec', 'image'], {}), '(image_rec, image)\n', (4749, 4767), False, 'from modopt.math.metrics import ssim\n'), ((4829, 4839), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4837, 4839), True, 'import matplotlib.pyplot as plt\n'), ((4989, 5026), 'mri.operators.WaveletUD2', 'WaveletUD2', ([], {'wavelet_id': '(24)', 'nb_scale': '(4)'}), '(wavelet_id=24, nb_scale=4)\n', (4999, 5026), False, 'from mri.operators import NonCartesianFFT, WaveletN, WaveletUD2\n'), ((5068, 5217), 'mri.reconstructors.SingleChannelReconstructor', 'SingleChannelReconstructor', ([], {'fourier_op': 'fourier_op', 'linear_op': 'linear_op', 'regularizer_op': 'regularizer_op', 'gradient_formulation': '"""analysis"""', 'verbose': '(1)'}), "(fourier_op=fourier_op, linear_op=linear_op,\n regularizer_op=regularizer_op, gradient_formulation='analysis', verbose=1)\n", (5094, 5217), False, 'from mri.reconstructors import SingleChannelReconstructor\n'), ((5492, 5514), 'modopt.math.metrics.ssim', 
'ssim', (['image_rec', 'image'], {}), '(image_rec, image)\n', (5496, 5514), False, 'from modopt.math.metrics import ssim\n'), ((5580, 5590), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5588, 5590), True, 'import matplotlib.pyplot as plt\n'), ((3507, 3524), 'numpy.abs', 'np.abs', (['grid_soln'], {}), '(grid_soln)\n', (3513, 3524), True, 'import numpy as np\n'), ((3943, 3953), 'modopt.opt.linear.Identity', 'Identity', ([], {}), '()\n', (3951, 3953), False, 'from modopt.opt.linear import Identity\n'), ((4700, 4717), 'numpy.abs', 'np.abs', (['image_rec'], {}), '(image_rec)\n', (4706, 4717), True, 'import numpy as np\n'), ((5447, 5464), 'numpy.abs', 'np.abs', (['image_rec'], {}), '(image_rec)\n', (5453, 5464), True, 'import numpy as np\n'), ((2513, 2563), 'mri.operators.utils.convert_locations_to_mask', 'convert_locations_to_mask', (['kspace_loc', 'image.shape'], {}), '(kspace_loc, image.shape)\n', (2538, 2563), False, 'from mri.operators.utils import convert_locations_to_mask, gridded_inverse_fourier_transform_nd\n'), ((4636, 4651), 'numpy.abs', 'np.abs', (['x_final'], {}), '(x_final)\n', (4642, 4651), True, 'import numpy as np\n'), ((5419, 5434), 'numpy.abs', 'np.abs', (['x_final'], {}), '(x_final)\n', (5425, 5434), True, 'import numpy as np\n')] |
import numpy as np
from numpy.testing import assert_allclose
from qiskit.quantum_info import random_density_matrix
from mps_tomo.uncert import R_hat, iteration, pauli_proj
from mps_tomo.utils import fidelity, pauli_group
def test_pauli_proj():
NUM_QUBITS = 3
SEED = 7777
TOL = 100 * np.spacing(np.complex128(1.0).real)
dens_mat = random_density_matrix(2 ** NUM_QUBITS, seed=SEED)
reconstructed = sum(pauli_proj(dens_mat, p) for p in pauli_group(NUM_QUBITS))
assert_allclose(reconstructed, dens_mat, rtol=0.0, atol=TOL)
def test_R_hat():
TOL = 100 * np.spacing(np.complex128(1.0).real)
SEED = 7777
np.random.seed(SEED)
sigmas = [np.random.randn(4, 4) + 1j * np.random.randn(4, 4) for i in range(3)]
example = (
np.kron(sigmas[0], np.eye(4))
+ np.kron(np.kron(np.eye(2), sigmas[1]), np.eye(2))
+ np.kron(np.eye(4), sigmas[2])
)
assert_allclose(R_hat(sigmas, 4), example, rtol=0.0, atol=TOL)
def test_iteration_pure_W():
NUM_QUBITS = 5
SEED = 7777
TOL = 0.01
K = 2
np.random.seed(SEED)
dim = 2 ** NUM_QUBITS
state = np.zeros(dim, dtype=np.complex128)
for i in range(NUM_QUBITS):
state[1 << i] = 1
state /= np.linalg.norm(state)
dens_mat = np.outer(state, state.conj())
def reduce(qubit):
left_size = 2 ** qubit
reduced_size = 2 ** K
right_size = np.size(dens_mat, axis=0) // (left_size * reduced_size)
reshaped = np.reshape(
dens_mat,
(left_size, reduced_size, right_size, left_size, reduced_size, right_size),
)
return np.einsum("aibajb->ij", reshaped)
sigmas = (reduce(q) for q in range(NUM_QUBITS - K + 1))
y_vec = iteration(K, sigmas, NUM_QUBITS, max_its=100, delta=0.1)
overlap = np.abs(state.conj() @ y_vec) ** 2
assert_allclose(overlap, 1, rtol=0.0, atol=TOL)
def test_iteration_depolarising_W():
NUM_QUBITS = 5
SEED = 7777
TOL = 0.15
K = 2
DEPOLARISATION = 0.1
np.random.seed(SEED)
dim = 2 ** NUM_QUBITS
state = np.zeros(dim, dtype=np.complex128)
for i in range(NUM_QUBITS):
state[1 << i] = 1
state /= np.linalg.norm(state)
dens_mat = np.outer(state, state.conj())
dens_mat = (
DEPOLARISATION * (1 / dim) * np.eye(dim) + (1 - DEPOLARISATION) * dens_mat
)
purity = np.trace(dens_mat @ dens_mat)
def reduce(qubit):
left_size = 2 ** qubit
reduced_size = 2 ** K
right_size = np.size(dens_mat, axis=0) // (left_size * reduced_size)
reshaped = np.reshape(
dens_mat,
(left_size, reduced_size, right_size, left_size, reduced_size, right_size),
)
return np.einsum("aibajb->ij", reshaped)
sigmas = (reduce(q) for q in range(NUM_QUBITS - K + 1))
y_vec = iteration(K, sigmas, NUM_QUBITS, max_its=100, delta=0.1)
y_dens = np.outer(y_vec, y_vec.conj())
overlap = np.abs(state.conj() @ y_vec) ** 2
fid = fidelity(y_dens, dens_mat)
assert_allclose(overlap, 1, rtol=0.0, atol=TOL)
| [
"mps_tomo.utils.pauli_group",
"numpy.trace",
"qiskit.quantum_info.random_density_matrix",
"numpy.eye",
"numpy.reshape",
"numpy.testing.assert_allclose",
"mps_tomo.uncert.pauli_proj",
"numpy.size",
"numpy.complex128",
"numpy.zeros",
"mps_tomo.uncert.iteration",
"numpy.einsum",
"numpy.random.s... | [((350, 399), 'qiskit.quantum_info.random_density_matrix', 'random_density_matrix', (['(2 ** NUM_QUBITS)'], {'seed': 'SEED'}), '(2 ** NUM_QUBITS, seed=SEED)\n', (371, 399), False, 'from qiskit.quantum_info import random_density_matrix\n'), ((487, 547), 'numpy.testing.assert_allclose', 'assert_allclose', (['reconstructed', 'dens_mat'], {'rtol': '(0.0)', 'atol': 'TOL'}), '(reconstructed, dens_mat, rtol=0.0, atol=TOL)\n', (502, 547), False, 'from numpy.testing import assert_allclose\n'), ((641, 661), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (655, 661), True, 'import numpy as np\n'), ((1072, 1092), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (1086, 1092), True, 'import numpy as np\n'), ((1132, 1166), 'numpy.zeros', 'np.zeros', (['dim'], {'dtype': 'np.complex128'}), '(dim, dtype=np.complex128)\n', (1140, 1166), True, 'import numpy as np\n'), ((1238, 1259), 'numpy.linalg.norm', 'np.linalg.norm', (['state'], {}), '(state)\n', (1252, 1259), True, 'import numpy as np\n'), ((1744, 1800), 'mps_tomo.uncert.iteration', 'iteration', (['K', 'sigmas', 'NUM_QUBITS'], {'max_its': '(100)', 'delta': '(0.1)'}), '(K, sigmas, NUM_QUBITS, max_its=100, delta=0.1)\n', (1753, 1800), False, 'from mps_tomo.uncert import R_hat, iteration, pauli_proj\n'), ((1854, 1901), 'numpy.testing.assert_allclose', 'assert_allclose', (['overlap', '(1)'], {'rtol': '(0.0)', 'atol': 'TOL'}), '(overlap, 1, rtol=0.0, atol=TOL)\n', (1869, 1901), False, 'from numpy.testing import assert_allclose\n'), ((2031, 2051), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (2045, 2051), True, 'import numpy as np\n'), ((2091, 2125), 'numpy.zeros', 'np.zeros', (['dim'], {'dtype': 'np.complex128'}), '(dim, dtype=np.complex128)\n', (2099, 2125), True, 'import numpy as np\n'), ((2197, 2218), 'numpy.linalg.norm', 'np.linalg.norm', (['state'], {}), '(state)\n', (2211, 2218), True, 'import numpy as np\n'), ((2384, 2413), 'numpy.trace', 
'np.trace', (['(dens_mat @ dens_mat)'], {}), '(dens_mat @ dens_mat)\n', (2392, 2413), True, 'import numpy as np\n'), ((2852, 2908), 'mps_tomo.uncert.iteration', 'iteration', (['K', 'sigmas', 'NUM_QUBITS'], {'max_its': '(100)', 'delta': '(0.1)'}), '(K, sigmas, NUM_QUBITS, max_its=100, delta=0.1)\n', (2861, 2908), False, 'from mps_tomo.uncert import R_hat, iteration, pauli_proj\n'), ((3011, 3037), 'mps_tomo.utils.fidelity', 'fidelity', (['y_dens', 'dens_mat'], {}), '(y_dens, dens_mat)\n', (3019, 3037), False, 'from mps_tomo.utils import fidelity, pauli_group\n'), ((3043, 3090), 'numpy.testing.assert_allclose', 'assert_allclose', (['overlap', '(1)'], {'rtol': '(0.0)', 'atol': 'TOL'}), '(overlap, 1, rtol=0.0, atol=TOL)\n', (3058, 3090), False, 'from numpy.testing import assert_allclose\n'), ((929, 945), 'mps_tomo.uncert.R_hat', 'R_hat', (['sigmas', '(4)'], {}), '(sigmas, 4)\n', (934, 945), False, 'from mps_tomo.uncert import R_hat, iteration, pauli_proj\n'), ((1488, 1588), 'numpy.reshape', 'np.reshape', (['dens_mat', '(left_size, reduced_size, right_size, left_size, reduced_size, right_size)'], {}), '(dens_mat, (left_size, reduced_size, right_size, left_size,\n reduced_size, right_size))\n', (1498, 1588), True, 'import numpy as np\n'), ((1636, 1669), 'numpy.einsum', 'np.einsum', (['"""aibajb->ij"""', 'reshaped'], {}), "('aibajb->ij', reshaped)\n", (1645, 1669), True, 'import numpy as np\n'), ((2596, 2696), 'numpy.reshape', 'np.reshape', (['dens_mat', '(left_size, reduced_size, right_size, left_size, reduced_size, right_size)'], {}), '(dens_mat, (left_size, reduced_size, right_size, left_size,\n reduced_size, right_size))\n', (2606, 2696), True, 'import numpy as np\n'), ((2744, 2777), 'numpy.einsum', 'np.einsum', (['"""aibajb->ij"""', 'reshaped'], {}), "('aibajb->ij', reshaped)\n", (2753, 2777), True, 'import numpy as np\n'), ((424, 447), 'mps_tomo.uncert.pauli_proj', 'pauli_proj', (['dens_mat', 'p'], {}), '(dens_mat, p)\n', (434, 447), False, 'from mps_tomo.uncert 
import R_hat, iteration, pauli_proj\n'), ((677, 698), 'numpy.random.randn', 'np.random.randn', (['(4)', '(4)'], {}), '(4, 4)\n', (692, 698), True, 'import numpy as np\n'), ((880, 889), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (886, 889), True, 'import numpy as np\n'), ((1412, 1437), 'numpy.size', 'np.size', (['dens_mat'], {'axis': '(0)'}), '(dens_mat, axis=0)\n', (1419, 1437), True, 'import numpy as np\n'), ((2319, 2330), 'numpy.eye', 'np.eye', (['dim'], {}), '(dim)\n', (2325, 2330), True, 'import numpy as np\n'), ((2520, 2545), 'numpy.size', 'np.size', (['dens_mat'], {'axis': '(0)'}), '(dens_mat, axis=0)\n', (2527, 2545), True, 'import numpy as np\n'), ((309, 327), 'numpy.complex128', 'np.complex128', (['(1.0)'], {}), '(1.0)\n', (322, 327), True, 'import numpy as np\n'), ((457, 480), 'mps_tomo.utils.pauli_group', 'pauli_group', (['NUM_QUBITS'], {}), '(NUM_QUBITS)\n', (468, 480), False, 'from mps_tomo.utils import fidelity, pauli_group\n'), ((595, 613), 'numpy.complex128', 'np.complex128', (['(1.0)'], {}), '(1.0)\n', (608, 613), True, 'import numpy as np\n'), ((706, 727), 'numpy.random.randn', 'np.random.randn', (['(4)', '(4)'], {}), '(4, 4)\n', (721, 727), True, 'import numpy as np\n'), ((791, 800), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (797, 800), True, 'import numpy as np\n'), ((851, 860), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (857, 860), True, 'import numpy as np\n'), ((828, 837), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (834, 837), True, 'import numpy as np\n')] |
#Developed by <NAME>
#Github link: https://github.com/Hemraj183
import cv2
import numpy as np
import face_recognition
import os
import pyttsx3
path = 'Images'
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
print(voices[0].id)
speed = 150
engine.setProperty('rate', speed)
def speak(audio):
engine.say(audio)
engine.runAndWait()
# from PIL import ImageGrab
images = []
classNames = []
List = os.listdir(path)
# print(List)
for cl in List:
curImg = cv2.imread(f'{path}/{cl}')
images.append(curImg)
classNames.append(os.path.splitext(cl)[0])
# print(classNames)
def findEncodings(images):
encodeList = []
for img in images:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
encode = face_recognition.face_encodings(img)[0]
encodeList.append(encode)
return encodeList
knownEncodeList = findEncodings(images)
#print(knownEncodeList)
print("Encoding Complete")
cap = cv2.VideoCapture(0) # 'https://192.168.254.7:8080/video' for ip camera
while True:
success, img = cap.read()
imgS = cv2.resize(img, (0, 0), None, 0.25, 0.25)
imgS = cv2.cvtColor(imgS, cv2.COLOR_BGR2RGB)
facesCurFrame = face_recognition.face_locations(imgS)
encodesCurFrame = face_recognition.face_encodings(imgS, facesCurFrame)
for encodeFace, faceLoc in zip(encodesCurFrame, facesCurFrame):
matches = face_recognition.compare_faces(knownEncodeList, encodeFace)
faceDis = face_recognition.face_distance(knownEncodeList, encodeFace)
# print(faceDis)
matchIndex = np.argmin(faceDis)
if matches[matchIndex]:
name = classNames[matchIndex].title()
speak("Hello" + name + "Welcome to Itahari International College")
print("Hello" + name + "Welcome to Itahari International College")
# print(name)
y1, x2, y2, x1 = faceLoc
y1, x2, y2, x1 = y1 * 4, x2 * 4, y2 * 4, x1 * 4
cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.rectangle(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)
cv2.putText(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 0.6, (255, 255, 255), 1)
#cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
#cv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cv2.imshow('IIC AI CLUB', img)
key = cv2.waitKey(1) & 0xFF
if key == ord("b"):
break
# do a bit of cleanup
cap.release()
cv2.destroyAllWindows()
| [
"cv2.rectangle",
"face_recognition.face_locations",
"os.listdir",
"pyttsx3.init",
"os.path.splitext",
"cv2.imshow",
"cv2.putText",
"face_recognition.face_distance",
"cv2.waitKey",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.cvtColor",
"face_recognition.face_encodings",
"face_recognit... | [((180, 201), 'pyttsx3.init', 'pyttsx3.init', (['"""sapi5"""'], {}), "('sapi5')\n", (192, 201), False, 'import pyttsx3\n'), ((495, 511), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (505, 511), False, 'import os\n'), ((1039, 1058), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1055, 1058), False, 'import cv2\n'), ((2616, 2639), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2637, 2639), False, 'import cv2\n'), ((560, 586), 'cv2.imread', 'cv2.imread', (['f"""{path}/{cl}"""'], {}), "(f'{path}/{cl}')\n", (570, 586), False, 'import cv2\n'), ((1169, 1210), 'cv2.resize', 'cv2.resize', (['img', '(0, 0)', 'None', '(0.25)', '(0.25)'], {}), '(img, (0, 0), None, 0.25, 0.25)\n', (1179, 1210), False, 'import cv2\n'), ((1223, 1260), 'cv2.cvtColor', 'cv2.cvtColor', (['imgS', 'cv2.COLOR_BGR2RGB'], {}), '(imgS, cv2.COLOR_BGR2RGB)\n', (1235, 1260), False, 'import cv2\n'), ((1282, 1319), 'face_recognition.face_locations', 'face_recognition.face_locations', (['imgS'], {}), '(imgS)\n', (1313, 1319), False, 'import face_recognition\n'), ((1343, 1395), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['imgS', 'facesCurFrame'], {}), '(imgS, facesCurFrame)\n', (1374, 1395), False, 'import face_recognition\n'), ((2469, 2499), 'cv2.imshow', 'cv2.imshow', (['"""IIC AI CLUB"""', 'img'], {}), "('IIC AI CLUB', img)\n", (2479, 2499), False, 'import cv2\n'), ((779, 815), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (791, 815), False, 'import cv2\n'), ((1484, 1543), 'face_recognition.compare_faces', 'face_recognition.compare_faces', (['knownEncodeList', 'encodeFace'], {}), '(knownEncodeList, encodeFace)\n', (1514, 1543), False, 'import face_recognition\n'), ((1563, 1622), 'face_recognition.face_distance', 'face_recognition.face_distance', (['knownEncodeList', 'encodeFace'], {}), '(knownEncodeList, encodeFace)\n', (1593, 1622), False, 'import 
face_recognition\n'), ((1673, 1691), 'numpy.argmin', 'np.argmin', (['faceDis'], {}), '(faceDis)\n', (1682, 1691), True, 'import numpy as np\n'), ((2511, 2525), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2522, 2525), False, 'import cv2\n'), ((637, 657), 'os.path.splitext', 'os.path.splitext', (['cl'], {}), '(cl)\n', (653, 657), False, 'import os\n'), ((834, 870), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['img'], {}), '(img)\n', (865, 870), False, 'import face_recognition\n'), ((2077, 2131), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y1)', '(x2, y2)', '(0, 255, 0)', '(2)'], {}), '(img, (x1, y1), (x2, y2), (0, 255, 0), 2)\n', (2090, 2131), False, 'import cv2\n'), ((2145, 2213), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x1, y2 - 35)', '(x2, y2)', '(0, 255, 0)', 'cv2.FILLED'], {}), '(img, (x1, y2 - 35), (x2, y2), (0, 255, 0), cv2.FILLED)\n', (2158, 2213), False, 'import cv2\n'), ((2227, 2323), 'cv2.putText', 'cv2.putText', (['img', 'name', '(x1 + 6, y2 - 6)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.6)', '(255, 255, 255)', '(1)'], {}), '(img, name, (x1 + 6, y2 - 6), cv2.FONT_HERSHEY_COMPLEX, 0.6, (\n 255, 255, 255), 1)\n', (2238, 2323), False, 'import cv2\n')] |
import numpy as np
from trimesh.voxel.ops import points_to_marching_cubes
from trimesh.voxel.ops import points_to_indices
class MeshOccupancy:
"""
Mesh which is backed by an occupancy function.
"""
def __init__(self, occupancy_function, iso_level, bounds, resolution = [64, 64, 64]):
self.occupancy_function = occupancy_function
self.iso_level = iso_level
self.bounds = bounds
self.lower = bounds[0]
self.upper = bounds[1]
self.resolution = resolution
self.mesh = None
self.calculate_voxels()
def calculate_voxel_matrix(self, max):
"""
:param max:
:return:
"""
return
def calculate_voxels(self):
x_range = np.linspace(self.lower[0], self.upper[0], self.resolution[0])
y_range = np.linspace(self.lower[1], self.upper[1], self.resolution[1])
z_range = np.linspace(self.lower[2], self.upper[2], self.resolution[2])
xx, yy, zz = np.meshgrid(x_range, y_range, z_range)
xx = xx.flatten()
yy = yy.flatten()
zz = zz.flatten()
points = np.array([xx, yy, zz]).reshape((xx.shape[0], 3))
occupancy_mask = self.occupancy_function.evaluate_set(points)
inside_points = points[occupancy_mask > self.iso_level]
indices = points_to_indices(inside_points, pitch=1.0, origin=np.array([0.0, 0.0, 0.0]))
self.mesh = points_to_marching_cubes(inside_points * 32, pitch=1.0)
| [
"numpy.array",
"numpy.meshgrid",
"numpy.linspace",
"trimesh.voxel.ops.points_to_marching_cubes"
] | [((758, 819), 'numpy.linspace', 'np.linspace', (['self.lower[0]', 'self.upper[0]', 'self.resolution[0]'], {}), '(self.lower[0], self.upper[0], self.resolution[0])\n', (769, 819), True, 'import numpy as np\n'), ((838, 899), 'numpy.linspace', 'np.linspace', (['self.lower[1]', 'self.upper[1]', 'self.resolution[1]'], {}), '(self.lower[1], self.upper[1], self.resolution[1])\n', (849, 899), True, 'import numpy as np\n'), ((918, 979), 'numpy.linspace', 'np.linspace', (['self.lower[2]', 'self.upper[2]', 'self.resolution[2]'], {}), '(self.lower[2], self.upper[2], self.resolution[2])\n', (929, 979), True, 'import numpy as np\n'), ((1002, 1040), 'numpy.meshgrid', 'np.meshgrid', (['x_range', 'y_range', 'z_range'], {}), '(x_range, y_range, z_range)\n', (1013, 1040), True, 'import numpy as np\n'), ((1437, 1492), 'trimesh.voxel.ops.points_to_marching_cubes', 'points_to_marching_cubes', (['(inside_points * 32)'], {'pitch': '(1.0)'}), '(inside_points * 32, pitch=1.0)\n', (1461, 1492), False, 'from trimesh.voxel.ops import points_to_marching_cubes\n'), ((1137, 1159), 'numpy.array', 'np.array', (['[xx, yy, zz]'], {}), '([xx, yy, zz])\n', (1145, 1159), True, 'import numpy as np\n'), ((1389, 1414), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1397, 1414), True, 'import numpy as np\n')] |
#!/usr/bin/python3
# Roding: utf-8
""" Extract frames from video """
import argparse
import logging as log
from pathlib import Path
import cv2
import numpy as np
from tqdm import tqdm
def get_desired_frames(length, n_frames, uniform=True):
if uniform:
interval = int((length) / n_frames)
desired_frames = np.arange(interval, length, interval)
return desired_frames
X1 = np.random.normal(
loc=length / 4, scale=length / 4, size=int(n_frames / 2))
X1 = X1.astype(int)
X2 = np.random.normal(loc=length / 2 + length / 4,
scale=length / 4, size=int(n_frames / 2))
X2 = X2.astype(int)
X = np.hstack((X1, X2))
return X
def write(image, out_dir, episode, index):
out_dir = Path(out_dir / episode)
if not out_dir.exists():
out_dir.mkdir()
frame_name = "{0}.jpg".format(index)
cv2.imwrite(str(out_dir / frame_name), image)
def extract_frames(video_file, out_dir, n_frames=10, uniform=True, episode=''):
cap = cv2.VideoCapture(video_file)
_, image = cap.read()
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
desired_frames = get_desired_frames(length, n_frames, uniform=uniform)
n = len(desired_frames)
for i, index in tqdm(zip(desired_frames, range(n)),
total=n, unit="frames"):
cap.set(1, i - 1)
_, image = cap.read(1)
cap.get(1)
write(image, out_dir, episode, index)
def parse_args():
parser = argparse.ArgumentParser(description='process args')
parser.add_argument('video_file',
help='path to videofile')
parser.add_argument('-n', '--n_frames', type=int, default=10,
help='amount of frames')
parser.add_argument('-e', '--episode', dest='episode', default='default',
help='episode counter')
parser.add_argument('-o', '--out', dest='out_dir',
default='../frames/', help='output dir')
parser.add_argument('-v', '--verbose', dest="verbose", action='store_true')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if args.verbose:
log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
log.info("Verbose output.")
else:
log.basicConfig(format="%(levelname)s: %(message)s")
# Create out_dir
out_dir = Path(args.out_dir)
if not out_dir.exists():
out_dir.mkdir()
# Extract images
extract_frames(
args.video_file, out_dir, episode=Path(args.video_file).stem,
n_frames=args.n_frames, uniform=True)
| [
"logging.basicConfig",
"argparse.ArgumentParser",
"pathlib.Path",
"numpy.hstack",
"cv2.VideoCapture",
"logging.info",
"numpy.arange"
] | [((669, 688), 'numpy.hstack', 'np.hstack', (['(X1, X2)'], {}), '((X1, X2))\n', (678, 688), True, 'import numpy as np\n'), ((761, 784), 'pathlib.Path', 'Path', (['(out_dir / episode)'], {}), '(out_dir / episode)\n', (765, 784), False, 'from pathlib import Path\n'), ((1021, 1049), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_file'], {}), '(video_file)\n', (1037, 1049), False, 'import cv2\n'), ((1492, 1543), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""process args"""'}), "(description='process args')\n", (1515, 1543), False, 'import argparse\n'), ((2415, 2433), 'pathlib.Path', 'Path', (['args.out_dir'], {}), '(args.out_dir)\n', (2419, 2433), False, 'from pathlib import Path\n'), ((329, 366), 'numpy.arange', 'np.arange', (['interval', 'length', 'interval'], {}), '(interval, length, interval)\n', (338, 366), True, 'import numpy as np\n'), ((2202, 2271), 'logging.basicConfig', 'log.basicConfig', ([], {'format': '"""%(levelname)s: %(message)s"""', 'level': 'log.DEBUG'}), "(format='%(levelname)s: %(message)s', level=log.DEBUG)\n", (2217, 2271), True, 'import logging as log\n'), ((2280, 2307), 'logging.info', 'log.info', (['"""Verbose output."""'], {}), "('Verbose output.')\n", (2288, 2307), True, 'import logging as log\n'), ((2326, 2378), 'logging.basicConfig', 'log.basicConfig', ([], {'format': '"""%(levelname)s: %(message)s"""'}), "(format='%(levelname)s: %(message)s')\n", (2341, 2378), True, 'import logging as log\n'), ((2571, 2592), 'pathlib.Path', 'Path', (['args.video_file'], {}), '(args.video_file)\n', (2575, 2592), False, 'from pathlib import Path\n')] |
# hyperparameter optimization
from ray import tune
from ray.tune.schedulers import ASHAScheduler
from model import GRU
# set processing device
import torch
import torch.nn as nn
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
# data
from torchtext.legacy.data import Field, TabularDataset, BucketIterator
import torch.optim as optim
from ray.tune import CLIReporter
import os
import numpy as np
from functools import partial
from loadData import declareFields
def save_metrics(save_path, train_loss_list, valid_loss_list, global_steps_list):
if save_path == None:
return
state_dict = {'train_loss_list': train_loss_list,
'valid_loss_list': valid_loss_list,
'global_steps_list': global_steps_list}
torch.save(state_dict, save_path)
print(f'Model saved to ==> {save_path}')
def load_metrics(load_path):
if load_path==None:
return
state_dict = torch.load(load_path, map_location=device)
print(f'Model loaded from <== {load_path}')
return state_dict['train_loss_list'], state_dict['valid_loss_list'], state_dict['global_steps_list']
# Tokenize peptide sequences by splitting into individual amino acids
def split(sequence):
return [char for char in sequence]
def load_data(fields, root_dir="./"):
# Create simple TabularDatasets from train/test CSV
trainData, testData = TabularDataset.splits(path=root_dir, train="train.csv", test="test.csv", format='CSV', fields=fields, skip_header=True)
return trainData, testData
def optimizeHyperparameters(config, data_dir, checkpoint_dir=None, out_dir='./output', amyloid=True):
# initialize running values
running_loss = 0.0
valid_running_loss = 0.0
global_step = 0
train_loss_list = []
valid_loss_list = []
global_steps_list = []
fields, sequence = declareFields(train=True)
trainData, testData = load_data(fields, data_dir)
# create iterators for current fold
# sort by sequence length to keep batches consistent
train_iter = BucketIterator(trainData, batch_size=config['batch_size'], sort_key=lambda x: len(x.sequence),
device=device, sort=True, sort_within_batch=True)
test_iter = BucketIterator(testData, batch_size=config['batch_size'], sort_key=lambda x: len(x.sequence),
device=device, sort=True, sort_within_batch=True)
sequence.build_vocab(trainData)
eval_every=len(train_iter) // 2
# initialize model
model = GRU(vocab=sequence.vocab, dimension=config['dimension'], sequenceDepth=config['sequence_feature_depth'], dropoutWithinLayers=config['dropout_within_layers'], dropoutOutput=config['dropout_output'])
# load optimizer
optimizer = optim.Adam(model.parameters(), lr=config['learning_rate'])
criterion = nn.SmoothL1Loss()
# training loop
model.train()
for epoch in range(config['number_of_epochs']):
for (header, (sequence, sequence_len), prionLabel, amyloidLabel), _ in train_iter:
prionLabel = prionLabel.to(device)
amyloidLabel = amyloidLabel.to(device)
sequence = sequence.to(device)
sequence_len = sequence_len.to(device)
model.to(device)
output = model(sequence, sequence_len.cpu())
amyloidLoss = criterion(output, amyloidLabel)
prionLoss = criterion(output, prionLabel)
loss = amyloidLoss if amyloid else prionLoss
optimizer.zero_grad()
loss.backward()
optimizer.step()
# update running values
running_loss += loss.item()
global_step += 1
# evaluation for this epoch
if global_step % eval_every == 0:
model.eval()
with torch.no_grad():
# validation loop
for (header, (sequence, sequence_len), prionLabel, amyloidLabel), _ in test_iter:
prionLabel = prionLabel.to(device)
amyloidLabel = amyloidLabel.to(device)
sequence = sequence.to(device)
sequence_len = sequence_len.to(device)
output = model(sequence, sequence_len.cpu())
amyloidLoss = criterion(output, amyloidLabel)
prionLoss = criterion(output, prionLabel)
loss = amyloidLoss if amyloid else prionLoss
valid_running_loss += loss.item()
# record loss
average_train_loss = running_loss / eval_every
average_valid_loss = valid_running_loss / len(test_iter)
train_loss_list.append(average_train_loss)
valid_loss_list.append(average_valid_loss)
global_steps_list.append(global_step)
with tune.checkpoint_dir(epoch) as file_path:
path = os.path.join(file_path, "checkpoint")
torch.save((model.state_dict(), optimizer.state_dict()), path)
# resetting running values
valid_running_loss = 0.0
running_loss = 0.0
model.train()
# print progress
print('Epoch [{}/{}], Step [{}/{}], Train Loss: {:.4f}, Valid Loss: {:.4f}'
.format(epoch+1, config['number_of_epochs'], global_step, config['number_of_epochs']*len(train_iter),
average_train_loss, average_valid_loss))
tune.report(valid_loss=average_valid_loss)
save_metrics(file_path + f'/metrics.pt', train_loss_list, valid_loss_list, global_steps_list)
def main(num_samples=10, max_num_epochs=10, gpus_per_trial=1):
data_dir = os.path.abspath("./")
fields, sequence = declareFields(train=True)
config = {
"number_of_epochs": tune.sample_from(lambda _: np.random.randint(2, 25)),
"dropout_within_layers": tune.sample_from(lambda _: np.random.uniform(high = 0.5)),
"dropout_output": tune.sample_from(lambda _: np.random.uniform(high = 0.5)),
"sequence_feature_depth": tune.sample_from(lambda _: np.random.randint(16, 192)),
"dimension": tune.sample_from(lambda _: np.random.randint(16, 192)),
"learning_rate": tune.loguniform(1e-5, 1e-2),
"batch_size": tune.choice([2, 4, 8, 16, 32, 64]),
"num_gpu": 1,
"num_workers": 2
}
scheduler = ASHAScheduler(
metric="valid_loss",
mode="min",
max_t=max_num_epochs,
grace_period=1,
reduction_factor=2)
reporter = CLIReporter(
# parameter_columns=["l1", "l2", "lr", "batch_size"],
metric_columns=["valid_loss", "training_iteration"])
result = tune.run(
partial(optimizeHyperparameters, data_dir=data_dir),
resources_per_trial={"cpu": 2, "gpu": gpus_per_trial},
config=config,
num_samples=num_samples,
scheduler=scheduler,
progress_reporter=reporter)
best_trial = result.get_best_trial("valid_loss", "min", "last")
print("Best trial config: {}".format(best_trial.config))
print("Best trial final validation loss: {}".format(
best_trial.last_result["valid_loss"]))
# best_trained_model = GRU(vocab=sequence.vocab, dimension=best_trial.config['dimension'], sequenceDepth=best_trial.config['sequence_feature_depth'], dropoutWithinLayers=best_trial.config['dropout_within_layers'], dropoutOutput=best_trial.config['dropout_output'])
# device = "cpu"
# if torch.cuda.is_available():
# device = "cuda:0"
# if gpus_per_trial > 1:
# best_trained_model = nn.DataParallel(best_trained_model)
# best_trained_model.to(device)
# best_checkpoint_dir = best_trial.checkpoint.value
# model_state, optimizer_state = torch.load(os.path.join(
# best_checkpoint_dir, "checkpoint"))
# best_trained_model.load_state_dict(model_state)
if __name__ == "__main__":
# You can change the number of GPUs per trial here:
main(num_samples=30, max_num_epochs=10, gpus_per_trial=1)
print('Finished Optimizing Hyperparameters!') | [
"ray.tune.report",
"torchtext.legacy.data.TabularDataset.splits",
"torch.cuda.is_available",
"ray.tune.checkpoint_dir",
"ray.tune.loguniform",
"ray.tune.CLIReporter",
"ray.tune.choice",
"torch.save",
"model.GRU",
"ray.tune.schedulers.ASHAScheduler",
"torch.nn.SmoothL1Loss",
"torch.device",
"... | [((214, 239), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (237, 239), False, 'import torch\n'), ((188, 210), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (200, 210), False, 'import torch\n'), ((245, 264), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (257, 264), False, 'import torch\n'), ((809, 842), 'torch.save', 'torch.save', (['state_dict', 'save_path'], {}), '(state_dict, save_path)\n', (819, 842), False, 'import torch\n'), ((981, 1023), 'torch.load', 'torch.load', (['load_path'], {'map_location': 'device'}), '(load_path, map_location=device)\n', (991, 1023), False, 'import torch\n'), ((1435, 1558), 'torchtext.legacy.data.TabularDataset.splits', 'TabularDataset.splits', ([], {'path': 'root_dir', 'train': '"""train.csv"""', 'test': '"""test.csv"""', 'format': '"""CSV"""', 'fields': 'fields', 'skip_header': '(True)'}), "(path=root_dir, train='train.csv', test='test.csv',\n format='CSV', fields=fields, skip_header=True)\n", (1456, 1558), False, 'from torchtext.legacy.data import Field, TabularDataset, BucketIterator\n'), ((1898, 1923), 'loadData.declareFields', 'declareFields', ([], {'train': '(True)'}), '(train=True)\n', (1911, 1923), False, 'from loadData import declareFields\n'), ((2561, 2768), 'model.GRU', 'GRU', ([], {'vocab': 'sequence.vocab', 'dimension': "config['dimension']", 'sequenceDepth': "config['sequence_feature_depth']", 'dropoutWithinLayers': "config['dropout_within_layers']", 'dropoutOutput': "config['dropout_output']"}), "(vocab=sequence.vocab, dimension=config['dimension'], sequenceDepth=\n config['sequence_feature_depth'], dropoutWithinLayers=config[\n 'dropout_within_layers'], dropoutOutput=config['dropout_output'])\n", (2564, 2768), False, 'from model import GRU\n'), ((2872, 2889), 'torch.nn.SmoothL1Loss', 'nn.SmoothL1Loss', ([], {}), '()\n', (2887, 2889), True, 'import torch.nn as nn\n'), ((5956, 5977), 'os.path.abspath', 'os.path.abspath', (['"""./"""'], {}), 
"('./')\n", (5971, 5977), False, 'import os\n'), ((6005, 6030), 'loadData.declareFields', 'declareFields', ([], {'train': '(True)'}), '(train=True)\n', (6018, 6030), False, 'from loadData import declareFields\n'), ((6658, 6766), 'ray.tune.schedulers.ASHAScheduler', 'ASHAScheduler', ([], {'metric': '"""valid_loss"""', 'mode': '"""min"""', 'max_t': 'max_num_epochs', 'grace_period': '(1)', 'reduction_factor': '(2)'}), "(metric='valid_loss', mode='min', max_t=max_num_epochs,\n grace_period=1, reduction_factor=2)\n", (6671, 6766), False, 'from ray.tune.schedulers import ASHAScheduler\n'), ((6819, 6883), 'ray.tune.CLIReporter', 'CLIReporter', ([], {'metric_columns': "['valid_loss', 'training_iteration']"}), "(metric_columns=['valid_loss', 'training_iteration'])\n", (6830, 6883), False, 'from ray.tune import CLIReporter\n'), ((6502, 6530), 'ray.tune.loguniform', 'tune.loguniform', (['(1e-05)', '(0.01)'], {}), '(1e-05, 0.01)\n', (6517, 6530), False, 'from ray import tune\n'), ((6553, 6587), 'ray.tune.choice', 'tune.choice', (['[2, 4, 8, 16, 32, 64]'], {}), '([2, 4, 8, 16, 32, 64])\n', (6564, 6587), False, 'from ray import tune\n'), ((6986, 7037), 'functools.partial', 'partial', (['optimizeHyperparameters'], {'data_dir': 'data_dir'}), '(optimizeHyperparameters, data_dir=data_dir)\n', (6993, 7037), False, 'from functools import partial\n'), ((5724, 5766), 'ray.tune.report', 'tune.report', ([], {'valid_loss': 'average_valid_loss'}), '(valid_loss=average_valid_loss)\n', (5735, 5766), False, 'from ray import tune\n'), ((6106, 6130), 'numpy.random.randint', 'np.random.randint', (['(2)', '(25)'], {}), '(2, 25)\n', (6123, 6130), True, 'import numpy as np\n'), ((6193, 6220), 'numpy.random.uniform', 'np.random.uniform', ([], {'high': '(0.5)'}), '(high=0.5)\n', (6210, 6220), True, 'import numpy as np\n'), ((6278, 6305), 'numpy.random.uniform', 'np.random.uniform', ([], {'high': '(0.5)'}), '(high=0.5)\n', (6295, 6305), True, 'import numpy as np\n'), ((6371, 6397), 
'numpy.random.randint', 'np.random.randint', (['(16)', '(192)'], {}), '(16, 192)\n', (6388, 6397), True, 'import numpy as np\n'), ((6448, 6474), 'numpy.random.randint', 'np.random.randint', (['(16)', '(192)'], {}), '(16, 192)\n', (6465, 6474), True, 'import numpy as np\n'), ((3877, 3892), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3890, 3892), False, 'import torch\n'), ((5028, 5054), 'ray.tune.checkpoint_dir', 'tune.checkpoint_dir', (['epoch'], {}), '(epoch)\n', (5047, 5054), False, 'from ray import tune\n'), ((5096, 5133), 'os.path.join', 'os.path.join', (['file_path', '"""checkpoint"""'], {}), "(file_path, 'checkpoint')\n", (5108, 5133), False, 'import os\n')] |
#!/usr/bin/env python3
import numpy as np
from . import tshark
def get_result(input_files, filter, bins=100):
    """Histogram the displayed inter-frame time deltas of capture files.

    Runs tshark over each capture file with the given display filter,
    collects the ``frame.time_delta_displayed`` values and bins them.

    Args:
        input_files: Iterable of capture-file paths to analyse.
        filter: tshark display filter string (parameter name kept for
            backward compatibility even though it shadows the builtin).
        bins: Number of histogram bins (default 100, the historical value).

    Returns:
        dict with ``freq`` (per-bin counts) and ``intervals`` (bin edges,
        one more entry than ``freq``); both are empty lists when no
        samples were collected.
    """
    time_list = []
    for file in input_files:
        cmd_result = tshark.fields(file, filter, ['frame.time_delta_displayed'])
        time_list.extend(float(result) for result in cmd_result)
    if time_list:
        freq, intervals = np.histogram(time_list, bins)
        freq, intervals = freq.tolist(), intervals.tolist()
    else:
        freq, intervals = [], []
    return {'freq': freq, 'intervals': intervals}
| [
"numpy.histogram"
] | [((353, 381), 'numpy.histogram', 'np.histogram', (['time_list', '(100)'], {}), '(time_list, 100)\n', (365, 381), True, 'import numpy as np\n')] |
from torch.utils.data import Dataset
from utils.visual_augmentation import ColorDistort, pixel_jitter
import numpy as np
import copy
import json
import random
import cv2
from utils.augmentation import Rotate_aug, Affine_aug, Mirror, Padding_aug, Img_dropout
from utils.headpose import get_head_pose
import time
from utils.turbo.TurboJPEG import TurboJPEG
# Shared TurboJPEG decoder instance, used for fast JPEG reads in Landmark.__getitem__.
jpeg = TurboJPEG()
# Pairs of facial-landmark indices that swap places under a horizontal flip;
# consumed by the Mirror augmentation. NOTE(review): index pairs look like the
# standard 68-point annotation order -- confirm against the dataset labels.
symmetry = [(0, 16), (1, 15), (2, 14), (3, 13), (4, 12), (5, 11), (6, 10), (7, 9), (8, 8),
            (17, 26), (18, 25), (19, 24), (20, 23), (21, 22),
            (31, 35), (32, 34),
            (36, 45), (37, 44), (38, 43), (39, 42), (40, 47), (41, 46),
            (48, 54), (49, 53), (50, 52), (55, 59), (56, 58), (60, 64), (61, 63), (65, 67)]
# Fractional [width, height] margins added around the face bbox when cropping.
base_extend_range = [0.2, 0.3]
class data_info(object):
    """Thin loader around a JSON annotation file.

    The file is parsed eagerly on construction; the parsed sample list is
    handed out (shuffled in place) via :meth:`get_all_sample`.
    """

    def __init__(self, ann_json):
        self.ann_json = ann_json
        self.metas = []
        self.load_anns()

    def load_anns(self):
        """(Re)load the annotation list from ``self.ann_json`` into ``self.metas``."""
        with open(self.ann_json, 'r') as handle:
            self.metas = json.load(handle)

    def get_all_sample(self):
        """Shuffle the cached samples in place and return the same list."""
        random.shuffle(self.metas)
        return self.metas
class Landmark(Dataset):
    """Facial-landmark training dataset with on-the-fly augmentation.

    Each sample yields a normalised face crop (CHW, float32 in [-1, 1]) and
    a label vector made of the flattened landmark coordinates (normalised
    to [0, 1] in the crop frame), the head pose angles scaled by 1/90, and
    four binary state flags (left eye closed, right eye closed, mouth
    closed, big mouth open).
    """

    def __init__(self, ann_file, input_size=(96, 96), training_flag=True):
        """
        Args:
            ann_file: Path to the JSON annotation file.
            input_size: (width, height) of the crops fed to the network.
            training_flag: When True, annotations are re-balanced and the
                crops are randomly augmented.
        """
        super(Landmark, self).__init__()
        self.counter = 0
        self.time_counter = 0
        self.training_flag = training_flag
        self.raw_data_set_size = None
        self.color_augmentor = ColorDistort()
        self.lst = self.parse_file(ann_file)
        self.input_size = input_size

    def __getitem__(self, item):
        """Load, crop, augment and encode one sample."""
        dp = self.lst[item]
        fname = dp['image_path']
        keypoints = dp['keypoints']
        bbox = dp['bbox']
        if keypoints is not None:
            if ".jpg" in fname:
                # TurboJPEG decodes JPEGs noticeably faster than cv2.imread.
                image = jpeg.imread(fname)
            else:
                image = cv2.imread(fname)
            # np.float was removed in NumPy 1.24; np.float64 is the same dtype.
            label = np.array(keypoints, dtype=np.float64).reshape((-1, 2))
            bbox = np.array(bbox)
        crop_image, label = self.augmentationCropImage(image, bbox, label, self.training_flag)
        if self.training_flag:
            if random.uniform(0, 1) > 0.5:
                crop_image, label = Mirror(crop_image, label=label, symmetry=symmetry)
            if random.uniform(0, 1) > 0.0:
                angle = random.uniform(-45, 45)
                crop_image, label = Rotate_aug(crop_image, label=label, angle=angle)
            if random.uniform(0, 1) > 0.5:
                strength = random.uniform(0, 50)
                crop_image, label = Affine_aug(crop_image, strength=strength, label=label)
            if random.uniform(0, 1) > 0.5:
                crop_image = self.color_augmentor(crop_image)
            if random.uniform(0, 1) > 0.5:
                crop_image = pixel_jitter(crop_image, 15)
            if random.uniform(0, 1) > 0.5:
                crop_image = Img_dropout(crop_image, 0.2)
            if random.uniform(0, 1) > 0.5:
                crop_image = Padding_aug(crop_image, 0.3)
        reprojectdst, euler_angle = get_head_pose(label, crop_image)
        PRY = euler_angle.reshape([-1]).astype(np.float32) / 90.
        cla_label = np.zeros([4])
        if dp['left_eye_close']:
            cla_label[0] = 1
        if dp['right_eye_close']:
            cla_label[1] = 1
        if dp['mouth_close']:
            cla_label[2] = 1
        if dp['big_mouth_open']:
            cla_label[3] = 1
        crop_image_height, crop_image_width, _ = crop_image.shape
        label = label.astype(np.float32)
        # Normalise landmark coordinates to [0, 1] within the crop frame.
        label[:, 0] = label[:, 0] / crop_image_width
        label[:, 1] = label[:, 1] / crop_image_height
        crop_image = crop_image.astype(np.float32)
        label = label.reshape([-1]).astype(np.float32)
        cla_label = cla_label.astype(np.float32)
        label = np.concatenate([label, PRY, cla_label], axis=0)
        # Map pixel values to [-1, 1] and reorder HWC -> CHW for PyTorch.
        crop_image = (crop_image - 127.0) / 127.0
        crop_image = np.transpose(crop_image, (2, 0, 1)).astype(np.float32)
        return crop_image, label

    def __len__(self):
        return len(self.lst)

    def parse_file(self, ann_file):
        """Load annotations, re-balancing them when in training mode."""
        ann_info = data_info(ann_file)
        all_samples = ann_info.get_all_sample()
        self.raw_data_set_size = len(all_samples)
        print("Raw Samples: " + str(self.raw_data_set_size))
        if self.training_flag:
            balanced_samples = self.balance(all_samples)
            print("Balanced Samples: " + str(len(balanced_samples)))
        else:
            balanced_samples = all_samples
        return balanced_samples

    def balance(self, anns):
        """Over-sample rare annotation states by duplicating their entries.

        Faces smaller than 50 px in either dimension are dropped; closed
        eyes, small eye distance, small/big open mouth and asymmetric eye
        states each add a fixed number of duplicates.
        """
        res_anns = copy.deepcopy(anns)
        for ann in anns:
            if ann['keypoints'] is not None:
                bbox = ann['bbox']
                bbox_width = bbox[2] - bbox[0]
                bbox_height = bbox[3] - bbox[1]
                if bbox_width < 50 or bbox_height < 50:
                    res_anns.remove(ann)
                left_eye_close = ann['left_eye_close']
                right_eye_close = ann['right_eye_close']
                if left_eye_close or right_eye_close:
                    for i in range(10):
                        res_anns.append(ann)
                if ann['small_eye_distance']:
                    for i in range(20):
                        res_anns.append(ann)
                if ann['small_mouth_open']:
                    for i in range(20):
                        res_anns.append(ann)
                if ann['big_mouth_open']:
                    for i in range(50):
                        res_anns.append(ann)
                if left_eye_close and not right_eye_close:
                    for i in range(40):
                        res_anns.append(ann)
                if not left_eye_close and right_eye_close:
                    for i in range(40):
                        res_anns.append(ann)
        return res_anns

    def augmentationCropImage(self, img, bbox, joints=None, is_training=True):
        """Crop the face region (with margin and optional jitter) and remap
        the joints into the resized crop's pixel coordinates."""
        bbox = np.array(bbox).reshape(4, ).astype(np.float32)
        # Pad the image on all sides so jittered crops never leave the frame.
        add = max(img.shape[0], img.shape[1])
        bimg = cv2.copyMakeBorder(img, add, add, add, add, borderType=cv2.BORDER_CONSTANT, value=[127., 127., 127.])
        objcenter = np.array([(bbox[0] + bbox[2]) / 2., (bbox[1] + bbox[3]) / 2.])
        bbox += add
        objcenter += add
        joints[:, :2] += add
        gt_width = (bbox[2] - bbox[0])
        gt_height = (bbox[3] - bbox[1])
        crop_width_half = gt_width * (1 + base_extend_range[0] * 2) // 2
        crop_height_half = gt_height * (1 + base_extend_range[1] * 2) // 2
        if is_training:
            # Randomly jitter the crop window during training.
            min_x = int(objcenter[0] - crop_width_half +
                        random.uniform(-base_extend_range[0], base_extend_range[0]) * gt_width)
            max_x = int(objcenter[0] + crop_width_half +
                        random.uniform(-base_extend_range[0], base_extend_range[0]) * gt_width)
            min_y = int(objcenter[1] - crop_height_half +
                        random.uniform(-base_extend_range[1], base_extend_range[1]) * gt_height)
            max_y = int(objcenter[1] + crop_height_half +
                        random.uniform(-base_extend_range[1], base_extend_range[1]) * gt_height)
        else:
            min_x = int(objcenter[0] - crop_width_half)
            max_x = int(objcenter[0] + crop_width_half)
            min_y = int(objcenter[1] - crop_height_half)
            max_y = int(objcenter[1] + crop_height_half)
        joints[:, 0] = joints[:, 0] - min_x
        joints[:, 1] = joints[:, 1] - min_y
        img = bimg[min_y:max_y, min_x:max_x, :]
        crop_image_height, crop_image_width, _ = img.shape
        joints[:, 0] = joints[:, 0] / crop_image_width
        joints[:, 1] = joints[:, 1] / crop_image_height
        # Vary the resize interpolation as a mild extra augmentation.
        interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_NEAREST,
                          cv2.INTER_LANCZOS4]
        interp_method = random.choice(interp_methods)
        img = cv2.resize(img, (self.input_size[0], self.input_size[1]), interpolation=interp_method)
        joints[:, 0] = joints[:, 0] * self.input_size[0]
        joints[:, 1] = joints[:, 1] * self.input_size[1]
        return img, joints
| [
"utils.visual_augmentation.pixel_jitter",
"numpy.array",
"utils.visual_augmentation.ColorDistort",
"copy.deepcopy",
"utils.turbo.TurboJPEG.TurboJPEG",
"utils.headpose.get_head_pose",
"utils.augmentation.Affine_aug",
"utils.augmentation.Img_dropout",
"numpy.concatenate",
"random.uniform",
"random... | [((363, 374), 'utils.turbo.TurboJPEG.TurboJPEG', 'TurboJPEG', ([], {}), '()\n', (372, 374), False, 'from utils.turbo.TurboJPEG import TurboJPEG\n'), ((1088, 1114), 'random.shuffle', 'random.shuffle', (['self.metas'], {}), '(self.metas)\n', (1102, 1114), False, 'import random\n'), ((1451, 1465), 'utils.visual_augmentation.ColorDistort', 'ColorDistort', ([], {}), '()\n', (1463, 1465), False, 'from utils.visual_augmentation import ColorDistort, pixel_jitter\n'), ((5102, 5121), 'copy.deepcopy', 'copy.deepcopy', (['anns'], {}), '(anns)\n', (5115, 5121), False, 'import copy\n'), ((6664, 6772), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['img', 'add', 'add', 'add', 'add'], {'borderType': 'cv2.BORDER_CONSTANT', 'value': '[127.0, 127.0, 127.0]'}), '(img, add, add, add, add, borderType=cv2.BORDER_CONSTANT,\n value=[127.0, 127.0, 127.0])\n', (6682, 6772), False, 'import cv2\n'), ((6786, 6850), 'numpy.array', 'np.array', (['[(bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0]'], {}), '([(bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0])\n', (6794, 6850), True, 'import numpy as np\n'), ((8510, 8539), 'random.choice', 'random.choice', (['interp_methods'], {}), '(interp_methods)\n', (8523, 8539), False, 'import random\n'), ((8554, 8645), 'cv2.resize', 'cv2.resize', (['img', '(self.input_size[0], self.input_size[1])'], {'interpolation': 'interp_method'}), '(img, (self.input_size[0], self.input_size[1]), interpolation=\n interp_method)\n', (8564, 8645), False, 'import cv2\n'), ((999, 1011), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1008, 1011), False, 'import json\n'), ((2052, 2066), 'numpy.array', 'np.array', (['bbox'], {}), '(bbox)\n', (2060, 2066), True, 'import numpy as np\n'), ((3203, 3235), 'utils.headpose.get_head_pose', 'get_head_pose', (['label', 'crop_image'], {}), '(label, crop_image)\n', (3216, 3235), False, 'from utils.headpose import get_head_pose\n'), ((3329, 3342), 'numpy.zeros', 'np.zeros', (['[4]'], {}), '([4])\n', (3337, 3342), True, 
'import numpy as np\n'), ((4249, 4296), 'numpy.concatenate', 'np.concatenate', (['[label, PRY, cla_label]'], {'axis': '(0)'}), '([label, PRY, cla_label], axis=0)\n', (4263, 4296), True, 'import numpy as np\n'), ((1942, 1959), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (1952, 1959), False, 'import cv2\n'), ((4369, 4404), 'numpy.transpose', 'np.transpose', (['crop_image', '(2, 0, 1)'], {}), '(crop_image, (2, 0, 1))\n', (4381, 4404), True, 'import numpy as np\n'), ((1980, 2015), 'numpy.array', 'np.array', (['keypoints'], {'dtype': 'np.float'}), '(keypoints, dtype=np.float)\n', (1988, 2015), True, 'import numpy as np\n'), ((2221, 2241), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2235, 2241), False, 'import random\n'), ((2289, 2339), 'utils.augmentation.Mirror', 'Mirror', (['crop_image'], {'label': 'label', 'symmetry': 'symmetry'}), '(crop_image, label=label, symmetry=symmetry)\n', (2295, 2339), False, 'from utils.augmentation import Rotate_aug, Affine_aug, Mirror, Padding_aug, Img_dropout\n'), ((2359, 2379), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2373, 2379), False, 'import random\n'), ((2415, 2438), 'random.uniform', 'random.uniform', (['(-45)', '(45)'], {}), '(-45, 45)\n', (2429, 2438), False, 'import random\n'), ((2479, 2527), 'utils.augmentation.Rotate_aug', 'Rotate_aug', (['crop_image'], {'label': 'label', 'angle': 'angle'}), '(crop_image, label=label, angle=angle)\n', (2489, 2527), False, 'from utils.augmentation import Rotate_aug, Affine_aug, Mirror, Padding_aug, Img_dropout\n'), ((2547, 2567), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2561, 2567), False, 'import random\n'), ((2606, 2627), 'random.uniform', 'random.uniform', (['(0)', '(50)'], {}), '(0, 50)\n', (2620, 2627), False, 'import random\n'), ((2668, 2722), 'utils.augmentation.Affine_aug', 'Affine_aug', (['crop_image'], {'strength': 'strength', 'label': 'label'}), '(crop_image, strength=strength, 
label=label)\n', (2678, 2722), False, 'from utils.augmentation import Rotate_aug, Affine_aug, Mirror, Padding_aug, Img_dropout\n'), ((2742, 2762), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2756, 2762), False, 'import random\n'), ((2855, 2875), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2869, 2875), False, 'import random\n'), ((2916, 2944), 'utils.visual_augmentation.pixel_jitter', 'pixel_jitter', (['crop_image', '(15)'], {}), '(crop_image, 15)\n', (2928, 2944), False, 'from utils.visual_augmentation import ColorDistort, pixel_jitter\n'), ((2964, 2984), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2978, 2984), False, 'import random\n'), ((3025, 3053), 'utils.augmentation.Img_dropout', 'Img_dropout', (['crop_image', '(0.2)'], {}), '(crop_image, 0.2)\n', (3036, 3053), False, 'from utils.augmentation import Rotate_aug, Affine_aug, Mirror, Padding_aug, Img_dropout\n'), ((3073, 3093), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (3087, 3093), False, 'import random\n'), ((3134, 3162), 'utils.augmentation.Padding_aug', 'Padding_aug', (['crop_image', '(0.3)'], {}), '(crop_image, 0.3)\n', (3145, 3162), False, 'from utils.augmentation import Rotate_aug, Affine_aug, Mirror, Padding_aug, Img_dropout\n'), ((6556, 6570), 'numpy.array', 'np.array', (['bbox'], {}), '(bbox)\n', (6564, 6570), True, 'import numpy as np\n'), ((7257, 7316), 'random.uniform', 'random.uniform', (['(-base_extend_range[0])', 'base_extend_range[0]'], {}), '(-base_extend_range[0], base_extend_range[0])\n', (7271, 7316), False, 'import random\n'), ((7412, 7471), 'random.uniform', 'random.uniform', (['(-base_extend_range[0])', 'base_extend_range[0]'], {}), '(-base_extend_range[0], base_extend_range[0])\n', (7426, 7471), False, 'import random\n'), ((7568, 7627), 'random.uniform', 'random.uniform', (['(-base_extend_range[1])', 'base_extend_range[1]'], {}), '(-base_extend_range[1], base_extend_range[1])\n', 
(7582, 7627), False, 'import random\n'), ((7725, 7784), 'random.uniform', 'random.uniform', (['(-base_extend_range[1])', 'base_extend_range[1]'], {}), '(-base_extend_range[1], base_extend_range[1])\n', (7739, 7784), False, 'import random\n')] |
import numpy as np
import pytest
from sklego.common import flatten
from sklego.mixture import GMMClassifier
from sklego.testing import check_shape_remains_same_classifier
from tests.conftest import nonmeta_checks, general_checks, classifier_checks
@pytest.mark.parametrize("test_fn", flatten([
    nonmeta_checks,
    general_checks,
    classifier_checks,
    check_shape_remains_same_classifier
]))
def test_estimator_checks(test_fn):
    # Run each scikit-learn/sklego estimator-contract check against a
    # freshly constructed classifier instance.
    clf = GMMClassifier()
    test_fn(GMMClassifier.__name__, clf)
def test_obvious_usecase():
    """Two well-separated Gaussian blobs must be classified perfectly."""
    left_blob = np.random.normal(-10, 1, (100, 2))
    right_blob = np.random.normal(10, 1, (100, 2))
    X = np.concatenate([left_blob, right_blob])
    y = np.concatenate([np.zeros(100), np.ones(100)])
    model = GMMClassifier()
    model.fit(X, y)
    assert (model.predict(X) == y).all()
def test_value_error_threshold():
    """Fitting with an unknown keyword argument must raise a ValueError."""
    features = np.concatenate(
        [np.random.normal(-10, 1, (100, 2)), np.random.normal(10, 1, (100, 2))]
    )
    labels = np.concatenate([np.zeros(100), np.ones(100)])
    with pytest.raises(ValueError):
        GMMClassifier(megatondinosaurhead=1).fit(features, labels)
| [
"numpy.random.normal",
"numpy.ones",
"sklego.mixture.GMMClassifier",
"sklego.common.flatten",
"numpy.zeros",
"pytest.raises"
] | [((450, 465), 'sklego.mixture.GMMClassifier', 'GMMClassifier', ([], {}), '()\n', (463, 465), False, 'from sklego.mixture import GMMClassifier\n'), ((287, 388), 'sklego.common.flatten', 'flatten', (['[nonmeta_checks, general_checks, classifier_checks,\n check_shape_remains_same_classifier]'], {}), '([nonmeta_checks, general_checks, classifier_checks,\n check_shape_remains_same_classifier])\n', (294, 388), False, 'from sklego.common import flatten\n'), ((943, 968), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (956, 968), False, 'import pytest\n'), ((561, 595), 'numpy.random.normal', 'np.random.normal', (['(-10)', '(1)', '(100, 2)'], {}), '(-10, 1, (100, 2))\n', (577, 595), True, 'import numpy as np\n'), ((597, 630), 'numpy.random.normal', 'np.random.normal', (['(10)', '(1)', '(100, 2)'], {}), '(10, 1, (100, 2))\n', (613, 630), True, 'import numpy as np\n'), ((657, 670), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (665, 670), True, 'import numpy as np\n'), ((672, 684), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (679, 684), True, 'import numpy as np\n'), ((808, 842), 'numpy.random.normal', 'np.random.normal', (['(-10)', '(1)', '(100, 2)'], {}), '(-10, 1, (100, 2))\n', (824, 842), True, 'import numpy as np\n'), ((844, 877), 'numpy.random.normal', 'np.random.normal', (['(10)', '(1)', '(100, 2)'], {}), '(10, 1, (100, 2))\n', (860, 877), True, 'import numpy as np\n'), ((904, 917), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (912, 917), True, 'import numpy as np\n'), ((919, 931), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (926, 931), True, 'import numpy as np\n'), ((978, 1014), 'sklego.mixture.GMMClassifier', 'GMMClassifier', ([], {'megatondinosaurhead': '(1)'}), '(megatondinosaurhead=1)\n', (991, 1014), False, 'from sklego.mixture import GMMClassifier\n'), ((699, 714), 'sklego.mixture.GMMClassifier', 'GMMClassifier', ([], {}), '()\n', (712, 714), False, 'from sklego.mixture import GMMClassifier\n')] 
|
import numpy as np
class MAX_POOL_LAYER:
    """Non-overlapping 2-D max-pooling layer.

    Stride, kernel size and downsampling factor are all the same value, so
    every input element is covered by exactly one pooling window (the max
    filter is never applied to the same input twice).
    """

    def __init__(self, **params):
        # Downsampling factor; 'stride' keeps the historical keyword name.
        self.factor = params.get('stride', 2)

    def forward(self, X):
        """Max-pool the input over non-overlapping factor x factor windows.

        Args:
            X: Input of shape (N, D, H, W) where N is the batch size, D the
                depth and H, W the spatial dimensions. H and W must be
                divisible by ``self.factor``.

        Returns:
            Tuple ``(feature_map, 0)`` where ``feature_map`` has shape
            (N, D, H//factor, W//factor); the second element is a
            placeholder regularisation cost of 0.
        """
        factor = self.factor
        N, D, H, W = X.shape
        self.cache = [X, factor]
        # Fold each factor x factor window onto its own axes, then reduce them.
        self.feature_map = X.reshape(N, D, H // factor, factor, W // factor, factor).max(axis=(3, 5))
        return self.feature_map, 0

    def backward(self, delta):
        """Route upstream gradients back to the max positions.

        Args:
            delta: Gradient w.r.t. the pooled output, shape
                (N, D, H//factor, W//factor); a flattened 2-D version is
                reshaped back automatically.

        Returns:
            Gradient w.r.t. the input, shape (N, D, H, W); zero everywhere
            except at the elements that attained each window's maximum.
        """
        X, factor = self.cache
        if len(delta.shape) != 4:  # flattened gradient from a dense layer
            delta = delta.reshape(self.feature_map.shape)
        # Upsample both the pooled maxima and the gradients back to input
        # resolution, then keep gradients only where X equals the window max.
        fmap = np.repeat(np.repeat(self.feature_map, factor, axis=2), factor, axis=3)
        dmap = np.repeat(np.repeat(delta, factor, axis=2), factor, axis=3)
        self.delta_X = (fmap == X) * dmap
        return self.delta_X
| [
"numpy.zeros",
"numpy.repeat"
] | [((1790, 1807), 'numpy.zeros', 'np.zeros', (['X.shape'], {}), '(X.shape)\n', (1798, 1807), True, 'import numpy as np\n'), ((1566, 1609), 'numpy.repeat', 'np.repeat', (['self.feature_map', 'factor'], {'axis': '(2)'}), '(self.feature_map, factor, axis=2)\n', (1575, 1609), True, 'import numpy as np\n'), ((1652, 1684), 'numpy.repeat', 'np.repeat', (['delta', 'factor'], {'axis': '(2)'}), '(delta, factor, axis=2)\n', (1661, 1684), True, 'import numpy as np\n')] |
# %load_ext autoreload
# %autoreload 2
import numpy as np
from pyhamimports import *
from spectrum import Spectrum
import glob
from tqdm import tqdm
from subprocess import check_output
# Date stamp (YYYY-MM-DD from `date +%F`) used to version the output pickle.
datestr = check_output(["/bin/date","+%F"])
datestr = datestr.decode().replace('\n', '')

# Template locations: single-star spectra and spectroscopic-binary (SB2) blends.
singleTemp_dir = "resources/templates/"
SB2Temp_dir = "resources/templates_SB2/"
singleTemp_list = np.array([os.path.basename(x)
                            for x in glob.glob(singleTemp_dir + "*.fits")])
singleTemp_list.sort()
SB2Temp_list = np.array([os.path.basename(x)
                         for x in glob.glob(SB2Temp_dir + "*.fits")])
SB2Temp_list.sort()
# 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 = O, B, A, F, G, K, M, L, C, WD
single_letter_specTypes = np.array(['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'C', 'D'])
specTypes = np.array(['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'dC', 'DA'])
# Parallel per-template arrays:
#   _0: spectral-class index, _1: subtype, _2: numeric suffix parsed from the
#   filename after '_' (meaning not evident here), _3: constant 5,
#   _4: measured line features.
new_tempLines_0 = np.empty(singleTemp_list.size, dtype=int)
new_tempLines_1 = np.empty(singleTemp_list.size, dtype=np.float64)
new_tempLines_2 = np.empty(singleTemp_list.size, dtype=np.float64)
new_tempLines_3 = np.ones(singleTemp_list.size, dtype=int) * 5
new_tempLines_4 = []
for ii in range(singleTemp_list.size):
    # The first character of the filename encodes the spectral class.
    new_tempLines_0[ii] = np.where(
        single_letter_specTypes == singleTemp_list[ii][0])[0][0]
    if new_tempLines_0[ii] == 9:
        # White dwarfs ('D...'): parse the subtype out of the full name.
        # NOTE(review): `spec` is referenced here before the `spec = Spectrum()`
        # assignment below -- presumably a module-level `spec` is provided by
        # `from pyhamimports import *`; confirm.
        new_tempLines_1[ii] = spec.splitSpecType(singleTemp_list[ii].replace(".fits", ""))[1]
    else:
        new_tempLines_1[ii] = singleTemp_list[ii][1]
    if len(singleTemp_list[ii].replace("_", " ").split()) == 1:
        new_tempLines_2[ii] = 0.
    else:
        new_tempLines_2[ii] = np.float64(
            singleTemp_list[ii].replace("_", " ").split()[1])
spec = Spectrum()
ftype = None
print("Measuring lines for single star templates:")
for ii in tqdm(range(singleTemp_list.size)):
    message, ftype = spec.readFile(singleTemp_dir + singleTemp_list[ii], ftype)
    spec._lines = spec.measureLines()
    # Sort measurements by line name for a stable feature ordering.
    lines = np.array(list(spec._lines.values()))[
        np.argsort(list(spec._lines.keys()))]
    new_tempLines_4.append(lines)
# SB2 templates get class indices appended after the single-star classes.
SB2_index_start = new_tempLines_0.max() + 1  # 10
new_tempLines_0 = np.append(new_tempLines_0, np.arange(
    SB2_index_start, SB2_index_start + SB2Temp_list.size, step=1))
new_tempLines_1 = np.append(new_tempLines_1, np.zeros(SB2Temp_list.size))
new_tempLines_2 = np.append(new_tempLines_2, np.zeros(SB2Temp_list.size))
new_tempLines_3 = np.append(new_tempLines_3, np.ones(SB2Temp_list.size) * 5)
# new_tempLines_4 = new_tempLines_4
spec = Spectrum()
ftype = None
print("Measuring lines for SB2 templates:")
for ii, filename in enumerate(tqdm(SB2Temp_list)):
    # temp_list = []
    message, ftype = spec.readFile(SB2Temp_dir + filename, ftype)
    measuredLines = spec.measureLines()
    spec._lines = measuredLines
    # Same name-sorted ordering as the single-star loop above.
    lines = np.array(list(spec._lines.values()))[
        np.argsort(list(spec._lines.keys()))]
    linesLabels = np.array(list(spec._lines.keys()))[
        np.argsort(list(spec._lines.keys()))]
    # temp_list.append(lines)
    new_tempLines_4.append(lines)
# Bundle everything into one list and pickle it next to the other resources.
new_tempLines = [new_tempLines_0, new_tempLines_1,
                 new_tempLines_2, new_tempLines_3, new_tempLines_4]
pklPath = os.path.join(spec.thisDir, 'resources',
                       f'tempLines_{datestr}.pickle')
with open(pklPath, 'wb') as pklFile:
    pickle.dump(new_tempLines, pklFile)
| [
"subprocess.check_output",
"numpy.ones",
"numpy.where",
"tqdm.tqdm",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"glob.glob",
"spectrum.Spectrum",
"numpy.arange"
] | [((198, 232), 'subprocess.check_output', 'check_output', (["['/bin/date', '+%F']"], {}), "(['/bin/date', '+%F'])\n", (210, 232), False, 'from subprocess import check_output\n'), ((733, 793), 'numpy.array', 'np.array', (["['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'C', 'D']"], {}), "(['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'C', 'D'])\n", (741, 793), True, 'import numpy as np\n'), ((806, 868), 'numpy.array', 'np.array', (["['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'dC', 'DA']"], {}), "(['O', 'B', 'A', 'F', 'G', 'K', 'M', 'L', 'dC', 'DA'])\n", (814, 868), True, 'import numpy as np\n'), ((888, 929), 'numpy.empty', 'np.empty', (['singleTemp_list.size'], {'dtype': 'int'}), '(singleTemp_list.size, dtype=int)\n', (896, 929), True, 'import numpy as np\n'), ((948, 996), 'numpy.empty', 'np.empty', (['singleTemp_list.size'], {'dtype': 'np.float64'}), '(singleTemp_list.size, dtype=np.float64)\n', (956, 996), True, 'import numpy as np\n'), ((1015, 1063), 'numpy.empty', 'np.empty', (['singleTemp_list.size'], {'dtype': 'np.float64'}), '(singleTemp_list.size, dtype=np.float64)\n', (1023, 1063), True, 'import numpy as np\n'), ((1698, 1708), 'spectrum.Spectrum', 'Spectrum', ([], {}), '()\n', (1706, 1708), False, 'from spectrum import Spectrum\n'), ((2510, 2520), 'spectrum.Spectrum', 'Spectrum', ([], {}), '()\n', (2518, 2520), False, 'from spectrum import Spectrum\n'), ((1082, 1122), 'numpy.ones', 'np.ones', (['singleTemp_list.size'], {'dtype': 'int'}), '(singleTemp_list.size, dtype=int)\n', (1089, 1122), True, 'import numpy as np\n'), ((2163, 2234), 'numpy.arange', 'np.arange', (['SB2_index_start', '(SB2_index_start + SB2Temp_list.size)'], {'step': '(1)'}), '(SB2_index_start, SB2_index_start + SB2Temp_list.size, step=1)\n', (2172, 2234), True, 'import numpy as np\n'), ((2286, 2313), 'numpy.zeros', 'np.zeros', (['SB2Temp_list.size'], {}), '(SB2Temp_list.size)\n', (2294, 2313), True, 'import numpy as np\n'), ((2360, 2387), 'numpy.zeros', 'np.zeros', (['SB2Temp_list.size'], {}), 
'(SB2Temp_list.size)\n', (2368, 2387), True, 'import numpy as np\n'), ((2608, 2626), 'tqdm.tqdm', 'tqdm', (['SB2Temp_list'], {}), '(SB2Temp_list)\n', (2612, 2626), False, 'from tqdm import tqdm\n'), ((2434, 2460), 'numpy.ones', 'np.ones', (['SB2Temp_list.size'], {}), '(SB2Temp_list.size)\n', (2441, 2460), True, 'import numpy as np\n'), ((445, 481), 'glob.glob', 'glob.glob', (["(singleTemp_dir + '*.fits')"], {}), "(singleTemp_dir + '*.fits')\n", (454, 481), False, 'import glob\n'), ((587, 620), 'glob.glob', 'glob.glob', (["(SB2Temp_dir + '*.fits')"], {}), "(SB2Temp_dir + '*.fits')\n", (596, 620), False, 'import glob\n'), ((1214, 1273), 'numpy.where', 'np.where', (['(single_letter_specTypes == singleTemp_list[ii][0])'], {}), '(single_letter_specTypes == singleTemp_list[ii][0])\n', (1222, 1273), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mxnet as mx
import numpy as np
import progressbar
import six
import math
from scipy.signal import savgol_filter
from scipy import interpolate
from .homography_estimator import HomographyEstimator
from .player_ball_tracker import PlayerBallTracker
class FootballTracker:
    """Full football tracker.

    Given a list of images, tracks and re-identifies each player and the
    ball, computes the field homography at each frame, and applies it to
    every player's coordinates.

    Arguments:
        pretrained: Whether the homography/tracking models load pretrained weights.
        weights_homo: Path to the weights of the homography model.
        weights_keypoints: Path to the weights of the keypoints model.
        shape_in: Shape of the input image.
        shape_out: Shape of the output image.
        conf_tresh: Confidence threshold for keeping tracked bounding boxes.
        track_buffer: Number of frames kept in memory for re-identification.
        K: Number of boxes kept at each frame.
        frame_rate: Frame rate forwarded to the tracker.
        ctx: Device context forwarded to the tracker.
    """

    def __init__(
        self,
        pretrained=True,
        weights_homo=None,
        weights_keypoints=None,
        shape_in=512.0,
        shape_out=320.0,
        conf_tresh=0.5,
        track_buffer=30,
        K=100,
        frame_rate=30,
        ctx=None
    ):
        self.player_ball_tracker = PlayerBallTracker(
            conf_tresh=conf_tresh, track_buffer=track_buffer, K=K, frame_rate=frame_rate, ctx=ctx
        )
        self.homo_estimator = HomographyEstimator(
            pretrained=pretrained,
            weights_homo=weights_homo,
            weights_keypoints=weights_keypoints,
            shape_in=shape_in,
            shape_out=shape_out,
        )

    def __call__(
        self,
        imgs,
        split_size=None,
        results=None,
        begin_frame=0,
        verbose=True,
        save_tracking_folder=None,
        template=None,
        skip_homo=None,
        enforce_keypoints=False,
        homography_interpolation=False,
        homography_processing=False
    ):
        """Track players and the ball over ``imgs``.

        Args:
            imgs: List of np.array images to track.
            split_size: If None, apply the tracking model to the full image;
                if an int, the image shape must be divisible by it and the
                image is processed as (split_size, split_size) tiles.
            results: Previous tracking results to resume from. ``None``
                (the default) starts fresh; it replaces the old mutable
                default ``[]``, which is a shared-state pitfall.
            begin_frame: Starting frame index when resuming.
            verbose: Display tracking progress at each frame.
            save_tracking_folder: Folder where tracking images are saved.
            template: Football-field template warped onto saved images.
            skip_homo: Frame indices for which the homography is not
                recomputed (reused or interpolated instead). ``None``
                means no frames are skipped.
            enforce_keypoints: Force the keypoints model; frames where it
                cannot be used are skipped instead of falling back to the
                homography model.
            homography_interpolation: Interpolate missing homography
                predictions instead of repeating the last one.
            homography_processing: Smooth the homography estimates over
                time with a Savitzky-Golay filter.

        Returns:
            dict mapping entity id to a list of (x, y, frame, color) tuples.
        """
        # Fresh containers each call -- avoids the mutable-default-argument bug.
        if results is None:
            results = []
        if skip_homo is None:
            skip_homo = []
        assert enforce_keypoints == homography_interpolation, "We only use homography interpolation with keypoint detection at the moment"
        pred_homo, method = np.ones((3, 3)), "cv"
        points, values, methods = [], [], []
        for indx, input_img in progressbar.progressbar(enumerate(imgs)):
            if indx in skip_homo:
                if homography_interpolation:
                    # Leave a gap; it is filled by interpolation below.
                    continue
                else:
                    # Reuse the previously computed homography for this frame.
                    points.append(indx + 1)
                    values.append(pred_homo)
                    methods.append(method)
            else:
                pred_homo, method = self.homo_estimator(input_img)
                if (enforce_keypoints and method == 'torch'):
                    # Keypoints model could not be used for this frame.
                    if homography_interpolation:
                        continue
                    else:
                        points.append(indx + 1)
                        values.append(values[-1])
                        methods.append(methods[-1])
                else:
                    points.append(indx + 1)
                    values.append(pred_homo)
                    methods.append(method)
        points = np.array(points)
        values = np.array(values)
        if homography_interpolation:
            # Fill the skipped frames by interpolating the 3x3 entries over time.
            f = interpolate.interp1d(points, values, axis=0, fill_value='extrapolate')
            points = np.arange(1, len(imgs) + 1)
            values = f(points)
            methods = ["cv" for _ in range(len(imgs))]
        if homography_processing:
            # Temporal smoothing of the homography entries.
            values = savgol_filter(values, 5, 3, axis=0)
        frame_to_homo = {}
        for indx in range(len(imgs)):
            frame_to_homo[indx + 1] = (values[indx], methods[indx])
        results, frame_id = self.player_ball_tracker.get_tracking(
            imgs,
            results=results,
            begin_frame=begin_frame,
            verbose=verbose,
            split_size=split_size,
            save_tracking_folder=save_tracking_folder,
            template=template,
            frame_to_homo=frame_to_homo,
        )
        last_known_pos = {}
        trajectories = {}
        for result in progressbar.progressbar(results):
            frame, colors, bboxes, id_entities = result[0], result[1], result[2], result[3]
            pred_homo, method = frame_to_homo[frame]
            for color, bbox, id_entity in zip(colors, bboxes, id_entities):
                dst = self.homo_estimator.get_field_coordinates(bbox, pred_homo, method)
                if np.isnan(dst[0]) or np.isnan(dst[1]):
                    # Projection failed: fall back to the last known position.
                    if id_entity in last_known_pos:
                        dst = last_known_pos[id_entity]
                    else:
                        dst = None
                if dst is not None:
                    last_known_pos[id_entity] = [dst[0], dst[1]]
                    if id_entity in trajectories:
                        trajectories[id_entity].append((dst[0], dst[1], frame, color))
                    else:
                        trajectories[id_entity] = [(dst[0], dst[1], frame, color)]
        return trajectories
| [
"numpy.ones",
"scipy.signal.savgol_filter",
"progressbar.progressbar",
"scipy.interpolate.interp1d",
"numpy.array",
"numpy.isnan"
] | [((4900, 4916), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (4908, 4916), True, 'import numpy as np\n'), ((4934, 4950), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (4942, 4950), True, 'import numpy as np\n'), ((5919, 5951), 'progressbar.progressbar', 'progressbar.progressbar', (['results'], {}), '(results)\n', (5942, 5951), False, 'import progressbar\n'), ((3869, 3884), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (3876, 3884), True, 'import numpy as np\n'), ((5025, 5095), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['points', 'values'], {'axis': '(0)', 'fill_value': '"""extrapolate"""'}), "(points, values, axis=0, fill_value='extrapolate')\n", (5045, 5095), False, 'from scipy import interpolate\n'), ((5296, 5331), 'scipy.signal.savgol_filter', 'savgol_filter', (['values', '(5)', '(3)'], {'axis': '(0)'}), '(values, 5, 3, axis=0)\n', (5309, 5331), False, 'from scipy.signal import savgol_filter\n'), ((6283, 6299), 'numpy.isnan', 'np.isnan', (['dst[0]'], {}), '(dst[0])\n', (6291, 6299), True, 'import numpy as np\n'), ((6303, 6319), 'numpy.isnan', 'np.isnan', (['dst[1]'], {}), '(dst[1])\n', (6311, 6319), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
# import torch.nn.functional as F
import torch.optim as optim
import utils.utils as util
import utils.quantization as q
import numpy as np
import os, time, sys
import copy
import argparse
#########################
# supported model candidates
# Model architectures selectable via --model_id (an index into this list).
candidates = [
    'binput-pg',
]
#########################
#----------------------------
# Argument parser.
#----------------------------
parser = argparse.ArgumentParser(description='PyTorch CIFAR-10 Training')
parser.add_argument('--model_id', '-id', type=int, default=0)
# gtarget: target value the PG threshold parameters are regularized toward.
parser.add_argument('--gtarget', '-g', type=float, default=0.0)
parser.add_argument('--init_lr', '-lr', type=float, default=1e-3)
parser.add_argument('--batch_size', '-b', type=int, default=128)
parser.add_argument('--num_epoch', '-e', type=int, default=250)
parser.add_argument('--weight_decay', '-wd', type=float, default=1e-5)
# last_epoch: passed to the LR schedulers; -1 means start fresh.
parser.add_argument('--last_epoch', '-last', type=int, default=-1)
parser.add_argument('--finetune', '-f', action='store_true', help='finetune the model')
parser.add_argument('--save', '-s', action='store_true', help='save the model')
parser.add_argument('--test', '-t', action='store_true', help='test only')
parser.add_argument('--resume', '-r', type=str, default=None,
                    help='path of the model checkpoint for resuming training')
parser.add_argument('--data_dir', '-d', type=str, default='/tmp/cifar10_data',
                    help='path to the dataset directory')
parser.add_argument('--which_gpus', '-gpu', type=str, default='0', help='which gpus to use')
args = parser.parse_args()
_ARCH = candidates[args.model_id]
# Binary-input models need fixed-size batches, so drop the last partial batch.
drop_last = True if 'binput' in _ARCH else False
#----------------------------
# Load the CIFAR-10 dataset.
#----------------------------
def load_cifar10():
    """Build the CIFAR-10 train/test DataLoaders and the class-name tuple.

    Binary-input ('binput') architectures skip the per-channel normalization
    because they quantize the inputs themselves.
    """
    channel_norm = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
    train_ops = [
        transforms.RandomHorizontalFlip(),
        transforms.RandomCrop(32, 4),
        transforms.ToTensor(),
    ]
    test_ops = [transforms.ToTensor()]
    if 'binput' not in _ARCH:
        train_ops.append(channel_norm)
        test_ops.append(channel_norm)
    trainset = torchvision.datasets.CIFAR10(
        root=args.data_dir, train=True, download=True,
        transform=transforms.Compose(train_ops))
    testset = torchvision.datasets.CIFAR10(
        root=args.data_dir, train=False, download=True,
        transform=transforms.Compose(test_ops))
    # pin_memory=True makes transfering data from host to GPU faster
    common_kwargs = dict(batch_size=args.batch_size, num_workers=2,
                         pin_memory=True, drop_last=drop_last)
    trainloader = torch.utils.data.DataLoader(trainset, shuffle=True, **common_kwargs)
    testloader = torch.utils.data.DataLoader(testset, shuffle=False, **common_kwargs)
    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    return trainloader, testloader, classes
#----------------------------
# Define the model.
#----------------------------
def generate_model(model_arch):
    """Instantiate the network named by *model_arch*.

    Only the 'binput-pg' FracBNN ResNet-20 variant is currently supported;
    anything else raises NotImplementedError.
    """
    if 'binput-pg' not in model_arch:
        raise NotImplementedError("Model architecture is not supported.")
    import model.fracbnn_cifar10 as m
    return m.resnet20(batch_size=args.batch_size, num_gpus=torch.cuda.device_count())
#----------------------------
# Train the network.
#----------------------------
def train_model(trainloader, testloader, net,
                optimizer, scheduler, start_epoch, device):
    """Train *net* from *start_epoch* up to args.num_epoch epochs.

    Tracks the best test accuracy seen after each epoch; for 'pg'
    (precision-gating) architectures an extra L2 penalty pulls every
    'threshold' parameter toward args.gtarget.  If args.save is set, the
    best-performing weights are written under save_CIFAR10_model/.
    """
    # define the loss function
    criterion = (nn.CrossEntropyLoss().cuda()
                 if torch.cuda.is_available() else nn.CrossEntropyLoss())
    best_acc = 0.0
    best_model = copy.deepcopy(net.state_dict())
    for epoch in range(start_epoch, args.num_epoch): # loop over the dataset multiple times
        # set printing functions
        batch_time = util.AverageMeter('Time/batch', ':.2f')
        losses = util.AverageMeter('Loss', ':6.2f')
        top1 = util.AverageMeter('Acc', ':6.2f')
        progress = util.ProgressMeter(
            len(trainloader),
            [losses, top1, batch_time],
            prefix="Epoch: [{}]".format(epoch+1)
        )
        # switch the model to the training mode
        net.train()
        print('current learning rate = {}'.format(optimizer.param_groups[0]['lr']))
        # each epoch
        end = time.time()
        for i, data in enumerate(trainloader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data[0].to(device), data[1].to(device)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            if 'pg' in _ARCH:
                # 1e-5 * 0.5 * ||threshold - gtarget||^2 regularizer on every
                # PG threshold parameter (norm written out as a product).
                for name, param in net.named_parameters():
                    if 'threshold' in name:
                        loss += (0.00001 * 0.5 *
                                 torch.norm(param-args.gtarget) *
                                 torch.norm(param-args.gtarget))
            loss.backward()
            optimizer.step()
            # measure accuracy and record loss
            _, batch_predicted = torch.max(outputs.data, 1)
            batch_accu = 100.0 * (batch_predicted == labels).sum().item() / labels.size(0)
            losses.update(loss.item(), labels.size(0))
            top1.update(batch_accu, labels.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % 100 == 99:
                # print statistics every 100 mini-batches each epoch
                progress.display(i) # i = batch id in the epoch
        # update the learning rate
        scheduler.step()
        # print test accuracy every few epochs
        if epoch % 1 == 0:
            print('epoch {}'.format(epoch+1))
            epoch_acc = test_accu(testloader, net, device)
            if 'pg' in _ARCH:
                sparsity(testloader, net, device)
            if epoch_acc >= best_acc:
                best_acc = epoch_acc
                best_model = copy.deepcopy(net.state_dict())
            print("The best test accuracy so far: {:.1f}".format(best_acc))
    # save the model if required
    if args.save:
        print("Saving the trained model and states.")
        this_file_path = os.path.dirname(os.path.abspath(__file__))
        save_folder = os.path.join(this_file_path, 'save_CIFAR10_model')
        util.save_models(best_model, save_folder,
                         suffix=_ARCH+'-finetune' if args.finetune else _ARCH)
        """
        states = {'epoch':epoch+1,
                  'optimizer':optimizer.state_dict(),
                  'scheduler':scheduler.state_dict()}
        util.save_states(states, save_folder, suffix=_ARCH)
        """
    print('Finished Training')
#----------------------------
# Test accuracy.
#----------------------------
def test_accu(testloader, net, device):
correct = 0
total = 0
# switch the model to the evaluation mode
net.eval()
with torch.no_grad():
for data in testloader:
images, labels = data[0].to(device), data[1].to(device)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = 100.0 * correct / total
print('Accuracy of the network on the 10000 test images: %.1f %%' % accuracy)
return accuracy
#----------------------------
# Report sparsity in PG
#----------------------------
def sparsity(testloader, net, device):
    """Report the update-phase sparsity of all PGBinaryConv2d layers.

    Runs *net* over *testloader*; after each batch, collects the per-layer
    counters (total outputs vs. high-precision outputs) that the PG layers
    update during forward, and finally prints the overall sparsity
    percentage (share of outputs that skipped the high-precision path).
    """
    num_out, num_high = [], []

    def _report_sparsity(m):
        # Collect the counters a PGBinaryConv2d layer updates on forward.
        # (The original also computed m.__class__.__name__ but never used it.)
        if isinstance(m, q.PGBinaryConv2d):
            num_out.append(m.num_out)
            num_high.append(m.num_high)

    net.eval()
    # initialize cnt_out, cnt_high with one slot per PG layer
    net.apply(_report_sparsity)
    cnt_out = np.zeros(len(num_out))
    cnt_high = np.zeros(len(num_high))
    num_out, num_high = [], []
    with torch.no_grad():
        for data in testloader:
            images, labels = data[0].to(device), data[1].to(device)
            outputs = net(images)
            """ calculate statistics per PG layer """
            net.apply(_report_sparsity)
            cnt_out += np.array(num_out)
            cnt_high += np.array(num_high)
            # reset the collection lists for the next batch
            num_out = []
            num_high = []
    print('Sparsity of the update phase: %.1f %%' %
          (100.0-np.sum(cnt_high)*1.0/np.sum(cnt_out)*100.0))
#----------------------------
# Remove the saved placeholder
#----------------------------
def remove_placeholder(state_dict):
    """Return a copy of *state_dict* without 'encoder.placeholder' entries.

    The placeholder tensor is a saved artifact that must not be loaded back
    into the model; every key containing 'encoder.placeholder' is dropped
    while the order of the remaining entries is preserved.
    """
    from collections import OrderedDict
    # Keep insertion order; filter instead of the original if/pass/else chain.
    return OrderedDict(
        (key, value) for key, value in state_dict.items()
        if 'encoder.placeholder' not in key)
#----------------------------
# Main function.
#----------------------------
def main():
    """Command-line entry point.

    Builds the network, optionally restores a checkpoint (--resume), then
    runs one of three modes: test only (--test), finetune (--finetune),
    or train from scratch (default).
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = args.which_gpus
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Available GPUs: {}".format(torch.cuda.device_count()))
    print("Create {} model.".format(_ARCH))
    net = generate_model(_ARCH)
    if torch.cuda.device_count() > 1:
        # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
        print("Activate multi GPU support.")
        net = nn.DataParallel(net)
    net.to(device)
    #------------------
    # Load model params
    #------------------
    if args.resume is not None:
        model_path = args.resume
        if os.path.exists(model_path):
            print("@ Load trained model from {}.".format(model_path))
            state_dict = torch.load(model_path)
            # drop the saved encoder placeholder tensor before loading
            state_dict = remove_placeholder(state_dict)
            net.load_state_dict(state_dict, strict=False)
        else:
            raise ValueError("Model not found.")
    #-----------------
    # Prepare Data
    #-----------------
    print("Loading the data.")
    trainloader, testloader, classes = load_cifar10()
    #-----------------
    # Test
    #-----------------
    if args.test:
        print("Mode: Test only.")
        test_accu(testloader, net, device)
        if 'pg' in _ARCH:
            sparsity(testloader, net, device)
    #-----------------
    # Finetune
    #-----------------
    elif args.finetune:
        print("num epochs = {}".format(args.num_epoch))
        initial_lr = args.init_lr
        print("init lr = {}".format(initial_lr))
        # No weight decay while finetuning.
        optimizer = optim.Adam(net.parameters(),
                               lr = initial_lr,
                               weight_decay=0.)
        # Step-decay schedule used only for finetuning.
        lr_decay_milestones = [100, 150, 200]
        print("milestones = {}".format(lr_decay_milestones))
        scheduler = optim.lr_scheduler.MultiStepLR(
                        optimizer,
                        milestones=lr_decay_milestones,
                        gamma=0.1,
                        last_epoch=args.last_epoch)
        start_epoch=0
        print("Start finetuning.")
        train_model(trainloader, testloader, net,
                    optimizer, scheduler, start_epoch, device)
        test_accu(testloader, net, device)
    #-----------------
    # Train
    #-----------------
    else:
        print("num epochs = {}".format(args.num_epoch))
        #-----------
        # Optimizer
        #-----------
        initial_lr = args.init_lr
        optimizer = optim.Adam(net.parameters(),
                               lr = initial_lr,
                               weight_decay=args.weight_decay)
        #-----------
        # Scheduler
        #-----------
        print("Use linear learning rate decay.")
        lambda1 = lambda epoch : (1.0-epoch/args.num_epoch) # linear decay
        #lambda1 = lambda epoch : (0.7**epoch) # exponential decay
        scheduler = optim.lr_scheduler.LambdaLR(
                        optimizer,
                        lr_lambda=lambda1,
                        last_epoch=args.last_epoch)
        start_epoch = 0
        print("Start training.")
        train_model(trainloader, testloader, net,
                    optimizer, scheduler, start_epoch, device)
        test_accu(testloader, net, device)
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
| [
"torch.optim.lr_scheduler.LambdaLR",
"torch.nn.CrossEntropyLoss",
"torch.optim.lr_scheduler.MultiStepLR",
"torch.max",
"torch.cuda.device_count",
"numpy.array",
"torch.cuda.is_available",
"os.path.exists",
"argparse.ArgumentParser",
"torchvision.transforms.ToTensor",
"collections.OrderedDict",
... | [((630, 694), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch CIFAR-10 Training"""'}), "(description='PyTorch CIFAR-10 Training')\n", (653, 694), False, 'import argparse\n'), ((2007, 2082), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2027, 2082), True, 'import torchvision.transforms as transforms\n'), ((2468, 2508), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_train_list'], {}), '(transform_train_list)\n', (2486, 2508), True, 'import torchvision.transforms as transforms\n'), ((2530, 2569), 'torchvision.transforms.Compose', 'transforms.Compose', (['transform_test_list'], {}), '(transform_test_list)\n', (2548, 2569), True, 'import torchvision.transforms as transforms\n'), ((2655, 2761), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'args.data_dir', 'train': '(True)', 'download': '(True)', 'transform': 'transform_train'}), '(root=args.data_dir, train=True, download=True,\n transform=transform_train)\n', (2683, 2761), False, 'import torchvision\n'), ((2820, 2957), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(2)', 'pin_memory': '(True)', 'drop_last': 'drop_last'}), '(trainset, batch_size=args.batch_size, shuffle=\n True, num_workers=2, pin_memory=True, drop_last=drop_last)\n', (2847, 2957), False, 'import torch\n'), ((3061, 3167), 'torchvision.datasets.CIFAR10', 'torchvision.datasets.CIFAR10', ([], {'root': 'args.data_dir', 'train': '(False)', 'download': '(True)', 'transform': 'transform_test'}), '(root=args.data_dir, train=False, download=True,\n transform=transform_test)\n', (3089, 3167), False, 'import torchvision\n'), ((3224, 3361), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], 
{'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(2)', 'pin_memory': '(True)', 'drop_last': 'drop_last'}), '(testset, batch_size=args.batch_size, shuffle=\n False, num_workers=2, pin_memory=True, drop_last=drop_last)\n', (3251, 3361), False, 'import torch\n'), ((9606, 9619), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9617, 9619), False, 'from collections import OrderedDict\n'), ((2157, 2190), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (2188, 2190), True, 'import torchvision.transforms as transforms\n'), ((2200, 2228), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)', '(4)'], {}), '(32, 4)\n', (2221, 2228), True, 'import torchvision.transforms as transforms\n'), ((2238, 2259), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2257, 2259), True, 'import torchvision.transforms as transforms\n'), ((2298, 2319), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2317, 2319), True, 'import torchvision.transforms as transforms\n'), ((4257, 4282), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4280, 4282), False, 'import torch\n'), ((4288, 4309), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4307, 4309), True, 'import torch.nn as nn\n'), ((4528, 4567), 'utils.utils.AverageMeter', 'util.AverageMeter', (['"""Time/batch"""', '""":.2f"""'], {}), "('Time/batch', ':.2f')\n", (4545, 4567), True, 'import utils.utils as util\n'), ((4585, 4619), 'utils.utils.AverageMeter', 'util.AverageMeter', (['"""Loss"""', '""":6.2f"""'], {}), "('Loss', ':6.2f')\n", (4602, 4619), True, 'import utils.utils as util\n'), ((4635, 4668), 'utils.utils.AverageMeter', 'util.AverageMeter', (['"""Acc"""', '""":6.2f"""'], {}), "('Acc', ':6.2f')\n", (4652, 4668), True, 'import utils.utils as util\n'), ((5087, 5098), 'time.time', 'time.time', ([], {}), '()\n', (5096, 5098), False, 'import 
os, time, sys\n'), ((7929, 7944), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7942, 7944), False, 'import torch\n'), ((8920, 8935), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8933, 8935), False, 'import torch\n'), ((10178, 10203), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (10201, 10203), False, 'import torch\n'), ((10341, 10361), 'torch.nn.DataParallel', 'nn.DataParallel', (['net'], {}), '(net)\n', (10356, 10361), True, 'import torch.nn as nn\n'), ((10530, 10556), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (10544, 10556), False, 'import os, time, sys\n'), ((5936, 5962), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (5945, 5962), False, 'import torch\n'), ((6264, 6275), 'time.time', 'time.time', ([], {}), '()\n', (6273, 6275), False, 'import os, time, sys\n'), ((8107, 8133), 'torch.max', 'torch.max', (['outputs.data', '(1)'], {}), '(outputs.data, 1)\n', (8116, 8133), False, 'import torch\n'), ((9188, 9205), 'numpy.array', 'np.array', (['num_out'], {}), '(num_out)\n', (9196, 9205), True, 'import numpy as np\n'), ((9230, 9248), 'numpy.array', 'np.array', (['num_high'], {}), '(num_high)\n', (9238, 9248), True, 'import numpy as np\n'), ((9989, 10014), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10012, 10014), False, 'import torch\n'), ((10065, 10090), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (10088, 10090), False, 'import torch\n'), ((10653, 10675), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (10663, 10675), False, 'import torch\n'), ((11716, 11832), 'torch.optim.lr_scheduler.MultiStepLR', 'optim.lr_scheduler.MultiStepLR', (['optimizer'], {'milestones': 'lr_decay_milestones', 'gamma': '(0.1)', 'last_epoch': 'args.last_epoch'}), '(optimizer, milestones=lr_decay_milestones,\n gamma=0.1, last_epoch=args.last_epoch)\n', (11746, 11832), True, 'import torch.optim as optim\n'), 
((12800, 12890), 'torch.optim.lr_scheduler.LambdaLR', 'optim.lr_scheduler.LambdaLR', (['optimizer'], {'lr_lambda': 'lambda1', 'last_epoch': 'args.last_epoch'}), '(optimizer, lr_lambda=lambda1, last_epoch=args.\n last_epoch)\n', (12827, 12890), True, 'import torch.optim as optim\n'), ((3858, 3883), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (3881, 3883), False, 'import torch\n'), ((4208, 4229), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (4227, 4229), True, 'import torch.nn as nn\n'), ((7213, 7263), 'os.path.join', 'os.path.join', (['this_file_path', '"""save_CIFAR10_model"""'], {}), "(this_file_path, 'save_CIFAR10_model')\n", (7225, 7263), False, 'import os, time, sys\n'), ((7280, 7381), 'utils.utils.save_models', 'util.save_models', (['best_model', 'save_folder'], {'suffix': "(_ARCH + '-finetune' if args.finetune else _ARCH)"}), "(best_model, save_folder, suffix=_ARCH + '-finetune' if\n args.finetune else _ARCH)\n", (7296, 7381), True, 'import utils.utils as util\n'), ((6227, 6238), 'time.time', 'time.time', ([], {}), '()\n', (6236, 6238), False, 'import os, time, sys\n'), ((7156, 7181), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (7171, 7181), False, 'import os, time, sys\n'), ((9390, 9405), 'numpy.sum', 'np.sum', (['cnt_out'], {}), '(cnt_out)\n', (9396, 9405), True, 'import numpy as np\n'), ((5766, 5798), 'torch.norm', 'torch.norm', (['(param - args.gtarget)'], {}), '(param - args.gtarget)\n', (5776, 5798), False, 'import torch\n'), ((9369, 9385), 'numpy.sum', 'np.sum', (['cnt_high'], {}), '(cnt_high)\n', (9375, 9385), True, 'import numpy as np\n'), ((5700, 5732), 'torch.norm', 'torch.norm', (['(param - args.gtarget)'], {}), '(param - args.gtarget)\n', (5710, 5732), False, 'import torch\n')] |
#!/usr/bin/env python3
# coding:utf-8
import cv2
import numpy as np
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from paddle.inference import Config
from paddle.inference import PrecisionType
from paddle.inference import create_predictor
import yaml
import time
# ————————————————图像预处理函数———————————————— #
def resize(img, target_size):
    """Resize *img* to target_size x target_size (aspect ratio not preserved).

    Raises TypeError if *img* is not a numpy array.
    """
    if not isinstance(img, np.ndarray):
        raise TypeError('image type is not numpy.')
    im_shape = img.shape
    # Scale each axis independently so the output is square.
    # (The original also computed np.min/np.max of the shape but never used them.)
    im_scale_x = float(target_size) / float(im_shape[1])
    im_scale_y = float(target_size) / float(im_shape[0])
    img = cv2.resize(img, None, None, fx=im_scale_x, fy=im_scale_y)
    return img
def normalize(img, mean, std):
    """Scale *img* from [0, 255] to floats, then standardize per channel.

    *mean* and *std* are per-channel sequences, broadcast over the H and W
    dimensions.  Returns a new array; the input is not modified.
    """
    scaled = img / 255.0
    channel_mean = np.array(mean)[np.newaxis, np.newaxis, :]
    channel_std = np.array(std)[np.newaxis, np.newaxis, :]
    scaled -= channel_mean
    scaled /= channel_std
    return scaled
def preprocess(img, img_size):
    """Turn a BGR frame into a 1xCxHxW float batch for the detector.

    Returns (network_input, resized_bgr_frame); the resized BGR frame is
    kept so detections can be drawn on it later.
    """
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    resized = resize(img, img_size)
    rgb = resized[:, :, ::-1].astype('float32')  # bgr -> rgb
    standardized = normalize(rgb, imagenet_mean, imagenet_std)
    chw = standardized.transpose((2, 0, 1))  # hwc -> chw
    return chw[np.newaxis, :], resized
# ——————————————————————模型配置、预测相关函数—————————————————————————— #
def predict_config(model_file, params_file):
    '''
    Purpose: build and initialize the Paddle inference predictor.
    Inputs : model structure file, model parameters file.
    Output : the configured predictor.
    '''
    # Build the Config for the actual deployment environment.
    config = Config()
    # Load the model files.
    config.set_prog_file(model_file)
    config.set_params_file(params_file)
    # Config defaults to CPU inference; enable GPU explicitly with the
    # initial GPU memory to allocate (MB) and the GPU card id.
    config.enable_use_gpu(400, 0)
    # Enable IR optimization and memory optimization.
    config.switch_ir_optim()
    config.enable_memory_optim()
    # config.enable_tensorrt_engine(workspace_size=1 << 30, precision_mode=PrecisionType.Float32,max_batch_size=1, min_subgraph_size=5, use_static=False, use_calib_mode=False)
    predictor = create_predictor(config)
    return predictor
def predict(predictor, img):
    '''
    Purpose: run one inference pass with the predictor.
    Inputs : the predictor and a list of input arrays, one per model input
             (the caller passes [im_shape, image_tensor, scale_factor]).
    Output : list of output arrays, one per model output.
    '''
    input_names = predictor.get_input_names()
    for i, name in enumerate(input_names):
        input_tensor = predictor.get_input_handle(name)
        input_tensor.reshape(img[i].shape)
        input_tensor.copy_from_cpu(img[i].copy())
    # Run the predictor.
    predictor.run()
    # Collect the outputs.
    results = []
    # Fetch every output tensor back to host memory.
    output_names = predictor.get_output_names()
    for i, name in enumerate(output_names):
        output_tensor = predictor.get_output_handle(name)
        output_data = output_tensor.copy_to_cpu()
        results.append(output_data)
    return results
# ——————————————————————后处理函数—————————————————————————— #
def draw_bbox_image(frame, result, label_list, threshold=0.5):
    """Draw every detection with score >= *threshold* onto *frame* in place.

    Each row of *result* is [class_id, score, xmin, ymin, xmax, ymax].
    """
    for detection in result:
        class_id, score = detection[0], detection[1]
        if score < threshold:
            continue
        bbox = detection[2:]
        xmin, ymin, xmax, ymax = bbox
        cv2.rectangle(frame, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (255,0,255), 2)
        label_id = label_list[int(class_id)]
        print('label is {}, bbox is {}'.format(label_id, bbox))
        try:
            # cv2.putText(image, text, (x, y), font, scale, (b, g, r), thickness)
            cv2.putText(frame, label_id, (int(xmin), int(ymin-2)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,0,0), 2)
            cv2.putText(frame, str(round(score,2)), (int(xmin-35), int(ymin-2)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0), 2)
        except KeyError:
            pass
def callback(data):
    """ROS image callback: run detection on one frame and display it."""
    global bridge, predictor, im_size, im_shape, scale_factor, label_list
    # Convert the ROS Image message to an OpenCV BGR frame.
    cv_img = bridge.imgmsg_to_cv2(data, "bgr8")
    img_data, cv_img = preprocess(cv_img, im_size)
    # Run inference: inputs are [im_shape, image, scale_factor].
    result = predict(predictor, [im_shape, img_data, scale_factor])
    draw_bbox_image(cv_img, result[0], label_list, threshold=0.1)
    cv2.imshow("cv_img", cv_img)
    cv2.waitKey(1)
if __name__ == '__main__':
    import sys
    import os
    print(sys.version)  # show the Python version in use
    # Initialize the ROS node.
    rospy.init_node('ppinfer_node', anonymous=True)
    bridge = CvBridge()
    # Model file path.  open() does not expand '~', so expand it explicitly
    # (the original passed the literal '~/...' path to open()).
    model_dir = os.path.expanduser(
        '~/paddle_ros_ws/src/py3_infer/scripts/yolov3_r50vd_dcn_270e_coco/')
    # Read the label list out of infer_cfg.yml.
    infer_cfg = open(model_dir + 'infer_cfg.yml')
    data = infer_cfg.read()
    # SafeLoader: plain yaml.load without a Loader is deprecated/removed in
    # modern PyYAML and can construct arbitrary objects.
    yaml_reader = yaml.load(data, Loader=yaml.SafeLoader)
    label_list = yaml_reader['label_list']
    print(label_list)
    # Model structure / parameter files.
    model_file = model_dir + "model.pdmodel"
    params_file = model_dir + "model.pdiparams"
    # Image-size-related input initialization.
    try:
        img = bridge.imgmsg_to_cv2(data, "bgr8")
    except AttributeError:
        # 'data' is the YAML text here, so this always falls back to a dummy frame.
        img = np.zeros((224,224,3), np.uint8)
    im_size = 224
    scale_factor = np.array([im_size * 1. / img.shape[0],
                             im_size * 1. / img.shape[1]]).reshape((1, 2)).astype(np.float32)
    im_shape = np.array([im_size, im_size]).reshape((1, 2)).astype(np.float32)
    # Build the predictor and subscribe to the camera topic.
    predictor = predict_config(model_file, params_file)
    rospy.Subscriber('/image_view/image_raw', Image, callback)
rospy.spin() | [
"paddle.inference.Config",
"rospy.init_node",
"yaml.load",
"paddle.inference.create_predictor",
"numpy.max",
"cv2.imshow",
"cv_bridge.CvBridge",
"numpy.array",
"numpy.zeros",
"rospy.spin",
"numpy.min",
"cv2.resize",
"rospy.Subscriber",
"cv2.waitKey"
] | [((556, 577), 'numpy.min', 'np.min', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (562, 577), True, 'import numpy as np\n'), ((596, 617), 'numpy.max', 'np.max', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (602, 617), True, 'import numpy as np\n'), ((742, 799), 'cv2.resize', 'cv2.resize', (['img', 'None', 'None'], {'fx': 'im_scale_x', 'fy': 'im_scale_y'}), '(img, None, None, fx=im_scale_x, fy=im_scale_y)\n', (752, 799), False, 'import cv2\n'), ((1591, 1599), 'paddle.inference.Config', 'Config', ([], {}), '()\n', (1597, 1599), False, 'from paddle.inference import Config\n'), ((2063, 2087), 'paddle.inference.create_predictor', 'create_predictor', (['config'], {}), '(config)\n', (2079, 2087), False, 'from paddle.inference import create_predictor\n'), ((3999, 4027), 'cv2.imshow', 'cv2.imshow', (['"""cv_img"""', 'cv_img'], {}), "('cv_img', cv_img)\n", (4009, 4027), False, 'import cv2\n'), ((4033, 4047), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4044, 4047), False, 'import cv2\n'), ((4149, 4196), 'rospy.init_node', 'rospy.init_node', (['"""ppinfer_node"""'], {'anonymous': '(True)'}), "('ppinfer_node', anonymous=True)\n", (4164, 4196), False, 'import rospy\n'), ((4210, 4220), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (4218, 4220), False, 'from cv_bridge import CvBridge, CvBridgeError\n'), ((4453, 4468), 'yaml.load', 'yaml.load', (['data'], {}), '(data)\n', (4462, 4468), False, 'import yaml\n'), ((5091, 5149), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/image_view/image_raw"""', 'Image', 'callback'], {}), "('/image_view/image_raw', Image, callback)\n", (5107, 5149), False, 'import rospy\n'), ((5155, 5167), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (5165, 5167), False, 'import rospy\n'), ((880, 894), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (888, 894), True, 'import numpy as np\n'), ((932, 945), 'numpy.array', 'np.array', (['std'], {}), '(std)\n', (940, 945), True, 'import numpy as np\n'), ((4763, 4796), 'numpy.zeros', 
'np.zeros', (['(224, 224, 3)', 'np.uint8'], {}), '((224, 224, 3), np.uint8)\n', (4771, 4796), True, 'import numpy as np\n'), ((4832, 4902), 'numpy.array', 'np.array', (['[im_size * 1.0 / img.shape[0], im_size * 1.0 / img.shape[1]]'], {}), '([im_size * 1.0 / img.shape[0], im_size * 1.0 / img.shape[1]])\n', (4840, 4902), True, 'import numpy as np\n'), ((4951, 4979), 'numpy.array', 'np.array', (['[im_size, im_size]'], {}), '([im_size, im_size])\n', (4959, 4979), True, 'import numpy as np\n')] |
import csv
import random
from pathlib import Path
import numpy as np
import time as t
from data.model import Record, calculate_ranges
from data.utils import bin_array
from visualise import graph_power
from matplotlib import pyplot as plt
from model.model import LSTM, ClassicRNN
from tqdm import tqdm
parent = Path(__file__).parent
split = .7
dataset_size = 250000
batch_size = 10
# original data is sampled per minute; so 60 is an hour 60 * 24 a day... etc
bin_width = 60
look_back = 10
def normalise(arr):
    """Min-max scale *arr* to [0, 1] per column (axis 0), in place.

    Returns the same array object.  Columns with zero range divide by
    zero, matching the previous elementwise behaviour.
    """
    # Renamed from min/max to avoid shadowing the builtins, and replaced
    # the per-row Python loop with one vectorized in-place assignment.
    col_min = np.min(arr, axis=0)
    col_max = np.max(arr, axis=0)
    arr[:] = (arr - col_min) / (col_max - col_min)
    return arr
def create_dataset(power_vals, look_back=1):
    """Build (samples, targets) pairs for next-step forecasting.

    Each sample is a window of *look_back* consecutive values and its
    target is the value immediately following the window.  Returns X with
    shape (n, look_back, 1) and Y with shape (n, 1).
    """
    n_samples = len(power_vals) - look_back - 1
    windows = [power_vals[start:start + look_back] for start in range(n_samples)]
    targets = [power_vals[start + look_back] for start in range(n_samples)]
    X = np.expand_dims(np.array(windows), axis=2)
    Y = np.expand_dims(np.array(targets), axis=1)
    return X, Y
if __name__ == "__main__":
    # ---- Load the raw data; cache the parsed CSV as .npy for faster reruns ----
    dataset = []
    if not Path.is_file(Path("./data/processed.npy")):
        with open(str(parent) + '/data/household_power_consumption.txt', 'r', newline='') as csvfile:
            linereader = csv.reader(csvfile, delimiter=';')
            for row in linereader:
                dataset.append(row)
        dataset = np.array(dataset)
        np.save("./data/processed", dataset)
    else:
        dataset = np.load("./data/processed.npy", allow_pickle=True)
    # ---- Build the label list (first CSV row), adding the derived fields ----
    labels = dataset[0]
    labels = np.delete(labels, 1)
    labels[0] = "DateTime"
    labels = np.append(labels, "residual active energy")
    labels = np.append(labels, "error active vs amp *volt")
    labels = np.append(labels, "power")
    processed = []
    process_error = []
    start = t.time()
    # Take a random contiguous slice of dataset_size rows for this run.
    s_val = random.randrange(1, len(dataset) - dataset_size, 1)
    print("Dataset range {}:{}".format(s_val, s_val + dataset_size))
    for d in dataset[s_val:s_val + dataset_size]:
        rec = Record().process_entry(d)
        if rec is not False:
            processed.append(rec)
        else:
            # rows that failed parsing are kept aside (currently unused)
            process_error.append(d)
    print("Dataset processing time : {}".format(t.time() - start))
    del d
    del start
    # TODO: break into own function and explain why it's here
    ranges = calculate_ranges(processed)
    print("{0:>25}: \t{1:>10}\t{2:>10}\t{3:>10}".format("label", "min", "mean", "max"))
    for idx, l in enumerate(labels):
        print("{0:>25}: \t{1:>10}\t{2:>10}\t{3:>10}".format(l, ranges[idx][0], ranges[idx][1], ranges[idx][2]))
    dataset = []
    for rec in processed:
        r = Record()
        r.process_record(rec, ranges)
        dataset.append(r)
    # graph_power(dataset)
    power_vals = []
    for rec in dataset:
        power_vals.append(rec.power)
    # Aggregate per-minute samples into bins of bin_width minutes.
    power_vals = bin_array(power_vals, bin_width=bin_width)
    train = power_vals[:int(len(power_vals) * split)]
    test = power_vals[int(len(power_vals) * split):]
    X_train, Y_train = create_dataset(train, look_back)
    X_test, Y_test = create_dataset(test, look_back)
    train_loss = 0.0
    running_loss = 0.0
    test_loss = 0.0
    preds = []
    # ---- Split the training samples into fixed-size batches plus remainder ----
    LX = len(X_train)
    rem = LX % batch_size
    batches = (LX - rem)
    new_train_X = []
    new_train_Y = []
    for i in range(0, batches, batch_size):
        new_train_X.append(X_train[i:i+batch_size])
        new_train_Y.append(Y_train[i:i + batch_size])
    new_train_X.append(X_train[-rem:])
    new_train_Y.append(Y_train[-rem:])
    rnn = ClassicRNN(new_train_X[0].shape[1], 1)
    with tqdm(total=len(new_train_X)) as pbar:
        for idx, b in enumerate(new_train_X):
            for (sample, target) in zip(b, new_train_Y[idx]):
                # NOTE(review): running_loss is never reset between batches,
                # so train_loss accumulates overlapping sums -- confirm intent.
                running_loss += rnn.forward_backward(sample, target)
            train_loss += running_loss / float(b.shape[0])
            pbar.update(1)
    for (sample, target) in zip(X_test, Y_test):
        sample_loss, pred = rnn.forward(sample, target)
        test_loss += sample_loss
        preds.append(pred)
    train_loss = train_loss / float(len(new_train_X))
    # NOTE(review): this divides by the *training* target count;
    # Y_test.shape[0] looks intended here -- confirm before trusting the value.
    test_loss = test_loss / float(Y_train.shape[0])
    print("Train set loss: {}".format(train_loss))
    print("Test set loss: {}".format(test_loss))
    preds = np.array(preds)
    preds = normalise(preds)
    vals = preds[:, 0, 0]
    # ---- Plot actual vs. predicted, with per-segment means as hlines ----
    axis = np.arange(0, len(X_train) + len(X_test), 1)
    mean = np.mean(X_train[:, -1])
    plt.hlines(y=mean, xmin=0, xmax=len(axis[:len(X_train)]))
    mean = np.mean(X_test[:, -1])
    plt.hlines(y=mean, xmin=0 + len(X_train), xmax=len(axis))
    plt.plot(axis[:len(X_train)], X_train[:, -1], 'b', label="Train Actual", alpha=0.5, linewidth=0.5)
    plt.plot(axis[len(X_train):], X_test[:, -1], 'r', label="Actual", alpha=0.5, linewidth=0.5)
    plt.plot(axis[len(X_train):], preds[:, 0, 0], 'g', label="Predicted", alpha=0.5, linewidth=0.5)
    plt.legend()
    plt.show()
    print()
| [
"numpy.mean",
"matplotlib.pyplot.show",
"pathlib.Path",
"numpy.delete",
"numpy.max",
"numpy.append",
"numpy.array",
"numpy.load",
"data.model.Record",
"numpy.expand_dims",
"numpy.min",
"csv.reader",
"numpy.save",
"model.model.ClassicRNN",
"time.time",
"data.utils.bin_array",
"matplot... | [((310, 324), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (314, 324), False, 'from pathlib import Path\n'), ((521, 540), 'numpy.min', 'np.min', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (527, 540), True, 'import numpy as np\n'), ((551, 570), 'numpy.max', 'np.max', (['arr'], {'axis': '(0)'}), '(arr, axis=0)\n', (557, 570), True, 'import numpy as np\n'), ((899, 910), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (907, 910), True, 'import numpy as np\n'), ((919, 944), 'numpy.expand_dims', 'np.expand_dims', (['X'], {'axis': '(2)'}), '(X, axis=2)\n', (933, 944), True, 'import numpy as np\n'), ((953, 964), 'numpy.array', 'np.array', (['Y'], {}), '(Y)\n', (961, 964), True, 'import numpy as np\n'), ((973, 998), 'numpy.expand_dims', 'np.expand_dims', (['Y'], {'axis': '(1)'}), '(Y, axis=1)\n', (987, 998), True, 'import numpy as np\n'), ((1554, 1574), 'numpy.delete', 'np.delete', (['labels', '(1)'], {}), '(labels, 1)\n', (1563, 1574), True, 'import numpy as np\n'), ((1615, 1658), 'numpy.append', 'np.append', (['labels', '"""residual active energy"""'], {}), "(labels, 'residual active energy')\n", (1624, 1658), True, 'import numpy as np\n'), ((1672, 1718), 'numpy.append', 'np.append', (['labels', '"""error active vs amp *volt"""'], {}), "(labels, 'error active vs amp *volt')\n", (1681, 1718), True, 'import numpy as np\n'), ((1732, 1758), 'numpy.append', 'np.append', (['labels', '"""power"""'], {}), "(labels, 'power')\n", (1741, 1758), True, 'import numpy as np\n'), ((1813, 1821), 'time.time', 't.time', ([], {}), '()\n', (1819, 1821), True, 'import time as t\n'), ((2325, 2352), 'data.model.calculate_ranges', 'calculate_ranges', (['processed'], {}), '(processed)\n', (2341, 2352), False, 'from data.model import Record, calculate_ranges\n'), ((2846, 2888), 'data.utils.bin_array', 'bin_array', (['power_vals'], {'bin_width': 'bin_width'}), '(power_vals, bin_width=bin_width)\n', (2855, 2888), False, 'from data.utils import bin_array\n'), ((3540, 
3578), 'model.model.ClassicRNN', 'ClassicRNN', (['new_train_X[0].shape[1]', '(1)'], {}), '(new_train_X[0].shape[1], 1)\n', (3550, 3578), False, 'from model.model import LSTM, ClassicRNN\n'), ((4272, 4287), 'numpy.array', 'np.array', (['preds'], {}), '(preds)\n', (4280, 4287), True, 'import numpy as np\n'), ((4409, 4432), 'numpy.mean', 'np.mean', (['X_train[:, -1]'], {}), '(X_train[:, -1])\n', (4416, 4432), True, 'import numpy as np\n'), ((4506, 4528), 'numpy.mean', 'np.mean', (['X_test[:, -1]'], {}), '(X_test[:, -1])\n', (4513, 4528), True, 'import numpy as np\n'), ((4894, 4906), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4904, 4906), True, 'from matplotlib import pyplot as plt\n'), ((4911, 4921), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4919, 4921), True, 'from matplotlib import pyplot as plt\n'), ((1466, 1516), 'numpy.load', 'np.load', (['"""./data/processed.npy"""'], {'allow_pickle': '(True)'}), "('./data/processed.npy', allow_pickle=True)\n", (1473, 1516), True, 'import numpy as np\n'), ((2646, 2654), 'data.model.Record', 'Record', ([], {}), '()\n', (2652, 2654), False, 'from data.model import Record, calculate_ranges\n'), ((1085, 1113), 'pathlib.Path', 'Path', (['"""./data/processed.npy"""'], {}), "('./data/processed.npy')\n", (1089, 1113), False, 'from pathlib import Path\n'), ((1243, 1277), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""";"""'}), "(csvfile, delimiter=';')\n", (1253, 1277), False, 'import csv\n'), ((1371, 1388), 'numpy.array', 'np.array', (['dataset'], {}), '(dataset)\n', (1379, 1388), True, 'import numpy as np\n'), ((1401, 1437), 'numpy.save', 'np.save', (['"""./data/processed"""', 'dataset'], {}), "('./data/processed', dataset)\n", (1408, 1437), True, 'import numpy as np\n'), ((2019, 2027), 'data.model.Record', 'Record', ([], {}), '()\n', (2025, 2027), False, 'from data.model import Record, calculate_ranges\n'), ((2206, 2214), 'time.time', 't.time', ([], {}), '()\n', (2212, 2214), True, 'import 
time as t\n')] |
# -*- coding: utf-8 -*-
#
# Creating Sequence to Sequence Models
#-------------------------------------
# Here we show how to implement sequence to sequence models.
# Specifically, we will build an English to German translation model.
#
import os
import re
import string
import requests
import io
import numpy as np
import collections
import random
import pickle
import string
import matplotlib.pyplot as plt
import tensorflow as tf
from zipfile import ZipFile
from collections import Counter
from tensorflow.models.rnn.translate import data_utils
from tensorflow.models.rnn.translate import seq2seq_model
from tensorflow.python.framework import ops
# Reset any graph left over from a previous run so the script can be
# re-executed inside one Python process.
ops.reset_default_graph()
# Start a session
sess = tf.Session()
# Model Parameters
learning_rate = 0.1
lr_decay_rate = 0.99  # multiplicative decay applied to the learning rate
lr_decay_every = 100  # decay the learning rate every N generations
max_gradient = 5.0  # gradient-clipping norm
batch_size = 50
num_layers = 3
rnn_size = 500
layer_size = 512  # NOTE(review): assigned but never used below -- confirm
generations = 10000
vocab_size = 10000
save_every = 1000  # checkpoint interval (generations)
eval_every = 500  # test-sentence translation interval (generations)
output_every = 50  # loss-logging interval (generations)
punct = string.punctuation
# Data Parameters
data_dir = 'temp'
data_file = 'eng_ger.txt'
model_path = 'seq2seq_model'
full_model_dir = os.path.join(data_dir, model_path)
# Test Translation from English (lowercase, no punct)
test_english = ['hello where is my computer',
                'the quick brown fox jumped over the lazy dog',
                'is it going to rain tomorrow']
# Make Model Directory
if not os.path.exists(full_model_dir):
    os.makedirs(full_model_dir)
# Make data directory
if not os.path.exists(data_dir):
    os.makedirs(data_dir)
print('Loading English-German Data')
# Check for data, if it doesn't exist, download it and save it
if not os.path.isfile(os.path.join(data_dir, data_file)):
    print('Data not found, downloading Eng-Ger sentences from www.manythings.org')
    sentence_url = 'http://www.manythings.org/anki/deu-eng.zip'
    r = requests.get(sentence_url)
    z = ZipFile(io.BytesIO(r.content))
    file = z.read('deu.txt')  # NOTE: shadows the (Python 2) builtin name `file`
    # Format Data: drop non-ASCII characters, then split into lines
    eng_ger_data = file.decode()
    eng_ger_data = eng_ger_data.encode('ascii',errors='ignore')
    eng_ger_data = eng_ger_data.decode().split('\n')
    # Write to file so later runs can skip the download
    with open(os.path.join(data_dir, data_file), 'w') as out_conn:
        for sentence in eng_ger_data:
            out_conn.write(sentence + '\n')
else:
    # Cached copy found on disk: read it back, stripping trailing newlines.
    eng_ger_data = []
    with open(os.path.join(data_dir, data_file), 'r') as in_conn:
        for row in in_conn:
            eng_ger_data.append(row[:-1])
# Remove punctuation
eng_ger_data = [''.join(char for char in sent if char not in punct) for sent in eng_ger_data]
# Split each sentence by tabs (the file is "english<TAB>german" per line)
eng_ger_data = [x.split('\t') for x in eng_ger_data if len(x)>=1]
[english_sentence, german_sentence] = [list(x) for x in zip(*eng_ger_data)]
english_sentence = [x.lower().split() for x in english_sentence]
german_sentence = [x.lower().split() for x in german_sentence]
print('Processing the vocabularies.')
# Process the English Vocabulary
all_english_words = [word for sentence in english_sentence for word in sentence]
all_english_counts = Counter(all_english_words)
eng_word_keys = [x[0] for x in all_english_counts.most_common(vocab_size-1)] #-1 because 0=unknown is also in there
# Word -> index maps; index 0 is reserved for out-of-vocabulary words.
eng_vocab2ix = dict(zip(eng_word_keys, range(1,vocab_size)))
eng_ix2vocab = {val:key for key, val in eng_vocab2ix.items()}
english_processed = []
for sent in english_sentence:
    temp_sentence = []
    for word in sent:
        try:
            temp_sentence.append(eng_vocab2ix[word])
        except:
            # NOTE(review): bare except -- `except KeyError` would be safer
            temp_sentence.append(0)
    english_processed.append(temp_sentence)
# Process the German Vocabulary
all_german_words = [word for sentence in german_sentence for word in sentence]
all_german_counts = Counter(all_german_words)
ger_word_keys = [x[0] for x in all_german_counts.most_common(vocab_size-1)]
ger_vocab2ix = dict(zip(ger_word_keys, range(1,vocab_size)))
ger_ix2vocab = {val:key for key, val in ger_vocab2ix.items()}
german_processed = []
for sent in german_sentence:
    temp_sentence = []
    for word in sent:
        try:
            temp_sentence.append(ger_vocab2ix[word])
        except:
            temp_sentence.append(0)
    german_processed.append(temp_sentence)
# Process the test english sentences, use '0' if word not in our vocab
test_data = []
for sentence in test_english:
    temp_sentence = []
    for word in sentence.split(' '):
        try:
            temp_sentence.append(eng_vocab2ix[word])
        except:
            # Use '0' if the word isn't in our vocabulary
            temp_sentence.append(0)
    test_data.append(temp_sentence)
# Define Buckets for sequence lengths
# We will split data into the corresponding buckets:
# (x1, y1), (x2, y2), ...
# Where all entries in bucket 1: len(x)<x1 and len(y)<y1 and so on.
x_maxs = [5, 7, 11, 50]
y_maxs = [10, 12, 17, 60]
buckets = [x for x in zip(x_maxs, y_maxs)]
bucketed_data = [[] for _ in range(len(x_maxs))]
# Each sentence pair goes into the *first* bucket that fits both sides.
for eng, ger in zip(english_processed, german_processed):
    for ix, (x_max, y_max) in enumerate(zip(x_maxs, y_maxs)):
        if (len(eng) <= x_max) and (len(ger) <= y_max):
            bucketed_data[ix].append([eng, ger])
            break
# Print summaries of buckets
train_bucket_sizes = [len(bucketed_data[b]) for b in range(len(buckets))]
train_total_size = float(sum(train_bucket_sizes))
for ix, bucket in enumerate(bucketed_data):
    print('Data pts in bucket {}: {}'.format(ix, len(bucket)))
# Create sequence to sequence model
def translation_model(sess, input_vocab_size, output_vocab_size,
                      buckets, rnn_size, num_layers, max_gradient,
                      learning_rate, lr_decay_rate, forward_only):
    """Build a bucketed seq2seq translation model.

    With ``forward_only=True`` an inference-only graph is constructed (no
    backward pass).  The module-level ``batch_size`` is baked into the
    returned model.
    """
    return seq2seq_model.Seq2SeqModel(
        input_vocab_size,
        output_vocab_size,
        buckets,
        rnn_size,
        num_layers,
        max_gradient,
        batch_size,
        learning_rate,
        lr_decay_rate,
        forward_only=forward_only,
        dtype=tf.float32)
print('Creating Translation Model')
input_vocab_size = vocab_size
output_vocab_size = vocab_size
# Build the training model and an inference model that shares its weights.
with tf.variable_scope('translate_model') as scope:
    translate_model = translation_model(sess, vocab_size, vocab_size,
                                        buckets, rnn_size, num_layers,
                                        max_gradient, learning_rate,
                                        lr_decay_rate, False)
    #Reuse the variables for the test model
    scope.reuse_variables()
    test_model = translation_model(sess, vocab_size, vocab_size,
                                    buckets, rnn_size, num_layers,
                                    max_gradient, learning_rate,
                                    lr_decay_rate, True)
    # Translate one sentence at a time during evaluation.
    test_model.batch_size = 1
# Initialize all model variables
init = tf.global_variables_initializer()
sess.run(init)
# Start training
train_loss = []
for i in range(generations):
    # Pick a random bucket and draw a batch from it.
    rand_bucket_ix = np.random.choice(len(bucketed_data))
    model_outputs = translate_model.get_batch(bucketed_data, rand_bucket_ix)
    encoder_inputs, decoder_inputs, target_weights = model_outputs
    # Get the (gradient norm, loss, and outputs)
    _, step_loss, _ = translate_model.step(sess, encoder_inputs, decoder_inputs,
                                           target_weights, rand_bucket_ix, False)
    # Output status
    if (i+1) % output_every == 0:
        train_loss.append(step_loss)
        print('Gen #{} out of {}. Loss: {:.4}'.format(i+1, generations, step_loss))
    # Check if we should decay the learning rate
    if (i+1) % lr_decay_every == 0:
        sess.run(translate_model.learning_rate_decay_op)
    # Save model
    if (i+1) % save_every == 0:
        print('Saving model to {}.'.format(full_model_dir))
        model_save_path = os.path.join(full_model_dir, "eng_ger_translation.ckpt")
        translate_model.saver.save(sess, model_save_path, global_step=i)
    # Eval on test set
    if (i+1) % eval_every == 0:
        for ix, sentence in enumerate(test_data):
            # Find which bucket sentence goes in
            bucket_id = next(index for index, val in enumerate(x_maxs) if val>=len(sentence))
            # Get RNN model outputs
            encoder_inputs, decoder_inputs, target_weights = test_model.get_batch(
                {bucket_id: [(sentence, [])]}, bucket_id)
            # Get logits
            _, test_loss, output_logits = test_model.step(sess, encoder_inputs, decoder_inputs,
                                                          target_weights, bucket_id, True)
            # Greedy decoding: pick the argmax token at every step.
            ix_output = [int(np.argmax(logit, axis=1)) for logit in output_logits]
            # If there is a 0 symbol in outputs end the output there.
            ix_output = ix_output[0:[ix for ix, x in enumerate(ix_output+[0]) if x==0][0]]
            # Get german words from indices
            test_german = [ger_ix2vocab[x] for x in ix_output]
            print('English: {}'.format(test_english[ix]))
            print('German: {}'.format(test_german))
# Plot train loss
# NOTE(review): losses are recorded when (i+1) % output_every == 0 but the
# x-values below use i % output_every == 0 -- same count, but each x is one
# generation early; confirm whether this offset matters.
loss_generations = [i for i in range(generations) if i%output_every==0]
plt.plot(loss_generations, train_loss, 'k-')
plt.title('Sequence to Sequence Loss')
plt.xlabel('Generation')
plt.ylabel('Loss')
plt.show()
"os.path.exists",
"tensorflow.python.framework.ops.reset_default_graph",
"os.makedirs",
"matplotlib.pyplot.ylabel",
"tensorflow.variable_scope",
"tensorflow.Session",
"matplotlib.pyplot.plot",
"os.path.join",
"matplotlib.pyplot.xlabel",
"requests.get",
"io.BytesIO",
"collections.Counter",
"t... | [((653, 678), 'tensorflow.python.framework.ops.reset_default_graph', 'ops.reset_default_graph', ([], {}), '()\n', (676, 678), False, 'from tensorflow.python.framework import ops\n'), ((705, 717), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (715, 717), True, 'import tensorflow as tf\n'), ((1110, 1144), 'os.path.join', 'os.path.join', (['data_dir', 'model_path'], {}), '(data_dir, model_path)\n', (1122, 1144), False, 'import os\n'), ((3039, 3065), 'collections.Counter', 'Counter', (['all_english_words'], {}), '(all_english_words)\n', (3046, 3065), False, 'from collections import Counter\n'), ((3698, 3723), 'collections.Counter', 'Counter', (['all_german_words'], {}), '(all_german_words)\n', (3705, 3723), False, 'from collections import Counter\n'), ((6792, 6825), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (6823, 6825), True, 'import tensorflow as tf\n'), ((9141, 9185), 'matplotlib.pyplot.plot', 'plt.plot', (['loss_generations', 'train_loss', '"""k-"""'], {}), "(loss_generations, train_loss, 'k-')\n", (9149, 9185), True, 'import matplotlib.pyplot as plt\n'), ((9186, 9224), 'matplotlib.pyplot.title', 'plt.title', (['"""Sequence to Sequence Loss"""'], {}), "('Sequence to Sequence Loss')\n", (9195, 9224), True, 'import matplotlib.pyplot as plt\n'), ((9225, 9249), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Generation"""'], {}), "('Generation')\n", (9235, 9249), True, 'import matplotlib.pyplot as plt\n'), ((9250, 9268), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (9260, 9268), True, 'import matplotlib.pyplot as plt\n'), ((9269, 9279), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9277, 9279), True, 'import matplotlib.pyplot as plt\n'), ((1389, 1419), 'os.path.exists', 'os.path.exists', (['full_model_dir'], {}), '(full_model_dir)\n', (1403, 1419), False, 'import os\n'), ((1425, 1452), 'os.makedirs', 'os.makedirs', (['full_model_dir'], {}), 
'(full_model_dir)\n', (1436, 1452), False, 'import os\n'), ((1483, 1507), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (1497, 1507), False, 'import os\n'), ((1513, 1534), 'os.makedirs', 'os.makedirs', (['data_dir'], {}), '(data_dir)\n', (1524, 1534), False, 'import os\n'), ((1849, 1875), 'requests.get', 'requests.get', (['sentence_url'], {}), '(sentence_url)\n', (1861, 1875), False, 'import requests\n'), ((5651, 5854), 'tensorflow.models.rnn.translate.seq2seq_model.Seq2SeqModel', 'seq2seq_model.Seq2SeqModel', (['input_vocab_size', 'output_vocab_size', 'buckets', 'rnn_size', 'num_layers', 'max_gradient', 'batch_size', 'learning_rate', 'lr_decay_rate'], {'forward_only': 'forward_only', 'dtype': 'tf.float32'}), '(input_vocab_size, output_vocab_size, buckets,\n rnn_size, num_layers, max_gradient, batch_size, learning_rate,\n lr_decay_rate, forward_only=forward_only, dtype=tf.float32)\n', (5677, 5854), False, 'from tensorflow.models.rnn.translate import seq2seq_model\n'), ((6079, 6115), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""translate_model"""'], {}), "('translate_model')\n", (6096, 6115), True, 'import tensorflow as tf\n'), ((1658, 1691), 'os.path.join', 'os.path.join', (['data_dir', 'data_file'], {}), '(data_dir, data_file)\n', (1670, 1691), False, 'import os\n'), ((1892, 1913), 'io.BytesIO', 'io.BytesIO', (['r.content'], {}), '(r.content)\n', (1902, 1913), False, 'import io\n'), ((7799, 7855), 'os.path.join', 'os.path.join', (['full_model_dir', '"""eng_ger_translation.ckpt"""'], {}), "(full_model_dir, 'eng_ger_translation.ckpt')\n", (7811, 7855), False, 'import os\n'), ((2146, 2179), 'os.path.join', 'os.path.join', (['data_dir', 'data_file'], {}), '(data_dir, data_file)\n', (2158, 2179), False, 'import os\n'), ((2323, 2356), 'os.path.join', 'os.path.join', (['data_dir', 'data_file'], {}), '(data_dir, data_file)\n', (2335, 2356), False, 'import os\n'), ((8605, 8629), 'numpy.argmax', 'np.argmax', (['logit'], {'axis': '(1)'}), 
'(logit, axis=1)\n', (8614, 8629), True, 'import numpy as np\n')] |
import numpy as np
def softmax(x):
    """Return a numerically stable softmax of ``x`` along axis 0.

    Bug fix: the previous implementation used ``x -= np.max(...)``, which
    mutated the caller's array in place.  The shift is now done on a copy,
    so the input is left untouched.
    """
    shifted = x - np.max(x, axis=0)  # subtract max for numerical stability
    e_x = np.exp(shifted)
    return e_x / np.sum(e_x, axis=0)
def get_idx_aug_baseline(LOO_influences):
    """Yield one-element index lists, chosen uniformly at random
    without replacement."""
    n = len(LOO_influences)
    order = np.random.choice(n, n, p=None, replace=False)
    for chosen in order:
        yield [chosen]
def get_idx_aug_influence(LOO_influences):
    """Yield one-element index lists sampled without replacement, with
    probability proportional to the magnitude of the LOO influence."""
    # Sampling weights: |influence|, with exact zeros bumped to a tiny
    # positive floor so every point keeps a nonzero chance.
    weights = np.abs(LOO_influences, dtype=float)
    weights[weights == 0] = min(np.min(weights[weights > 0]), 1e-20)
    probs = weights / np.sum(weights)
    n = len(LOO_influences)
    order = np.random.choice(n, n, p=probs, replace=False)
    for chosen in order:
        yield [chosen]
def get_idx_aug_k_dpp(LOO_influences, k):
    """Yield one-element index lists for ``k`` points sampled from a k-DPP
    whose kernel is the Gram matrix of the LOO influence features."""
    import sample_dpp
    gram = LOO_influences.T.dot(LOO_influences)
    assert len(gram) == len(LOO_influences)
    for chosen in sample_dpp.oct_sample_k_dpp(gram, k=k, one_hot=False):
        yield [chosen]
def get_idx_aug_influence_reverse(LOO_influences):
    """Yield one-element index lists sampled without replacement, with
    probability *inversely* proportional to the LOO magnitude."""
    magnitude = np.abs(LOO_influences)
    # Bump exact zeros to a tiny positive value before inverting.
    magnitude[magnitude == 0] = min(np.min(magnitude[magnitude > 0]), 1e-20)
    probs = 1 / magnitude
    probs /= np.sum(probs)
    # Guard against probabilities that underflowed to zero, renormalise.
    probs[probs == 0] = 1e-20
    probs /= np.sum(probs)
    n = len(LOO_influences)
    for chosen in np.random.choice(n, n, p=probs, replace=False):
        yield [chosen]
def get_idx_aug_softmax_influence(LOO_influences):
    """Yield one-element index lists sampled without replacement, with
    probability given by the softmax of the LOO magnitudes.

    Bug fix: the original called ``math_util.softmax``, but ``math_util``
    is never imported in this module (NameError at first call); the
    module-level ``softmax`` defined above is what was intended.
    """
    p = np.abs(LOO_influences)
    # Give exact zeros a tiny positive weight so no point is excluded.
    p[p == 0] = min(np.min(p[p > 0]), 1e-20)
    p = softmax(p)
    idxs = np.random.choice(
        len(LOO_influences),
        len(LOO_influences),
        p=p,
        replace=False,
    )
    for idx in idxs:
        yield [idx]
def get_idx_aug_softmax_influence_reverse(LOO_influences):
    """Yield one-element index lists sampled without replacement, with
    probability given by the softmax of the *inverse* LOO magnitudes.

    Bug fix: the original called ``math_util.softmax``, but ``math_util``
    is never imported in this module (NameError at first call); the
    module-level ``softmax`` defined above is what was intended.
    """
    p = np.abs(LOO_influences)
    # Give exact zeros a tiny positive weight before inverting.
    p[p == 0] = min(np.min(p[p > 0]), 1e-20)
    p = 1 / p
    p = softmax(p)
    # Guard against probabilities that underflowed to zero, renormalise.
    p[p == 0] = 1e-20
    p /= np.sum(p)
    idxs = np.random.choice(
        len(LOO_influences),
        len(LOO_influences),
        p=p,
        replace=False,
    )
    for idx in idxs:
        yield [idx]
def get_idx_aug_deterministic_influence(LOO_influences):
    """Yield one-element index lists in fixed order, largest |LOO| first."""
    ranking = np.argsort(-np.abs(LOO_influences))
    for chosen in ranking:
        yield [chosen]
def get_idx_aug_deterministic_influence_reverse(LOO_influences):
    """Yield one-element index lists in fixed order, smallest |LOO| first."""
    ranking = np.argsort(np.abs(LOO_influences))
    for chosen in ranking:
        yield [chosen]
# Registry mapping policy names (as used by configs / CLI flags) to the
# index-selection generator functions defined above.
name_to_policy = {
    "baseline": get_idx_aug_baseline,
    "random_proportional": get_idx_aug_influence,
    "random_inverse_proportional": get_idx_aug_influence_reverse,
    "random_softmax_proportional": get_idx_aug_softmax_influence,
    "random_inverse_softmax_proportional":
        get_idx_aug_softmax_influence_reverse,
    "deterministic_proportional": get_idx_aug_deterministic_influence,
    "deterministic_inverse_proportional":
        get_idx_aug_deterministic_influence_reverse,
}


def get_policy_by_name(name):
    """Return the index-selection policy registered under *name*.

    Raises:
        KeyError: if *name* is not a registered policy name.
    """
    return name_to_policy[name]
| [
"numpy.abs",
"numpy.max",
"numpy.exp",
"sample_dpp.oct_sample_k_dpp",
"numpy.sum",
"numpy.min"
] | [((71, 88), 'numpy.max', 'np.max', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (77, 88), True, 'import numpy as np\n'), ((99, 108), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (105, 108), True, 'import numpy as np\n'), ((541, 576), 'numpy.abs', 'np.abs', (['LOO_influences'], {'dtype': 'float'}), '(LOO_influences, dtype=float)\n', (547, 576), True, 'import numpy as np\n'), ((631, 640), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (637, 640), True, 'import numpy as np\n'), ((1067, 1117), 'sample_dpp.oct_sample_k_dpp', 'sample_dpp.oct_sample_k_dpp', (['L'], {'k': 'k', 'one_hot': '(False)'}), '(L, k=k, one_hot=False)\n', (1094, 1117), False, 'import sample_dpp\n'), ((1320, 1342), 'numpy.abs', 'np.abs', (['LOO_influences'], {}), '(LOO_influences)\n', (1326, 1342), True, 'import numpy as np\n'), ((1411, 1420), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (1417, 1420), True, 'import numpy as np\n'), ((1452, 1461), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (1458, 1461), True, 'import numpy as np\n'), ((1802, 1824), 'numpy.abs', 'np.abs', (['LOO_influences'], {}), '(LOO_influences)\n', (1808, 1824), True, 'import numpy as np\n'), ((2247, 2269), 'numpy.abs', 'np.abs', (['LOO_influences'], {}), '(LOO_influences)\n', (2253, 2269), True, 'import numpy as np\n'), ((2389, 2398), 'numpy.sum', 'np.sum', (['p'], {}), '(p)\n', (2395, 2398), True, 'import numpy as np\n'), ((126, 145), 'numpy.sum', 'np.sum', (['e_x'], {'axis': '(0)'}), '(e_x, axis=0)\n', (132, 145), True, 'import numpy as np\n'), ((597, 613), 'numpy.min', 'np.min', (['p[p > 0]'], {}), '(p[p > 0])\n', (603, 613), True, 'import numpy as np\n'), ((1363, 1379), 'numpy.min', 'np.min', (['p[p > 0]'], {}), '(p[p > 0])\n', (1369, 1379), True, 'import numpy as np\n'), ((1845, 1861), 'numpy.min', 'np.min', (['p[p > 0]'], {}), '(p[p > 0])\n', (1851, 1861), True, 'import numpy as np\n'), ((2290, 2306), 'numpy.min', 'np.min', (['p[p > 0]'], {}), '(p[p > 0])\n', (2296, 2306), True, 'import numpy as np\n'), ((2965, 2987), 
'numpy.abs', 'np.abs', (['LOO_influences'], {}), '(LOO_influences)\n', (2971, 2987), True, 'import numpy as np\n'), ((2739, 2761), 'numpy.abs', 'np.abs', (['LOO_influences'], {}), '(LOO_influences)\n', (2745, 2761), True, 'import numpy as np\n')] |
# importing packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import LSTM, Dense
# convert an array of values into a dataset matrix
def create_dataset(dataset, look_back=1):
    """Build supervised (X, y) pairs from a (n, 1) series.

    Each X row is a window of ``look_back`` consecutive values from column
    0 and the matching y is the value immediately after the window.
    """
    windows, targets = [], []
    for start in range(len(dataset) - look_back - 1):
        windows.append(dataset[start: (start + look_back), 0])
        targets.append(dataset[start + look_back, 0])
    return np.array(windows), np.array(targets)
# fixing random seed for reproducibility
np.random.seed(42)
# load the dataset
df = pd.read_csv('./airline-passengers.csv')
data = df.values
# NOTE(review): this overwrites `data` with the converted *DataFrame*, not
# the values array -- presumably `data.astype('float32')` was intended, and
# the CSV must contain only numeric columns for astype to succeed; confirm.
data = df.astype('float32')
# normalize the dataset
scaler = MinMaxScaler(feature_range=(0, 1))
data = scaler.fit_transform(data)
# split into train and tst sets
train_size = int(len(data) * 0.67)
test_size = len(data) - train_size
train, test = data[0:train_size, :], data[train_size: len(data), :]
# print(len(train), len(test))
# reshape into X=t and y=t+1
look_back = 1
train_X, train_y = create_dataset(train, look_back)
test_X, test_y = create_dataset(test, look_back)
# reshape inputs to be [samples, time-steps, features]
train_X = np.reshape(train_X, (train_X.shape[0], 1, train_X.shape[1]))
test_X = np.reshape(test_X, (test_X.shape[0], 1, test_X.shape[1]))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(train_X, train_y, epochs=100, batch_size=1, verbose=2)
# make predictions
train_preds = model.predict(train_X)
test_preds = model.predict(test_X)
# invert predictions back to the original passenger-count scale
train_preds = scaler.inverse_transform(train_preds)
train_y = scaler.inverse_transform([train_y])
test_preds = scaler.inverse_transform(test_preds)
test_y = scaler.inverse_transform([test_y])
# calculate root mean squared error
train_score = math.sqrt(mean_squared_error(train_y[0], train_preds[:, 0]))
print('Train Score: %.2f RMSE' % (train_score))
test_score = math.sqrt(mean_squared_error(test_y[0], test_preds[:, 0]))
print('Test Score: %.2f RMSE' % (test_score))
# shift train predictions for plotting
trainPredictPlot = np.empty_like(data)
trainPredictPlot[:, :] = np.nan
trainPredictPlot[look_back:len(train_preds) + look_back, :] = train_preds
# shift test predictions for plotting
testPredictPlot = np.empty_like(data)
testPredictPlot[:, :] = np.nan
testPredictPlot[len(train_preds) + (look_back * 2) + 1:len(data) - 1, :] = test_preds
# plot baseline and predictions
plt.plot(scaler.inverse_transform(data))
plt.plot(trainPredictPlot)
plt.plot(testPredictPlot)
plt.show()
| [
"numpy.reshape",
"pandas.read_csv",
"matplotlib.pyplot.plot",
"keras.models.Sequential",
"sklearn.metrics.mean_squared_error",
"keras.layers.LSTM",
"numpy.array",
"numpy.empty_like",
"numpy.random.seed",
"keras.layers.Dense",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.show"
] | [((670, 688), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (684, 688), True, 'import numpy as np\n'), ((719, 758), 'pandas.read_csv', 'pd.read_csv', (['"""./airline-passengers.csv"""'], {}), "('./airline-passengers.csv')\n", (730, 758), True, 'import pandas as pd\n'), ((843, 877), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(0, 1)'}), '(feature_range=(0, 1))\n', (855, 877), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1340, 1400), 'numpy.reshape', 'np.reshape', (['train_X', '(train_X.shape[0], 1, train_X.shape[1])'], {}), '(train_X, (train_X.shape[0], 1, train_X.shape[1]))\n', (1350, 1400), True, 'import numpy as np\n'), ((1411, 1468), 'numpy.reshape', 'np.reshape', (['test_X', '(test_X.shape[0], 1, test_X.shape[1])'], {}), '(test_X, (test_X.shape[0], 1, test_X.shape[1]))\n', (1421, 1468), True, 'import numpy as np\n'), ((1515, 1527), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1525, 1527), False, 'from keras.models import Sequential\n'), ((2385, 2404), 'numpy.empty_like', 'np.empty_like', (['data'], {}), '(data)\n', (2398, 2404), True, 'import numpy as np\n'), ((2573, 2592), 'numpy.empty_like', 'np.empty_like', (['data'], {}), '(data)\n', (2586, 2592), True, 'import numpy as np\n'), ((2790, 2816), 'matplotlib.pyplot.plot', 'plt.plot', (['trainPredictPlot'], {}), '(trainPredictPlot)\n', (2798, 2816), True, 'import matplotlib.pyplot as plt\n'), ((2818, 2843), 'matplotlib.pyplot.plot', 'plt.plot', (['testPredictPlot'], {}), '(testPredictPlot)\n', (2826, 2843), True, 'import matplotlib.pyplot as plt\n'), ((2845, 2855), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2853, 2855), True, 'import matplotlib.pyplot as plt\n'), ((1539, 1574), 'keras.layers.LSTM', 'LSTM', (['(4)'], {'input_shape': '(1, look_back)'}), '(4, input_shape=(1, look_back))\n', (1543, 1574), False, 'from keras.layers import LSTM, Dense\n'), ((1587, 1595), 'keras.layers.Dense', 'Dense', (['(1)'], {}), 
'(1)\n', (1592, 1595), False, 'from keras.layers import LSTM, Dense\n'), ((2103, 2152), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['train_y[0]', 'train_preds[:, 0]'], {}), '(train_y[0], train_preds[:, 0])\n', (2121, 2152), False, 'from sklearn.metrics import mean_squared_error\n'), ((2227, 2274), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['test_y[0]', 'test_preds[:, 0]'], {}), '(test_y[0], test_preds[:, 0])\n', (2245, 2274), False, 'from sklearn.metrics import mean_squared_error\n'), ((590, 605), 'numpy.array', 'np.array', (['dataX'], {}), '(dataX)\n', (598, 605), True, 'import numpy as np\n'), ((607, 622), 'numpy.array', 'np.array', (['dataY'], {}), '(dataY)\n', (615, 622), True, 'import numpy as np\n')] |
import sys
import numpy as np
INF = 10 ** 18
def main():
    """Read x, y, z, K and three value lists from stdin; print the K
    largest sums a + b + c, taking at most K items per dimension.
    """
    x, y, z, K = map(int, sys.stdin.readline().split())
    values = np.array(sys.stdin.read().split(), dtype=np.int64)
    # Sort each list descending with an INF sentinel prepended, so index 0
    # is a guard value and the real items start at index 1.
    a = np.sort(np.append(values[:x], INF))[::-1]
    b = np.sort(np.append(values[x:x + y], INF))[::-1]
    c = np.sort(np.append(values[x + y:x + y + z], INF))[::-1]
    sums = []
    # i * j * k <= K bounds the number of candidate triples to O(K log^2 K).
    for i in range(1, min(K, x) + 1):
        for j in range(1, min(y, K // i) + 1):
            for k in range(1, min(z, K // (i * j)) + 1):
                sums.append(a[i] + b[j] + c[k])
    best = sorted(sums, reverse=True)[:K]
    print('\n'.join(map(str, best)))


if __name__ == "__main__":
    main()
| [
"numpy.append",
"numpy.sort",
"sys.stdin.readline",
"sys.stdin.read"
] | [((194, 217), 'numpy.append', 'np.append', (['abc[:x]', 'INF'], {}), '(abc[:x], INF)\n', (203, 217), True, 'import numpy as np\n'), ((227, 255), 'numpy.append', 'np.append', (['abc[x:x + y]', 'INF'], {}), '(abc[x:x + y], INF)\n', (236, 255), True, 'import numpy as np\n'), ((265, 301), 'numpy.append', 'np.append', (['abc[x + y:x + y + z]', 'INF'], {}), '(abc[x + y:x + y + z], INF)\n', (274, 301), True, 'import numpy as np\n'), ((310, 320), 'numpy.sort', 'np.sort', (['a'], {}), '(a)\n', (317, 320), True, 'import numpy as np\n'), ((336, 346), 'numpy.sort', 'np.sort', (['b'], {}), '(b)\n', (343, 346), True, 'import numpy as np\n'), ((362, 372), 'numpy.sort', 'np.sort', (['c'], {}), '(c)\n', (369, 372), True, 'import numpy as np\n'), ((93, 113), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (111, 113), False, 'import sys\n'), ((143, 159), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (157, 159), False, 'import sys\n')] |
#
# Author: <NAME>
# Copyright 2016
#
import logging
import isceobj
import mroipac
import os
import numpy as np
from isceobj.Util.decorators import use_api
logger = logging.getLogger('isce.insar.VerifyDEM')
def runVerifyDEM(self):
    '''
    Make sure that a DEM is available for processing the given data.

    Returns the filename of a WGS84-referenced DEM, downloading/stitching
    and EGM96->WGS84 correcting one for the product bounding box when no
    DEM was supplied in the input XML.
    '''
    self.demStitcher.noFilling = False

    ###If provided in the input XML file
    if self.demFilename not in ['', None]:
        demimg = isceobj.createDemImage()
        demimg.load(self.demFilename + '.xml')
        if not os.path.exists(self.demFilename + '.vrt'):
            demimg.renderVRT()

        if demimg.reference.upper() == 'EGM96':
            wgsdemname = self.demFilename + '.wgs84'
            if os.path.exists(wgsdemname) and os.path.exists(wgsdemname + '.xml'):
                demimg = isceobj.createDemImage()
                demimg.load(wgsdemname + '.xml')
                if demimg.reference.upper() == 'EGM96':
                    raise Exception('WGS84 version of dem found by reference set to EGM96')
            else:
                demimg = self.demStitcher.correct(demimg)
        elif demimg.reference.upper() != 'WGS84':
            raise Exception('Unknown reference system for DEM: {0}'.format(demimg.reference))
    else:
        refPol = self._grd.polarizations[0]
        reference = self._grd.loadProduct(os.path.join(self._grd.outputFolder, 'beta_{0}.xml'.format(refPol)))
        bbox = reference.getBbox()

        ####Truncate to integers
        tbox = [np.floor(bbox[0]), np.ceil(bbox[1]),
                np.floor(bbox[2]), np.ceil(bbox[3])]
        filename = self.demStitcher.defaultName(tbox)
        wgsfilename = filename + '.wgs84'

        ####Check if WGS84 file exists
        if os.path.exists(wgsfilename) and os.path.exists(wgsfilename + '.xml'):
            demimg = isceobj.createDemImage()
            demimg.load(wgsfilename + '.xml')
            if not os.path.exists(wgsfilename + '.vrt'):
                demimg.renderVRT()
        ####Check if EGM96 file exists
        elif os.path.exists(filename) and os.path.exists(filename + '.xml'):
            inimg = isceobj.createDemImage()
            inimg.load(filename + '.xml')
            # BUGFIX: the elif guard guarantees filename + '.xml' exists, so
            # the old test `not os.path.exists(filename + '.xml')` was always
            # False and the VRT was never rendered.  Test for the '.vrt'
            # sidecar, as the WGS84 branch above does.
            if not os.path.exists(filename + '.vrt'):
                inimg.renderVRT()
            demimg = self.demStitcher.correct(inimg)
        else:
            stitchOk = self.demStitcher.stitch(tbox[0:2], tbox[2:4])
            if not stitchOk:
                logger.error("Cannot form the DEM for the region of interest. If you have one, set the appropriate DEM component in the input file.")
                raise Exception

            inimg = isceobj.createDemImage()
            inimg.load(filename + '.xml')
            # BUGFIX: the stitcher has just produced `filename`, so the old
            # test `not os.path.exists(filename)` was always False; render
            # the VRT when its '.vrt' sidecar is missing instead.
            if not os.path.exists(filename + '.vrt'):
                inimg.renderVRT()
            demimg = self.demStitcher.correct(inimg)

    #get water mask
    # self.runCreateWbdMask(info)

    return demimg.filename
| [
"logging.getLogger",
"os.path.exists",
"numpy.ceil",
"numpy.floor",
"isceobj.createDemImage"
] | [((167, 208), 'logging.getLogger', 'logging.getLogger', (['"""isce.insar.VerifyDEM"""'], {}), "('isce.insar.VerifyDEM')\n", (184, 208), False, 'import logging\n'), ((464, 488), 'isceobj.createDemImage', 'isceobj.createDemImage', ([], {}), '()\n', (486, 488), False, 'import isceobj\n'), ((551, 592), 'os.path.exists', 'os.path.exists', (["(self.demFilename + '.vrt')"], {}), "(self.demFilename + '.vrt')\n", (565, 592), False, 'import os\n'), ((1544, 1561), 'numpy.floor', 'np.floor', (['bbox[0]'], {}), '(bbox[0])\n', (1552, 1561), True, 'import numpy as np\n'), ((1563, 1579), 'numpy.ceil', 'np.ceil', (['bbox[1]'], {}), '(bbox[1])\n', (1570, 1579), True, 'import numpy as np\n'), ((1597, 1614), 'numpy.floor', 'np.floor', (['bbox[2]'], {}), '(bbox[2])\n', (1605, 1614), True, 'import numpy as np\n'), ((1616, 1632), 'numpy.ceil', 'np.ceil', (['bbox[3]'], {}), '(bbox[3])\n', (1623, 1632), True, 'import numpy as np\n'), ((1783, 1810), 'os.path.exists', 'os.path.exists', (['wgsfilename'], {}), '(wgsfilename)\n', (1797, 1810), False, 'import os\n'), ((1815, 1851), 'os.path.exists', 'os.path.exists', (["(wgsfilename + '.xml')"], {}), "(wgsfilename + '.xml')\n", (1829, 1851), False, 'import os\n'), ((1874, 1898), 'isceobj.createDemImage', 'isceobj.createDemImage', ([], {}), '()\n', (1896, 1898), False, 'import isceobj\n'), ((744, 770), 'os.path.exists', 'os.path.exists', (['wgsdemname'], {}), '(wgsdemname)\n', (758, 770), False, 'import os\n'), ((775, 810), 'os.path.exists', 'os.path.exists', (["(wgsdemname + '.xml')"], {}), "(wgsdemname + '.xml')\n", (789, 810), False, 'import os\n'), ((837, 861), 'isceobj.createDemImage', 'isceobj.createDemImage', ([], {}), '()\n', (859, 861), False, 'import isceobj\n'), ((1965, 2001), 'os.path.exists', 'os.path.exists', (["(wgsfilename + '.vrt')"], {}), "(wgsfilename + '.vrt')\n", (1979, 2001), False, 'import os\n'), ((2091, 2115), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2105, 2115), False, 'import os\n'), 
((2120, 2153), 'os.path.exists', 'os.path.exists', (["(filename + '.xml')"], {}), "(filename + '.xml')\n", (2134, 2153), False, 'import os\n'), ((2175, 2199), 'isceobj.createDemImage', 'isceobj.createDemImage', ([], {}), '()\n', (2197, 2199), False, 'import isceobj\n'), ((2702, 2726), 'isceobj.createDemImage', 'isceobj.createDemImage', ([], {}), '()\n', (2724, 2726), False, 'import isceobj\n'), ((2262, 2295), 'os.path.exists', 'os.path.exists', (["(filename + '.xml')"], {}), "(filename + '.xml')\n", (2276, 2295), False, 'import os\n'), ((2788, 2812), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2802, 2812), False, 'import os\n')] |
import os
import cv2
import numpy as np
from torch.utils.data import DataLoader, Dataset, sampler
from kaggle_runner.datasets.coders import run_length_decode
from kaggle_runner.datasets.transfomers import get_transforms
class SIIMDataset(Dataset):
    """SIIM pneumothorax segmentation dataset.

    For each ImageId the dataset loads the PNG from `data_folder` and
    builds a 1024x1024 binary mask by decoding and summing every RLE
    annotation for that image (overlapping annotations are clipped to 1;
    an annotation of "-1" means no pneumothorax).
    """

    def __init__(self, df, fnames, data_folder, size, mean, std, phase):
        self.df = df
        self.root = data_folder
        self.size = size
        self.mean = mean
        self.std = std
        self.phase = phase
        self.transforms = get_transforms(phase, size, mean, std)
        self.gb = self.df.groupby("ImageId")
        self.fnames = fnames

    def __getitem__(self, idx):
        image_id = self.fnames[idx]
        group = self.gb.get_group(image_id)
        annotations = group[" EncodedPixels"].tolist()
        image = cv2.imread(os.path.join(self.root, image_id + ".png"))
        mask = np.zeros([1024, 1024])
        if annotations[0] != "-1":
            for rle in annotations:
                mask += run_length_decode(rle)
        # Clip overlapping annotations to a {0, 1} float mask.
        mask = (mask >= 1).astype("float32")
        augmented = self.transforms(image=image, mask=mask)
        return augmented["image"], augmented["mask"]

    def __len__(self):
        return len(self.fnames)
| [
"kaggle_runner.datasets.coders.run_length_decode",
"os.path.join",
"numpy.zeros",
"kaggle_runner.datasets.transfomers.get_transforms",
"cv2.imread"
] | [((504, 542), 'kaggle_runner.datasets.transfomers.get_transforms', 'get_transforms', (['phase', 'size', 'mean', 'std'], {}), '(phase, size, mean, std)\n', (518, 542), False, 'from kaggle_runner.datasets.transfomers import get_transforms\n'), ((800, 842), 'os.path.join', 'os.path.join', (['self.root', "(image_id + '.png')"], {}), "(self.root, image_id + '.png')\n", (812, 842), False, 'import os\n'), ((859, 881), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (869, 881), False, 'import cv2\n'), ((897, 919), 'numpy.zeros', 'np.zeros', (['[1024, 1024]'], {}), '([1024, 1024])\n', (905, 919), True, 'import numpy as np\n'), ((1015, 1037), 'kaggle_runner.datasets.coders.run_length_decode', 'run_length_decode', (['rle'], {}), '(rle)\n', (1032, 1037), False, 'from kaggle_runner.datasets.coders import run_length_decode\n')] |
from styx_msgs.msg import TrafficLight
import tensorflow as tf
import numpy as np
import cv2
import rospy
import math
class TLClassifier(object):
    """Traffic-light classifier.

    An SSD-MobileNet model pre-trained on COCO localizes the traffic
    light in the frame; the average color of the cropped detection box
    is then matched against pure red/yellow/green to pick the state.
    """
    def __init__(self):
        # Load the frozen COCO detection graph and create a TF1 session.
        self.MODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'
        model_file = self.MODEL_NAME + '/frozen_inference_graph.pb'
        self.detection_graph = tf.Graph()
        with self.detection_graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(model_file, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                # name='' keeps the original tensor names (no prefix).
                tf.import_graph_def(od_graph_def, name='')
        self.sess = tf.Session(graph=self.detection_graph)
        # Input and output Tensors for detection_graph
        self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
        self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
        self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
        self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
        self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
    def get_average_color(self, rgb_img):
        """Get the average value for each r g b channel in the cropped rgb image.

        Args:
            rgb_img: HxWx3 RGB crop of the detected traffic light.

        Returns:
            List [r, g, b] of per-channel mean intensities.
        """
        val = [0,0,0]
        for i in range(3):
            img = rgb_img[:,:,i]
            # Average over rows first, then over the resulting row means.
            avg_color_per_row = np.average(img, axis=0)
            val[i] = np.average(avg_color_per_row, axis=0)
        return val
    def determine_color(self, val):
        """Determines the closest color by squared RGB distance.

        Reference colors:
            red is 255, 0, 0
            yellow is 255, 255, 0
            green is 0, 255, 0

        Returns:
            TrafficLight.RED / YELLOW / GREEN (UNKNOWN is unreachable
            here since one of the three candidates always wins).
        """
        colors = [[255, 0, 0],[255, 255, 0],[0, 255, 0]]
        diff = float("inf")
        diff_index = -1
        for i in range(len(colors)):
            # Squared Euclidean distance in RGB space (no sqrt needed
            # for an argmin).
            current_diff = (val[0]-colors[i][0])**2 + (val[1]-colors[i][1])**2 + (val[2]-colors[i][2])**2
            if current_diff < diff:
                diff = current_diff
                diff_index = i
        if diff_index == 0:
            return TrafficLight.RED
        elif diff_index == 1:
            return TrafficLight.YELLOW
        elif diff_index == 2:
            return TrafficLight.GREEN
        else:
            return TrafficLight.UNKNOWN
    def get_classification(self, image):
        """Determines the color of the traffic light in the image
        Args:
            image (cv::Mat): image containing the traffic light (BGR, as
                read by OpenCV)
        Returns:
            int: ID of traffic light color (specified in styx_msgs/TrafficLight)
        """
        # The detector was trained on RGB images, so convert from BGR.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image_np = np.asarray(image, dtype="uint8")
        image_np_expanded = np.expand_dims(image_np, axis=0)
        detected = False
        with self.detection_graph.as_default():
            (boxes, scores, classes, num) = self.sess.run(
                [self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
                feed_dict={self.image_tensor: image_np_expanded})
        boxes = np.squeeze(boxes)
        classes = np.squeeze(classes).astype(np.int32)
        scores = np.squeeze(scores)
        best_scores = []
        for idx, classID in enumerate(classes):
            if self.MODEL_NAME == 'ssd_mobilenet_v1_coco_2017_11_17':
                if classID == 10: # 10 is traffic light in the COCO label map
                    if scores[idx] > 0.15: #confidence level
                        best_scores.append([scores[idx], idx, classID])
                        detected = True
        if detected:
            # Sort to get the best score
            best_scores.sort(key=lambda tup: tup[0], reverse=True)
            best_score = best_scores[0]
            nbox = boxes[best_score[1]]
            # Get bounding box for this object; detector boxes are
            # normalized [ymin, xmin, ymax, xmax], so scale to pixels.
            height = image.shape[0]
            width = image.shape[1]
            box = np.array([nbox[0]*height, nbox[1]*width, nbox[2]*height, nbox[3]*width]).astype(int)
            cropped_image = image[box[0]:box[2], box[1]:box[3]]
            # Get average color
            val = self.get_average_color(cropped_image)
            # Predict color
            traffic_light = self.determine_color(val)
            # Draw bounding box with detected color
            traffic_light_color = (255,255,255) # default: white
            if traffic_light==TrafficLight.RED:
                traffic_light_color = (255,0,0)
            elif traffic_light == TrafficLight.YELLOW:
                traffic_light_color = (255,255,0)
            elif traffic_light == TrafficLight.GREEN:
                traffic_light_color = (0,255,0)
            cv2.rectangle(image, (box[1],box[0]), (box[3],box[2]),traffic_light_color,4)
            return traffic_light
        return TrafficLight.UNKNOWN
| [
"cv2.rectangle",
"tensorflow.Graph",
"numpy.average",
"tensorflow.Session",
"numpy.asarray",
"tensorflow.GraphDef",
"numpy.squeeze",
"numpy.array",
"numpy.expand_dims",
"cv2.cvtColor",
"tensorflow.gfile.GFile",
"tensorflow.import_graph_def"
] | [((362, 372), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (370, 372), True, 'import tensorflow as tf\n'), ((2831, 2869), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (2843, 2869), False, 'import cv2\n'), ((2889, 2921), 'numpy.asarray', 'np.asarray', (['image'], {'dtype': '"""uint8"""'}), "(image, dtype='uint8')\n", (2899, 2921), True, 'import numpy as np\n'), ((2950, 2982), 'numpy.expand_dims', 'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (2964, 2982), True, 'import numpy as np\n'), ((3315, 3332), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (3325, 3332), True, 'import numpy as np\n'), ((3405, 3423), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (3415, 3423), True, 'import numpy as np\n'), ((448, 461), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (459, 461), True, 'import tensorflow as tf\n'), ((713, 751), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.detection_graph'}), '(graph=self.detection_graph)\n', (723, 751), True, 'import tensorflow as tf\n'), ((1539, 1562), 'numpy.average', 'np.average', (['img'], {'axis': '(0)'}), '(img, axis=0)\n', (1549, 1562), True, 'import numpy as np\n'), ((1584, 1621), 'numpy.average', 'np.average', (['avg_color_per_row'], {'axis': '(0)'}), '(avg_color_per_row, axis=0)\n', (1594, 1621), True, 'import numpy as np\n'), ((4901, 4986), 'cv2.rectangle', 'cv2.rectangle', (['image', '(box[1], box[0])', '(box[3], box[2])', 'traffic_light_color', '(4)'], {}), '(image, (box[1], box[0]), (box[3], box[2]), traffic_light_color, 4\n )\n', (4914, 4986), False, 'import cv2\n'), ((479, 511), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['model_file', '"""rb"""'], {}), "(model_file, 'rb')\n", (493, 511), True, 'import tensorflow as tf\n'), ((645, 687), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (664, 687), True, 'import 
tensorflow as tf\n'), ((3351, 3370), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (3361, 3370), True, 'import numpy as np\n'), ((4159, 4244), 'numpy.array', 'np.array', (['[nbox[0] * height, nbox[1] * width, nbox[2] * height, nbox[3] * width]'], {}), '([nbox[0] * height, nbox[1] * width, nbox[2] * height, nbox[3] * width]\n )\n', (4167, 4244), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Imports
#==================================================
import json, keras, gensim, codecs
import tensorflow as tf
import numpy as np
import keras.preprocessing.text as kpt
from keras.callbacks import Callback
from keras.layers import Dropout, Input, Dense, Embedding, LSTM, Bidirectional
from keras.models import Model, load_model
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy
from sklearn.metrics import classification_report, confusion_matrix, precision_recall_fscore_support
from sklearn.model_selection import train_test_split
# Get negations
#==================================================
def get_negation_instances(dataset):
    """Split a parsed CoNLL-style dataset into parallel per-sentence lists.

    For every token row, column 2 is the surface form, 3 the lemma, 4 the
    POS tag and 5 the negation-cue column.  The cue column is mapped to a
    one-hot label: [1, 0] for a cue, [0, 1] for '_' or '***' (non-cue).
    Rows too short to index are skipped best-effort, and the module-level
    ``length`` counter is incremented once per cue token.

    Returns:
        (words, lemmas, pos, labels) — four lists of per-sentence lists.
    """
    global length
    words, lemmas, pos, labels = [], [], [], []
    for sentence in dataset:
        sent_words, sent_lemmas, sent_pos, sent_labels = [], [], [], []
        for token in sentence:
            try:
                sent_words.append(token[2])
                sent_lemmas.append(token[3])
                sent_pos.append(token[4])
                cue = token[5]
                if cue in ('_', '***'):
                    sent_labels.append([0, 1])
                else:
                    sent_labels.append([1, 0])
                    length += 1
            except Exception:
                # Malformed/short row: keep whatever was appended so far,
                # exactly as the original best-effort loop did.
                pass
        words.append(sent_words)
        lemmas.append(sent_lemmas)
        pos.append(sent_pos)
        labels.append(sent_labels)
    return words, lemmas, pos, labels
# ---------------------- more metrics -------------------
class MoreMetrics(Callback):
    """Keras callback that tracks cue-class P/R/F1 on the validation set
    and checkpoints the model whenever the F1 score improves.

    NOTE(review): relies on the module-level globals ``model``,
    ``X_valid``, ``X_lemmas_valid``, ``X_pos_valid``, ``Y_valid`` and
    ``best_f1`` defined by the training script below — it is not a
    self-contained callback.
    """
    def on_train_begin(self, logs={}):
        # Histories are initialized but never appended to in this class;
        # kept for compatibility with the original implementation.
        self.val_f1s = []
        self.val_recalls = []
        self.val_precisions = []
    def on_epoch_end(self, epoch, logs={}):
        global best_f1
        # Score the current model on the full validation split.
        val_predict = model.predict([X_valid, X_lemmas_valid, X_pos_valid])
        val_targ = Y_valid
        valid_pre, valid_rec, valid_f1 = get_eval_epoch(val_predict,val_targ)
        print ("Precision/recall/F1 score on validation set", valid_pre, valid_rec, valid_f1)
        # Simple best-model checkpointing keyed on cue-class F1.
        if valid_f1 > best_f1:
            best_f1 = valid_f1
            model.save('cue_bilstm-crf.hdf5')
            print ('saved best model')
        else:
            print ('No progress')
        return
def get_eval(predictions, gs):
    """Print a per-class precision/recall/F1 report for the test set.

    Predictions are argmax-decoded per token; gold one-hot vectors map
    [1, 0] -> class 0 (cue) and anything else -> class 1 (non-cue).
    """
    y_pred = [list(vec).index(vec.max()) for sent in predictions for vec in sent]
    y_true = [0 if list(vec) == [1, 0] else 1 for sent in gs for vec in sent]
    print(classification_report(y_true, y_pred, digits=4))
def get_eval_epoch(predictions, gs):
    """Return (precision, recall, F1) for the cue class.

    Predictions are argmax-decoded per token; gold one-hot vectors map
    [1, 0] -> class 0 (cue) and anything else -> class 1 (non-cue).
    Position 0 of the sklearn per-class arrays is the cue class.
    """
    y_pred = [list(vec).index(vec.max()) for sent in predictions for vec in sent]
    y_true = [0 if list(vec) == [1, 0] else 1 for sent in gs for vec in sent]
    p, r, f1, _ = precision_recall_fscore_support(y_true, y_pred)
    return p[0], r[0], f1[0]
# ---------------------- Padding features -------------------
def pad_documents(sentences, padding_word='<PAD>'):
    """Pad all sentences to the length of the longest one.

    Args:
        sentences: list of token lists.
        padding_word: token appended to shorter sentences.

    Returns:
        New list of equally long sentences (inputs are not mutated).
    """
    longest = max(len(s) for s in sentences)
    return [s + [padding_word] * (longest - len(s)) for s in sentences]
def pad_labels(sentences, padding_word=None):
    """Pad all label sequences to the length of the longest one.

    Args:
        sentences: list of per-sentence one-hot label lists.
        padding_word: label appended to shorter sequences; defaults to
            [0, 1] (the non-cue class).

    Returns:
        New list of equally long label sequences (inputs are not mutated).
    """
    # None sentinel instead of a mutable default argument ([0, 1] as a
    # default would be one shared list across all calls).
    if padding_word is None:
        padding_word = [0, 1]
    longest = max(len(s) for s in sentences)
    return [s + [padding_word] * (longest - len(s)) for s in sentences]
# ---------------------- storing labeling results -------------------
def store_prediction(lex, dic_inv, pred_dev, gold_dev):
    """Dump token-level gold vs. predicted labels to cue_best_pred.txt.

    Each non-padding token is written as "word<TAB>gold<TAB>pred", with a
    blank line between sentences.  Word indices are mapped back to
    strings through ``dic_inv``.
    """
    print ("Storing labelling results for dev or test set...")
    with codecs.open('cue_best_pred.txt','wb','utf8') as out:
        for sent, sys_probs, gold_labels in zip(lex, pred_dev, gold_dev):
            tokens = [dic_inv.get(idx) for idx in sent]
            assert len(tokens) == len(sys_probs) == len(gold_labels)
            for token, probs, gold in zip(tokens, sys_probs, gold_labels):
                pred = list(probs).index(probs.max())
                truth = 0 if list(gold) == [1, 0] else 1
                if token != "<PAD>":
                    out.write("%s\t%s\t%s\n" % (token, truth, pred))
            out.write("\n")
#==================================================
# loading datasets
#==================================================
# `length` counts cue tokens (incremented inside get_negation_instances).
length = 0
lengths = []
# NOTE(review): placeholder path — replace './data/....txt' with the real
# training corpus before running.
data = open('./data/....txt').read()
# Corpus format: blank-line-separated sentences of tab-separated columns.
data = data.split('\n\n')
data = [item.split('\n') for item in data]
data = [[i.split('\t') for i in item] for item in data]
words, lemmas, pos, labels = get_negation_instances(data)
lengths.append(length)
# Pad every modality to the longest sentence length.
words_x = pad_documents(words)
lemmas_x = pad_documents(lemmas)
pos_x = pad_documents(pos)
labels_x = pad_labels(labels)
#Preparing words without pre-trained embeddings
# ==================================================
# create a new Tokenizer (lower=False keeps case distinctions)
tokenizer = kpt.Tokenizer(lower=False)
# feed our texts to the Tokenizer
tokenizer.fit_on_texts(words_x)
# Tokenizers come with a convenient list of words and IDs
dictionary = tokenizer.word_index
x_words = [[dictionary[word] for word in text] for text in words_x]
vocabulary_size = len(dictionary)
# Reverse mapping (id -> word) used later to write predictions out.
dic_inv = dict(map(reversed, tokenizer.word_index.items()))
#Preparing lemma without pre-trained embeddings
# ==================================================
# create a new Tokenizer
Lemmatokenizer = kpt.Tokenizer(lower=False)
# feed our texts to the Tokenizer
Lemmatokenizer.fit_on_texts(lemmas_x)
# Tokenizers come with a convenient list of words and IDs
Lemmadictionary = Lemmatokenizer.word_index
x_lemmas = [[Lemmadictionary[word] for word in text] for text in lemmas_x]
lemma_vocabulary_size = len(Lemmadictionary)
#==================================================
# Preparing POS embeddings
# ==================================================
# create a new Tokenizer
postokenizer = kpt.Tokenizer(lower=False)
# feed our texts to the Tokenizer
postokenizer.fit_on_texts(pos_x)
# Tokenizers come with a convenient list of words and IDs
posdictionary = postokenizer.word_index
x_pos = [[posdictionary[pos] for pos in text] for text in pos_x]
tag_voc_size = len(posdictionary)
#==================================================
# Splitting data into the original train, validation and test sets
#==================================================
xwords = np.array(x_words, dtype='int32')
xlemmas = np.array(x_lemmas, dtype='int32')
xpos = np.array(x_pos, dtype='int32')
xlabels = np.array(labels_x, dtype='int32')
sequence_length = xwords.shape[1]
# 80/20 train/test split, then 80/20 train/validation; the same
# random_state keeps the word/lemma/POS splits aligned row-for-row.
Xtrain, X_test, Ytrain, Y_test = train_test_split(xwords, xlabels, random_state=42, test_size=0.2)
X_lemmas_train, X_lemmas_test, _, _ = train_test_split(xlemmas, xlabels, random_state=42, test_size=0.2)
X_pos_train, X_pos_test, _, _ = train_test_split(xpos, xlabels, random_state=42, test_size=0.2)
X_train, X_valid, Y_train, Y_valid = train_test_split(Xtrain, Ytrain, random_state=42, test_size=0.2)
X_lemmas_train, X_lemmas_valid, _, _ = train_test_split(X_lemmas_train, Ytrain, random_state=42, test_size=0.2)
X_pos_train, X_pos_valid, _, _ = train_test_split(X_pos_train, Ytrain, random_state=42, test_size=0.2)
# ---------------------- Parameters section -------------------
# Model Hyperparameters
embedding_dim = 100
hidden_dims = 400
# ~ # Training parameters
num_epochs = 20
batch_size = 32
# Best validation F1 so far; updated globally by the MoreMetrics callback.
best_f1 = 0.0
embeddings_initializer = keras.initializers.RandomUniform(minval=-1.0, maxval=1.0, seed=42)
moremetrics = MoreMetrics()
#==================================================
# ---------------------- training section -------------------
#==================================================
print("Creating BiLSTM Model")
# Three parallel integer-id inputs: words, lemmas and POS tags.
inputs_w = Input(shape=(sequence_length,), dtype='int32')
inputs_l = Input(shape=(sequence_length,), dtype='int32')
inputs_pos = Input(shape=(sequence_length,), dtype='int32')
# +1 on each vocabulary size because Tokenizer indices start at 1
# (index 0 is reserved for padding/masking).
w_emb = Embedding(vocabulary_size+1, embedding_dim, input_length=sequence_length, embeddings_initializer=embeddings_initializer, trainable=True)(inputs_w)
l_emb = Embedding(lemma_vocabulary_size+1, embedding_dim, input_length=sequence_length, embeddings_initializer=embeddings_initializer, trainable=True)(inputs_l)
p_emb = Embedding(tag_voc_size+1, embedding_dim, input_length=sequence_length, embeddings_initializer=embeddings_initializer, trainable=True)(inputs_pos)
# Element-wise sum of the three embedding streams.
summed = keras.layers.add([w_emb, l_emb, p_emb])
dropout_emb = Dropout(0.5)(summed)
BiLSTM = Bidirectional(LSTM(hidden_dims, recurrent_dropout=0.5, return_sequences=True))(dropout_emb)
# CRF output layer over the two token classes (cue / non-cue).
outputs = CRF(2, sparse_target=False)(BiLSTM)
model = Model(inputs=[inputs_w, inputs_l, inputs_pos], outputs=outputs)
model.compile('adam', loss=crf_loss, metrics=[crf_viterbi_accuracy])
model.summary()
model.fit([X_train, X_lemmas_train, X_pos_train], Y_train, batch_size=batch_size, epochs=num_epochs, verbose=1, validation_data=([X_valid, X_lemmas_valid, X_pos_valid], Y_valid), callbacks=[moremetrics])
#==================================================
# ---------------------- testing section -------------------
#==================================================
# Reload the best checkpoint saved by MoreMetrics; the custom CRF layer,
# loss and metric must be registered for deserialization.
custom_objects = {'CRF': CRF, 'crf_loss': crf_loss, 'crf_viterbi_accuracy': crf_viterbi_accuracy}
model = load_model('cue_bilstm-crf.hdf5', custom_objects)
preds = model.predict([X_test, X_lemmas_test, X_pos_test])
get_eval(preds, Y_test)
store_prediction(X_test, dic_inv, preds, Y_test)
| [
"keras.initializers.RandomUniform",
"keras.preprocessing.text.Tokenizer",
"keras.models.load_model",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report",
"keras.layers.add",
"sklearn.metrics.precision_recall_fscore_support",
"keras_contrib.layers.CRF",
"numpy.array",
... | [((5429, 5455), 'keras.preprocessing.text.Tokenizer', 'kpt.Tokenizer', ([], {'lower': '(False)'}), '(lower=False)\n', (5442, 5455), True, 'import keras.preprocessing.text as kpt\n'), ((5925, 5951), 'keras.preprocessing.text.Tokenizer', 'kpt.Tokenizer', ([], {'lower': '(False)'}), '(lower=False)\n', (5938, 5951), True, 'import keras.preprocessing.text as kpt\n'), ((6423, 6449), 'keras.preprocessing.text.Tokenizer', 'kpt.Tokenizer', ([], {'lower': '(False)'}), '(lower=False)\n', (6436, 6449), True, 'import keras.preprocessing.text as kpt\n'), ((6899, 6931), 'numpy.array', 'np.array', (['x_words'], {'dtype': '"""int32"""'}), "(x_words, dtype='int32')\n", (6907, 6931), True, 'import numpy as np\n'), ((6942, 6975), 'numpy.array', 'np.array', (['x_lemmas'], {'dtype': '"""int32"""'}), "(x_lemmas, dtype='int32')\n", (6950, 6975), True, 'import numpy as np\n'), ((6983, 7013), 'numpy.array', 'np.array', (['x_pos'], {'dtype': '"""int32"""'}), "(x_pos, dtype='int32')\n", (6991, 7013), True, 'import numpy as np\n'), ((7024, 7057), 'numpy.array', 'np.array', (['labels_x'], {'dtype': '"""int32"""'}), "(labels_x, dtype='int32')\n", (7032, 7057), True, 'import numpy as np\n'), ((7129, 7194), 'sklearn.model_selection.train_test_split', 'train_test_split', (['xwords', 'xlabels'], {'random_state': '(42)', 'test_size': '(0.2)'}), '(xwords, xlabels, random_state=42, test_size=0.2)\n', (7145, 7194), False, 'from sklearn.model_selection import train_test_split\n'), ((7239, 7305), 'sklearn.model_selection.train_test_split', 'train_test_split', (['xlemmas', 'xlabels'], {'random_state': '(42)', 'test_size': '(0.2)'}), '(xlemmas, xlabels, random_state=42, test_size=0.2)\n', (7255, 7305), False, 'from sklearn.model_selection import train_test_split\n'), ((7344, 7407), 'sklearn.model_selection.train_test_split', 'train_test_split', (['xpos', 'xlabels'], {'random_state': '(42)', 'test_size': '(0.2)'}), '(xpos, xlabels, random_state=42, test_size=0.2)\n', (7360, 7407), False, 'from 
sklearn.model_selection import train_test_split\n'), ((7447, 7511), 'sklearn.model_selection.train_test_split', 'train_test_split', (['Xtrain', 'Ytrain'], {'random_state': '(42)', 'test_size': '(0.2)'}), '(Xtrain, Ytrain, random_state=42, test_size=0.2)\n', (7463, 7511), False, 'from sklearn.model_selection import train_test_split\n'), ((7556, 7628), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_lemmas_train', 'Ytrain'], {'random_state': '(42)', 'test_size': '(0.2)'}), '(X_lemmas_train, Ytrain, random_state=42, test_size=0.2)\n', (7572, 7628), False, 'from sklearn.model_selection import train_test_split\n'), ((7667, 7736), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X_pos_train', 'Ytrain'], {'random_state': '(42)', 'test_size': '(0.2)'}), '(X_pos_train, Ytrain, random_state=42, test_size=0.2)\n', (7683, 7736), False, 'from sklearn.model_selection import train_test_split\n'), ((7962, 8028), 'keras.initializers.RandomUniform', 'keras.initializers.RandomUniform', ([], {'minval': '(-1.0)', 'maxval': '(1.0)', 'seed': '(42)'}), '(minval=-1.0, maxval=1.0, seed=42)\n', (7994, 8028), False, 'import json, keras, gensim, codecs\n'), ((8267, 8313), 'keras.layers.Input', 'Input', ([], {'shape': '(sequence_length,)', 'dtype': '"""int32"""'}), "(shape=(sequence_length,), dtype='int32')\n", (8272, 8313), False, 'from keras.layers import Dropout, Input, Dense, Embedding, LSTM, Bidirectional\n'), ((8325, 8371), 'keras.layers.Input', 'Input', ([], {'shape': '(sequence_length,)', 'dtype': '"""int32"""'}), "(shape=(sequence_length,), dtype='int32')\n", (8330, 8371), False, 'from keras.layers import Dropout, Input, Dense, Embedding, LSTM, Bidirectional\n'), ((8385, 8431), 'keras.layers.Input', 'Input', ([], {'shape': '(sequence_length,)', 'dtype': '"""int32"""'}), "(shape=(sequence_length,), dtype='int32')\n", (8390, 8431), False, 'from keras.layers import Dropout, Input, Dense, Embedding, LSTM, Bidirectional\n'), ((8913, 8952), 
'keras.layers.add', 'keras.layers.add', (['[w_emb, l_emb, p_emb]'], {}), '([w_emb, l_emb, p_emb])\n', (8929, 8952), False, 'import json, keras, gensim, codecs\n'), ((9146, 9209), 'keras.models.Model', 'Model', ([], {'inputs': '[inputs_w, inputs_l, inputs_pos]', 'outputs': 'outputs'}), '(inputs=[inputs_w, inputs_l, inputs_pos], outputs=outputs)\n', (9151, 9209), False, 'from keras.models import Model, load_model\n'), ((9775, 9824), 'keras.models.load_model', 'load_model', (['"""cue_bilstm-crf.hdf5"""', 'custom_objects'], {}), "('cue_bilstm-crf.hdf5', custom_objects)\n", (9785, 9824), False, 'from keras.models import Model, load_model\n'), ((2825, 2863), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['y_', 'y'], {}), '(y_, y)\n', (2856, 2863), False, 'from sklearn.metrics import classification_report, confusion_matrix, precision_recall_fscore_support\n'), ((8441, 8583), 'keras.layers.Embedding', 'Embedding', (['(vocabulary_size + 1)', 'embedding_dim'], {'input_length': 'sequence_length', 'embeddings_initializer': 'embeddings_initializer', 'trainable': '(True)'}), '(vocabulary_size + 1, embedding_dim, input_length=sequence_length,\n embeddings_initializer=embeddings_initializer, trainable=True)\n', (8450, 8583), False, 'from keras.layers import Dropout, Input, Dense, Embedding, LSTM, Bidirectional\n'), ((8596, 8749), 'keras.layers.Embedding', 'Embedding', (['(lemma_vocabulary_size + 1)', 'embedding_dim'], {'input_length': 'sequence_length', 'embeddings_initializer': 'embeddings_initializer', 'trainable': '(True)'}), '(lemma_vocabulary_size + 1, embedding_dim, input_length=\n sequence_length, embeddings_initializer=embeddings_initializer,\n trainable=True)\n', (8605, 8749), False, 'from keras.layers import Dropout, Input, Dense, Embedding, LSTM, Bidirectional\n'), ((8757, 8896), 'keras.layers.Embedding', 'Embedding', (['(tag_voc_size + 1)', 'embedding_dim'], {'input_length': 'sequence_length', 'embeddings_initializer': 
'embeddings_initializer', 'trainable': '(True)'}), '(tag_voc_size + 1, embedding_dim, input_length=sequence_length,\n embeddings_initializer=embeddings_initializer, trainable=True)\n', (8766, 8896), False, 'from keras.layers import Dropout, Input, Dense, Embedding, LSTM, Bidirectional\n'), ((8967, 8979), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8974, 8979), False, 'from keras.layers import Dropout, Input, Dense, Embedding, LSTM, Bidirectional\n'), ((9101, 9128), 'keras_contrib.layers.CRF', 'CRF', (['(2)'], {'sparse_target': '(False)'}), '(2, sparse_target=False)\n', (9104, 9128), False, 'from keras_contrib.layers import CRF\n'), ((2546, 2584), 'sklearn.metrics.classification_report', 'classification_report', (['y_', 'y'], {'digits': '(4)'}), '(y_, y, digits=4)\n', (2567, 2584), False, 'from sklearn.metrics import classification_report, confusion_matrix, precision_recall_fscore_support\n'), ((4258, 4304), 'codecs.open', 'codecs.open', (['"""cue_best_pred.txt"""', '"""wb"""', '"""utf8"""'], {}), "('cue_best_pred.txt', 'wb', 'utf8')\n", (4269, 4304), False, 'import json, keras, gensim, codecs\n'), ((9012, 9075), 'keras.layers.LSTM', 'LSTM', (['hidden_dims'], {'recurrent_dropout': '(0.5)', 'return_sequences': '(True)'}), '(hidden_dims, recurrent_dropout=0.5, return_sequences=True)\n', (9016, 9075), False, 'from keras.layers import Dropout, Input, Dense, Embedding, LSTM, Bidirectional\n')] |
import numpy as np
def list_to_mat(data, dims):
    """Convert (i, j, value) observation rows into dense matrices.

    Args:
        data: array of rows [i, j, ..., value]; only the first two and
            the last column are used.
        dims: (rows, cols) shape of the output matrices.

    Returns:
        values: dims-shaped matrix with the observed values filled in.
        observed: dims-shaped 0/1 indicator of which entries were set.
    """
    rows, cols = dims
    values = np.zeros((rows, cols))
    observed = np.zeros((rows, cols))
    for row in data:
        i, j = int(row[0]), int(row[1])
        values[i, j] = row[-1]
        observed[i, j] = 1
    return values, observed
def predict(test_set, U, V):
    """Score a factorization on held-out (i, j, value) triples.

    Reconstructs each observed entry as the dot product of row i of U
    with column j of V.

    Returns:
        Tuple of (l2 norm of the residual vector,
                  fraction of observations with |residual| >= 1).
    """
    rows = test_set[:, 0].astype('int')
    cols = test_set[:, 1].astype('int')
    # Per-observation dot products u_i . v_j without building a full matrix.
    reconstructed = np.einsum('ik,ki->i', U[rows, :], V[:, cols])
    residual = test_set[:, -1] - reconstructed
    big_errors = np.count_nonzero(np.abs(residual) >= 1)
    return np.sqrt(np.sum(residual ** 2)), big_errors / len(rows)
def log_joint(U, V, X_list, gamma_U_params, gamma_V_params):
    """Poisson data term of the log joint for a matrix factorization.

    Args:
        U: (m, d) row-factor matrix.
        V: (d, n) column-factor matrix.
        X_list: (n_obs, 3) array of (i, j, count) observations.
        gamma_U_params: dict with 'a' and 'b' Gamma hyper-parameters for U.
        gamma_V_params: dict with 'a' and 'b' Gamma hyper-parameters for V.

    Returns:
        Sum over observations of x_ij * log(u_i . v_j) - (u_i . v_j).

    NOTE(review): the Gamma hyper-parameters are validated (read) but
    their prior contribution is never added to the returned value — only
    the Poisson data term is computed.  Presumably the prior terms were
    meant to be included; confirm with callers before extending.
    """
    A_u, B_u = gamma_U_params['a'], gamma_U_params['b']
    # Bug fix: B_v previously read gamma_U_params['b'] (copy-paste error).
    A_v, B_v = gamma_V_params['a'], gamma_V_params['b']
    i_obs = X_list[:, 0].astype('int')
    j_obs = X_list[:, 1].astype('int')
    # Per-observation dot products u_i . v_j.
    rel_UV = np.sum(U[i_obs, :] * V[:, j_obs].T, axis=1)
    pt_poisson = np.sum(X_list[:, 2] * np.log(rel_UV) - rel_UV)
    return pt_poisson
| [
"numpy.abs",
"numpy.log",
"numpy.sum",
"numpy.zeros",
"numpy.shape"
] | [((104, 120), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (112, 120), True, 'import numpy as np\n'), ((132, 148), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (140, 148), True, 'import numpy as np\n'), ((485, 528), 'numpy.sum', 'np.sum', (['(U[i_obs, :] * V[:, j_obs].T)'], {'axis': '(1)'}), '(U[i_obs, :] * V[:, j_obs].T, axis=1)\n', (491, 528), True, 'import numpy as np\n'), ((737, 748), 'numpy.shape', 'np.shape', (['U'], {}), '(U)\n', (745, 748), True, 'import numpy as np\n'), ((760, 771), 'numpy.shape', 'np.shape', (['V'], {}), '(V)\n', (768, 771), True, 'import numpy as np\n'), ((1005, 1048), 'numpy.sum', 'np.sum', (['(U[i_obs, :] * V[:, j_obs].T)'], {'axis': '(1)'}), '(U[i_obs, :] * V[:, j_obs].T, axis=1)\n', (1011, 1048), True, 'import numpy as np\n'), ((586, 598), 'numpy.abs', 'np.abs', (['diff'], {}), '(diff)\n', (592, 598), True, 'import numpy as np\n'), ((617, 634), 'numpy.sum', 'np.sum', (['(diff ** 2)'], {}), '(diff ** 2)\n', (623, 634), True, 'import numpy as np\n'), ((1088, 1102), 'numpy.log', 'np.log', (['rel_UV'], {}), '(rel_UV)\n', (1094, 1102), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from rdkit.Chem import PandasTools, AllChem
from rdkit import Chem
from rdkit.Chem.rdMolDescriptors import GetMorganFingerprint, GetMorganFingerprintAsBitVect
from rdkit import DataStructs
import os
from scipy.stats import lognorm
import time
#%%% Read the in-stock SMILES/NPL file in chunks to bound memory use.
db = pd.DataFrame(np.zeros([0, 2]), columns=["smiles", "npl"])
num_to_read = int(1e+5)
for i in range(0, 10000000, num_to_read):
    # NOTE(review): pandas raises once skiprows passes EOF, which is what
    # terminates this loop — confirm this is the intended stop condition.
    temp = pd.read_csv("../data/original/in-stock_NPL.smi", sep="\t", header=None,
                       names=["smiles", "npl"], skiprows=i, nrows=num_to_read)
    db = pd.concat([db, temp], axis=0)
    print(i)
#%%% Prepare data
# reset_index
db.reset_index(inplace=True, drop=True)
# get indices of molecules with missing npl
missing_npl_idx = db[db.npl.isna()].index
# remove molecules with missing npl
db.drop(missing_npl_idx, axis=0, inplace=True)
# reset indices
db.reset_index(inplace=True, drop=True)
# remove stereoinformation ('@'/'@@' chirality markers in SMILES)
db["smiles"] = [x.replace("@", "") for x in db.smiles]
db.drop_duplicates(subset="smiles", inplace=True)
db.reset_index(inplace=True, drop=True)
db.to_pickle("../data/zinc_smiles_clean.pkl")
#%% Same process for coconut
coconut_data = pd.read_csv("../data/original/COCONUT_DB_NPL.smi", sep="\t", header=None, names=["smiles", "npl"])
coconut_data.dropna(inplace=True)
# Bug fix: the result of drop_duplicates was previously discarded (no
# inplace=True and no assignment), making this line a no-op.
coconut_data = coconut_data.drop_duplicates("smiles")
# canonicalize the SMILES via RDKit; unparsable entries become None
coc_smiles = []
for x in coconut_data.smiles:
    try:
        coc_smiles.append(Chem.MolToSmiles(Chem.MolFromSmiles(x)))
    except Exception:
        coc_smiles.append(None)
coconut_data["smiles"] = coc_smiles
# Drop entries RDKit could not parse, then de-duplicate the canonical forms.
coconut_data.dropna(inplace=True)
coconut_data = coconut_data.drop_duplicates("smiles")
coconut_data.reset_index(inplace=True, drop=True)
coconut_data.to_pickle("../data/coconut_smiles_clean.pkl")
| [
"numpy.zeros",
"pandas.concat",
"pandas.read_csv",
"rdkit.Chem.MolFromSmiles"
] | [((1196, 1298), 'pandas.read_csv', 'pd.read_csv', (['"""../data/original/COCONUT_DB_NPL.smi"""'], {'sep': '"""\t"""', 'header': 'None', 'names': "['smiles', 'npl']"}), "('../data/original/COCONUT_DB_NPL.smi', sep='\\t', header=None,\n names=['smiles', 'npl'])\n", (1207, 1298), True, 'import pandas as pd\n'), ((335, 351), 'numpy.zeros', 'np.zeros', (['[0, 2]'], {}), '([0, 2])\n', (343, 351), True, 'import numpy as np\n'), ((451, 582), 'pandas.read_csv', 'pd.read_csv', (['"""../data/original/in-stock_NPL.smi"""'], {'sep': '"""\t"""', 'header': 'None', 'names': "['smiles', 'npl']", 'skiprows': 'i', 'nrows': 'num_to_read'}), "('../data/original/in-stock_NPL.smi', sep='\\t', header=None,\n names=['smiles', 'npl'], skiprows=i, nrows=num_to_read)\n", (462, 582), True, 'import pandas as pd\n'), ((589, 618), 'pandas.concat', 'pd.concat', (['[db, temp]'], {'axis': '(0)'}), '([db, temp], axis=0)\n', (598, 618), True, 'import pandas as pd\n'), ((1485, 1506), 'rdkit.Chem.MolFromSmiles', 'Chem.MolFromSmiles', (['x'], {}), '(x)\n', (1503, 1506), False, 'from rdkit import Chem\n')] |
"""Base class for Cameras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import os
import numpy as np
from robovat.math import get_transform
from robovat.math import Pose
from robovat.math import Orientation
class Camera(object):
"""Abstract base class for cameras.
"""
__metaclass__ = abc.ABCMeta
    def __init__(self,
                 height=None,
                 width=None,
                 intrinsics=None,
                 translation=None,
                 rotation=None,
                 crop=None):
        """Initialize.
        Args:
            height: The height of the image.
            width: The width of the image.
            intrinsics: The intrinsics matrix.
            translation: The translation vector.
            rotation: The rotation matrix.
            crop: The cropping box as [y1, x1, y2, x2].  When given, it
                overrides height/width with the cropped dimensions.
        """
        if crop is None:
            self._height = height
            self._width = width
        else:
            # Cropped image size is derived from the crop box, not the
            # full sensor resolution.
            self._height = crop[2] - crop[0]
            self._width = crop[3] - crop[1]
        # _crop must be assigned before set_calibration: that method
        # shifts the principal point by the crop offsets.
        self._crop = crop
        self.set_calibration(intrinsics, translation, rotation)
    @property
    def height(self):
        # Image height in pixels (after cropping, if a crop box was set).
        return self._height
    @property
    def width(self):
        # Image width in pixels (after cropping, if a crop box was set).
        return self._width
    @property
    def intrinsics(self):
        # 3x3 camera intrinsics matrix (principal point already adjusted
        # for any crop by set_calibration).
        return self._intrinsics
    @property
    def translation(self):
        # Translation of the world origin in the camera frame.
        return self._translation
    @property
    def rotation(self):
        # Rotation matrix of the world origin in the camera frame.
        return self._rotation
    @property
    def cx(self):
        # Principal point x-coordinate.
        return self.intrinsics[0, 2]
    @property
    def cy(self):
        # Principal point y-coordinate.
        return self.intrinsics[1, 2]
    @property
    def pose(self):
        # Camera pose in the world frame: the stored extrinsics describe
        # the world origin in the camera frame, so invert them.
        world_origin_in_camera = Pose([self._translation, self._rotation])
        return world_origin_in_camera.inverse()
def start(self):
"""Starts the camera stream.
"""
pass
def stop(self):
"""Stops the camera stream.
Returns:
True if succeed, False if fail.
"""
return True
def reset(self):
"""Restarts the camera stream.
"""
self.stop()
self.start()
def frames(self):
"""Get the latest set of frames.
Returns:
A dictionary of RGB image, depth image and segmentation image.
'image': The RGB image as an uint8 np array of [width, height, 3].
'depth': The depth image as a float32 np array of [width, height].
'segmask': None value.
"""
images = self._frames()
if self._crop is not None:
for key in images:
images[key] = images[key][self._crop[0]:self._crop[2],
self._crop[1]:self._crop[3]]
return images
def _frames(self):
"""Get the latest set of frames.
"""
raise NotImplementedError
def load_calibration(self, path, robot_pose=[[0, 0, 0], [0, 0, 0]]):
"""Set the camera by using the camera calibration results.
Args:
path: The data directory of the calibration results.
"""
intrinsics_path = os.path.join(path, 'IR_intrinsics.npy')
intrinsics = np.load(intrinsics_path, encoding='latin1')
translation_path = os.path.join(path, 'robot_IR_translation.npy')
translation = np.load(translation_path, encoding='latin1')
rotation_path = os.path.join(path, 'robot_IR_rotation.npy')
rotation = np.load(rotation_path, encoding='latin1')
# Convert the extrinsics from the robot frame to the world frame.
from_robot_to_world = get_transform(source=robot_pose)
robot_pose_in_camera = Pose([translation, rotation])
camera_pose_in_robot = robot_pose_in_camera.inverse()
camera_pose_in_world = from_robot_to_world.transform(
camera_pose_in_robot)
world_origin_in_camera = camera_pose_in_world.inverse()
translation = world_origin_in_camera.position
rotation = world_origin_in_camera.matrix3
return intrinsics, translation, rotation
    def set_calibration(self, intrinsics, translation, rotation):
        """Set the camera calibration data.

        Each argument may be None, in which case the corresponding
        attribute is left untouched (partial updates are allowed).

        Args:
            intrinsics: The intrinsics matrix (any 9-element layout;
                reshaped to 3x3).
            translation: The translation vector (reshaped to length 3).
            rotation: The rotation, converted through Orientation to a
                3x3 matrix.
        """
        if intrinsics is not None:
            self._intrinsics = np.array(intrinsics).reshape((3, 3))
            # Shift the principal point so pixel coordinates refer to
            # the cropped image rather than the full sensor frame.
            if self._crop is not None:
                self._intrinsics[0, 2] -= self._crop[1]
                self._intrinsics[1, 2] -= self._crop[0]
        if translation is not None:
            self._translation = np.array(translation).reshape((3,))
        if rotation is not None:
            self._rotation = Orientation(rotation).matrix3
def project_point(self, point, is_world_frame=True):
"""Projects a point cloud onto the camera image plane.
Args:
point: 3D point to project onto the camera image plane.
is_world_frame: True if the 3D point is defined in the world frame,
False if it is defined in the camera frame.
Returns:
pixel: 2D pixel location in the camera image.
"""
point = np.array(point)
if is_world_frame:
point = np.dot(point - self.pose.position, self.pose.matrix3)
projected = np.dot(point, self.intrinsics.T)
projected = np.divide(projected, np.tile(projected[..., 2:3], [3]))
projected = np.round(projected)
pixel = np.array(projected[..., :2]).astype(np.int16)
return pixel
def deproject_pixel(self, pixel, depth, is_world_frame=True):
"""Deprojects a single pixel with a given depth into a 3D point.
Args:
pixel: 2D point representing the pixel location in the image.
depth: Depth value at the given pixel location.
is_world_frame: True if the 3D point is defined in the world frame,
False if it is defined in the camera frame.
Returns:
point: The deprojected 3D point.
"""
point = depth * np.linalg.inv(self.intrinsics).dot(np.r_[pixel, 1.0])
if is_world_frame:
point = self.pose.position + np.dot(point, self.pose.matrix3.T)
return point
def deproject_depth_image(self, image, crop=None, is_world_frame=True):
"""Deprojects an entire depth image into a 3D point cloud.
Args:
image: 2.5D depth image.
crop: The cropping box as [y1, x1, y2, x2].
is_world_frame: True if the 3D point is defined in the world frame,
False if it is defined in the camera frame.
Returns:
point: The deprojected 3D point cloud.
"""
num_points = np.prod(image.shape)
image_shape = [image.shape[0], image.shape[1]]
inds = np.indices(image_shape).reshape((2, -1))[::-1, :]
pixels = np.concatenate([inds, np.ones((1, num_points))], axis=0)
depth = np.tile(image.reshape((1, -1)), [3, 1])
pixels = pixels * depth
point_cloud = np.matmul(np.linalg.inv(self.intrinsics), pixels)
if crop is not None:
mask = np.logical_and(
np.logical_and(inds[:, 0] >= crop[0], inds[:, 0] <= crop[2]),
np.logical_and(inds[:, 1] >= crop[1], inds[:, 1] <= crop[3]))
point_cloud = point_cloud[mask]
if is_world_frame:
point_cloud = self.pose.position.reshape(3, 1) + np.matmul(
self.pose.matrix3,
point_cloud)
return np.array(point_cloud.T)
| [
"numpy.prod",
"numpy.tile",
"numpy.ones",
"numpy.logical_and",
"robovat.math.Orientation",
"os.path.join",
"numpy.indices",
"robovat.math.Pose",
"numpy.array",
"numpy.dot",
"robovat.math.get_transform",
"numpy.linalg.inv",
"numpy.matmul",
"numpy.load",
"numpy.round"
] | [((1778, 1819), 'robovat.math.Pose', 'Pose', (['[self._translation, self._rotation]'], {}), '([self._translation, self._rotation])\n', (1782, 1819), False, 'from robovat.math import Pose\n'), ((3213, 3252), 'os.path.join', 'os.path.join', (['path', '"""IR_intrinsics.npy"""'], {}), "(path, 'IR_intrinsics.npy')\n", (3225, 3252), False, 'import os\n'), ((3274, 3317), 'numpy.load', 'np.load', (['intrinsics_path'], {'encoding': '"""latin1"""'}), "(intrinsics_path, encoding='latin1')\n", (3281, 3317), True, 'import numpy as np\n'), ((3345, 3391), 'os.path.join', 'os.path.join', (['path', '"""robot_IR_translation.npy"""'], {}), "(path, 'robot_IR_translation.npy')\n", (3357, 3391), False, 'import os\n'), ((3414, 3458), 'numpy.load', 'np.load', (['translation_path'], {'encoding': '"""latin1"""'}), "(translation_path, encoding='latin1')\n", (3421, 3458), True, 'import numpy as np\n'), ((3483, 3526), 'os.path.join', 'os.path.join', (['path', '"""robot_IR_rotation.npy"""'], {}), "(path, 'robot_IR_rotation.npy')\n", (3495, 3526), False, 'import os\n'), ((3546, 3587), 'numpy.load', 'np.load', (['rotation_path'], {'encoding': '"""latin1"""'}), "(rotation_path, encoding='latin1')\n", (3553, 3587), True, 'import numpy as np\n'), ((3693, 3725), 'robovat.math.get_transform', 'get_transform', ([], {'source': 'robot_pose'}), '(source=robot_pose)\n', (3706, 3725), False, 'from robovat.math import get_transform\n'), ((3757, 3786), 'robovat.math.Pose', 'Pose', (['[translation, rotation]'], {}), '([translation, rotation])\n', (3761, 3786), False, 'from robovat.math import Pose\n'), ((5344, 5359), 'numpy.array', 'np.array', (['point'], {}), '(point)\n', (5352, 5359), True, 'import numpy as np\n'), ((5483, 5515), 'numpy.dot', 'np.dot', (['point', 'self.intrinsics.T'], {}), '(point, self.intrinsics.T)\n', (5489, 5515), True, 'import numpy as np\n'), ((5612, 5631), 'numpy.round', 'np.round', (['projected'], {}), '(projected)\n', (5620, 5631), True, 'import numpy as np\n'), ((6919, 6939), 
'numpy.prod', 'np.prod', (['image.shape'], {}), '(image.shape)\n', (6926, 6939), True, 'import numpy as np\n'), ((7739, 7762), 'numpy.array', 'np.array', (['point_cloud.T'], {}), '(point_cloud.T)\n', (7747, 7762), True, 'import numpy as np\n'), ((5408, 5461), 'numpy.dot', 'np.dot', (['(point - self.pose.position)', 'self.pose.matrix3'], {}), '(point - self.pose.position, self.pose.matrix3)\n', (5414, 5461), True, 'import numpy as np\n'), ((5557, 5590), 'numpy.tile', 'np.tile', (['projected[..., 2:3]', '[3]'], {}), '(projected[..., 2:3], [3])\n', (5564, 5590), True, 'import numpy as np\n'), ((7254, 7284), 'numpy.linalg.inv', 'np.linalg.inv', (['self.intrinsics'], {}), '(self.intrinsics)\n', (7267, 7284), True, 'import numpy as np\n'), ((4866, 4887), 'robovat.math.Orientation', 'Orientation', (['rotation'], {}), '(rotation)\n', (4877, 4887), False, 'from robovat.math import Orientation\n'), ((5649, 5677), 'numpy.array', 'np.array', (['projected[..., :2]'], {}), '(projected[..., :2])\n', (5657, 5677), True, 'import numpy as np\n'), ((6368, 6402), 'numpy.dot', 'np.dot', (['point', 'self.pose.matrix3.T'], {}), '(point, self.pose.matrix3.T)\n', (6374, 6402), True, 'import numpy as np\n'), ((7099, 7123), 'numpy.ones', 'np.ones', (['(1, num_points)'], {}), '((1, num_points))\n', (7106, 7123), True, 'import numpy as np\n'), ((7375, 7435), 'numpy.logical_and', 'np.logical_and', (['(inds[:, 0] >= crop[0])', '(inds[:, 0] <= crop[2])'], {}), '(inds[:, 0] >= crop[0], inds[:, 0] <= crop[2])\n', (7389, 7435), True, 'import numpy as np\n'), ((7453, 7513), 'numpy.logical_and', 'np.logical_and', (['(inds[:, 1] >= crop[1])', '(inds[:, 1] <= crop[3])'], {}), '(inds[:, 1] >= crop[1], inds[:, 1] <= crop[3])\n', (7467, 7513), True, 'import numpy as np\n'), ((7648, 7689), 'numpy.matmul', 'np.matmul', (['self.pose.matrix3', 'point_cloud'], {}), '(self.pose.matrix3, point_cloud)\n', (7657, 7689), True, 'import numpy as np\n'), ((4510, 4530), 'numpy.array', 'np.array', (['intrinsics'], {}), 
'(intrinsics)\n', (4518, 4530), True, 'import numpy as np\n'), ((4767, 4788), 'numpy.array', 'np.array', (['translation'], {}), '(translation)\n', (4775, 4788), True, 'import numpy as np\n'), ((6245, 6275), 'numpy.linalg.inv', 'np.linalg.inv', (['self.intrinsics'], {}), '(self.intrinsics)\n', (6258, 6275), True, 'import numpy as np\n'), ((7010, 7033), 'numpy.indices', 'np.indices', (['image_shape'], {}), '(image_shape)\n', (7020, 7033), True, 'import numpy as np\n')] |
from __future__ import print_function

import argparse
import codecs
import itertools
import logging
import math
import os
import subprocess
import sys
import time
from copy import deepcopy

import h5py
import numpy as np
import theano

import models
from data_generator import VisualWordDataGenerator

# BUGFIX: threshold='nan' only "worked" via Python 2's mixed-type
# comparison and raises a ValueError on Python 3 / modern numpy.
# sys.maxsize disables array summarisation the same way.
np.set_printoptions(threshold=sys.maxsize)

# Set up logger
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger(__name__)

# Dimensionality of image feature vector
IMG_FEATS = 4096

# Location of the multeval toolkit, relative to the working directory.
MULTEVAL_DIR = '../multeval-0.5.1' if "util" in os.getcwd() else "multeval-0.5.1"
class cd:
    """Context manager that temporarily switches the working directory.

    Adapted from:
    http://stackoverflow.com/questions/431684/how-do-i-cd-in-python
    """

    def __init__(self, newPath):
        self.newPath = newPath

    def __enter__(self):
        # Remember where we came from so __exit__ can restore it,
        # even if the body raises.
        self.savedPath = os.getcwd()
        os.chdir(self.newPath)

    def __exit__(self, etype, value, traceback):
        os.chdir(self.savedPath)
class GroundedTranslationGenerator:
def __init__(self, args):
self.args = args
self.vocab = dict()
self.unkdict = dict()
self.counter = 0
self.maxSeqLen = 0
# consistent with models.py
self.use_sourcelang = args.source_vectors is not None
self.use_image = not args.no_image
self.model = None
self.prepare_datagenerator()
# this results in two file handlers for dataset (here and
# data_generator)
if not self.args.dataset:
logger.warn("No dataset given, using flickr8k")
self.dataset = h5py.File("flickr8k/dataset.h5", "r")
else:
self.dataset = h5py.File("%s/dataset.h5" % self.args.dataset, "r")
if self.args.debug:
theano.config.optimizer = 'None'
theano.config.exception_verbosity = 'high'
    def prepare_datagenerator(self):
        """Create the data generator and restore the decoding vocabulary.

        Side effects: sets self.data_gen, self.args.checkpoint (path to the
        best saved checkpoint), self.vocab_len, self.index2word and
        self.word2index.
        """
        self.data_gen = VisualWordDataGenerator(self.args,
                                               self.args.dataset)
        # The vocabulary is serialised alongside the checkpoint, so the best
        # checkpoint has to be located before the vocabulary can be loaded.
        self.args.checkpoint = self.find_best_checkpoint()
        self.data_gen.set_vocabulary(self.args.checkpoint)
        self.vocab_len = len(self.data_gen.index2word)
        self.index2word = self.data_gen.index2word
        self.word2index = self.data_gen.word2index
def generate(self):
'''
Entry point for this module.
Loads up a data generator to get the relevant image / source features.
Builds the relevant model, given the command-line arguments.
Generates sentences for the images in the val / test data.
Calculates BLEU and PPLX, unless requested.
'''
if self.use_sourcelang:
# HACK FIXME unexpected problem with input_data
self.hsn_size = self.data_gen.hsn_size
else:
self.hsn_size = 0
if self.model == None:
self.build_model(generate=True)
self.generate_sentences(self.args.checkpoint, val=not self.args.test)
if not self.args.without_scores:
score = self.bleu_score(self.args.checkpoint, val=not self.args.test)
if self.args.multeval:
score, _, _ = self.multeval_scores(self.args.checkpoint,
val=not self.args.test)
if not self.args.no_pplx:
self.build_model(generate=False)
self.calculate_pplx(self.args.checkpoint, val=not self.args.test)
return score
    def generate_sentences(self, filepath, val=True):
        """
        Generates descriptions of images for --generation_timesteps
        iterations through the LSTM. Each input description is clipped to
        the first <BOS> token, or, if --generate_from_N_words is set, to the
        first N following words (N + 1 BOS token).
        This process can be additionally conditioned
        on source language hidden representations, if provided by the
        --source_vectors parameter.
        The output is clipped to the first EOS generated, if it exists.

        Sentences are written to <filepath>/<val|test>Generated, one per
        line. Beam-search decoding is used when --beam_width > 1,
        otherwise greedy arg-max decoding.

        TODO: duplicated method with generate.py
        """
        if self.args.beam_width > 1:
            prefix = "val" if val else "test"
            handle = codecs.open("%s/%sGenerated" % (filepath, prefix), "w",
                                 'utf-8')
            logger.info("Generating %s descriptions", prefix)
            start_gen = self.args.generate_from_N_words  # Default 0
            start_gen = start_gen + 1  # include BOS
            generator = self.data_gen.generation_generator(prefix, batch_size=1)
            seen = 0
            # we are going to beam search for the most probably sentence.
            # let's do this one sentence at a time to make the logging output
            # easier to understand
            for data in generator:
                text = data[0]['text']
                # Append the first start_gen words to the complete_sentences list
                # for each instance in the batch.
                complete_sentences = [[] for _ in range(text.shape[0])]
                for t in range(start_gen):  # minimum 1
                    for i in range(text.shape[0]):
                        w = np.argmax(text[i, t])
                        complete_sentences[i].append(self.index2word[w])
                del data[0]['text']
                text = self.reset_text_arrays(text, start_gen)
                Y_target = data[1]['output']
                data[0]['text'] = text
                max_beam_width = self.args.beam_width
                structs = self.make_duplicate_matrices(data[0], max_beam_width)
                # A beam is a 2-tuple with the probability of the sequence and
                # the words in that sequence. Start with empty beams
                beams = [(0.0, [])]
                # collects beams that are in the top candidates and
                # emitted a <E> token.
                finished = []
                for t in range(start_gen, self.args.generation_timesteps):
                    # Store the candidates produced at timestep t, will be
                    # pruned at the end of the timestep
                    candidates = []
                    # we take a view of the datastructures, which means we're only
                    # ever generating a prediction for the next word. This saves a
                    # lot of cycles.
                    preds = self.model.predict(structs, verbose=0)
                    # The last indices in preds are the predicted words
                    next_word_indices = preds[:, t-1]
                    sorted_indices = np.argsort(-next_word_indices, axis=1)
                    # Each instance in structs is holding the history of a
                    # beam, and so there is a direct connection between the
                    # index of a beam in beams and the index of an instance in
                    # structs.
                    for beam_idx, b in enumerate(beams):
                        # get the sorted predictions for the beam_idx'th beam
                        beam_predictions = sorted_indices[beam_idx]
                        for top_idx in range(self.args.beam_width):
                            wordIndex = beam_predictions[top_idx]
                            wordProb = next_word_indices[beam_idx][beam_predictions[top_idx]]
                            # For the beam_idxth beam, add the log probability
                            # of the top_idxth predicted word to the previous
                            # log probability of the sequence, and append the
                            # top_idxth predicted word to the sequence of words
                            candidates.append([b[0] + math.log(wordProb), b[1] + [wordIndex]])
                    candidates.sort(reverse = True)
                    if self.args.verbose:
                        logger.info("Candidates in the beam")
                        logger.info("---")
                        for c in candidates:
                            logger.info(" ".join([self.index2word[x] for x in c[1]]) + " (%f)" % c[0])
                    beams = candidates[:max_beam_width]  # prune the beams
                    pruned = []
                    for b in beams:
                        # If a top candidate emitted an EOS token then
                        # a) add it to the list of finished sequences
                        # b) remove it from the beams and decrease the
                        # maximum size of the beams.
                        if b[1][-1] == self.word2index["<E>"]:
                            finished.append(b)
                            if max_beam_width >= 1:
                                max_beam_width -= 1
                        else:
                            pruned.append(b)
                    beams = pruned[:max_beam_width]
                    if self.args.verbose:
                        logger.info("Pruned beams")
                        logger.info("---")
                        for b in beams:
                            logger.info(" ".join([self.index2word[x] for x in b[1]]) + "(%f)" % b[0])
                    if max_beam_width == 0:
                        # We have sampled max_beam_width sequences with an <E>
                        # token so stop the beam search.
                        break
                    # Reproduce the structs for the beam search so we can keep
                    # track of the state of each beam
                    structs = self.make_duplicate_matrices(data[0], max_beam_width)
                    # Rewrite the 1-hot word features with the
                    # so-far-predcicted tokens in a beam.
                    for bidx, b in enumerate(beams):
                        for idx, w in enumerate(b[1]):
                            next_word_index = w
                            structs['text'][bidx, idx+1, w] = 1.
                # If none of the sentences emitted an <E> token while
                # decoding, add the final beams into the final candidates
                if len(finished) == 0:
                    for leftover in beams:
                        finished.append(leftover)
                # Normalise the probabilities by the length of the sequences
                # as suggested by Graves (2012) http://arxiv.org/abs/1211.3711
                for f in finished:
                    f[0] = f[0] / len(f[1])
                finished.sort(reverse=True)
                if self.args.verbose:
                    logger.info("Length-normalised samples")
                    logger.info("---")
                    for f in finished:
                        logger.info(" ".join([self.index2word[x] for x in f[1]]) + "(%f)" % f[0])
                # Emit the lowest (log) probability sequence
                # NOTE(review): `i` is reused from the initialisation loop
                # above; this only writes instance 0 because the generator is
                # driven with batch_size=1 in this branch.
                best_beam = finished[0]
                complete_sentences[i] = [self.index2word[x] for x in best_beam[1]]
                handle.write(' '.join([x for x
                                       in itertools.takewhile(
                                           lambda n: n != "<E>", complete_sentences[i])]) + "\n")
                if self.args.verbose:
                    logger.info("%s (%f)",' '.join([x for x
                                                    in itertools.takewhile(
                                                        lambda n: n != "<E>",
                                                        complete_sentences[i])]),
                                best_beam[0])
                seen += text.shape[0]
                if seen == self.data_gen.split_sizes['val']:
                    # Hacky way to break out of the generator
                    break
            handle.close()
        else:
            # We are going to arg max decode a sequence.
            prefix = "val" if val else "test"
            logger.info("Generating %s descriptions", prefix)
            start_gen = self.args.generate_from_N_words + 1  # include BOS
            handle = codecs.open("%s/%sGenerated" % (filepath, prefix),
                                 "w", 'utf-8')
            generator = self.data_gen.generation_generator(prefix)
            seen = 0
            for data in generator:
                text = deepcopy(data[0]['text'])
                # Append the first start_gen words to the complete_sentences list
                # for each instance in the batch.
                complete_sentences = [[] for _ in range(text.shape[0])]
                for t in range(start_gen):  # minimum 1
                    for i in range(text .shape[0]):
                        w = np.argmax(text[i, t])
                        complete_sentences[i].append(self.index2word[w])
                del data[0]['text']
                text = self.reset_text_arrays(text, start_gen)
                Y_target = data[1]['output']
                data[0]['text'] = text
                for t in range(start_gen, self.args.generation_timesteps):
                    logger.debug("Input token: %s" % self.index2word[np.argmax(text[0,t-1])])
                    preds = self.model.predict(data[0],
                                               verbose=0)
                    # Look at the last indices for the words.
                    next_word_indices = np.argmax(preds[:, t-1], axis=1)
                    logger.debug("Predicted token: %s" % self.index2word[next_word_indices[0]])
                    # update array[0]/sentence-so-far with generated words.
                    for i in range(len(next_word_indices)):
                        data[0]['text'][i, t, next_word_indices[i]] = 1.
                    next_words = [self.index2word[x] for x in next_word_indices]
                    for i in range(len(next_words)):
                        complete_sentences[i].append(next_words[i])
                sys.stdout.flush()
                # print/extract each sentence until it hits the first end-of-string token
                for s in complete_sentences:
                    if self.args.verbose:
                        # NOTE(review): logs complete_sentences[i] (stale
                        # index from the loop above), not the current `s`.
                        logger.info("%s",' '.join([x for x
                                                    in itertools.takewhile(
                                                        lambda n: n != "<E>",
                                                        complete_sentences[i])]))
                    decoded_str = ' '.join([x for x
                                            in itertools.takewhile(
                                                lambda n: n != "<E>", s[1:])])
                    handle.write(decoded_str + "\n")
                seen += text.shape[0]
                if seen == self.data_gen.split_sizes[prefix]:
                    # Hacky way to break out of the generator
                    break
            handle.close()
def calculate_pplx(self, path, val=True):
""" Splits the input data into batches of self.args.batch_size to
reduce the memory footprint of holding all of the data in RAM. """
prefix = "val" if val else "test"
logger.info("Calculating pplx over %s data", prefix)
sum_logprobs = 0
y_len = 0
generator = self.data_gen.generation_generator(prefix)
seen = 0
for data in generator:
Y_target = deepcopy(data[1]['output'])
del data[1]['output']
preds = self.model.predict(data[0],
verbose=0,
batch_size=self.args.batch_size)
for i in range(Y_target.shape[0]):
for t in range(Y_target.shape[1]):
target_idx = np.argmax(Y_target[i, t])
target_tok = self.index2word[target_idx]
if target_tok != "<P>":
log_p = math.log(preds[i, t, target_idx],2)
sum_logprobs += -log_p
y_len += 1
seen += data[0]['text'].shape[0]
if seen == self.data_gen.split_sizes[prefix]:
# Hacky way to break out of the generator
break
norm_logprob = sum_logprobs / y_len
pplx = math.pow(2, norm_logprob)
logger.info("PPLX: %.4f", pplx)
handle = open("%s/%sPPLX" % (path, prefix), "w")
handle.write("%f\n" % pplx)
handle.close()
return pplx
def reset_text_arrays(self, text_arrays, fixed_words=1):
""" Reset the values in the text data structure to zero so we cannot
accidentally pass them into the model.
Helper function for generate_sentences().
"""
reset_arrays = deepcopy(text_arrays)
reset_arrays[:,fixed_words:, :] = 0
return reset_arrays
def make_duplicate_matrices(self, generator_data, k):
'''
Prepare K duplicates of the input data for a given instance yielded by
the data generator.
Helper function for the beam search decoder in generation_sentences().
'''
if self.use_sourcelang and self.use_image:
# the data generator yielded a dictionary with the words, the
# image features, and the source features
dupes = [[],[],[]]
words = generator_data['text']
img = generator_data['img']
source = generator_data['src']
for x in range(k):
# Make a deep copy of the word_feats structures
# so the arrays will never be shared
dupes[0].append(deepcopy(words[0,:,:]))
dupes[1].append(source[0,:,:])
dupes[2].append(img[0,:,:])
# Turn the list of arrays into a numpy array
dupes[0] = np.array(dupes[0])
dupes[1] = np.array(dupes[1])
dupes[2] = np.array(dupes[2])
return {'text': dupes[0], 'img': dupes[2], 'src': dupes[1]}
elif self.use_image:
# the data generator yielded a dictionary with the words and the
# image features
dupes = [[],[]]
words = generator_data['text']
img = generator_data['img']
for x in range(k):
# Make a deep copy of the word_feats structures
# so the arrays will never be shared
dupes[0].append(deepcopy(words[0,:,:]))
dupes[1].append(img[0,:,:])
# Turn the list of arrays into a numpy array
dupes[0] = np.array(dupes[0])
dupes[1] = np.array(dupes[1])
return {'text': dupes[0], 'img': dupes[1]}
elif self.use_sourcelang:
# the data generator yielded a dictionary with the words and the
# source features
dupes = [[],[]]
words = generator_data['text']
source= generator_data['src']
for x in range(k):
# Make a deep copy of the word_feats structures
# so the arrays will never be shared
dupes[0].append(deepcopy(words[0,:,:]))
dupes[1].append(source[0,:,:])
# Turn the list of arrays into a numpy array
dupes[0] = np.array(dupes[0])
dupes[1] = np.array(dupes[1])
return {'text': dupes[0], 'src': dupes[1]}
def find_best_checkpoint(self):
'''
Read the summary file from the directory and scrape out the run ID of
the highest BLEU scoring checkpoint. Then do an ls-stlye function in
the directory and return the exact path to the best model.
Assumes only one matching prefix in the model checkpoints directory.
'''
summary_data = open("%s/summary" % self.args.model_checkpoints).readlines()
summary_data = [x.replace("\n", "") for x in summary_data]
best_id = None
target = "Best loss" if self.args.best_pplx else "Best Metric"
for line in summary_data:
if line.startswith(target):
best_id = "%03d" % (int(line.split(":")[1].split("|")[0]))
checkpoint = None
if best_id is not None:
checkpoints = os.listdir(self.args.model_checkpoints)
for c in checkpoints:
if c.startswith(best_id):
checkpoint = c
break
logger.info("Best checkpoint: %s/%s" % (self.args.model_checkpoints, checkpoint))
return "%s/%s" % (self.args.model_checkpoints, checkpoint)
    def bleu_score(self, directory, val=True):
        '''
        PPLX is only weakly correlated with improvements in BLEU,
        and thus improvements in human judgements. Let's also track
        BLEU score of a subset of generated sentences in the val split
        to decide on early stopping, etc.

        Runs multi-bleu.perl over the generated sentences, tees its output
        to <directory>/<val|test>BLEU and returns the parsed BLEU score as
        a float.
        '''
        prefix = "val" if val else "test"
        self.extract_references(directory, val)

        # NOTE(review): shell=True with an interpolated path is a shell
        # injection risk if `directory` is ever untrusted; it is needed here
        # because the command relies on shell redirection (< and |).
        subprocess.check_call(
            ['perl multi-bleu.perl %s/%s_reference.ref < %s/%sGenerated | tee %s/%sBLEU'
                % (directory, prefix, directory, prefix, directory, prefix)], shell=True)
        bleudata = open("%s/%sBLEU" % (directory, prefix)).readline()
        # multi-bleu.perl prints e.g. "BLEU = 27.51, ..."; take the number
        # after the first '=' of the first comma-separated field.
        data = bleudata.split(",")[0]
        bleuscore = data.split("=")[1]
        bleu = float(bleuscore.lstrip())
        return bleu
    def multeval_scores(self, directory, val=True):
        '''
        Maybe you want to evaluate with Meteor, TER, and BLEU?

        Runs the multeval toolkit (from MULTEVAL_DIR) over the extracted
        references and the generated sentences, then parses its output.

        Returns:
            (meteor, bleu, ter) as floats truncated to two decimals.

        NOTE(review): if multeval's output lacks any of the three RESULT
        lines, the corresponding variable is unbound and the final
        logger.info raises UnboundLocalError.
        '''
        prefix = "val" if val else "test"
        self.extract_references(directory, val)

        with cd(MULTEVAL_DIR):
            subprocess.check_call(
                ['./multeval.sh eval --refs ../%s/%s_reference.* \
                 --hyps-baseline ../%s/%sGenerated \
                 --meteor.language %s \
                 --threads 4 \
                2> multevaloutput 1> multevaloutput'
                % (directory, prefix, directory, prefix, self.args.meteor_lang)], shell=True)
            handle = open("multevaloutput")
            multdata = handle.readlines()
            handle.close()

            # Scrape the AVG scores out of multeval's report, truncating
            # each to two decimal places.
            for line in multdata:
              if line.startswith("RESULT: baseline: BLEU: AVG:"):
                mbleu = line.split(":")[4]
                mbleu = mbleu.replace("\n","")
                mbleu = mbleu.strip()
                lr = mbleu.split(".")
                mbleu = float(lr[0]+"."+lr[1][0:2])
              if line.startswith("RESULT: baseline: METEOR: AVG:"):
                mmeteor = line.split(":")[4]
                mmeteor = mmeteor.replace("\n","")
                mmeteor = mmeteor.strip()
                lr = mmeteor.split(".")
                mmeteor = float(lr[0]+"."+lr[1][0:2])
              if line.startswith("RESULT: baseline: TER: AVG:"):
                mter = line.split(":")[4]
                mter = mter.replace("\n","")
                mter = mter.strip()
                lr = mter.split(".")
                mter = float(lr[0]+"."+lr[1][0:2])

            logger.info("Meteor = %.2f | BLEU = %.2f | TER = %.2f",
                        mmeteor, mbleu, mter)

            return mmeteor, mbleu, mter
def extract_references(self, directory, val=True):
"""
Get reference descriptions for split we are generating outputs for.
Helper function for bleu_score().
"""
prefix = "val" if val else "test"
references = self.data_gen.get_refs_by_split_as_list(prefix)
for refid in xrange(len(references[0])):
codecs.open('%s/%s_reference.ref%d'
% (directory, prefix, refid), 'w', 'utf-8').write('\n'.join([x[refid] for x in references]))
def build_model(self, generate=False):
'''
Build a Keras model if one does not yet exist.
Helper function for generate().
'''
if generate:
t = self.args.generation_timesteps
else:
t = self.data_gen.max_seq_len
if self.args.mrnn:
m = models.MRNN(self.args.embed_size, self.args.hidden_size,
self.vocab_len,
self.args.dropin,
self.args.optimiser, self.args.l2reg,
hsn_size=self.hsn_size,
weights=self.args.checkpoint,
gru=self.args.gru,
clipnorm=self.args.clipnorm,
t=t)
else:
m = models.NIC(self.args.embed_size, self.args.hidden_size,
self.vocab_len,
self.args.dropin,
self.args.optimiser, self.args.l2reg,
hsn_size=self.hsn_size,
weights=self.args.checkpoint,
gru=self.args.gru,
clipnorm=self.args.clipnorm,
t=t)
self.model = m.buildKerasModel(use_sourcelang=self.use_sourcelang,
use_image=self.use_image)
if __name__ == "__main__":

    parser = argparse.ArgumentParser(description="Generate descriptions from a trained model")

    # General options
    parser.add_argument("--run_string", default="", type=str,
                        help="Optional string to help you identify the run")
    parser.add_argument("--debug", action="store_true",
                        help="Print debug messages to stdout?")
    parser.add_argument("--fixed_seed", action="store_true",
                        help="Start with a fixed random seed? Useful for\
                        reproding experiments. (default = False)")
    parser.add_argument("--num_sents", default=5, type=int,
                        help="Number of descriptions/image for training")
    parser.add_argument("--model_checkpoints", type=str, required=True,
                        help="Path to the checkpointed parameters")
    parser.add_argument("--best_pplx", action="store_true",
                        help="Use the best PPLX checkpoint instead of the\
                        best BLEU checkpoint? Default = False.")

    # Define the types of input data the model will receive
    parser.add_argument("--dataset", default="", type=str, help="Path to the\
                        HDF5 dataset to use for training / val input\
                        (defaults to flickr8k)")
    parser.add_argument("--supertrain_datasets", nargs="+", help="Paths to the\
                        datasets to use as additional training input (defaults\
                        to None)")
    parser.add_argument("--unk", type=int,
                        help="unknown character cut-off. Default=3", default=3)
    parser.add_argument("--maximum_length", type=int, default=50,
                        help="Maximum length of sequences permissible\
                        in the training data (Default = 50)")
    # BUGFIX: the help text below used an unescaped "" inside a
    # double-quoted string, which Python concatenated into nothing; use
    # single quotes so the empty-string default is actually displayed.
    parser.add_argument("--existing_vocab", type=str, default="",
                        help="Use an existing vocabulary model to define the\
                        vocabulary and UNKing in this dataset?\
                        (default = '', which means we will derive the\
                        vocabulary from the training dataset")
    parser.add_argument("--no_image", action="store_true",
                        help="Do not use image data.")
    parser.add_argument("--source_vectors", default=None, type=str,
                        help="Path to final hidden representations of\
                        encoder/source language VisualWordLSTM model.\
                        (default: None.) Expects a final_hidden_representation\
                        vector for each image in the dataset")
    parser.add_argument("--source_enc", type=str, default=None,
                        help="Which type of source encoder features? Expects\
                        either 'mt_enc' or 'vis_enc'. Required.")
    parser.add_argument("--source_type", type=str, default=None,
                        help="Source features over gold or predicted tokens?\
                        Expects 'gold' or 'predicted'. Required")
    parser.add_argument("--source_merge", type=str, default="sum",
                        help="How to merge source features. Only applies if \
                        there are multiple feature vectors. Expects 'sum', \
                        'avg', or 'concat'.")

    # Model hyperparameters
    parser.add_argument("--batch_size", default=100, type=int)
    parser.add_argument("--embed_size", default=256, type=int)
    parser.add_argument("--hidden_size", default=256, type=int)
    parser.add_argument("--dropin", default=0.5, type=float,
                        help="Prob. of dropping embedding units. Default=0.5")
    parser.add_argument("--gru", action="store_true", help="Use GRU instead\
                        of LSTM recurrent state? (default = False)")
    parser.add_argument("--big_batch_size", default=10000, type=int,
                        help="Number of examples to load from disk at a time;\
                        0 loads entire dataset. Default is 10000")
    parser.add_argument("--mrnn", action="store_true",
                        help="Use a Mao-style multimodal recurrent neural\
                        network?")
    parser.add_argument("--peeking_source", action="store_true",
                        help="Input the source features at every timestep?\
                        Default=False.")

    # Optimisation details
    parser.add_argument("--optimiser", default="adam", type=str,
                        help="Optimiser: rmsprop, momentum, adagrad, etc.")
    parser.add_argument("--lr", default=0.001, type=float)
    parser.add_argument("--beta1", default=None, type=float)
    parser.add_argument("--beta2", default=None, type=float)
    parser.add_argument("--epsilon", default=None, type=float)
    parser.add_argument("--stopping_loss", default="bleu", type=str,
                        help="minimise cross-entropy or maximise BLEU?")
    parser.add_argument("--l2reg", default=1e-8, type=float,
                        help="L2 cost penalty. Default=1e-8")
    parser.add_argument("--clipnorm", default=-1, type=float,
                        help="Clip gradients? (default = -1, which means\
                        don't clip the gradients.")
    parser.add_argument("--max_epochs", default=50, type=int,
                        help="Maxmimum number of training epochs. Used with\
                        --predefined_epochs")
    parser.add_argument("--patience", type=int, default=10, help="Training\
                        will be terminated if validation BLEU score does not\
                        increase for this number of epochs")
    parser.add_argument("--no_early_stopping", action="store_true")

    # Language generation details
    parser.add_argument("--generation_timesteps", default=10, type=int,
                        help="Maximum number of words to generate for unseen\
                        data (default=10).")
    parser.add_argument("--test", action="store_true",
                        help="Generate for the test images? (Default=False)\
                        which means we will generate for the val images")
    parser.add_argument("--without_scores", action="store_true",
                        help="Don't calculate BLEU or perplexity. Useful if\
                        you only want to see the generated sentences or if\
                        you don't have ground-truth sentences for evaluation.")
    parser.add_argument("--beam_width", type=int, default=1,
                        help="Number of hypotheses to consider when decoding.\
                        Default=1, which means arg max decoding.")
    parser.add_argument("--verbose", action="store_true",
                        help="Verbose output while decoding? If you choose\
                        verbose output then you'll see the total beam search\
                        decoding process. (Default = False)")
    parser.add_argument("--multeval", action="store_true",
                        help="Evaluate using multeval?")
    parser.add_argument("--meteor_lang", type=str, required=True,
                        help="Language of the input dataset. Required for\
                        correct Meteor evaluation. See\
                        http://www.cs.cmu.edu/~alavie/METEOR/README.html#languages\
                        for options.")
    parser.add_argument("--no_pplx", action="store_true",
                        help="Skip perplexity calculation?")

    # Legacy options
    parser.add_argument("--generate_from_N_words", type=int, default=0,
                        help="Use N words as starting point when generating\
                        strings. Useful mostly for mt-only model (in other\
                        cases, image provides enough useful starting\
                        context.)")
    parser.add_argument("--predefined_epochs", action="store_true",
                        help="Do you want to stop training after a specified\
                        number of epochs, regardless of early-stopping\
                        criteria? Use in conjunction with --max_epochs.")

    # Neccesary but unused in this module
    parser.add_argument("--h5_writeable", action="store_true",
                        help="Open the H5 file for write-access? Useful for\
                        serialising hidden states to disk. (default = False)")
    parser.add_argument("--use_predicted_tokens", action="store_true",
                        help="Generate final hidden state\
                        activations over oracle inputs or from predicted\
                        inputs? Default = False ( == Oracle)")

    w = GroundedTranslationGenerator(parser.parse_args())
    w.generate()
| [
"logging.getLogger",
"math.log",
"numpy.argsort",
"numpy.array",
"copy.deepcopy",
"os.listdir",
"argparse.ArgumentParser",
"sys.stdout.flush",
"subprocess.check_call",
"models.MRNN",
"itertools.takewhile",
"numpy.argmax",
"h5py.File",
"data_generator.VisualWordDataGenerator",
"codecs.ope... | [((58, 94), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': '"""nan"""'}), "(threshold='nan')\n", (77, 94), True, 'import numpy as np\n'), ((356, 414), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'stream': 'sys.stdout'}), '(level=logging.INFO, stream=sys.stdout)\n', (375, 414), False, 'import logging\n'), ((424, 451), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (441, 451), False, 'import logging\n'), ((25038, 25124), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate descriptions from a trained model"""'}), "(description=\n 'Generate descriptions from a trained model')\n", (25061, 25124), False, 'import argparse\n'), ((559, 570), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (568, 570), False, 'import os\n'), ((862, 873), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (871, 873), False, 'import os\n'), ((882, 904), 'os.chdir', 'os.chdir', (['self.newPath'], {}), '(self.newPath)\n', (890, 904), False, 'import os\n'), ((963, 987), 'os.chdir', 'os.chdir', (['self.savedPath'], {}), '(self.savedPath)\n', (971, 987), False, 'import os\n'), ((1932, 1985), 'data_generator.VisualWordDataGenerator', 'VisualWordDataGenerator', (['self.args', 'self.args.dataset'], {}), '(self.args, self.args.dataset)\n', (1955, 1985), False, 'from data_generator import VisualWordDataGenerator\n'), ((16177, 16202), 'math.pow', 'math.pow', (['(2)', 'norm_logprob'], {}), '(2, norm_logprob)\n', (16185, 16202), False, 'import math\n'), ((16653, 16674), 'copy.deepcopy', 'deepcopy', (['text_arrays'], {}), '(text_arrays)\n', (16661, 16674), False, 'from copy import deepcopy\n'), ((20900, 21082), 'subprocess.check_call', 'subprocess.check_call', (["[\n 'perl multi-bleu.perl %s/%s_reference.ref < %s/%sGenerated | tee %s/%sBLEU'\n % (directory, prefix, directory, prefix, directory, prefix)]"], {'shell': '(True)'}), "([\n 'perl multi-bleu.perl %s/%s_reference.ref < 
%s/%sGenerated | tee %s/%sBLEU'\n % (directory, prefix, directory, prefix, directory, prefix)], shell=True)\n", (20921, 21082), False, 'import subprocess\n'), ((1610, 1647), 'h5py.File', 'h5py.File', (['"""flickr8k/dataset.h5"""', '"""r"""'], {}), "('flickr8k/dataset.h5', 'r')\n", (1619, 1647), False, 'import h5py\n'), ((1689, 1740), 'h5py.File', 'h5py.File', (["('%s/dataset.h5' % self.args.dataset)", '"""r"""'], {}), "('%s/dataset.h5' % self.args.dataset, 'r')\n", (1698, 1740), False, 'import h5py\n'), ((4241, 4305), 'codecs.open', 'codecs.open', (["('%s/%sGenerated' % (filepath, prefix))", '"""w"""', '"""utf-8"""'], {}), "('%s/%sGenerated' % (filepath, prefix), 'w', 'utf-8')\n", (4252, 4305), False, 'import codecs\n'), ((12034, 12098), 'codecs.open', 'codecs.open', (["('%s/%sGenerated' % (filepath, prefix))", '"""w"""', '"""utf-8"""'], {}), "('%s/%sGenerated' % (filepath, prefix), 'w', 'utf-8')\n", (12045, 12098), False, 'import codecs\n'), ((15287, 15314), 'copy.deepcopy', 'deepcopy', (["data[1]['output']"], {}), "(data[1]['output'])\n", (15295, 15314), False, 'from copy import deepcopy\n'), ((17731, 17749), 'numpy.array', 'np.array', (['dupes[0]'], {}), '(dupes[0])\n', (17739, 17749), True, 'import numpy as np\n'), ((17773, 17791), 'numpy.array', 'np.array', (['dupes[1]'], {}), '(dupes[1])\n', (17781, 17791), True, 'import numpy as np\n'), ((17815, 17833), 'numpy.array', 'np.array', (['dupes[2]'], {}), '(dupes[2])\n', (17823, 17833), True, 'import numpy as np\n'), ((20147, 20186), 'os.listdir', 'os.listdir', (['self.args.model_checkpoints'], {}), '(self.args.model_checkpoints)\n', (20157, 20186), False, 'import os\n'), ((21581, 21890), 'subprocess.check_call', 'subprocess.check_call', (["[\n './multeval.sh eval --refs ../%s/%s_reference.* --hyps-baseline ../%s/%sGenerated --meteor.language %s \\t\\t --threads 4 \\t\\t2> multevaloutput 1> multevaloutput'\n % (directory, prefix, directory, prefix, self.args.meteor_lang)]"], {'shell': '(True)'}), "([\n 
'./multeval.sh eval --refs ../%s/%s_reference.* --hyps-baseline ../%s/%sGenerated --meteor.language %s \\t\\t --threads 4 \\t\\t2> multevaloutput 1> multevaloutput'\n % (directory, prefix, directory, prefix, self.args.meteor_lang)],\n shell=True)\n", (21602, 21890), False, 'import subprocess\n'), ((23914, 24163), 'models.MRNN', 'models.MRNN', (['self.args.embed_size', 'self.args.hidden_size', 'self.vocab_len', 'self.args.dropin', 'self.args.optimiser', 'self.args.l2reg'], {'hsn_size': 'self.hsn_size', 'weights': 'self.args.checkpoint', 'gru': 'self.args.gru', 'clipnorm': 'self.args.clipnorm', 't': 't'}), '(self.args.embed_size, self.args.hidden_size, self.vocab_len,\n self.args.dropin, self.args.optimiser, self.args.l2reg, hsn_size=self.\n hsn_size, weights=self.args.checkpoint, gru=self.args.gru, clipnorm=\n self.args.clipnorm, t=t)\n', (23925, 24163), False, 'import models\n'), ((24404, 24652), 'models.NIC', 'models.NIC', (['self.args.embed_size', 'self.args.hidden_size', 'self.vocab_len', 'self.args.dropin', 'self.args.optimiser', 'self.args.l2reg'], {'hsn_size': 'self.hsn_size', 'weights': 'self.args.checkpoint', 'gru': 'self.args.gru', 'clipnorm': 'self.args.clipnorm', 't': 't'}), '(self.args.embed_size, self.args.hidden_size, self.vocab_len,\n self.args.dropin, self.args.optimiser, self.args.l2reg, hsn_size=self.\n hsn_size, weights=self.args.checkpoint, gru=self.args.gru, clipnorm=\n self.args.clipnorm, t=t)\n', (24414, 24652), False, 'import models\n'), ((12280, 12305), 'copy.deepcopy', 'deepcopy', (["data[0]['text']"], {}), "(data[0]['text'])\n", (12288, 12305), False, 'from copy import deepcopy\n'), ((13868, 13886), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13884, 13886), False, 'import sys\n'), ((18484, 18502), 'numpy.array', 'np.array', (['dupes[0]'], {}), '(dupes[0])\n', (18492, 18502), True, 'import numpy as np\n'), ((18526, 18544), 'numpy.array', 'np.array', (['dupes[1]'], {}), '(dupes[1])\n', (18534, 18544), True, 'import numpy as 
np\n'), ((6641, 6679), 'numpy.argsort', 'np.argsort', (['(-next_word_indices)'], {'axis': '(1)'}), '(-next_word_indices, axis=1)\n', (6651, 6679), True, 'import numpy as np\n'), ((13311, 13345), 'numpy.argmax', 'np.argmax', (['preds[:, t - 1]'], {'axis': '(1)'}), '(preds[:, t - 1], axis=1)\n', (13320, 13345), True, 'import numpy as np\n'), ((15652, 15677), 'numpy.argmax', 'np.argmax', (['Y_target[i, t]'], {}), '(Y_target[i, t])\n', (15661, 15677), True, 'import numpy as np\n'), ((17535, 17559), 'copy.deepcopy', 'deepcopy', (['words[0, :, :]'], {}), '(words[0, :, :])\n', (17543, 17559), False, 'from copy import deepcopy\n'), ((19189, 19207), 'numpy.array', 'np.array', (['dupes[0]'], {}), '(dupes[0])\n', (19197, 19207), True, 'import numpy as np\n'), ((19231, 19249), 'numpy.array', 'np.array', (['dupes[1]'], {}), '(dupes[1])\n', (19239, 19249), True, 'import numpy as np\n'), ((23429, 23508), 'codecs.open', 'codecs.open', (["('%s/%s_reference.ref%d' % (directory, prefix, refid))", '"""w"""', '"""utf-8"""'], {}), "('%s/%s_reference.ref%d' % (directory, prefix, refid), 'w', 'utf-8')\n", (23440, 23508), False, 'import codecs\n'), ((5227, 5248), 'numpy.argmax', 'np.argmax', (['text[i, t]'], {}), '(text[i, t])\n', (5236, 5248), True, 'import numpy as np\n'), ((12646, 12667), 'numpy.argmax', 'np.argmax', (['text[i, t]'], {}), '(text[i, t])\n', (12655, 12667), True, 'import numpy as np\n'), ((15815, 15851), 'math.log', 'math.log', (['preds[i, t, target_idx]', '(2)'], {}), '(preds[i, t, target_idx], 2)\n', (15823, 15851), False, 'import math\n'), ((18335, 18359), 'copy.deepcopy', 'deepcopy', (['words[0, :, :]'], {}), '(words[0, :, :])\n', (18343, 18359), False, 'from copy import deepcopy\n'), ((19037, 19061), 'copy.deepcopy', 'deepcopy', (['words[0, :, :]'], {}), '(words[0, :, :])\n', (19045, 19061), False, 'from copy import deepcopy\n'), ((13069, 13094), 'numpy.argmax', 'np.argmax', (['text[0, t - 1]'], {}), '(text[0, t - 1])\n', (13078, 13094), True, 'import numpy as 
np\n'), ((14440, 14488), 'itertools.takewhile', 'itertools.takewhile', (["(lambda n: n != '<E>')", 's[1:]'], {}), "(lambda n: n != '<E>', s[1:])\n", (14459, 14488), False, 'import itertools\n'), ((11065, 11129), 'itertools.takewhile', 'itertools.takewhile', (["(lambda n: n != '<E>')", 'complete_sentences[i]'], {}), "(lambda n: n != '<E>', complete_sentences[i])\n", (11084, 11129), False, 'import itertools\n'), ((11327, 11391), 'itertools.takewhile', 'itertools.takewhile', (["(lambda n: n != '<E>')", 'complete_sentences[i]'], {}), "(lambda n: n != '<E>', complete_sentences[i])\n", (11346, 11391), False, 'import itertools\n'), ((7745, 7763), 'math.log', 'math.log', (['wordProb'], {}), '(wordProb)\n', (7753, 7763), False, 'import math\n'), ((14172, 14236), 'itertools.takewhile', 'itertools.takewhile', (["(lambda n: n != '<E>')", 'complete_sentences[i]'], {}), "(lambda n: n != '<E>', complete_sentences[i])\n", (14191, 14236), False, 'import itertools\n')] |
from copy import deepcopy
from typing import List
from rdkit import Chem
from icolos.core.step_utils.obabel_structconvert import OBabelStructConvert
from icolos.utils.enums.compound_enums import (
CompoundContainerEnum,
EnumerationContainerEnum,
)
from icolos.utils.enums.program_parameters import SchrodingerExecutablesEnum
from icolos.core.step_utils.structconvert import StructConvert
from icolos.utils.general.icolos_exceptions import ContainerCorrupted
from icolos.utils.enums.write_out_enums import WriteOutEnum
from typing import Union
import numpy as np
import os
_WE = WriteOutEnum()
_SEE = SchrodingerExecutablesEnum()
class Conformer:
    """Storage class for an individual conformer associated with a given Enumeration.

    Holds the RDKit molecule, the conformer id (unique within its parent
    Enumeration), a back-reference to that Enumeration, and an arbitrary
    extra-data dictionary for step-specific annotations.
    """

    def __init__(
        self,
        conformer: Chem.Mol = None,
        conformer_id: int = None,
        enumeration_object=None,
    ):
        self._conformer = conformer
        self._conformer_id = conformer_id
        self._enumeration_object = enumeration_object
        self._extra_data_dictionary = {}

    def get_compound_name(self) -> str:
        """Return the name of the grand-parent Compound (None if detached)."""
        if self.get_enumeration_object() is not None:
            return self.get_enumeration_object().get_compound_name()

    def get_index_string(self) -> str:
        """Return the hierarchical index "<compound>:<enumeration>:<conformer>".

        Missing levels are rendered as empty strings.
        """
        enum_obj = self.get_enumeration_object()
        enum_str = ""
        if enum_obj is not None:
            enum_str = enum_obj.get_index_string()
        conf_str = ""
        if self.get_conformer_id() is not None:
            conf_str = str(self.get_conformer_id())
        return ":".join([enum_str, conf_str])

    def add_extra_data(self, key: str, data):
        """Attach arbitrary auxiliary data under the given key."""
        self._extra_data_dictionary[key] = data

    def get_extra_data(self) -> dict:
        return self._extra_data_dictionary

    def clear_extra_data(self):
        self._extra_data_dictionary = {}

    def set_enumeration_object(self, enumeration_object):
        self._enumeration_object = enumeration_object

    def get_enumeration_object(self):
        return self._enumeration_object

    def get_molecule(self) -> Chem.Mol:
        return self._conformer

    def set_molecule(self, conformer: Chem.Mol):
        self._conformer = conformer

    def set_conformer_id(self, conformer_id: int):
        self._conformer_id = conformer_id

    def get_conformer_id(self) -> int:
        return self._conformer_id

    def empty(self) -> bool:
        """Return True if no molecule is attached."""
        return self.get_molecule() is None

    def _clone(self):
        # the parent Enumeration is referenced, not copied, on purpose
        clone = Conformer(
            conformer=deepcopy(self.get_molecule()),
            conformer_id=self.get_conformer_id(),
            enumeration_object=self.get_enumeration_object(),
        )
        clone._extra_data_dictionary = deepcopy(self.get_extra_data())
        return clone

    def __copy__(self):
        return self._clone()

    def __deepcopy__(self, memo):
        return self._clone()

    def __repr__(self):
        parent_enumeration_id = (
            None
            if self.get_enumeration_object() is None
            else self.get_enumeration_object().get_enumeration_id()
        )
        return "<Icolos conformer: id=%s, parent enumeration: %s>" % (
            self.get_conformer_id(),
            parent_enumeration_id,
        )

    def __str__(self):
        return self.__repr__()

    def write(self, path: str, format_=_WE.SDF):
        """Write the conformer to ``path``.

        The molecule is always written as SDF (with the index string stored as
        molecule name and property); if ``format_`` is PDB, the SDF file is
        converted with OpenBabel and the intermediate SDF removed.
        """
        writer = Chem.SDWriter(path)
        molecule = self.get_molecule()
        molecule.SetProp(_WE.RDKIT_NAME, self.get_index_string())
        molecule.SetProp(_WE.INDEX_STRING, self.get_index_string())
        writer.write(molecule)
        writer.close()
        if format_ == _WE.PDB:
            # BUGFIX: use splitext instead of path.split(".")[0], which
            # truncated the path at the FIRST dot (e.g. "/home/user.name/x.sdf")
            pdb_path = os.path.splitext(path)[0] + ".pdb"
            # convert the written sdf file to a pdb with OB
            converter = OBabelStructConvert()
            converter.sdf2pdb(sdf_file=path, pdb_file=pdb_path)
            os.remove(path)

    def update_coordinates(self, path: str):
        """Replace the stored molecule with the first molecule read from ``path``
        (SDF), carrying over all properties of the old molecule, and write the
        result to "<path>_out"."""
        old = self.get_molecule()
        for mol in Chem.SDMolSupplier(path, removeHs=False):
            mol.SetProp(_WE.RDKIT_NAME, old.GetProp(_WE.RDKIT_NAME))
            for prop in old.GetPropNames():
                mol.SetProp(prop, old.GetProp(prop))
            self.set_molecule(mol)
            # only one molecule expected at this stage, so stop after first run
            break
        self.write("".join([path, "_out"]))
class Enumeration:
    """Bundle of all information on one enumeration of a compound.

    An enumeration represents one concrete molecular state (SMILES / RDKit
    molecule) and owns the list of Conformer objects generated for it.
    """

    def __init__(
        self,
        compound_object=None,
        smile: str = "",
        molecule: Chem.Mol = None,
        original_smile: str = None,
        enumeration_id: int = None,
    ):
        self._MC = CompoundContainerEnum()
        self._EC = EnumerationContainerEnum()
        self._smile = smile
        self._compound_object = compound_object
        self._molecule = molecule
        self._original_smile = original_smile
        self._enumeration_id = enumeration_id
        self._conformers = []

    def empty(self) -> bool:
        """Return True if no conformers are attached."""
        return len(self.get_conformers()) == 0

    def get_compound_name(self) -> str:
        """Return the parent compound's name (None if detached)."""
        if self.get_compound_object() is not None:
            return self.get_compound_object().get_name()

    def _get_next_conformer_id(self) -> int:
        # next free id = max existing id + 1 (0 for an empty enumeration)
        ids = [conf.get_conformer_id() for conf in self.get_conformers()]
        if len(ids) == 0:
            return 0
        else:
            return max(ids) + 1

    def sort_conformers(
        self, by_tag: Union[str, List[str]], reverse: bool = True, aggregation="sum"
    ):
        """Sort the conformers in place by one or more SD tags.

        :param by_tag: a single tag name (conformers are sorted by its float
            value) or a list of tags; for a list, each tag is min-max
            normalised over all conformers and combined via ``aggregation``
        :param reverse: sort in descending order if True
        :param aggregation: "sum" or "product" (only used for a list of tags)
        :raises AttributeError: for an unsupported aggregation mode
        """
        conformers = self.get_conformers()
        if isinstance(by_tag, str):
            conformers = sorted(
                conformers,
                key=lambda x: float(x.get_molecule().GetProp(by_tag)),
                reverse=reverse,
            )
            self._conformers = conformers
            self.reset_conformer_ids()
        elif isinstance(by_tag, list):
            # need to normalise the values, calculate max and min of each tag in the series
            def normalise_tag(value, tag):
                all_tag_values = [
                    float(conf.get_molecule().GetProp(tag)) for conf in conformers
                ]
                max_tag = np.max(all_tag_values)
                min_tag = np.min(all_tag_values)
                if max_tag == min_tag:
                    # all conformers share the same value for this tag; treat
                    # it as neutral instead of dividing by zero
                    return 0.0
                return (float(value) - min_tag) / (max_tag - min_tag)

            # if we specify multiple tags, aggregate according to the provided aggregation function
            if aggregation == "sum":
                conformers = sorted(
                    conformers,
                    key=lambda x: np.sum(
                        [
                            float(normalise_tag(x.get_molecule().GetProp(i), i))
                            for i in by_tag
                        ]
                    ),
                    reverse=reverse,
                )
                self._conformers = conformers
            elif aggregation == "product":
                conformers = sorted(
                    conformers,
                    # np.prod: the np.product alias was removed in NumPy 2.0
                    key=lambda x: np.prod(
                        [
                            float(normalise_tag(x.get_molecule().GetProp(i), i))
                            for i in by_tag
                        ]
                    ),
                    reverse=reverse,
                )
                self._conformers = conformers
            else:
                raise AttributeError(
                    "Only sum or product aggregation modes are currently supported - ABORT"
                )

    def find_conformer(self, conformer_id: int) -> Conformer:
        """Return the conformer with the given id.

        :raises IndexError: if no conformer with this id exists
        :raises ContainerCorrupted: if the id occurs more than once
        """
        conf = [
            conf
            for conf in self.get_conformers()
            if conf.get_conformer_id() == conformer_id
        ]
        if len(conf) == 0:
            raise IndexError(f"Could not find conformer with id {conformer_id}.")
        elif len(conf) > 1:
            raise ContainerCorrupted(
                f"More than one conformer with id {conformer_id} found in the same Enumeration instance (compound_number: {self.get_enumeration_id()})."
            )
        return conf[0]

    def get_conformer_ids(self) -> List[int]:
        ids = [conf.get_conformer_id() for conf in self.get_conformers()]
        return ids

    def reset_conformer_ids(self):
        """Renumber conformers consecutively from 0 in list order."""
        for new_id, conf in enumerate(self.get_conformers()):
            conf.set_conformer_id(conformer_id=new_id)

    def add_conformer(self, conformer: Conformer, auto_update: bool = True):
        """Add a new conformer. If "auto_update" is True, the Enumeration class will be set to "self" and
        the conformer_id will be set to the next free index."""
        conformer = deepcopy(conformer)
        if auto_update:
            conformer.set_enumeration_object(self)
            conformer.set_conformer_id(self._get_next_conformer_id())
        self._conformers.append(conformer)

    def add_conformers(self, conformers: List[Conformer], auto_update: bool = True):
        """Add new conformers. If "auto_update" is True, the Enumeration class will be set to "self" and
        the conformer_id will be set to the next free index."""
        for conformer in conformers:
            self.add_conformer(conformer=conformer, auto_update=auto_update)

    def get_index_string(self) -> str:
        """Return the hierarchical index "<compound>:<enumeration>"."""
        comp_obj = self.get_compound_object()
        comp_str = ""
        if comp_obj is not None:
            comp_str = comp_obj.get_index_string()
        enum_str = ""
        if self.get_enumeration_id() is not None:
            enum_str = str(self.get_enumeration_id())
        return ":".join([comp_str, enum_str])

    def clean_failed_conformers(self):
        """Remove all conformers whose molecule was set to None (failed) and renumber."""
        # iterate in reverse so deletion does not shift pending indices
        for idx in list(reversed(range(len(self._conformers)))):
            if self._conformers[idx].get_molecule() is None:
                del self._conformers[idx]
        self.reset_conformer_ids()

    def clear_molecule(self):
        self._molecule = None

    def clear_conformers(self):
        self._conformers = []

    def get_conformers(self) -> List[Conformer]:
        return self._conformers

    def clone_conformers(self) -> List[Conformer]:
        """Return deep copies of all attached conformers."""
        return [deepcopy(conf) for conf in self._conformers]

    def set_compound_object(self, compound_object):
        self._compound_object = compound_object

    def get_compound_object(self):
        return self._compound_object

    def set_enumeration_id(self, enumeration_id: int):
        self._enumeration_id = enumeration_id

    def get_enumeration_id(self) -> int:
        return self._enumeration_id

    def set_smile(self, smile: str):
        self._smile = smile

    def get_smile(self) -> str:
        return self._smile

    def set_molecule(self, molecule: Chem.Mol):
        self._molecule = molecule

    def get_molecule(self) -> Chem.Mol:
        return self._molecule

    def set_original_smile(self, original_smile: str):
        self._original_smile = original_smile

    def get_original_smile(self) -> str:
        return self._original_smile

    def _clone(self):
        # conformers are deep-copied and re-pointed at the clone; the parent
        # Compound is referenced, not copied
        clone = Enumeration(
            compound_object=self.get_compound_object(),
            smile=self.get_smile(),
            molecule=deepcopy(self.get_molecule()),
            original_smile=self.get_original_smile(),
            enumeration_id=self.get_enumeration_id(),
        )
        for conf in self.get_conformers():
            conf = deepcopy(conf)
            conf.set_enumeration_object(enumeration_object=clone)
            clone.add_conformer(conf, auto_update=False)
        return clone

    def __copy__(self):
        return self._clone()

    def __deepcopy__(self, memo):
        return self._clone()

    def __repr__(self):
        parent_compound_id = (
            None
            if self.get_compound_object() is None
            else self.get_compound_object().get_compound_number()
        )
        return (
            "<Icolos enumeration: id=%s, smile=%s, parent compound: %s, num_conformers: %i>"
            % (
                self.get_enumeration_id(),
                self.get_smile(),
                parent_compound_id,
                len(self._conformers),
            )
        )

    def __str__(self):
        return self.__repr__()

    def __iter__(self):
        return iter(self._conformers)

    def __getitem__(self, key: int) -> Conformer:
        return self._conformers[key]

    def __len__(self) -> int:
        return len(self.get_conformers())
class Compound:
    """This class bundles all information on a molecule and serves mainly to group enumerations."""

    def __init__(self, name: str = "", compound_number: int = None):
        self._CC = CompoundContainerEnum()
        self._EC = EnumerationContainerEnum()
        self._name = name
        self._compound_number = compound_number
        self._enumerations = []

    def __repr__(self):
        return "<Icolos compound: name=%s, compound_number=%s, enumerations=%s>" % (
            self.get_name(),
            self.get_compound_number(),
            len(self.get_enumerations()),
        )

    def __str__(self):
        return self.__repr__()

    def get_index_string(self) -> str:
        """Return the compound number as a string ("" if not set)."""
        if self.get_compound_number() is not None:
            return str(self.get_compound_number())
        else:
            return ""

    def set_name(self, name: str):
        self._name = name

    def get_name(self) -> str:
        return self._name

    def set_compound_number(self, compound_number: int):
        self._compound_number = compound_number

    def get_compound_number(self) -> int:
        return self._compound_number

    def add_enumeration(self, enumeration: Enumeration, auto_update: bool = True):
        """Add a new enumeration. If "auto_update" is True, the Compound class will be set to "self" and
        the enumeration_id will be set to the next free index."""
        enumeration = deepcopy(enumeration)
        if auto_update:
            enumeration.set_compound_object(self)
            enumeration.set_enumeration_id(self._get_next_enumeration_id())
        self._enumerations.append(enumeration)

    def add_enumerations(
        self, enumerations: List[Enumeration], auto_update: bool = True
    ):
        """Add new enumerations. If "auto_update" is True, the Compound class will be set to "self" and
        the enumeration_id will be set to the next free index."""
        for enumeration in enumerations:
            self.add_enumeration(enumeration=enumeration, auto_update=auto_update)

    def clear_enumerations(self):
        self._enumerations = []

    def get_enumerations(self) -> List[Enumeration]:
        return self._enumerations

    def _clone(self):
        # deep-copy enumerations but re-point their parent reference at the clone
        clone = Compound(
            name=self.get_name(), compound_number=self.get_compound_number()
        )
        for enum in self.get_enumerations():
            enum = deepcopy(enum)
            enum.set_compound_object(compound_object=clone)
            clone.add_enumeration(enum, auto_update=False)
        return clone

    def __iter__(self):
        return iter(self._enumerations)

    def __copy__(self):
        return self._clone()

    def __deepcopy__(self, memo):
        return self._clone()

    def __getitem__(self, key: int) -> Enumeration:
        return self._enumerations[key]

    def __len__(self) -> int:
        return len(self.get_enumerations())

    def _get_next_enumeration_id(self):
        # next free id = max existing id + 1 (0 for an empty compound)
        ids = [enum.get_enumeration_id() for enum in self.get_enumerations()]
        if len(ids) == 0:
            return 0
        else:
            return max(ids) + 1

    def find_enumeration(self, enumeration_id: int) -> Enumeration:
        """Return the enumeration with the given id.

        A second, simpler ``find_enumeration`` definition used to precede this
        one in the class body; it was dead code (silently shadowed by this
        definition) and has been removed.

        :raises IndexError: if no enumeration with this id exists
        :raises ContainerCorrupted: if the id occurs more than once
        """
        enum = [
            enum
            for enum in self.get_enumerations()
            if enum.get_enumeration_id() == enumeration_id
        ]
        if len(enum) == 0:
            raise IndexError(f"Could not find enumeration with id {enumeration_id}.")
        elif len(enum) > 1:
            raise ContainerCorrupted(
                f"More than one enumeration with id {enumeration_id} found in the same Compound instance (compound_number: {self.get_compound_number()})."
            )
        return enum[0]

    def get_enumeration_ids(self) -> List[int]:
        ids = [enum.get_enumeration_id() for enum in self.get_enumerations()]
        return ids

    def reset_enumeration_ids(self):
        """Renumber enumerations consecutively from 0 in list order."""
        for new_id, enum in enumerate(self.get_enumerations()):
            enum.set_enumeration_id(enumeration_id=new_id)

    def reset_all_ids(self):
        """Renumber enumerations and all their conformers consecutively from 0."""
        self.reset_enumeration_ids()
        for enum in self.get_enumerations():
            enum.reset_conformer_ids()

    def update_all_relations(self):
        """Re-link all child objects (enumerations and their conformers) to their parents."""
        for enum in self.get_enumerations():
            enum.set_compound_object(self)
            for conf in enum.get_conformers():
                conf.set_enumeration_object(enum)

    def empty(self) -> bool:
        """Return True if no enumerations are attached."""
        if len(self.get_enumerations()) == 0:
            return True
        return False

    def unroll_conformers(self) -> List[Conformer]:
        """Return a flat list of all conformers over all enumerations."""
        conformers = []
        for enum in self.get_enumerations():
            # guard against empty enumerations that might be used when constructing more complex data flows
            if enum.empty():
                continue
            for conf in enum.get_conformers():
                conformers.append(conf)
        return conformers
# TODO: Replacing these three functions by a wrapper object
def get_compound_by_id(compounds: List[Compound], id: int) -> Compound:
    """Return the first compound whose compound number equals ``id``.

    :raises ValueError: if no such compound is present
    """
    match = next(
        (comp for comp in compounds if comp.get_compound_number() == id), None
    )
    if match is not None:
        return match
    raise ValueError(
        f"Could not find compound with id {id} in list of length {len(compounds)}."
    )
def get_compound_by_name(compounds: List[Compound], name: str) -> Compound:
    """Return the first compound whose name equals ``name``.

    :raises ValueError: if no such compound is present
    """
    match = next((comp for comp in compounds if comp.get_name() == name), None)
    if match is not None:
        return match
    raise ValueError(
        f"Could not find compound with name {name} in list of length {len(compounds)}."
    )
def unroll_conformers(compounds: List[Compound]) -> List[Conformer]:
    """Flatten the conformers of all compounds into a single list."""
    all_conformers = []
    for comp in compounds:
        # extend() instead of repeated list concatenation (avoids quadratic copying)
        all_conformers.extend(comp.unroll_conformers())
    return all_conformers
def unroll_enumerations(compounds: List[Compound]) -> List[Enumeration]:
    """Flatten the enumerations of all compounds into a single list."""
    all_enumerations = []
    for comp in compounds:
        # extend() instead of repeated list concatenation (avoids quadratic copying)
        all_enumerations.extend(comp.get_enumerations())
    return all_enumerations
| [
"icolos.utils.enums.compound_enums.CompoundContainerEnum",
"numpy.min",
"numpy.max",
"icolos.utils.enums.write_out_enums.WriteOutEnum",
"icolos.core.step_utils.obabel_structconvert.OBabelStructConvert",
"rdkit.Chem.SDMolSupplier",
"copy.deepcopy",
"rdkit.Chem.SDWriter",
"icolos.utils.enums.compound_... | [((588, 602), 'icolos.utils.enums.write_out_enums.WriteOutEnum', 'WriteOutEnum', ([], {}), '()\n', (600, 602), False, 'from icolos.utils.enums.write_out_enums import WriteOutEnum\n'), ((610, 638), 'icolos.utils.enums.program_parameters.SchrodingerExecutablesEnum', 'SchrodingerExecutablesEnum', ([], {}), '()\n', (636, 638), False, 'from icolos.utils.enums.program_parameters import SchrodingerExecutablesEnum\n'), ((3405, 3424), 'rdkit.Chem.SDWriter', 'Chem.SDWriter', (['path'], {}), '(path)\n', (3418, 3424), False, 'from rdkit import Chem\n'), ((4031, 4071), 'rdkit.Chem.SDMolSupplier', 'Chem.SDMolSupplier', (['path'], {'removeHs': '(False)'}), '(path, removeHs=False)\n', (4049, 4071), False, 'from rdkit import Chem\n'), ((4760, 4783), 'icolos.utils.enums.compound_enums.CompoundContainerEnum', 'CompoundContainerEnum', ([], {}), '()\n', (4781, 4783), False, 'from icolos.utils.enums.compound_enums import CompoundContainerEnum, EnumerationContainerEnum\n'), ((4803, 4829), 'icolos.utils.enums.compound_enums.EnumerationContainerEnum', 'EnumerationContainerEnum', ([], {}), '()\n', (4827, 4829), False, 'from icolos.utils.enums.compound_enums import CompoundContainerEnum, EnumerationContainerEnum\n'), ((9051, 9070), 'copy.deepcopy', 'deepcopy', (['conformer'], {}), '(conformer)\n', (9059, 9070), False, 'from copy import deepcopy\n'), ((13121, 13144), 'icolos.utils.enums.compound_enums.CompoundContainerEnum', 'CompoundContainerEnum', ([], {}), '()\n', (13142, 13144), False, 'from icolos.utils.enums.compound_enums import CompoundContainerEnum, EnumerationContainerEnum\n'), ((13164, 13190), 'icolos.utils.enums.compound_enums.EnumerationContainerEnum', 'EnumerationContainerEnum', ([], {}), '()\n', (13188, 13190), False, 'from icolos.utils.enums.compound_enums import CompoundContainerEnum, EnumerationContainerEnum\n'), ((14344, 14365), 'copy.deepcopy', 'deepcopy', (['enumeration'], {}), '(enumeration)\n', (14352, 14365), False, 'from copy import 
deepcopy\n'), ((3818, 3839), 'icolos.core.step_utils.obabel_structconvert.OBabelStructConvert', 'OBabelStructConvert', ([], {}), '()\n', (3837, 3839), False, 'from icolos.core.step_utils.obabel_structconvert import OBabelStructConvert\n'), ((3916, 3931), 'os.remove', 'os.remove', (['path'], {}), '(path)\n', (3925, 3931), False, 'import os\n'), ((10622, 10636), 'copy.deepcopy', 'deepcopy', (['conf'], {}), '(conf)\n', (10630, 10636), False, 'from copy import deepcopy\n'), ((11857, 11871), 'copy.deepcopy', 'deepcopy', (['conf'], {}), '(conf)\n', (11865, 11871), False, 'from copy import deepcopy\n'), ((15483, 15497), 'copy.deepcopy', 'deepcopy', (['enum'], {}), '(enum)\n', (15491, 15497), False, 'from copy import deepcopy\n'), ((6336, 6358), 'numpy.max', 'np.max', (['all_tag_values'], {}), '(all_tag_values)\n', (6342, 6358), True, 'import numpy as np\n'), ((6385, 6407), 'numpy.min', 'np.min', (['all_tag_values'], {}), '(all_tag_values)\n', (6391, 6407), True, 'import numpy as np\n')] |
#!/usr/bin/python3
'''
Abstract:
This is a program for ploting
Usage:
plot_signal_noise_ratio.py [sed data]
The input sed data should arranged like that:
[ S1, S2, S3, ..., N1, N2, N3, ...],
[ S1, S2, S3, ..., N1, N2, N3, ...],
...
Editor:
Jacob975
##################################
# Python3 #
# This code is made in python3 #
##################################
20181023
####################################
update log
20181023 version alpha 1
1. The code works
20181119 version alpha 2
1. Allow you to upload two datalog for comparison
'''
import time
import numpy as np
import matplotlib.pyplot as plt
from sys import argv
#--------------------------------------------
# main code
if __name__ == "__main__":
    VERBOSE = 0
    # measure time
    start_time = time.time()
    # -----------------------------------
    # Load argv: one mandatory SED table, one optional second table for comparison
    if len(argv) < 2 or len(argv) > 3:
        print("The numbers of arguments is wrong.")
        print("Usage: plot_signal_noise_ratio.py [sed data]")
        exit(1)
    sed_name = argv[1]
    # This program allow you compare SNR.
    if len(argv) == 3:
        sed_name_2 = argv[2]
    # -----------------------------------
    # Load table
    sed_table = np.loadtxt(sed_name)
    sed_table_2 = None
    if len(argv) == 3:
        sed_table_2 = np.loadtxt(sed_name_2)
    band_name = ['J', 'H', 'K', 'IRAC1', 'IRAC2', 'IRAC3', 'IRAC4', 'MIPS1']
    # -----------------------------------
    # Plot the ratio
    # reference noise-to-signal ratios per band (0 means "no reference line")
    ratio = [0, 0, 0, 0.047, 0.047, 0.047, 0.047, 0.095]
    # the table holds fluxes in the first half of the columns and the
    # corresponding uncertainties in the second half
    number_of_bands = len(sed_table[0])//2
    fig, axs = plt.subplots(3, 3, figsize = (16, 12), sharex = 'all', sharey = 'all')
    plt.suptitle("SNR_{0}".format(sed_name[:4]), fontsize=28)
    axs = axs.ravel()
    for i in range(number_of_bands):
        axs[i].set_title(band_name[i])
        axs[i].set_ylabel('uncertainties(mJy)')
        axs[i].set_xlabel('flux(mJy)')
        # Histogram of sources on a secondary y-axis.
        ax2 = axs[i].twinx()
        numbers, bin_edges = np.histogram(sed_table[:,i], bins = np.logspace(-3, 4, 101))
        bins = bin_edges[1:]
        ax2.plot(bins, numbers)
        # Plot the S/N = 3 line
        axs[i].plot([3e-3, 3e3], [1e-3, 1e3], 'k--', alpha = 0.5)
        # Scatter all sources.
        axs[i].scatter(sed_table[:,i], sed_table[:,i+number_of_bands], s = 5, c = 'orange' )
        if len(argv) ==3:
            axs[i].scatter(sed_table_2[:,i], sed_table_2[:,i+number_of_bands], s = 5, c = 'r' )
        if ratio[i] != 0:
            axs[i].plot([0.01, 2000], [0.01*ratio[i], 2000*ratio[i]], 'k-', label = r'$\frac{N}{S}$ = %.4f' % ratio[i])
        # Basic settings
        axs[i].grid(True)
        # BUGFIX: the clipping keywords were swapped (nonposx was passed to
        # set_yscale and nonposy to set_xscale). NOTE(review): newer matplotlib
        # renamed both keywords to "nonpositive" -- confirm installed version.
        axs[i].set_yscale("log", nonposy='clip')
        axs[i].set_xscale('log', nonposx='clip')
        axs[i].set_ylim(ymin = 1e-4, ymax = 1e3)
        axs[i].set_xlim(xmin = 1e-3, xmax = 1e4)
        axs[i].legend()
    #plt.show()
    plt.savefig('{0}_signal_noise_relation.png'.format(sed_name[:4]))
    # -----------------------------------
    # measure time
    elapsed_time = time.time() - start_time
    print("Exiting Main Program, spending ", elapsed_time, "seconds.")
| [
"numpy.loadtxt",
"numpy.logspace",
"time.time",
"matplotlib.pyplot.subplots"
] | [((837, 848), 'time.time', 'time.time', ([], {}), '()\n', (846, 848), False, 'import time\n'), ((1269, 1289), 'numpy.loadtxt', 'np.loadtxt', (['sed_name'], {}), '(sed_name)\n', (1279, 1289), True, 'import numpy as np\n'), ((1635, 1699), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'figsize': '(16, 12)', 'sharex': '"""all"""', 'sharey': '"""all"""'}), "(3, 3, figsize=(16, 12), sharex='all', sharey='all')\n", (1647, 1699), True, 'import matplotlib.pyplot as plt\n'), ((1358, 1380), 'numpy.loadtxt', 'np.loadtxt', (['sed_name_2'], {}), '(sed_name_2)\n', (1368, 1380), True, 'import numpy as np\n'), ((3091, 3102), 'time.time', 'time.time', ([], {}), '()\n', (3100, 3102), False, 'import time\n'), ((2079, 2102), 'numpy.logspace', 'np.logspace', (['(-3)', '(4)', '(101)'], {}), '(-3, 4, 101)\n', (2090, 2102), True, 'import numpy as np\n')] |
"""Windpower and PV power calculation from ERA5 weather data using Feedinlib module.
This script gets input parameters from the config.mk file.
These parameters can be changed depending on the user interest.
target_file:=ERA5_data.nc : Netcdf file of weather data downloaded from CDS using feedinlib module
start_date:=2018-01-01 : Start date of dowloaded weather data (default time in UTC)
end_date:= 2018-12-31 : End date of downloaded weather data (default time in UTC)
lon:=8.15 : Longitude of data point
lat:=53.20 : Latitutude of data point
turbine_name:= E-101/3050 : Wind turbine type
hub_height:= 135 . Turbine hub height
wind_data:= wind_data.csv : Weather data in csv. It important (if not generated using the
"make weather_data") to load the csv file and set the columns and rows to feedinlib format.
see link below for example
https://github.com/oemof/feedinlib/blob/dev/example/simple_feedin.py
pv_panel:= Advent_Solar_Ventura_210___2008_ : Photovoltaic panel type
inverter_type:= ABB__MICRO_0_25_I_OUTD_US_208__208V_ : Inverter type
solar_data:= solar_data.csv : Weather data in csv. It important (if not generated using the
"make weather_data") to load the csv file and set the columns and rows to feedinlib format.
see link below for example
https://github.com/oemof/feedinlib/blob/dev/example/simple_feedin.py
Note: if the weather data is dowloaded using "make weather_data", the wind and solar data csv
files generated are already set to feedinlib format, hence no further work is needed to be done
before using them for feedin calculations.
see https://github.com/oemof/feedinlib/tree/dev/example for an example implementation of feedinlib.
"""
from feedinlib import Photovoltaic, WindPowerPlant
import pandas as pd
from numpy import isnan
import sys
import os
weather_dir = "../data/01_raw_input_data/"
def windpower_timeseries(wind_data, turbine_name, hub_height, scale=True):
    """Compute the wind power feed-in time series for a single turbine.

    :param wind_data: weather data in feedinlib format
    :param turbine_name: turbine type identifier (see the oemof database:
        https://github.com/wind-python/windpowerlib/blob/dev/windpowerlib/oedb/turbine_data.csv)
    :param hub_height: turbine hub height
    :param scale: if True, normalise the feed-in to the turbine's nominal power
    :return: the feed-in time series produced by feedinlib
    """
    turbine = WindPowerPlant(turbine_type=turbine_name, hub_height=hub_height)
    feedin_kwargs = {"weather": wind_data}
    if scale:
        feedin_kwargs["scaling"] = "nominal_power"
    return turbine.feedin(**feedin_kwargs)
def pv_timeseries(lon, lat, solar_data, pv_panel, inverter_type, scale=True):
    """Generate a PV feed-in time series with feedinlib.

    NaN entries and negative values in the resulting series are clamped
    to zero before returning.

    Parameters
    ----------
    lon, lat : float
        Coordinates of the PV site.
    solar_data : pandas.DataFrame
        Weather data already in feedinlib format.
    pv_panel : str
        Photovoltaic module name.
    inverter_type : str
        Inverter name.
    scale : bool
        When True, normalise the output by the system's peak power.
    """
    pv_system = Photovoltaic(
        module_name=pv_panel,
        inverter_name=inverter_type,
        azimuth=180,
        tilt=30,
        albedo=0.2,
    )
    if scale:
        feedin = pv_system.feedin(weather=solar_data,
                                  location=(lat, lon),
                                  scaling="peak_power")
    else:
        feedin = pv_system.feedin(weather=solar_data,
                                  location=(lat, lon))
    feedin[isnan(feedin)] = 0
    feedin[feedin < 0] = 0
    return feedin
if __name__ == "__main__":
    # CLI arguments:
    #   1: lon  2: lat  3: solar csv  4: wind csv  5: turbine type
    #   6: pv panel  7: inverter  8: hub height
    lon = float(sys.argv[1])
    lat = float(sys.argv[2])
    # NOTE(review): these two argv values are immediately overwritten by the
    # hard-coded pd.read_csv calls below — confirm whether the file names
    # were meant to come from the command line.
    solar_data = sys.argv[3]
    wind_data = sys.argv[4]
    turbine_name = sys.argv[5]
    pv_panel = sys.argv[6]
    inverter_type = sys.argv[7]
    hub_height = int(sys.argv[8])
    # parse timestamps as timezone-aware UTC
    solar_data = pd.read_csv(os.path.join(weather_dir, "solar_data.csv"),
                             index_col=0, date_parser=lambda idx:
                             pd.to_datetime(idx, utc=True))
    # read multi-index wind data
    wind_data = pd.read_csv(os.path.join(weather_dir, "wind_data.csv"), index_col=[
                            0], header=[0, 1], date_parser=lambda idx:
                            pd.to_datetime(idx, utc=True))
    # convert multi-index data frame columns levels to integer
    # (feedinlib expects the height level as int)
    wind_data.columns = wind_data.columns.set_levels(
        wind_data.columns.levels[1].astype(int), level=1)
    windpower = windpower_timeseries(
        wind_data, turbine_name, hub_height, scale=True)
    pvpower = pv_timeseries(lon, lat, solar_data,
                            pv_panel, inverter_type, scale=True)
    # persist both feed-in series next to the raw weather data
    windpower = windpower.to_frame().rename(
        columns={"feedin_power_plant": "wind"})
    windpower.to_csv(os.path.join(weather_dir, "wind_power.csv"))
    pvpower = pvpower.to_frame().rename(columns={0: "pv"})
    pvpower.to_csv(os.path.join(weather_dir, "pv_power.csv"))
| [
"feedinlib.Photovoltaic",
"os.path.join",
"numpy.isnan",
"feedinlib.WindPowerPlant",
"pandas.to_datetime"
] | [((2263, 2293), 'feedinlib.WindPowerPlant', 'WindPowerPlant', ([], {}), '(**turbine_spec)\n', (2277, 2293), False, 'from feedinlib import Photovoltaic, WindPowerPlant\n'), ((2838, 2865), 'feedinlib.Photovoltaic', 'Photovoltaic', ([], {}), '(**system_data)\n', (2850, 2865), False, 'from feedinlib import Photovoltaic, WindPowerPlant\n'), ((3196, 3212), 'numpy.isnan', 'isnan', (['feedin_pv'], {}), '(feedin_pv)\n', (3201, 3212), False, 'from numpy import isnan\n'), ((3595, 3638), 'os.path.join', 'os.path.join', (['weather_dir', '"""solar_data.csv"""'], {}), "(weather_dir, 'solar_data.csv')\n", (3607, 3638), False, 'import os\n'), ((3827, 3869), 'os.path.join', 'os.path.join', (['weather_dir', '"""wind_data.csv"""'], {}), "(weather_dir, 'wind_data.csv')\n", (3839, 3869), False, 'import os\n'), ((4514, 4557), 'os.path.join', 'os.path.join', (['weather_dir', '"""wind_power.csv"""'], {}), "(weather_dir, 'wind_power.csv')\n", (4526, 4557), False, 'import os\n'), ((4638, 4679), 'os.path.join', 'os.path.join', (['weather_dir', '"""pv_power.csv"""'], {}), "(weather_dir, 'pv_power.csv')\n", (4650, 4679), False, 'import os\n'), ((3735, 3764), 'pandas.to_datetime', 'pd.to_datetime', (['idx'], {'utc': '(True)'}), '(idx, utc=True)\n', (3749, 3764), True, 'import pandas as pd\n'), ((3982, 4011), 'pandas.to_datetime', 'pd.to_datetime', (['idx'], {'utc': '(True)'}), '(idx, utc=True)\n', (3996, 4011), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*-
# @Time : 2021/2/10 11:58 上午
# @Author : <NAME>
# @FileName: __init__.py
# @Software: PyCharm
# @Blog :https://lesliewongcv.github.io/
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../')
import GenericAlgorithm as GA
from math import *
def fitness_score(input, NP, D):
    """Sphere benchmark fitness: per-individual sum of squared coordinates.

    input : (NP, D) population matrix, one row per individual.
    NP    : population size (number of rows).
    D     : number of decision variables (columns) to accumulate.
    Returns a length-NP array of fitness values.
    """
    total = np.zeros(NP)
    for col in range(D):
        total = total + input[:, col] ** 2
    return total
def init_popu(NP, D):
    """Draw an initial population uniformly from [-100, 100) and score it.

    Returns (X, X_fitness): the (NP, D) population matrix and its
    fitness vector from ``fitness_score``.
    """
    population = 200 * np.random.random([NP, D]) - 100
    scores = fitness_score(population, NP, D)
    return population, scores
| [
"numpy.random.random",
"numpy.zeros",
"sys.path.append"
] | [((228, 250), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (243, 250), False, 'import sys\n'), ((342, 354), 'numpy.zeros', 'np.zeros', (['NP'], {}), '(NP)\n', (350, 354), True, 'import numpy as np\n'), ((608, 633), 'numpy.random.random', 'np.random.random', (['[NP, D]'], {}), '([NP, D])\n', (624, 633), True, 'import numpy as np\n')] |
import numpy as np
def unflatten(w, weights):
    """Cut the flat vector ``w`` back into arrays shaped like ``weights``.

    w       : 1-D array holding the concatenation of all weight arrays.
    weights : list of template arrays supplying the sizes and shapes.
    Returns a list of arrays, one per template, in the same order.
    """
    pieces = []
    offset = 0
    for template in weights:
        chunk = w[offset:offset + template.size]
        pieces.append(np.reshape(chunk, template.shape))
        offset += template.size
    return pieces
def flatten_update(update):
    """Concatenate every array in ``update`` into one flat 1-D vector."""
    flat_parts = [np.ravel(piece) for piece in update]
    return np.concatenate(flat_parts)
"numpy.cumsum",
"numpy.split",
"numpy.reshape"
] | [((102, 118), 'numpy.cumsum', 'np.cumsum', (['sizes'], {}), '(sizes)\n', (111, 118), True, 'import numpy as np\n'), ((141, 163), 'numpy.split', 'np.split', (['w', 'split_idx'], {}), '(w, split_idx)\n', (149, 163), True, 'import numpy as np\n'), ((228, 244), 'numpy.reshape', 'np.reshape', (['u', 's'], {}), '(u, s)\n', (238, 244), True, 'import numpy as np\n')] |
import sys
import os
import cv2
import numpy as np
import json
from PIL import Image, ImageFont, ImageDraw
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from urllib.request import urlretrieve
import requests
from keras.models import Model,load_model
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from yolo_detector import YOLO_detect
from yolo_multiple_output import YOLO, YOLO2
import time
# Load the detection models once at import time:
#   yolo        — stage 1: bag detection + type on the full frame
#   yolo2       — stage 2: colour classification on the cropped bag
#   yolo_detect — detector used by AE_sort to re-crop catalogue images
yolo = YOLO()
yolo2 = YOLO2()
yolo_detect = YOLO_detect()
# NOTE(review): ``global`` at module level is a no-op; ``link = []`` alone
# already creates the module-level name.
global link
link = []
class Ui_webcam(QtWidgets.QWidget):
    """Webcam widget running a two-stage bag detector on live frames.

    Stage 1 (module-level ``yolo``) finds bags and their type in each
    frame; stage 2 (``yolo2``) classifies the colour of every cropped
    bag.  Once both a colour and a type have been seen, the widget stops
    the camera, launches a PChome product search for the combined
    "<colour><type>" keyword and pushes six result thumbnails/links into
    the module-level widgets (``btn``, ``l1``..``l6``, ``ui1``..``ui6``
    created in ``__main__``).
    """

    def __init__(self, parent=None):
        super(Ui_webcam, self).__init__(parent)
        self.timer_camera = QtCore.QTimer()  # drives periodic frame grabs
        self.cap = cv2.VideoCapture()    # camera handle, opened on demand
        self.CAM_NUM = 0                  # default capture device index
        self.set_ui()
        self.slot_init()
        self.__flag_work = 0
        self.x = 0
        self.count = 0
        # last detection results; "0" is the "nothing detected yet" sentinel
        self.label_type = str(0)
        self.label_class = str(0)
        self.all_link = []

    def set_ui(self):
        """Build the static layout: logo, open/quit buttons, video area."""
        self.centralwidget = QtWidgets.QWidget()
        self.centralwidget.setObjectName("centralwidget")
        self.label_pic = QtWidgets.QLabel(self.centralwidget)
        self.label_pic.setGeometry(QtCore.QRect(600, 550, 300, 120))
        self.label_pic.setText("")
        pixmap = QtGui.QPixmap("figures/origin-logo.png")
        self.label_pic.setPixmap(pixmap.scaled(150, 150))
        self.label_pic.setObjectName("label_pic")
        # QHBoxLayout lays children out left-to-right
        self.__layout_main = QtWidgets.QHBoxLayout()
        self.__layout_fun_button = QtWidgets.QVBoxLayout()
        # QVBoxLayout stacks children vertically
        self.__layout_data_show = QtWidgets.QVBoxLayout()
        self.button_open_camera = QtWidgets.QPushButton(u'打開相機')
        self.button_close = QtWidgets.QPushButton(u'退出')
        self.button_open_camera.resize(70, 50)
        self.button_close.resize(50, 10)
        # place the window at (500, 500) on screen
        self.move(500, 500)
        # information display area
        self.label_show_camera = QtWidgets.QLabel()
        self.label_move = QtWidgets.QLabel()
        self.label_move.setFixedSize(100, 100)
        self.label_show_camera.setFixedSize(641, 481)
        self.label_show_camera.setAutoFillBackground(False)
        self.__layout_fun_button.addWidget(self.label_pic)
        self.__layout_fun_button.addWidget(self.button_open_camera)
        self.__layout_fun_button.addWidget(self.button_close)
        self.__layout_fun_button.addWidget(self.label_move)
        self.__layout_main.addLayout(self.__layout_fun_button)
        self.__layout_main.addWidget(self.label_show_camera)
        self.setLayout(self.__layout_main)
        self.label_move.raise_()
        self.setWindowTitle(u'攝像頭')

    def slot_init(self):
        """Wire up signal/slot connections."""
        self.button_open_camera.clicked.connect(self.button_open_camera_click)
        self.timer_camera.timeout.connect(self.show_camera)
        self.button_close.clicked.connect(self.close)

    def button_open_camera_click(self):
        """Toggle the camera: open it and start the 30 ms grab timer,
        or stop the timer and release the device."""
        if self.timer_camera.isActive() == False:
            flag = self.cap.open(self.CAM_NUM)
            if flag == False:
                # BUGFIX: QMessageBox.Warning is an enum value, not a
                # callable; the static helper is QMessageBox.warning().
                msg = QtWidgets.QMessageBox.warning(self, u'Warning', u'請檢測相機與電腦是否連接正確',
                                              buttons=QtWidgets.QMessageBox.Ok,
                                              defaultButton=QtWidgets.QMessageBox.Ok)
            else:
                self.timer_camera.start(30)
                self.button_open_camera.setText(u'關閉相機')
        else:
            self.timer_camera.stop()
            self.cap.release()
            self.label_show_camera.clear()
            self.button_open_camera.setText(u'打開相機')

    def show_camera(self):
        """Grab one frame, run detection + colour classification, and
        refresh the preview.  When a colour/type pair has been found,
        stops the camera and triggers the product search + link display
        (side effects on module-level widgets from ``__main__``)."""
        flag, self.image = self.cap.read()
        show = cv2.resize(self.image, (640, 480))
        show = cv2.flip(show, 1)
        show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
        # NOTE: shadows the `image` module imported from keras.preprocessing
        image = Image.fromarray(show)
        r_image, output_boxes, output_classes, label_t = yolo.detect_image(image)
        for i in range(0, len(output_boxes)):
            box = output_boxes[i]
            # crop the detected bag with a 3 px inset and classify its colour
            image_detect2 = r_image.crop([box[1]+3, box[0]+3, box[3]-3, box[2]-3])
            pil_img = Image.fromarray(np.array(image_detect2))
            pil_img.save('image/0.jpg')
            v = 0
            output_scores2, output_classes2, bg = yolo2.detect_image(image_detect2, v)
            font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
                                      size=np.floor(3e-2 * r_image.size[1] + 0.5).astype('int32'))
            # class index 5 is excluded from labelling (presumably
            # "non-bag" — TODO confirm against the model's class list)
            if (output_scores2 != 0) & (output_classes[i] != 5):
                label = '{} {:.2f}'.format(output_classes2, output_scores2)
                draw = ImageDraw.Draw(r_image)
                label_size = draw.textsize(label, font)
                top, left, bottom, right = box
                top = max(0, np.floor(top + 0.5).astype('int32'))
                left = max(0, np.floor(left + 0.5).astype('int32'))
                bottom = min(r_image.size[1], np.floor(bottom + 0.5).astype('int32'))
                right = min(r_image.size[0], np.floor(right + 0.5).astype('int32'))
                print(label, (left, top), (right, bottom))
                if top - label_size[1] >= 0:
                    text_origin = np.array([left, top - 2 * label_size[1]])
                else:
                    text_origin = np.array([left, 2 * (top + 1)])
                draw.rectangle(
                    [tuple(text_origin), tuple(text_origin + label_size)],
                    fill=bg)
                draw.text(text_origin, label, fill=(0, 0, 0), font=font)
                del draw
                # BUGFIX: the original declared ``global label_t`` /
                # ``global label_color`` *after* assigning those names,
                # which is a SyntaxError at import time.  Nothing else
                # reads these globals, so plain locals are used instead.
                label = label.split(' ')
                label_color = label
                label_type = label_t.split(' ')
                self.label_class = label_color[0]
                self.label_type = label_type[0]
        result = np.asarray(r_image)
        showImage = QtGui.QImage(result, show.shape[1], show.shape[0], QtGui.QImage.Format_RGB888)
        self.label_show_camera.setPixmap(QtGui.QPixmap.fromImage(showImage))
        # a length-1 value is still the "0" sentinel set in __init__
        if len(self.label_class) != 1:
            if len(self.label_type) != 1:
                # map detector labels to the Chinese search keyword parts;
                # 'error' is the fallback for unknown labels
                label_name={'backpack': '後背包',
                            'cross_bag': '斜背包',
                            'satchel_bag': '側背包',
                            'handbag': '手提包',
                            'fanny_pack':'腰包'
                            }.get(self.label_type, 'error')
                color_name={'black':'黑色',
                            'red':'紅色',
                            'white':'白色',
                            'blue':'藍色',
                            'brown':'咖啡色',
                            'yellow':'黃色',
                            'green':'綠色',
                            'gray':'灰色',
                            'pink':'粉紅色',
                            'orange':'橘色',
                            'other':'',
                            'purple':'紫色'
                            }.get(self.label_class,'error')
                name = '{}{}'.format(color_name, label_name)
                print(name)
                # crawler methods are deliberately called unbound with this
                # widget as ``self`` — they only touch self.all_link
                crawler.productsearch(name)
                crawler.pchomeinfo(self)
                if self.cap.isOpened():
                    self.cap.release()
                if self.timer_camera.isActive():
                    self.timer_camera.stop()
                index = AE_sort.sort(self)
                link = crawler.link(self)
                # populate the six result slots with the best-ranked products
                urlLink1 = " <a href=\"{url}\"> <font face=Tw Cen MT Condensed size=2 color=black>pick me</font> </a>".format(url=link[index[1]-1][0])
                btn.clicked.connect(lambda: l1.setText(urlLink1))
                l1.setOpenExternalLinks(True)
                req1 = requests.get(link[index[1]-1][1])
                photo1 = QPixmap()
                photo1.loadFromData(req1.content)
                btn.clicked.connect(lambda: ui1.setPixmap(photo1.scaled(200, 200)))
                urlLink2 = " <a href=\"{url}\"> <font face=Tw Cen MT Condensed size=2 color=black>pick me</font> </a>".format(url=link[index[2]-1][0])
                btn.clicked.connect(lambda: l2.setText(urlLink2))
                l2.setOpenExternalLinks(True)
                req2 = requests.get(link[index[2]-1][1])
                photo2 = QPixmap()
                photo2.loadFromData(req2.content)
                btn.clicked.connect(lambda: ui2.setPixmap(photo2.scaled(200,200)))
                urlLink3 = " <a href=\"{url}\"> <font face=Tw Cen MT Condensed size=2 color=black>pick me</font> </a>".format(url=link[index[3]-1][0])
                btn.clicked.connect(lambda: l3.setText(urlLink3))
                l3.setOpenExternalLinks(True)
                req3 = requests.get(link[index[3]-1][1])
                photo3 = QPixmap()
                photo3.loadFromData(req3.content)
                btn.clicked.connect(lambda: ui3.setPixmap(photo3.scaled(200,200)))
                urlLink4 = " <a href=\"{url}\"> <font face=Tw Cen MT Condensed size=2 color=black>pick me</font> </a>".format(url=link[index[4]-1][0])
                btn.clicked.connect(lambda: l4.setText(urlLink4))
                l4.setOpenExternalLinks(True)
                req4 = requests.get(link[index[4]-1][1])
                photo4 = QPixmap()
                photo4.loadFromData(req4.content)
                btn.clicked.connect(lambda: ui4.setPixmap(photo4.scaled(200,200)))
                urlLink5 = " <a href=\"{url}\"> <font face=Tw Cen MT Condensed size=2 color=black>pick me</font> </a>".format(url=link[index[5]-1][0])
                btn.clicked.connect(lambda: l5.setText(urlLink5))
                l5.setOpenExternalLinks(True)
                req5 = requests.get(link[index[5]-1][1])
                photo5 = QPixmap()
                photo5.loadFromData(req5.content)
                btn.clicked.connect(lambda: ui5.setPixmap(photo5.scaled(200,200)))
                urlLink6 = " <a href=\"{url}\"> <font face=Tw Cen MT Condensed size=2 color=black>pick me</font> </a>".format(url=link[index[6]-1][0])
                btn.clicked.connect(lambda: l6.setText(urlLink6))
                l6.setOpenExternalLinks(True)
                req6 = requests.get(link[index[6]-1][1])
                photo6 = QPixmap()
                photo6.loadFromData(req6.content)
                btn.clicked.connect(lambda: ui6.setPixmap(photo6.scaled(200,200)))

    def closeEvent(self, event):
        """Ask for confirmation before closing; release the camera and
        stop the timer when the user confirms."""
        ok = QtWidgets.QPushButton()
        cancel = QtWidgets.QPushButton()
        msg = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Warning, u'關閉', u'是否關閉!')
        msg.addButton(ok, QtWidgets.QMessageBox.ActionRole)
        msg.addButton(cancel, QtWidgets.QMessageBox.RejectRole)
        ok.setText(u'確定')
        cancel.setText(u'取消')
        if msg.exec_() == QtWidgets.QMessageBox.RejectRole:
            event.ignore()
        else:
            if self.cap.isOpened():
                self.cap.release()
            if self.timer_camera.isActive():
                self.timer_camera.stop()
            event.accept()
class Ui_MainWindow(QWidget):
    """Minimal widget that downloads an image from ``url`` and shows it
    scaled to 200x200 inside a vertical layout."""

    def __init__(self, parent=None, url=None):
        super().__init__(parent)
        self.url = url
        # fetch the image bytes and load them into a pixmap
        response = requests.get(self.url)
        pixmap = QPixmap()
        pixmap.loadFromData(response.content)
        self.label = QLabel()
        self.label.setPixmap(pixmap.scaled(200, 200))
        box = QVBoxLayout()
        box.addWidget(self.label)
        self.setLayout(box)
class crawler(object):
    """Namespace-style helper for building PChome/Shopee search URLs and
    scraping the first PChome result page.

    NOTE(review): the methods are called *unbound* elsewhere in the file
    (``crawler.productsearch(name)``, ``crawler.pchomeinfo(self)`` with a
    Ui_webcam instance as ``self``), so this class is never instantiated.
    """
    def __init__(self, parent=None):
        # NOTE(review): object.__init__ takes no extra arguments, so this
        # super() call would raise TypeError if the class were ever
        # instantiated with a parent — currently unreachable.
        super(crawler, self).__init__(parent)
        self.all_link = []
    def productsearch(main):
        # Build search URLs for the keyword ``main`` and stash them in
        # module-level globals read by pchomeinfo().
        # NOTE(review): ``itemname`` is declared global but never assigned.
        global pchomesearch
        global itemname
        global shpsearch
        str(main)  # NOTE(review): result discarded; this line has no effect
        pchome = 'https://ecshweb.pchome.com.tw/search/v3.3/all/results?q='
        pchomesearch = pchome + main
        shp1 = 'https://shopee.tw/search/?keyword='
        shp2 = '&sortBy=sales'
        shpsearch = shp1 + main + shp2
        print("Pchome24H購物:")
        print(pchomesearch)
    def pchomeinfo(self):
        # Fetch the PChome search JSON and collect (product URL, image URL)
        # pairs for results 1..10 into self.all_link, downloading each
        # thumbnail to image/<n>.jpg and logging the links to link.txt.
        global pchomesearch
        res = requests.get(pchomesearch)
        ress = res.text
        jd = json.loads(ress)
        pcitems = []
        pcprices = []
        pcurls = []
        picurls = []
        pcmainurl = 'http://24h.pchome.com.tw/prod/'
        picmainurl = 'https://b.ecimg.tw/'
        # NOTE(review): ``f`` is never closed — consider a ``with`` block.
        f = open('link.txt', 'w')
        for n in range(1, 11):
            try:
                # the full result list is re-appended on every iteration,
                # so pcitems/pcurls grow each pass; indexing by n still
                # selects the n-th result
                for item in jd['prods']:
                    pcitems.append(item['name'])
                    pcprices.append(item['price'])
                    url = pcmainurl + item['Id']
                    pcurls.append(url)
                    picurl = picmainurl + item['picB']
                    picurls.append(picurl)
                pcitems0 = pcitems[n]
                pcprices0 = pcprices[n]  # price
                pcurls0 = pcurls[n]  # URL
                picurls0 = picurls[n]  # bag_image
            except:
                # NOTE(review): this branch assigns ``picurls`` (the list)
                # instead of ``picurls0``; if it fires on the first pass,
                # the f.write/urlretrieve below raise NameError on
                # picurls0 — likely a typo to be fixed.
                pcitems0 = '唤哦,查無相關資料'
                pcprices0 = '噢哦,查無相關資料'
                pcurls0 = '噢哦,查無相關資料'
                picurls = '噢哦,查無相關資料'
            print(pcurls0)
            f.write(pcurls0+'\n')
            f.write(picurls0+'\n')
            if self.all_link == []:
                self.all_link = [[pcurls0]]
                self.all_link.append([picurls0])
            else:
                self.all_link.append([pcurls0])
                self.all_link.append([picurls0])
            local = os.path.join('image\\%s.jpg' % n)
            urlretrieve(picurls0, local)
        # 10 products x (url, image-url) pairs
        self.all_link = np.array(self.all_link).reshape(10, 2)
    def link(self):
        # Return the (10, 2) array of (product URL, image URL) pairs.
        return self.all_link
class AE_sort():
    """Ranks the downloaded product images by autoencoder-feature
    similarity to image/0.jpg (the bag cropped from the webcam frame).

    Called unbound as ``AE_sort.sort(self)`` with a Ui_webcam instance;
    ``self`` is unused.
    """
    def cosine_similarity(ratings):
        # Pairwise cosine similarity of the row vectors in ``ratings``.
        sim = ratings.dot(ratings.T)
        if not isinstance(sim, np.ndarray):
            # sparse-matrix result: densify before normalising
            sim = sim.toarray()
        norms = np.array([np.sqrt(np.diagonal(sim))])
        return (sim / norms / norms.T)
    def sort(self):
        # Re-crop each downloaded image to the detected bag, encode all
        # 11 images (index 0 = query) with the trained autoencoder, and
        # return product indices ordered by feature distance to index 0.
        imgDB_size = 11
        imagepath = 'image/'
        img_path = os.listdir(imagepath)
        for p in img_path:
            fullpath = os.path.join(imagepath, p)
            img = Image.open(fullpath)
            # NOTE(review): ``time`` shadows the imported time module here
            r_image, boxes, classes, scores, time = yolo_detect.detect_image(img)
            if len(scores) > 0:
                # keep only the highest-scoring detection's crop
                max_score = np.argmax(scores)
                image_detect2 = r_image.crop(
                    [boxes[max_score][1], boxes[max_score][0], boxes[max_score][3], boxes[max_score][2]])
                image_detect2.save(imagepath + p)
        X = []
        for im in range(imgDB_size):
            img_path = imagepath+str(im) + '.jpg'
            print(img_path)
            img = cv2.imread(img_path)
            img = cv2.resize(img, (32,32), interpolation=cv2.INTER_CUBIC)
            img = img.reshape(32 * 32 * 3)
            X.append(img)
        X = np.array(X)
        X = X/255.0  # normalise pixels to [0, 1] for the encoder
        model = load_model('model_data/encoder_model_deep.h5')
        X_ae = model.predict(X)
        X_ae = np.array(X_ae)
        features = X_ae.reshape(imgDB_size, 256)
        sim = AE_sort.cosine_similarity(features)
        # NOTE(review): index1 is computed but never used; the ranking
        # below is based on L1 feature distance, not cosine similarity.
        index1 = np.argsort(-sim[0])
        fea = []
        for i in range(imgDB_size):
            # rounded L1 distance of image i to the query (image 0)
            fe = np.round(sum(abs(features[i]-features[0])), 2)
            # NOTE(review): ``find`` is never used
            find = list(np.where(fea == fe))
            # de-duplicate: a repeated distance is pushed to the end of
            # the ranking by substituting a huge sentinel value
            flag = 0
            for j in fea:
                if j == fe:
                    flag = 1
            if flag == 0:
                fea.append(fe)
            else:
                fea.append(1000000)
            #fea.append(fe)
        # ascending distance order; index[0] is the query itself
        index = np.argsort(fea)
        return index
if __name__ == '__main__':
    # Build the main window: webcam preview on the left, a "show result"
    # button, and three columns of (thumbnail, link) result slots that
    # Ui_webcam.show_camera fills in after a detection.
    app = QtWidgets.QApplication(sys.argv)
    webcam=Ui_webcam()
    # placeholder image URLs (unused once real results are loaded)
    url1='https://b.ecimg.tw//items/DCAGMVA9009Q76N/000001_1565577423.jpg'
    url2='https://b.ecimg.tw//items/DICMHYA9009T0V1/000001_1550291342.jpg'
    url3='https://b.ecimg.tw//items/DGBX19A9008BED8/000001_1501754321.jpg'
    url4='https://b.ecimg.tw//items/DIBA89A9009RVZ1/000001_1558489835.jpg'
    url5='https://b.ecimg.tw//items/DGCN0VA9009CAIU/000001_1534927659.jpg'
    url6='https://b.ecimg.tw//items/DCAGMV1900A6GNN/000001_1564571586.jpg'
    # six empty 200x200 thumbnail labels (ui1..ui6)
    photo = QPixmap()
    label = QLabel()
    label.setPixmap(photo.scaled(200, 200))
    photo2 = QPixmap()
    label2 = QLabel()
    label2.setPixmap(photo2.scaled(200, 200))
    photo3 = QPixmap()
    label3 = QLabel()
    label3.setPixmap(photo3.scaled(200, 200))
    photo4 = QPixmap()
    label4 = QLabel()
    label4.setPixmap(photo4.scaled(200, 200))
    photo5 = QPixmap()
    label5 = QLabel()
    label5.setPixmap(photo5.scaled(200, 200))
    photo6 = QPixmap()
    label6 = QLabel()
    label6.setPixmap(photo6.scaled(200, 200))
    ui1 = label
    ui2 = label2
    ui3 = label3
    ui4 = label4
    ui5 = label5
    ui6 = label6
    # six hyperlink labels (l1..l6), initially pointing at the placeholders
    urlLink1 = " <a href=\"{url}\"> <font face=Tw Cen MT Condensed size=2 color=black> </font> </a>".format(url=url1)
    l1 = QtWidgets.QLabel()
    l1.setText(urlLink1)
    urlLink2 = " <a href=\"{url}\"> <font face=Tw Cen MT Condensed size=2 color=black> </font> </a>".format(url=url2)
    l2 = QtWidgets.QLabel()
    l2.setText(urlLink2)
    urlLink3 = " <a href=\"{url}\"> <font face=Tw Cen MT Condensed size=2 color=black> </font> </a>".format(url=url3)
    l3 = QtWidgets.QLabel()
    l3.setText(urlLink3)
    urlLink4 = " <a href=\"{url}\"> <font face=Tw Cen MT Condensed size=2 color=black> </font> </a>".format(url=url4)
    l4 = QtWidgets.QLabel()
    l4.setText(urlLink4)
    urlLink5 = " <a href=\"{url}\"> <font face=Tw Cen MT Condensed size=2 color=black> </font> </a>".format(url=url5)
    l5 = QtWidgets.QLabel()
    l5.setText(urlLink5)
    urlLink6 = " <a href=\"{url}\"> <font face=Tw Cen MT Condensed size=2 color=black> </font> </a>".format(url=url6)
    l6 = QtWidgets.QLabel()
    l6.setText(urlLink6)
    # three result columns, two (thumbnail, link) pairs each
    sub_layout1 = QtWidgets.QVBoxLayout()
    sub_layout1.addWidget(ui1)
    sub_layout1.addWidget(l1)
    sub_layout1.addWidget(ui2)
    sub_layout1.addWidget(l2)
    sub_layout2 = QtWidgets.QVBoxLayout()
    sub_layout2.addWidget(ui3)
    sub_layout2.addWidget(l3)
    sub_layout2.addWidget(ui4)
    sub_layout2.addWidget(l4)
    sub_layout3 = QtWidgets.QVBoxLayout()
    sub_layout3.addWidget(ui5)
    sub_layout3.addWidget(l5)
    sub_layout3.addWidget(ui6)
    sub_layout3.addWidget(l6)
    btn = QPushButton('顯示結果')
    main_layout = QtWidgets.QHBoxLayout()
    main_layout.addWidget(webcam)
    main_layout.addWidget(btn)
    main_layout.addLayout(sub_layout1)
    main_layout.addLayout(sub_layout2)
    main_layout.addLayout(sub_layout3)
    layout_widget = QtWidgets.QWidget()
    layout_widget.setLayout(main_layout)
    main_window = QtWidgets.QMainWindow()
    main_window.setCentralWidget(layout_widget)
    # set the window background image
    palette1 = QPalette()
    palette1.setBrush(main_window.backgroundRole(), QBrush(QPixmap('figures/background.jpg')))
    main_window.setPalette(palette1)
    main_window.show()
    app.exec_()
| [
"yolo_multiple_output.YOLO",
"PyQt5.QtWidgets.QMessageBox",
"PyQt5.QtGui.QPixmap.fromImage",
"PyQt5.QtGui.QImage",
"numpy.argsort",
"numpy.array",
"PIL.ImageDraw.Draw",
"PyQt5.QtWidgets.QApplication",
"PyQt5.QtWidgets.QVBoxLayout",
"os.listdir",
"urllib.request.urlretrieve",
"numpy.where",
"... | [((596, 602), 'yolo_multiple_output.YOLO', 'YOLO', ([], {}), '()\n', (600, 602), False, 'from yolo_multiple_output import YOLO, YOLO2\n'), ((612, 619), 'yolo_multiple_output.YOLO2', 'YOLO2', ([], {}), '()\n', (617, 619), False, 'from yolo_multiple_output import YOLO, YOLO2\n'), ((635, 648), 'yolo_detector.YOLO_detect', 'YOLO_detect', ([], {}), '()\n', (646, 648), False, 'from yolo_detector import YOLO_detect\n'), ((16372, 16404), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (16394, 16404), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((17701, 17719), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (17717, 17719), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((17877, 17895), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (17893, 17895), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18057, 18075), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (18073, 18075), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18233, 18251), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (18249, 18251), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18409, 18427), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (18425, 18427), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18585, 18603), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (18601, 18603), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18651, 18674), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (18672, 18674), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18822, 18845), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (18843, 18845), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18993, 19016), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (19014, 19016), False, 'from PyQt5 import QtCore, QtGui, 
QtWidgets\n'), ((19197, 19220), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (19218, 19220), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((19433, 19452), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (19450, 19452), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((19516, 19539), 'PyQt5.QtWidgets.QMainWindow', 'QtWidgets.QMainWindow', ([], {}), '()\n', (19537, 19539), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((830, 845), 'PyQt5.QtCore.QTimer', 'QtCore.QTimer', ([], {}), '()\n', (843, 845), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((876, 894), 'cv2.VideoCapture', 'cv2.VideoCapture', ([], {}), '()\n', (892, 894), False, 'import cv2\n'), ((1205, 1224), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ([], {}), '()\n', (1222, 1224), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1310, 1346), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', (['self.centralwidget'], {}), '(self.centralwidget)\n', (1326, 1346), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1471, 1511), 'PyQt5.QtGui.QPixmap', 'QtGui.QPixmap', (['"""figures/origin-logo.png"""'], {}), "('figures/origin-logo.png')\n", (1484, 1511), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1654, 1677), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (1675, 1677), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1747, 1770), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (1768, 1770), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1806, 1829), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (1827, 1829), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1891, 1921), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['u"""打開相機"""'], {}), "(u'打開相機')\n", (1912, 1921), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1951, 1979), 'PyQt5.QtWidgets.QPushButton', 
'QtWidgets.QPushButton', (['u"""退出"""'], {}), "(u'退出')\n", (1972, 1979), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2203, 2221), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (2219, 2221), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2249, 2267), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (2265, 2267), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4008, 4042), 'cv2.resize', 'cv2.resize', (['self.image', '(640, 480)'], {}), '(self.image, (640, 480))\n', (4018, 4042), False, 'import cv2\n'), ((4059, 4076), 'cv2.flip', 'cv2.flip', (['show', '(1)'], {}), '(show, 1)\n', (4067, 4076), False, 'import cv2\n'), ((4093, 4130), 'cv2.cvtColor', 'cv2.cvtColor', (['show', 'cv2.COLOR_BGR2RGB'], {}), '(show, cv2.COLOR_BGR2RGB)\n', (4105, 4130), False, 'import cv2\n'), ((4148, 4169), 'PIL.Image.fromarray', 'Image.fromarray', (['show'], {}), '(show)\n', (4163, 4169), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((6307, 6326), 'numpy.asarray', 'np.asarray', (['r_image'], {}), '(r_image)\n', (6317, 6326), True, 'import numpy as np\n'), ((6348, 6426), 'PyQt5.QtGui.QImage', 'QtGui.QImage', (['result', 'show.shape[1]', 'show.shape[0]', 'QtGui.QImage.Format_RGB888'], {}), '(result, show.shape[1], show.shape[0], QtGui.QImage.Format_RGB888)\n', (6360, 6426), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((10968, 10991), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', ([], {}), '()\n', (10989, 10991), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11010, 11033), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', ([], {}), '()\n', (11031, 11033), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((11049, 11118), 'PyQt5.QtWidgets.QMessageBox', 'QtWidgets.QMessageBox', (['QtWidgets.QMessageBox.Warning', 'u"""關閉"""', 'u"""是否關閉!"""'], {}), "(QtWidgets.QMessageBox.Warning, u'關閉', u'是否關閉!')\n", (11070, 11118), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), 
((11760, 11782), 'requests.get', 'requests.get', (['self.url'], {}), '(self.url)\n', (11772, 11782), False, 'import requests\n'), ((12684, 12710), 'requests.get', 'requests.get', (['pchomesearch'], {}), '(pchomesearch)\n', (12696, 12710), False, 'import requests\n'), ((12750, 12766), 'json.loads', 'json.loads', (['ress'], {}), '(ress)\n', (12760, 12766), False, 'import json\n'), ((14669, 14690), 'os.listdir', 'os.listdir', (['imagepath'], {}), '(imagepath)\n', (14679, 14690), False, 'import os\n'), ((15537, 15548), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (15545, 15548), True, 'import numpy as np\n'), ((15589, 15635), 'keras.models.load_model', 'load_model', (['"""model_data/encoder_model_deep.h5"""'], {}), "('model_data/encoder_model_deep.h5')\n", (15599, 15635), False, 'from keras.models import Model, load_model\n'), ((15685, 15699), 'numpy.array', 'np.array', (['X_ae'], {}), '(X_ae)\n', (15693, 15699), True, 'import numpy as np\n'), ((15832, 15851), 'numpy.argsort', 'np.argsort', (['(-sim[0])'], {}), '(-sim[0])\n', (15842, 15851), True, 'import numpy as np\n'), ((16291, 16306), 'numpy.argsort', 'np.argsort', (['fea'], {}), '(fea)\n', (16301, 16306), True, 'import numpy as np\n'), ((1383, 1415), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(600)', '(550)', '(300)', '(120)'], {}), '(600, 550, 300, 120)\n', (1395, 1415), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6469, 6503), 'PyQt5.QtGui.QPixmap.fromImage', 'QtGui.QPixmap.fromImage', (['showImage'], {}), '(showImage)\n', (6492, 6503), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((14114, 14147), 'os.path.join', 'os.path.join', (["('image\\\\%s.jpg' % n)"], {}), "('image\\\\%s.jpg' % n)\n", (14126, 14147), False, 'import os\n'), ((14161, 14189), 'urllib.request.urlretrieve', 'urlretrieve', (['picurls0', 'local'], {}), '(picurls0, local)\n', (14172, 14189), False, 'from urllib.request import urlretrieve\n'), ((14743, 14769), 'os.path.join', 'os.path.join', (['imagepath', 'p'], {}), 
'(imagepath, p)\n', (14755, 14769), False, 'import os\n'), ((14789, 14809), 'PIL.Image.open', 'Image.open', (['fullpath'], {}), '(fullpath)\n', (14799, 14809), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((15338, 15358), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (15348, 15358), False, 'import cv2\n'), ((15378, 15434), 'cv2.resize', 'cv2.resize', (['img', '(32, 32)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(img, (32, 32), interpolation=cv2.INTER_CUBIC)\n', (15388, 15434), False, 'import cv2\n'), ((3366, 3511), 'PyQt5.QtWidgets.QMessageBox.Warning', 'QtWidgets.QMessageBox.Warning', (['self', 'u"""Warning"""', 'u"""請檢測相機與電腦是否連接正確"""'], {'buttons': 'QtWidgets.QMessageBox.Ok', 'defaultButton': 'QtWidgets.QMessageBox.Ok'}), "(self, u'Warning', u'請檢測相機與電腦是否連接正確', buttons=\n QtWidgets.QMessageBox.Ok, defaultButton=QtWidgets.QMessageBox.Ok)\n", (3395, 3511), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4458, 4481), 'numpy.array', 'np.array', (['image_detect2'], {}), '(image_detect2)\n', (4466, 4481), True, 'import numpy as np\n'), ((5038, 5061), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['r_image'], {}), '(r_image)\n', (5052, 5061), False, 'from PIL import Image, ImageFont, ImageDraw\n'), ((8227, 8262), 'requests.get', 'requests.get', (['link[index[1] - 1][1]'], {}), '(link[index[1] - 1][1])\n', (8239, 8262), False, 'import requests\n'), ((8725, 8760), 'requests.get', 'requests.get', (['link[index[2] - 1][1]'], {}), '(link[index[2] - 1][1])\n', (8737, 8760), False, 'import requests\n'), ((9222, 9257), 'requests.get', 'requests.get', (['link[index[3] - 1][1]'], {}), '(link[index[3] - 1][1])\n', (9234, 9257), False, 'import requests\n'), ((9719, 9754), 'requests.get', 'requests.get', (['link[index[4] - 1][1]'], {}), '(link[index[4] - 1][1])\n', (9731, 9754), False, 'import requests\n'), ((10216, 10251), 'requests.get', 'requests.get', (['link[index[5] - 1][1]'], {}), '(link[index[5] - 1][1])\n', (10228, 10251), False, 'import 
requests\n'), ((10713, 10748), 'requests.get', 'requests.get', (['link[index[6] - 1][1]'], {}), '(link[index[6] - 1][1])\n', (10725, 10748), False, 'import requests\n'), ((14215, 14238), 'numpy.array', 'np.array', (['self.all_link'], {}), '(self.all_link)\n', (14223, 14238), True, 'import numpy as np\n'), ((14955, 14972), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (14964, 14972), True, 'import numpy as np\n'), ((15997, 16016), 'numpy.where', 'np.where', (['(fea == fe)'], {}), '(fea == fe)\n', (16005, 16016), True, 'import numpy as np\n'), ((5618, 5659), 'numpy.array', 'np.array', (['[left, top - 2 * label_size[1]]'], {}), '([left, top - 2 * label_size[1]])\n', (5626, 5659), True, 'import numpy as np\n'), ((5718, 5749), 'numpy.array', 'np.array', (['[left, 2 * (top + 1)]'], {}), '([left, 2 * (top + 1)])\n', (5726, 5749), True, 'import numpy as np\n'), ((14513, 14529), 'numpy.diagonal', 'np.diagonal', (['sim'], {}), '(sim)\n', (14524, 14529), True, 'import numpy as np\n'), ((4747, 4785), 'numpy.floor', 'np.floor', (['(0.03 * r_image.size[1] + 0.5)'], {}), '(0.03 * r_image.size[1] + 0.5)\n', (4755, 4785), True, 'import numpy as np\n'), ((5199, 5218), 'numpy.floor', 'np.floor', (['(top + 0.5)'], {}), '(top + 0.5)\n', (5207, 5218), True, 'import numpy as np\n'), ((5267, 5287), 'numpy.floor', 'np.floor', (['(left + 0.5)'], {}), '(left + 0.5)\n', (5275, 5287), True, 'import numpy as np\n'), ((5352, 5374), 'numpy.floor', 'np.floor', (['(bottom + 0.5)'], {}), '(bottom + 0.5)\n', (5360, 5374), True, 'import numpy as np\n'), ((5438, 5459), 'numpy.floor', 'np.floor', (['(right + 0.5)'], {}), '(right + 0.5)\n', (5446, 5459), True, 'import numpy as np\n')] |
# Grid-search hyperparameter tuning of LogisticRegression on the Kaggle
# cardiovascular-disease dataset.  The script loads and inspects the data,
# does light cleaning, standardises the features, and builds (but does not
# fit) a GridSearchCV; the fit/export steps below are commented out.
# import libraries
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
# load data
# data set used: https://www.kaggle.com/sulianova/cardiovascular-disease-dataset/data
df = pd.read_csv('cardio_train.csv', sep=',')
# print first 7 rows of data
print(df.head(7))
# get shape of data
print(df.shape)
# count null values in each column
print(df.isna().sum())
# another way to check for null or missing values
print(df.isnull().values.any())
# basic stats
print(df.describe())
# count individuals with cardiovascular disease and without
print(df['cardio'].value_counts())
# create years column (the 'age' column is divided by 365, so it is
# presumably stored in days -- TODO confirm against the dataset docs)
df['years'] = (df['age'] / 365).round(0)
df['years'] = pd.to_numeric(df['years'], downcast='integer')
# remove years columns
# NOTE(review): 'years' is created above and dropped immediately, so it
# never reaches the model; kept as-is to preserve the original behaviour.
df = df.drop('years', axis=1)
# remove or drop id column (row identifier, not a predictive feature)
df = df.drop('id', axis=1)
# split data into feature and target data ('cardio' is the last column)
X = df.iloc[:, :-1].values
Y = df.iloc[:, -1].values
# feature scaling: standardise features to zero mean / unit variance
# (StandardScaler standardises; it does NOT min-max scale into [0, 1])
X = StandardScaler().fit_transform(X)
#EVERYTHING UNTIL THIS POINT SHOULD BE THE SAME
#make gridsearch params
parameters = {
    'penalty':['l1', 'l2', 'elasticnet', 'none'],
    'C':np.logspace(0,4,20),
    'solver':['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
    'max_iter': [100, 500, 1000, 2500]
}
#make gridsearch
# NOTE(review): the grid search object is constructed but never fitted in
# this script; the fitting / result-export steps below are commented out.
clf = GridSearchCV(estimator=LogisticRegression(), param_grid=parameters, verbose=3, n_jobs=-1, cv=4, return_train_score=True)
#THE STEPS BELOW WILL VARY BASED ON YOUR MODELS
#results = pd.DataFrame(clf.cv_results_)
#results.to_csv('LogReg Cardio Results Full')
#results_slice = results.iloc[:, [0, 2, 4, 5, 6, 7, 8, 13, 14, 15]]
#results_slice.to_csv('LogReg Cardio Results Sliced')
#results = pd.read_csv('LogReg Cardio Results Sliced')
#results.dropna()
#results = results.sort_values(by=['mean_test_score'], ascending=False)
#results['mean_test_score'].value_counts()
#results = results.loc[results['mean_test_score']== results['mean_test_score'].max()]
#results = results.loc[results['param_max_iter'] == 100]
| [
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.StandardScaler",
"pandas.to_numeric",
"numpy.logspace"
] | [((312, 352), 'pandas.read_csv', 'pd.read_csv', (['"""cardio_train.csv"""'], {'sep': '""","""'}), "('cardio_train.csv', sep=',')\n", (323, 352), True, 'import pandas as pd\n'), ((790, 836), 'pandas.to_numeric', 'pd.to_numeric', (["df['years']"], {'downcast': '"""integer"""'}), "(df['years'], downcast='integer')\n", (803, 836), True, 'import pandas as pd\n'), ((1304, 1325), 'numpy.logspace', 'np.logspace', (['(0)', '(4)', '(20)'], {}), '(0, 4, 20)\n', (1315, 1325), True, 'import numpy as np\n'), ((1122, 1138), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1136, 1138), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1477, 1497), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {}), '()\n', (1495, 1497), False, 'from sklearn.linear_model import LogisticRegression\n')] |
# Convert one channel of a raw lidar packet file into a grayscale .avi video,
# one video frame per lidar scan.  The user picks the channel and the file
# timestamp interactively on stdin.
import numpy as np
from helpers import get_signal, number_of_frames, first_last_frame_number
import os
import cv2
import seaborn as sns; sns.set()

mode = 1024  # lidar scanning mode; the code divides by 4, suggesting modes 512/1024/2048 -- TODO confirm
fps = 20     # output video frame rate

# Channels stored in the packet file; the user selects one by name.
# NOTE(review): if the typed name matches no entry, the loop leaves idx at the
# last channel ('ambient') -- same behaviour as the original script.
signal_list = ['range', 'reflectivity', 'signal', 'ambient']
signal_name = input("Signal to display:")
for idx, val in enumerate(signal_list):
    if signal_name == val:
        break

# =============== Build the array of images =============== #
os.chdir('/media/nizar/Transcend/test in the lab/Data/myFormat/Lidar')
file_name_t = input("Time and date:")
file_name = 'Lidar_myFormat_packet_' + str(file_name_t) + '.txt'
img_array = get_signal(file_name, mode, signal_list[idx])

import matplotlib.pyplot as plt  # kept for interactive inspection of single frames

# Expand the 16 beam columns to 64 video rows: each beam is repeated 4 times
# so the output image is tall enough to view.
k = number_of_frames(file_name, mode)[0]
b = np.zeros((k, int(mode / 4), 64))
for frame in range(0, k):
    for i in range(0, 16):
        for j in range(0, 4):
            b[frame][0:int(mode / 4), i * 4 + j] = img_array[frame][0:int(mode / 4), i]

# initialize video writer (Motion-JPEG codec)
height = 64
width = int(mode / 4)
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
video_filename = 'Lidar_myFormat_packet_' + str(file_name_t) + '_' + signal_list[idx] + '.avi'
out = cv2.VideoWriter(video_filename, fourcc, fps, (width, height))

# Normalise each frame into 8-bit grayscale, replicate it into 3 channels
# (VideoWriter expects BGR frames) and append it to the video.
for i in range(k):
    gray = cv2.normalize(np.flip(np.rot90(b[i], 1), 1), None, 255, 0, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    gray_3c = cv2.merge([gray, gray, gray])
    out.write(gray_3c)
# close out the video writer
out.release()
| [
"seaborn.set",
"cv2.merge",
"helpers.get_signal",
"cv2.VideoWriter",
"os.chdir",
"helpers.number_of_frames",
"numpy.zeros",
"cv2.VideoWriter_fourcc",
"numpy.rot90"
] | [((137, 146), 'seaborn.set', 'sns.set', ([], {}), '()\n', (144, 146), True, 'import seaborn as sns\n'), ((417, 487), 'os.chdir', 'os.chdir', (['"""/media/nizar/Transcend/test in the lab/Data/myFormat/Lidar"""'], {}), "('/media/nizar/Transcend/test in the lab/Data/myFormat/Lidar')\n", (425, 487), False, 'import os\n'), ((603, 648), 'helpers.get_signal', 'get_signal', (['file_name', 'mode', 'signal_list[idx]'], {}), '(file_name, mode, signal_list[idx])\n', (613, 648), False, 'from helpers import get_signal, number_of_frames, first_last_frame_number\n'), ((4341, 4379), 'numpy.zeros', 'np.zeros', (['(height, width)'], {'dtype': 'float'}), '((height, width), dtype=float)\n', (4349, 4379), True, 'import numpy as np\n'), ((4415, 4457), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (['"""M"""', '"""J"""', '"""P"""', '"""G"""'], {}), "('M', 'J', 'P', 'G')\n", (4437, 4457), False, 'import cv2\n'), ((4556, 4617), 'cv2.VideoWriter', 'cv2.VideoWriter', (['video_filename', 'fourcc', 'fps', '(width, height)'], {}), '(video_filename, fourcc, fps, (width, height))\n', (4571, 4617), False, 'import cv2\n'), ((3810, 3843), 'helpers.number_of_frames', 'number_of_frames', (['file_name', 'mode'], {}), '(file_name, mode)\n', (3826, 3843), False, 'from helpers import get_signal, number_of_frames, first_last_frame_number\n'), ((4839, 4868), 'cv2.merge', 'cv2.merge', (['[gray, gray, gray]'], {}), '([gray, gray, gray])\n', (4848, 4868), False, 'import cv2\n'), ((4744, 4761), 'numpy.rot90', 'np.rot90', (['b[i]', '(1)'], {}), '(b[i], 1)\n', (4752, 4761), True, 'import numpy as np\n')] |
import sys
import open3d
import numpy as np
import time
import os
from geometric_registration.utils import get_pcd, get_keypts, get_desc, loadlog
import cv2
from functools import partial
def build_correspondence(source_desc, target_desc):
    """Return mutually-closest point pairs in descriptor space.

    Args:
        source_desc: (N, D) array of keypoint descriptors.
        target_desc: (M, D) array of keypoint descriptors.
            # assumes rows are L2-normalised so that ||a-b||^2 = 2 - 2 a.b
            # holds -- TODO confirm with the descriptor extraction code.

    Returns:
        (K, 2) int array; each row [i, j] means source i and target j are
        each other's nearest neighbour in feature space.
    """
    # Pairwise Euclidean distance via the unit-norm identity above.
    distance = np.sqrt(2 - 2 * (source_desc @ target_desc.T))
    source_idx = np.argmin(distance, axis=1)  # closest target for each source
    target_idx = np.argmin(distance, axis=0)  # closest source for each target
    # Keep only mutual matches.  (The unused min-distance arrays computed by
    # the original implementation have been removed.)
    result = [[i, j] for i, j in enumerate(source_idx) if target_idx[j] == i]
    return np.array(result)
def register2Fragments(id1, id2, keyptspath, descpath, resultpath, logpath, gtLog, desc_name, inlier_ratio, distance_threshold):
    """
    Register point cloud {id1} and {id2} using the keypts location and descriptors.

    Two things are computed:
      (a) the inlier ratio of mutual feature matches under the ground-truth
          transform (used for Feature Matching Recall), only when the pair
          appears in `gtLog`;
      (b) a RANSAC transform estimate, always, appended to a .log file
          (used for Registration Recall).
    The pair is skipped entirely if its result file already exists
    (resume support).

    Returns:
        (num_inliers, inlier_ratio, gt_flag) where gt_flag is 1 iff the pair
        is in the ground-truth log; (0, 0, 0) when skipped.
    """
    cloud_bin_s = f'cloud_bin_{id1}'
    cloud_bin_t = f'cloud_bin_{id2}'
    write_file = f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'
    # Resume support: a pre-existing result file means this pair is done.
    if os.path.exists(os.path.join(resultpath, write_file)):
        return 0, 0, 0
    source_keypts = get_keypts(keyptspath, cloud_bin_s)
    target_keypts = get_keypts(keyptspath, cloud_bin_t)
    source_desc = get_desc(descpath, cloud_bin_s, desc_name)
    target_desc = get_desc(descpath, cloud_bin_t, desc_name)
    # Guard against NaNs in the learned descriptors.
    source_desc = np.nan_to_num(source_desc)
    target_desc = np.nan_to_num(target_desc)
    # Select {num_keypts} points based on the scores. The descriptors and keypts are already sorted based on the detection score.
    num_keypts = 250
    source_keypts = source_keypts[-num_keypts:, :]
    source_desc = source_desc[-num_keypts:, :]
    target_keypts = target_keypts[-num_keypts:, :]
    target_desc = target_desc[-num_keypts:, :]
    # Select {num_keypts} points randomly.
    # num_keypts = 250
    # source_indices = np.random.choice(range(source_keypts.shape[0]), num_keypts)
    # target_indices = np.random.choice(range(target_keypts.shape[0]), num_keypts)
    # source_keypts = source_keypts[source_indices, :]
    # source_desc = source_desc[source_indices, :]
    # target_keypts = target_keypts[target_indices, :]
    # target_desc = target_desc[target_indices, :]
    # Ground-truth log keys look like "<id1>_<id2>".
    key = f'{cloud_bin_s.split("_")[-1]}_{cloud_bin_t.split("_")[-1]}'
    if key not in gtLog.keys():
        # skip the pairs that have less than 30% overlap.
        num_inliers = 0
        inlier_ratio = 0
        gt_flag = 0
    else:
        # build correspondence set in feature space.
        corr = build_correspondence(source_desc, target_desc)
        # calculate the inlier ratio, this is for Feature Matching Recall:
        # transform the matched target points by the ground-truth transform
        # and count matches closer than distance_threshold.
        gt_trans = gtLog[key]
        frag1 = source_keypts[corr[:, 0]]
        frag2_pc = open3d.PointCloud()
        frag2_pc.points = open3d.utility.Vector3dVector(target_keypts[corr[:, 1]])
        frag2_pc.transform(gt_trans)
        frag2 = np.asarray(frag2_pc.points)
        distance = np.sqrt(np.sum(np.power(frag1 - frag2, 2), axis=1))
        num_inliers = np.sum(distance < distance_threshold)
        if num_inliers / len(distance) < inlier_ratio:
            print(key)
            print("num_corr:", len(corr), "inlier_ratio:", num_inliers / len(distance))
        # NOTE(review): the `inlier_ratio` parameter is rebound here to the
        # measured ratio; its original threshold value is used only in the
        # check just above.
        inlier_ratio = num_inliers / len(distance)
        gt_flag = 1
    # calculate the transformation matrix using RANSAC, this is for Registration Recall.
    source_pcd = open3d.PointCloud()
    source_pcd.points = open3d.utility.Vector3dVector(source_keypts)
    target_pcd = open3d.PointCloud()
    target_pcd.points = open3d.utility.Vector3dVector(target_keypts)
    s_desc = open3d.registration.Feature()
    s_desc.data = source_desc.T
    t_desc = open3d.registration.Feature()
    t_desc.data = target_desc.T
    result = open3d.registration_ransac_based_on_feature_matching(
        source_pcd, target_pcd, s_desc, t_desc,
        0.05,
        open3d.TransformationEstimationPointToPoint(False), 3,
        [open3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
         open3d.CorrespondenceCheckerBasedOnDistance(0.05)],
        open3d.RANSACConvergenceCriteria(50000, 1000))
    # write the transformation matrix into .log file for evaluation.
    # NOTE(review): `timestr` is read from module scope (set in __main__).
    with open(os.path.join(logpath, f'{desc_name}_{timestr}.log'), 'a+') as f:
        trans = result.transformation
        trans = np.linalg.inv(trans)
        s1 = f'{id1}\t {id2}\t 37\n'
        f.write(s1)
        f.write(f"{trans[0,0]}\t {trans[0,1]}\t {trans[0,2]}\t {trans[0,3]}\t \n")
        f.write(f"{trans[1,0]}\t {trans[1,1]}\t {trans[1,2]}\t {trans[1,3]}\t \n")
        f.write(f"{trans[2,0]}\t {trans[2,1]}\t {trans[2,2]}\t {trans[2,3]}\t \n")
        f.write(f"{trans[3,0]}\t {trans[3,1]}\t {trans[3,2]}\t {trans[3,3]}\t \n")
    # write the result into resultpath so that it can be re-shown.
    s = f"{cloud_bin_s}\t{cloud_bin_t}\t{num_inliers}\t{inlier_ratio:.8f}\t{gt_flag}"
    with open(os.path.join(resultpath, f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'), 'w+') as f:
        f.write(s)
    return num_inliers, inlier_ratio, gt_flag
def read_register_result(resultpath, id1, id2):
    """Load the cached registration result for fragment pair (id1, id2).

    Reads the first line of `cloud_bin_{id1}_cloud_bin_{id2}.rt.txt` under
    `resultpath` and returns its 3rd-5th tab-separated fields as strings:
    [inlier_number, inlier_ratio, gt_flag].
    """
    result_file = os.path.join(
        resultpath, f'cloud_bin_{id1}_cloud_bin_{id2}.rt.txt')
    with open(result_file, 'r') as f:
        first_line = f.readline()
    return first_line.rstrip("\n").split("\t")[2:5]
def deal_with_one_scene(inlier_ratio, distance_threshold, scene):
    """
    Function to register all the fragments pairs in one scene.

    Builds the per-scene input/output paths, loads the ground-truth log,
    creates the output directories, then calls register2Fragments for every
    ordered fragment pair (id1 < id2).

    NOTE(review): relies on module-level globals `desc_name` and `timestr`,
    which are set in the __main__ block before the worker pool starts.
    """
    logpath = f"log_result/{scene}-evaluation"
    pcdpath = f"../data/3DMatch/fragments/{scene}/"
    keyptspath = f"{desc_name}_{timestr}/keypoints/{scene}"
    descpath = f"{desc_name}_{timestr}/descriptors/{scene}"
    gtpath = f'gt_result/{scene}-evaluation/'
    gtLog = loadlog(gtpath)
    resultpath = f"pred_result/{scene}/{desc_name}_result_{timestr}"
    # Create output directories level by level (os.mkdir is not recursive).
    if not os.path.exists(f"pred_result/{scene}/"):
        os.mkdir(f"pred_result/{scene}/")
    if not os.path.exists(resultpath):
        os.mkdir(resultpath)
    if not os.path.exists(logpath):
        os.mkdir(logpath)
    # register each pair; the fragment count is the number of .ply files.
    num_frag = len([filename for filename in os.listdir(pcdpath) if filename.endswith('ply')])
    print(f"Start Evaluate Descriptor {desc_name} for {scene}")
    start_time = time.time()
    for id1 in range(num_frag):
        for id2 in range(id1 + 1, num_frag):
            register2Fragments(id1, id2, keyptspath, descpath, resultpath, logpath, gtLog, desc_name, inlier_ratio, distance_threshold)
    print(f"Finish Evaluation, time: {time.time() - start_time:.2f}s")
if __name__ == '__main__':
    # The eight standard 3DMatch evaluation scenes.
    scene_list = [
        '7-scenes-redkitchen',
        'sun3d-home_at-home_at_scan1_2013_jan_1',
        'sun3d-home_md-home_md_scan9_2012_sep_30',
        'sun3d-hotel_uc-scan3',
        'sun3d-hotel_umd-maryland_hotel1',
        'sun3d-hotel_umd-maryland_hotel3',
        'sun3d-mit_76_studyroom-76-1studyroom2',
        'sun3d-mit_lab_hj-lab_hj_tea_nov_2_2012_scan1_erika'
    ]
    # will evaluate the descriptor in `{desc_name}_{timestr}` folder.
    # These two globals are also read by the worker functions above.
    desc_name = sys.argv[1]
    timestr = sys.argv[2]
    # inlier_ratio = float(sys.argv[3])
    # distance_threshold = float(sys.argv[4])
    inlier_ratio = 0.05 # 5%
    distance_threshold = 0.10 # 10cm
    # multiprocessing to register each pair in each scene.
    # this part is time-consuming; one worker process per scene.
    from multiprocessing import Pool
    pool = Pool(len(scene_list))
    func = partial(deal_with_one_scene, inlier_ratio, distance_threshold)
    pool.map(func, scene_list)
    pool.close()
    pool.join()
    # collect all the data and print the results.
    inliers_list = []
    recall_list = []
    inliers_ratio_list = []
    pred_match = 0
    gt_match = 0
    for scene in scene_list:
        # evaluate: re-read each pair's cached result file for this scene.
        pcdpath = f"../data/3DMatch/fragments/{scene}/"
        resultpath = f"pred_result/{scene}/{desc_name}_result_{timestr}"
        num_frag = len([filename for filename in os.listdir(pcdpath) if filename.endswith('ply')])
        result = []
        for id1 in range(num_frag):
            for id2 in range(id1 + 1, num_frag):
                line = read_register_result(resultpath, id1, id2)
                result.append([int(line[0]), float(line[1]), int(line[2])]) # inlier_number, inlier_ratio, flag.
        result = np.array(result)
        gt_results = np.sum(result[:, 2] == 1)
        pred_results = np.sum(result[:, 1] > inlier_ratio)
        pred_match += pred_results
        gt_match += gt_results
        # NOTE(review): divides by gt_results / pred_results without a zero
        # guard -- raises ZeroDivisionError or prints nan/inf if a scene has
        # no ground-truth pairs or no predicted matches.
        recall = float(pred_results / gt_results) * 100
        print(f"Correct Match {pred_results}, ground truth Match {gt_results}")
        print(f"Recall {recall}%")
        ave_num_inliers = np.sum(np.where(result[:, 2] == 1, result[:, 0], np.zeros(result.shape[0]))) / pred_results
        print(f"Average Num Inliners: {ave_num_inliers}")
        ave_inlier_ratio = np.sum(np.where(result[:, 2] == 1, result[:, 1], np.zeros(result.shape[0]))) / pred_results
        print(f"Average Num Inliner Ratio: {ave_inlier_ratio}")
        recall_list.append(recall)
        inliers_list.append(ave_num_inliers)
        inliers_ratio_list.append(ave_inlier_ratio)
    print("*" * 40)
    print(recall_list)
    # print(f"True Avarage Recall: {pred_match / gt_match * 100}%")
    print(f"Matching Recall Std: {np.std(recall_list)}")
    average_recall = sum(recall_list) / len(recall_list)
    print(f"All 8 scene, average recall: {average_recall}%")
    average_inliers = sum(inliers_list) / len(inliers_list)
    print(f"All 8 scene, average num inliers: {average_inliers}")
    average_inliers_ratio = sum(inliers_ratio_list) / len(inliers_list)
    print(f"All 8 scene, average num inliers ratio: {average_inliers_ratio}")
| [
"numpy.sqrt",
"open3d.PointCloud",
"numpy.array",
"geometric_registration.utils.get_desc",
"open3d.TransformationEstimationPointToPoint",
"os.path.exists",
"os.listdir",
"numpy.asarray",
"geometric_registration.utils.loadlog",
"open3d.registration.Feature",
"os.mkdir",
"numpy.min",
"numpy.ar... | [((411, 457), 'numpy.sqrt', 'np.sqrt', (['(2 - 2 * (source_desc @ target_desc.T))'], {}), '(2 - 2 * (source_desc @ target_desc.T))\n', (418, 457), True, 'import numpy as np\n'), ((475, 502), 'numpy.argmin', 'np.argmin', (['distance'], {'axis': '(1)'}), '(distance, axis=1)\n', (484, 502), True, 'import numpy as np\n'), ((520, 544), 'numpy.min', 'np.min', (['distance'], {'axis': '(1)'}), '(distance, axis=1)\n', (526, 544), True, 'import numpy as np\n'), ((562, 589), 'numpy.argmin', 'np.argmin', (['distance'], {'axis': '(0)'}), '(distance, axis=0)\n', (571, 589), True, 'import numpy as np\n'), ((607, 631), 'numpy.min', 'np.min', (['distance'], {'axis': '(0)'}), '(distance, axis=0)\n', (613, 631), True, 'import numpy as np\n'), ((786, 802), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (794, 802), True, 'import numpy as np\n'), ((1267, 1302), 'geometric_registration.utils.get_keypts', 'get_keypts', (['keyptspath', 'cloud_bin_s'], {}), '(keyptspath, cloud_bin_s)\n', (1277, 1302), False, 'from geometric_registration.utils import get_pcd, get_keypts, get_desc, loadlog\n'), ((1323, 1358), 'geometric_registration.utils.get_keypts', 'get_keypts', (['keyptspath', 'cloud_bin_t'], {}), '(keyptspath, cloud_bin_t)\n', (1333, 1358), False, 'from geometric_registration.utils import get_pcd, get_keypts, get_desc, loadlog\n'), ((1377, 1419), 'geometric_registration.utils.get_desc', 'get_desc', (['descpath', 'cloud_bin_s', 'desc_name'], {}), '(descpath, cloud_bin_s, desc_name)\n', (1385, 1419), False, 'from geometric_registration.utils import get_pcd, get_keypts, get_desc, loadlog\n'), ((1438, 1480), 'geometric_registration.utils.get_desc', 'get_desc', (['descpath', 'cloud_bin_t', 'desc_name'], {}), '(descpath, cloud_bin_t, desc_name)\n', (1446, 1480), False, 'from geometric_registration.utils import get_pcd, get_keypts, get_desc, loadlog\n'), ((1499, 1525), 'numpy.nan_to_num', 'np.nan_to_num', (['source_desc'], {}), '(source_desc)\n', (1512, 1525), True, 
'import numpy as np\n'), ((1544, 1570), 'numpy.nan_to_num', 'np.nan_to_num', (['target_desc'], {}), '(target_desc)\n', (1557, 1570), True, 'import numpy as np\n'), ((6233, 6248), 'geometric_registration.utils.loadlog', 'loadlog', (['gtpath'], {}), '(gtpath)\n', (6240, 6248), False, 'from geometric_registration.utils import get_pcd, get_keypts, get_desc, loadlog\n'), ((6744, 6755), 'time.time', 'time.time', ([], {}), '()\n', (6753, 6755), False, 'import time\n'), ((7906, 7968), 'functools.partial', 'partial', (['deal_with_one_scene', 'inlier_ratio', 'distance_threshold'], {}), '(deal_with_one_scene, inlier_ratio, distance_threshold)\n', (7913, 7968), False, 'from functools import partial\n'), ((1185, 1221), 'os.path.join', 'os.path.join', (['resultpath', 'write_file'], {}), '(resultpath, write_file)\n', (1197, 1221), False, 'import os\n'), ((2884, 2903), 'open3d.PointCloud', 'open3d.PointCloud', ([], {}), '()\n', (2901, 2903), False, 'import open3d\n'), ((2930, 2986), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['target_keypts[corr[:, 1]]'], {}), '(target_keypts[corr[:, 1]])\n', (2959, 2986), False, 'import open3d\n'), ((3040, 3067), 'numpy.asarray', 'np.asarray', (['frag2_pc.points'], {}), '(frag2_pc.points)\n', (3050, 3067), True, 'import numpy as np\n'), ((3161, 3198), 'numpy.sum', 'np.sum', (['(distance < distance_threshold)'], {}), '(distance < distance_threshold)\n', (3167, 3198), True, 'import numpy as np\n'), ((3551, 3570), 'open3d.PointCloud', 'open3d.PointCloud', ([], {}), '()\n', (3568, 3570), False, 'import open3d\n'), ((3599, 3643), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['source_keypts'], {}), '(source_keypts)\n', (3628, 3643), False, 'import open3d\n'), ((3665, 3684), 'open3d.PointCloud', 'open3d.PointCloud', ([], {}), '()\n', (3682, 3684), False, 'import open3d\n'), ((3713, 3757), 'open3d.utility.Vector3dVector', 'open3d.utility.Vector3dVector', (['target_keypts'], {}), '(target_keypts)\n', (3742, 
3757), False, 'import open3d\n'), ((3775, 3804), 'open3d.registration.Feature', 'open3d.registration.Feature', ([], {}), '()\n', (3802, 3804), False, 'import open3d\n'), ((3858, 3887), 'open3d.registration.Feature', 'open3d.registration.Feature', ([], {}), '()\n', (3885, 3887), False, 'import open3d\n'), ((6329, 6368), 'os.path.exists', 'os.path.exists', (['f"""pred_result/{scene}/"""'], {}), "(f'pred_result/{scene}/')\n", (6343, 6368), False, 'import os\n'), ((6378, 6411), 'os.mkdir', 'os.mkdir', (['f"""pred_result/{scene}/"""'], {}), "(f'pred_result/{scene}/')\n", (6386, 6411), False, 'import os\n'), ((6423, 6449), 'os.path.exists', 'os.path.exists', (['resultpath'], {}), '(resultpath)\n', (6437, 6449), False, 'import os\n'), ((6459, 6479), 'os.mkdir', 'os.mkdir', (['resultpath'], {}), '(resultpath)\n', (6467, 6479), False, 'import os\n'), ((6491, 6514), 'os.path.exists', 'os.path.exists', (['logpath'], {}), '(logpath)\n', (6505, 6514), False, 'import os\n'), ((6524, 6541), 'os.mkdir', 'os.mkdir', (['logpath'], {}), '(logpath)\n', (6532, 6541), False, 'import os\n'), ((8769, 8785), 'numpy.array', 'np.array', (['result'], {}), '(result)\n', (8777, 8785), True, 'import numpy as np\n'), ((8807, 8832), 'numpy.sum', 'np.sum', (['(result[:, 2] == 1)'], {}), '(result[:, 2] == 1)\n', (8813, 8832), True, 'import numpy as np\n'), ((8856, 8891), 'numpy.sum', 'np.sum', (['(result[:, 1] > inlier_ratio)'], {}), '(result[:, 1] > inlier_ratio)\n', (8862, 8891), True, 'import numpy as np\n'), ((4077, 4127), 'open3d.TransformationEstimationPointToPoint', 'open3d.TransformationEstimationPointToPoint', (['(False)'], {}), '(False)\n', (4120, 4127), False, 'import open3d\n'), ((4274, 4319), 'open3d.RANSACConvergenceCriteria', 'open3d.RANSACConvergenceCriteria', (['(50000)', '(1000)'], {}), '(50000, 1000)\n', (4306, 4319), False, 'import open3d\n'), ((4540, 4560), 'numpy.linalg.inv', 'np.linalg.inv', (['trans'], {}), '(trans)\n', (4553, 4560), True, 'import numpy as np\n'), ((5143, 
5206), 'os.path.join', 'os.path.join', (['resultpath', 'f"""{cloud_bin_s}_{cloud_bin_t}.rt.txt"""'], {}), "(resultpath, f'{cloud_bin_s}_{cloud_bin_t}.rt.txt')\n", (5155, 5206), False, 'import os\n'), ((5628, 5691), 'os.path.join', 'os.path.join', (['resultpath', 'f"""{cloud_bin_s}_{cloud_bin_t}.rt.txt"""'], {}), "(resultpath, f'{cloud_bin_s}_{cloud_bin_t}.rt.txt')\n", (5640, 5691), False, 'import os\n'), ((3102, 3128), 'numpy.power', 'np.power', (['(frag1 - frag2)', '(2)'], {}), '(frag1 - frag2, 2)\n', (3110, 3128), True, 'import numpy as np\n'), ((4145, 4195), 'open3d.CorrespondenceCheckerBasedOnEdgeLength', 'open3d.CorrespondenceCheckerBasedOnEdgeLength', (['(0.9)'], {}), '(0.9)\n', (4190, 4195), False, 'import open3d\n'), ((4210, 4259), 'open3d.CorrespondenceCheckerBasedOnDistance', 'open3d.CorrespondenceCheckerBasedOnDistance', (['(0.05)'], {}), '(0.05)\n', (4253, 4259), False, 'import open3d\n'), ((4413, 4464), 'os.path.join', 'os.path.join', (['logpath', 'f"""{desc_name}_{timestr}.log"""'], {}), "(logpath, f'{desc_name}_{timestr}.log')\n", (4425, 4464), False, 'import os\n'), ((6613, 6632), 'os.listdir', 'os.listdir', (['pcdpath'], {}), '(pcdpath)\n', (6623, 6632), False, 'import os\n'), ((9766, 9785), 'numpy.std', 'np.std', (['recall_list'], {}), '(recall_list)\n', (9772, 9785), True, 'import numpy as np\n'), ((7007, 7018), 'time.time', 'time.time', ([], {}), '()\n', (7016, 7018), False, 'import time\n'), ((8417, 8436), 'os.listdir', 'os.listdir', (['pcdpath'], {}), '(pcdpath)\n', (8427, 8436), False, 'import os\n'), ((9204, 9229), 'numpy.zeros', 'np.zeros', (['result.shape[0]'], {}), '(result.shape[0])\n', (9212, 9229), True, 'import numpy as np\n'), ((9381, 9406), 'numpy.zeros', 'np.zeros', (['result.shape[0]'], {}), '(result.shape[0])\n', (9389, 9406), True, 'import numpy as np\n')] |
import pytest
import numpy as np
from jina.excepts import BadClientCallback
from jina.flow import Flow
def validate(x):
    # Module-level stand-in callback: always fails.
    # NOTE(review): the test below shadows this with its own local
    # `validate`, so this copy appears unused at runtime.
    raise NotImplementedError
@pytest.mark.parametrize('restful', [False, True])
def test_client_on_error(restful):
    """Exercise the gateway's recovery after a client callback blows up.

    Running two tests in a row also covers this sequence:
    1. an exception in the client callback breaks the
       `async for req in stub.Call(req_iter)` loop on the client,
    2. the server may still hold state for that stream,
    3. the client is restarted while the server is untouched,
    4. the gateway must not get stuck on the stale connection.
    """

    def failing_callback(x):
        raise NotImplementedError

    with Flow(restful=restful).add() as f:
        raised = False
        try:
            # A bad client callback breaks the client's request stream.
            f.index_ndarray(np.random.random([5, 4]),
                            on_done=failing_callback, continue_on_error=False)
        except BadClientCallback:
            raised = True
        # Query the gateway again: its channel must still be usable.
        f.index_ndarray(np.random.random([5, 4]),
                        on_done=failing_callback, continue_on_error=True)
        assert raised
| [
"numpy.random.random",
"pytest.mark.parametrize",
"jina.flow.Flow"
] | [((156, 205), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""restful"""', '[False, True]'], {}), "('restful', [False, True])\n", (179, 205), False, 'import pytest\n'), ((1196, 1220), 'numpy.random.random', 'np.random.random', (['[5, 4]'], {}), '([5, 4])\n', (1212, 1220), True, 'import numpy as np\n'), ((791, 812), 'jina.flow.Flow', 'Flow', ([], {'restful': 'restful'}), '(restful=restful)\n', (795, 812), False, 'from jina.flow import Flow\n'), ((880, 904), 'numpy.random.random', 'np.random.random', (['[5, 4]'], {}), '([5, 4])\n', (896, 904), True, 'import numpy as np\n')] |
import numpy as np
import scipy as sp
from scipy.linalg import cho_factor, cho_solve
import time
start_time = time.time()
#float_formatter = '{:.4f}'.format
#np.set_printoptions(formatter={'float_kind':float_formatter})
N = 1000
print('N: ', N)
#Filling N*N array to initialize it
A1 = np.zeros((N,N), float)
A2 = np.zeros((N,N), float)
b1 = np.zeros((N,1), float)
b2 = np.ones((N,1), float)
#Filling arrays with the correspondant values
np.fill_diagonal(A1, 6)
np.fill_diagonal(A1[1:], -4)
np.fill_diagonal(A1[:, 1:], -4)
np.fill_diagonal(A1[2:], 1)
np.fill_diagonal(A1[:, 2:], 1)
np.fill_diagonal(A2, 7)
np.fill_diagonal(A2[1:], -4)
np.fill_diagonal(A2[:, 1:], -4)
np.fill_diagonal(A2[2:], 1)
np.fill_diagonal(A2[:, 2:], 1)
b1[0] = 3
b1[1] = -1
b1[-2] = -1
b1[-1] = 3
b2[0] = 4
b2[1] = 0
b2[-2] = 0
b2[-1] = 4
A, low = cho_factor(A1)
x = cho_solve((A, low), b1)
print('A1 x = b1 \n Ten median x are:')
ml = len(x) // 2 - 5
mu = len(x) // 2 + 5
print(x[ml : mu])
A, low = cho_factor(A2)
x = cho_solve((A, low), b2)
print('A2 x = b2 \n Ten median x are:')
ml = len(x) // 2 - 5
mu = len(x) // 2 + 5
print(x[ml : mu])
print("--- %s seconds ---" % (time.time() - start_time)) | [
"scipy.linalg.cho_solve",
"numpy.ones",
"scipy.linalg.cho_factor",
"numpy.fill_diagonal",
"numpy.zeros",
"time.time"
] | [((110, 121), 'time.time', 'time.time', ([], {}), '()\n', (119, 121), False, 'import time\n'), ((289, 312), 'numpy.zeros', 'np.zeros', (['(N, N)', 'float'], {}), '((N, N), float)\n', (297, 312), True, 'import numpy as np\n'), ((317, 340), 'numpy.zeros', 'np.zeros', (['(N, N)', 'float'], {}), '((N, N), float)\n', (325, 340), True, 'import numpy as np\n'), ((346, 369), 'numpy.zeros', 'np.zeros', (['(N, 1)', 'float'], {}), '((N, 1), float)\n', (354, 369), True, 'import numpy as np\n'), ((374, 396), 'numpy.ones', 'np.ones', (['(N, 1)', 'float'], {}), '((N, 1), float)\n', (381, 396), True, 'import numpy as np\n'), ((445, 468), 'numpy.fill_diagonal', 'np.fill_diagonal', (['A1', '(6)'], {}), '(A1, 6)\n', (461, 468), True, 'import numpy as np\n'), ((469, 497), 'numpy.fill_diagonal', 'np.fill_diagonal', (['A1[1:]', '(-4)'], {}), '(A1[1:], -4)\n', (485, 497), True, 'import numpy as np\n'), ((498, 529), 'numpy.fill_diagonal', 'np.fill_diagonal', (['A1[:, 1:]', '(-4)'], {}), '(A1[:, 1:], -4)\n', (514, 529), True, 'import numpy as np\n'), ((530, 557), 'numpy.fill_diagonal', 'np.fill_diagonal', (['A1[2:]', '(1)'], {}), '(A1[2:], 1)\n', (546, 557), True, 'import numpy as np\n'), ((558, 588), 'numpy.fill_diagonal', 'np.fill_diagonal', (['A1[:, 2:]', '(1)'], {}), '(A1[:, 2:], 1)\n', (574, 588), True, 'import numpy as np\n'), ((590, 613), 'numpy.fill_diagonal', 'np.fill_diagonal', (['A2', '(7)'], {}), '(A2, 7)\n', (606, 613), True, 'import numpy as np\n'), ((614, 642), 'numpy.fill_diagonal', 'np.fill_diagonal', (['A2[1:]', '(-4)'], {}), '(A2[1:], -4)\n', (630, 642), True, 'import numpy as np\n'), ((643, 674), 'numpy.fill_diagonal', 'np.fill_diagonal', (['A2[:, 1:]', '(-4)'], {}), '(A2[:, 1:], -4)\n', (659, 674), True, 'import numpy as np\n'), ((675, 702), 'numpy.fill_diagonal', 'np.fill_diagonal', (['A2[2:]', '(1)'], {}), '(A2[2:], 1)\n', (691, 702), True, 'import numpy as np\n'), ((703, 733), 'numpy.fill_diagonal', 'np.fill_diagonal', (['A2[:, 2:]', '(1)'], {}), '(A2[:, 2:], 
1)\n', (719, 733), True, 'import numpy as np\n'), ((832, 846), 'scipy.linalg.cho_factor', 'cho_factor', (['A1'], {}), '(A1)\n', (842, 846), False, 'from scipy.linalg import cho_factor, cho_solve\n'), ((851, 874), 'scipy.linalg.cho_solve', 'cho_solve', (['(A, low)', 'b1'], {}), '((A, low), b1)\n', (860, 874), False, 'from scipy.linalg import cho_factor, cho_solve\n'), ((988, 1002), 'scipy.linalg.cho_factor', 'cho_factor', (['A2'], {}), '(A2)\n', (998, 1002), False, 'from scipy.linalg import cho_factor, cho_solve\n'), ((1007, 1030), 'scipy.linalg.cho_solve', 'cho_solve', (['(A, low)', 'b2'], {}), '((A, low), b2)\n', (1016, 1030), False, 'from scipy.linalg import cho_factor, cho_solve\n'), ((1165, 1176), 'time.time', 'time.time', ([], {}), '()\n', (1174, 1176), False, 'import time\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Import Base Packages
# In[ ]:
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
# # Interface function to feature engineering data
# In[ ]:
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
column_names = [ 'age', 'workclass', 'fnlwgt', 'education', 'educational-num', 'marital-status', 'occupation', 'relationship', 'race', 'gender', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income' ]
columns_to_encoding = [ 'workclass', 'marital-status', 'occupation', 'relationship', 'race', 'gender' ]
columns_to_normalize = [ 'age', 'educational-num', 'hours-per-week', 'capital-gain', 'capital-loss' ]
le = LabelEncoder()
scaler = StandardScaler()
pl = PolynomialFeatures(2, include_bias=False)
def feature_engineering(filename, train=True):
df = pd.read_csv(filename, index_col=False)
df.drop(['fnlwgt', 'education', 'native-country'], axis=1, inplace=True)
df = pd.get_dummies(df, columns=columns_to_encoding)
df["income"] = le.fit_transform(df['income'])
if train:
X_temp = pl.fit_transform(df[columns_to_normalize])
X_temp = scaler.fit_transform(X_temp)
df.drop(columns_to_normalize, axis=1, inplace=True)
X_train = np.hstack((df.values, X_temp))
y_train = df['income']
columns_names = pl.get_feature_names(df.columns)
return np.hstack((df.columns.values, columns_names)), X_train, y_train
else:
X_temp = pl.transform(df[columns_to_normalize])
X_temp = scaler.transform(X_temp)
df.drop(columns_to_normalize, axis=1, inplace=True)
X_test = np.hstack((df.values, X_temp))
y_test = df['income']
columns_names = pl.get_feature_names(df.columns)
return np.hstack((df.columns.values, columns_names)), X_test, y_test
# # Load Data
# In[ ]:
columns_names, X, y = feature_engineering("../../../input/wenruliu_adult-income-dataset/adult.csv", train=True)
# In[ ]:
from sklearn.model_selection import train_test_split
def rmnan(X, y):
X_, y_ = [], []
for x, yt in zip(X, y):
if np.isnan(x).any() or np.isnan(yt).any():
continue
X_.append(x)
y_.append(yt)
return np.array(X_), np.array(y_)
X, y = rmnan(X, y)
# In[ ]:
X, X_test, y, y_test = train_test_split(X, y, test_size=0.30, random_state=42)
y.shape, y_test.shape
# # Find Best number of components to PCA
# In[ ]:
from sklearn.tree import DecisionTreeClassifier
from sklearn.decomposition import PCA
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import make_scorer
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.model_selection import RepeatedStratifiedKFold
param_distribution = { 'max_depth': np.arange(1, 15), }
scoring = { 'Accuracy': make_scorer(accuracy_score), 'F1_Score': make_scorer(fbeta_score, beta=1, average='micro'), }
# In[ ]:
result = []
kf = RepeatedStratifiedKFold(n_splits=2, n_repeats=2)
for fold, (train_index, test_index) in enumerate(kf.split(X, y)):
X_tr, X_tst = X[train_index], X[test_index]
y_tr, y_tst = y[train_index], y[test_index]
for i in range(1, 20):
# train
pca = PCA(i)
X_t = pca.fit_transform(X_tr)
search_cv = RandomizedSearchCV(DecisionTreeClassifier(), param_distribution, scoring=scoring, n_jobs=-1, cv=RepeatedStratifiedKFold(n_splits=2, n_repeats=2), refit='F1_Score')
search_cv.fit(X_t, y_tr)
model = search_cv.best_estimator_
# test
X_t = pca.transform(X_tst)
y_pred = model.predict(X_t)
# model evaluation
f1 = fbeta_score(y_tst, y_pred, beta=1)
acc = accuracy_score(y_tst, y_pred)
print(f"fold: {fold} - cp:{i} train: {search_cv.best_score_} test: f1={f1}, acc={acc}")
result.append((fold, i, acc, f1, pca, model))
# In[ ]:
best_f1 = 0
best_model = None
for fold, n, acc, f1, pca, model in result:
if best_f1 < f1:
best_f1 = f1
best_model=(fold, n, acc, f1, pca, model)
pca_components = best_model[1]
pca_components
# # Get best model with best pca_components number
# In[ ]:
result, metrics_ = [], []
kf = RepeatedStratifiedKFold(n_splits=10, n_repeats=1)
for fold, (train_index, test_index) in enumerate(kf.split(X, y)):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
# train
pca = PCA(pca_components)
X_t = pca.fit_transform(X_train)
search_cv = RandomizedSearchCV(DecisionTreeClassifier(), param_distribution, scoring=scoring, n_jobs=-1, cv=RepeatedStratifiedKFold(n_splits=10, n_repeats=1), refit='F1_Score')
search_cv.fit(X_t, y_train)
model = search_cv.best_estimator_
# test
X_t = pca.transform(X_test)
y_pred = model.predict(X_t)
# model evaluation
f1 = fbeta_score(y_test, y_pred, beta=1)
acc = accuracy_score(y_test, y_pred)
print(f"fold: {fold} - cp:{pca_components} train: {search_cv.best_score_} test: f1={f1}, acc={acc}")
result.append((X_train, y_train, X_test, y_test, fold, i, acc, f1, pca, model))
metrics_.append((f1, acc))
# In[ ]:
best_f1 = 0
best_model = None
for X_train, y_train, X_test, y_test, fold, n, acc, f1, pca, model in result:
if best_f1 < f1:
best_f1 = f1
best_model=(X_train, y_train, X_test, y_test, fold, n, acc, f1, pca, model)
X_train, y_train, X_test, y_test = X, y, X_test, y_test #best_model[:4]
# # Analyse Model Result
# In[ ]:
from sklearn import metrics
pca, model = best_model[-2], best_model[-1]
probs = model.predict_proba(pca.transform(X_test))
preds = probs[:,1]
fpr, tpr, threshold = metrics.roc_curve(y_test, preds)
roc_auc = metrics.auc(fpr, tpr)
# method I: plt
import matplotlib.pyplot as plt
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
print()
# In[ ]:
f1_r, acc_r = [], []
for f1, acc in metrics_:
f1_r.append(f1)
acc_r.append(acc)
f1_r, acc_r = np.array(f1_r), np.array(acc_r)
l = f1_r.shape[0]
plt.title(f'F1 Score in Folds(PCA components = {pca_components})')
plt.plot(range(l), f1_r, 'r', label = 'F1 Score')
plt.plot(range(l), acc_r, 'b', label = 'Accuracy')
plt.legend(loc = 'lower right')
plt.xticks(range(l))
plt.xlim([0, l - 1])
plt.ylim([0.95, 1])
plt.ylabel('F1 Score')
plt.xlabel('Fold')
plt.grid()
print()
# ## Plot feature importances
# In[ ]:
def plot_feature_importances(clf, X_train, y_train=None, top_n=10, figsize=(8,8), print_table=False, title="Feature Importances"):
# https://www.kaggle.com/grfiv4/plotting-feature-importances
__name__ = "plot_feature_importances"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
X_train = pd.DataFrame(data=X_train, columns=[f"PC{i}" for i in range(1, X_train.shape[1] + 1)])
feat_imp = pd.DataFrame({'importance':clf.feature_importances_})
feat_imp['feature'] = X_train.columns
feat_imp.sort_values(by='importance', ascending=False, inplace=True)
feat_imp = feat_imp.iloc[:top_n]
feat_imp.sort_values(by='importance', inplace=True)
feat_imp = feat_imp.set_index('feature', drop=True)
feat_imp.plot.barh(title=title, figsize=figsize)
plt.xlabel('Feature Importance Score')
print()
if print_table:
from IPython.display import display
print("Top {} features in descending order of importance".format(top_n))
print(feat_imp.sort_values(by='importance', ascending=False))
return feat_imp
pca, clf = best_model[-2], best_model[-1]
feature_importance = plot_feature_importances(clf, pca.transform(X_train), top_n=X_train.shape[1], title=clf.__class__.__name__)
# ## Get Features Used to Generate PCA Components
# In[ ]:
# https://stackoverflow.com/questions/22348668/pca-decomposition-with-python-features-relevances
pca, clf = best_model[-2], best_model[-1]
index_components = [int(x[2:]) for x in feature_importance.index.values]
def features_used_to_generate_pca_components(index_components, pca, clf, columns_names):
for i in index_components:
index_features = np.abs(pca.components_[i - 1]).argsort()[:4]
features = columns_names[index_features]
print(f'PC{i}')
print(f'Features:')
for f in features:
print("\t" + f)
print()
features_used_to_generate_pca_components(index_components, pca, clf, columns_names)
# ## Confusion Matrix
# In[ ]:
from sklearn.metrics import confusion_matrix
pca, clf = best_model[-2], best_model[-1]
y_pred = clf.predict(pca.transform(X_test))
cm = confusion_matrix(y_test, y_pred)
cm
# In[ ]:
import itertools
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
""" This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
print()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt), horizontalalignment="center", color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plot_confusion_matrix(cm, [0, 1], True)
# ## Classification Report
# In[ ]:
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
# # Save Best Model
# In[ ]:
from sklearn.tree import export_graphviz
# Export as dot file
export_graphviz(best_model[-1], out_file='tree.dot', class_names = [">= 50K", "< 50K"], rounded = True, proportion = False, precision = 2, filled = True)
# Convert to png using system command (requires Graphviz)
from subprocess import call
call(['dot', '-Tpng', 'tree.dot', '-o', 'tree.png', '-Gdpi=600'])
# Display in jupyter notebook
from IPython.display import Image
Image(filename = 'tree.png')
# In[ ]:
from sklearn.externals import joblib
joblib.dump(best_model, 'lgr.joblib')
| [
"sklearn.preprocessing.LabelEncoder",
"sklearn.preprocessing.PolynomialFeatures",
"matplotlib.pyplot.grid",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"sklearn.metrics.classification_report",
"sklearn.metrics.auc",
"numpy.hstack",
"sklearn.tree.export_graphviz",
"numpy.array",
"sklearn.metri... | [((132, 165), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (155, 165), False, 'import warnings\n'), ((822, 836), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (834, 836), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((846, 862), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (860, 862), False, 'from sklearn.preprocessing import StandardScaler\n'), ((868, 909), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', (['(2)'], {'include_bias': '(False)'}), '(2, include_bias=False)\n', (886, 909), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((2483, 2537), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(42)'}), '(X, y, test_size=0.3, random_state=42)\n', (2499, 2537), False, 'from sklearn.model_selection import train_test_split\n'), ((3149, 3197), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': '(2)', 'n_repeats': '(2)'}), '(n_splits=2, n_repeats=2)\n', (3172, 3197), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((4420, 4469), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': '(10)', 'n_repeats': '(1)'}), '(n_splits=10, n_repeats=1)\n', (4443, 4469), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((5916, 5948), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['y_test', 'preds'], {}), '(y_test, preds)\n', (5933, 5948), False, 'from sklearn import metrics\n'), ((5959, 5980), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (5970, 5980), False, 'from sklearn import metrics\n'), ((6030, 6076), 'matplotlib.pyplot.title', 'plt.title', (['"""Receiver Operating Characteristic"""'], {}), "('Receiver Operating Characteristic')\n", (6039, 6076), True, 'import 
matplotlib.pyplot as plt\n'), ((6077, 6131), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr', '"""b"""'], {'label': "('AUC = %0.2f' % roc_auc)"}), "(fpr, tpr, 'b', label='AUC = %0.2f' % roc_auc)\n", (6085, 6131), True, 'import matplotlib.pyplot as plt\n'), ((6134, 6163), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (6144, 6163), True, 'import matplotlib.pyplot as plt\n'), ((6166, 6197), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""r--"""'], {}), "([0, 1], [0, 1], 'r--')\n", (6174, 6197), True, 'import matplotlib.pyplot as plt\n'), ((6197, 6213), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, 1]'], {}), '([0, 1])\n', (6205, 6213), True, 'import matplotlib.pyplot as plt\n'), ((6214, 6230), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), '([0, 1])\n', (6222, 6230), True, 'import matplotlib.pyplot as plt\n'), ((6231, 6263), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (6241, 6263), True, 'import matplotlib.pyplot as plt\n'), ((6264, 6297), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (6274, 6297), True, 'import matplotlib.pyplot as plt\n'), ((6472, 6538), 'matplotlib.pyplot.title', 'plt.title', (['f"""F1 Score in Folds(PCA components = {pca_components})"""'], {}), "(f'F1 Score in Folds(PCA components = {pca_components})')\n", (6481, 6538), True, 'import matplotlib.pyplot as plt\n'), ((6640, 6669), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (6650, 6669), True, 'import matplotlib.pyplot as plt\n'), ((6693, 6713), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0, l - 1]'], {}), '([0, l - 1])\n', (6701, 6713), True, 'import matplotlib.pyplot as plt\n'), ((6714, 6733), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.95, 1]'], {}), '([0.95, 1])\n', (6722, 6733), True, 'import matplotlib.pyplot as plt\n'), ((6734, 
6756), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""F1 Score"""'], {}), "('F1 Score')\n", (6744, 6756), True, 'import matplotlib.pyplot as plt\n'), ((6757, 6775), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Fold"""'], {}), "('Fold')\n", (6767, 6775), True, 'import matplotlib.pyplot as plt\n'), ((6776, 6786), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (6784, 6786), True, 'import matplotlib.pyplot as plt\n'), ((9059, 9091), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (9075, 9091), False, 'from sklearn.metrics import confusion_matrix\n'), ((10409, 10556), 'sklearn.tree.export_graphviz', 'export_graphviz', (['best_model[-1]'], {'out_file': '"""tree.dot"""', 'class_names': "['>= 50K', '< 50K']", 'rounded': '(True)', 'proportion': '(False)', 'precision': '(2)', 'filled': '(True)'}), "(best_model[-1], out_file='tree.dot', class_names=['>= 50K',\n '< 50K'], rounded=True, proportion=False, precision=2, filled=True)\n", (10424, 10556), False, 'from sklearn.tree import export_graphviz\n'), ((10652, 10717), 'subprocess.call', 'call', (["['dot', '-Tpng', 'tree.dot', '-o', 'tree.png', '-Gdpi=600']"], {}), "(['dot', '-Tpng', 'tree.dot', '-o', 'tree.png', '-Gdpi=600'])\n", (10656, 10717), False, 'from subprocess import call\n'), ((10783, 10809), 'IPython.display.Image', 'Image', ([], {'filename': '"""tree.png"""'}), "(filename='tree.png')\n", (10788, 10809), False, 'from IPython.display import Image\n'), ((10863, 10900), 'sklearn.externals.joblib.dump', 'joblib.dump', (['best_model', '"""lgr.joblib"""'], {}), "(best_model, 'lgr.joblib')\n", (10874, 10900), False, 'from sklearn.externals import joblib\n'), ((967, 1005), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'index_col': '(False)'}), '(filename, index_col=False)\n', (978, 1005), True, 'import pandas as pd\n'), ((1100, 1147), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {'columns': 'columns_to_encoding'}), '(df, 
columns=columns_to_encoding)\n', (1114, 1147), True, 'import pandas as pd\n'), ((2978, 2994), 'numpy.arange', 'np.arange', (['(1)', '(15)'], {}), '(1, 15)\n', (2987, 2994), True, 'import numpy as np\n'), ((3024, 3051), 'sklearn.metrics.make_scorer', 'make_scorer', (['accuracy_score'], {}), '(accuracy_score)\n', (3035, 3051), False, 'from sklearn.metrics import make_scorer\n'), ((3065, 3114), 'sklearn.metrics.make_scorer', 'make_scorer', (['fbeta_score'], {'beta': '(1)', 'average': '"""micro"""'}), "(fbeta_score, beta=1, average='micro')\n", (3076, 3114), False, 'from sklearn.metrics import make_scorer\n'), ((4667, 4686), 'sklearn.decomposition.PCA', 'PCA', (['pca_components'], {}), '(pca_components)\n', (4670, 4686), False, 'from sklearn.decomposition import PCA\n'), ((5093, 5128), 'sklearn.metrics.fbeta_score', 'fbeta_score', (['y_test', 'y_pred'], {'beta': '(1)'}), '(y_test, y_pred, beta=1)\n', (5104, 5128), False, 'from sklearn.metrics import fbeta_score\n'), ((5139, 5169), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5153, 5169), False, 'from sklearn.metrics import accuracy_score\n'), ((6422, 6436), 'numpy.array', 'np.array', (['f1_r'], {}), '(f1_r)\n', (6430, 6436), True, 'import numpy as np\n'), ((6438, 6453), 'numpy.array', 'np.array', (['acc_r'], {}), '(acc_r)\n', (6446, 6453), True, 'import numpy as np\n'), ((7293, 7347), 'pandas.DataFrame', 'pd.DataFrame', (["{'importance': clf.feature_importances_}"], {}), "({'importance': clf.feature_importances_})\n", (7305, 7347), True, 'import pandas as pd\n'), ((7677, 7715), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Feature Importance Score"""'], {}), "('Feature Importance Score')\n", (7687, 7715), True, 'import matplotlib.pyplot as plt\n'), ((9567, 9617), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (9577, 9617), True, 'import matplotlib.pyplot as 
plt\n'), ((9622, 9638), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (9631, 9638), True, 'import matplotlib.pyplot as plt\n'), ((9696, 9740), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (9706, 9740), True, 'import matplotlib.pyplot as plt\n'), ((9745, 9776), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (9755, 9776), True, 'import matplotlib.pyplot as plt\n'), ((10049, 10073), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (10059, 10073), True, 'import matplotlib.pyplot as plt\n'), ((10078, 10107), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (10088, 10107), True, 'import matplotlib.pyplot as plt\n'), ((10112, 10130), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10128, 10130), True, 'import matplotlib.pyplot as plt\n'), ((10274, 10311), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (10295, 10311), False, 'from sklearn.metrics import classification_report\n'), ((1396, 1426), 'numpy.hstack', 'np.hstack', (['(df.values, X_temp)'], {}), '((df.values, X_temp))\n', (1405, 1426), True, 'import numpy as np\n'), ((1779, 1809), 'numpy.hstack', 'np.hstack', (['(df.values, X_temp)'], {}), '((df.values, X_temp))\n', (1788, 1809), True, 'import numpy as np\n'), ((2400, 2412), 'numpy.array', 'np.array', (['X_'], {}), '(X_)\n', (2408, 2412), True, 'import numpy as np\n'), ((2414, 2426), 'numpy.array', 'np.array', (['y_'], {}), '(y_)\n', (2422, 2426), True, 'import numpy as np\n'), ((3418, 3424), 'sklearn.decomposition.PCA', 'PCA', (['i'], {}), '(i)\n', (3421, 3424), False, 'from sklearn.decomposition import PCA\n'), ((3859, 3893), 'sklearn.metrics.fbeta_score', 'fbeta_score', (['y_tst', 'y_pred'], {'beta': '(1)'}), 
'(y_tst, y_pred, beta=1)\n', (3870, 3893), False, 'from sklearn.metrics import fbeta_score\n'), ((3908, 3937), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_tst', 'y_pred'], {}), '(y_tst, y_pred)\n', (3922, 3937), False, 'from sklearn.metrics import accuracy_score\n'), ((4759, 4783), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (4781, 4783), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((1530, 1575), 'numpy.hstack', 'np.hstack', (['(df.columns.values, columns_names)'], {}), '((df.columns.values, columns_names))\n', (1539, 1575), True, 'import numpy as np\n'), ((1912, 1957), 'numpy.hstack', 'np.hstack', (['(df.columns.values, columns_names)'], {}), '((df.columns.values, columns_names))\n', (1921, 1957), True, 'import numpy as np\n'), ((3502, 3526), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (3524, 3526), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((4836, 4885), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': '(10)', 'n_repeats': '(1)'}), '(n_splits=10, n_repeats=1)\n', (4859, 4885), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((3579, 3627), 'sklearn.model_selection.RepeatedStratifiedKFold', 'RepeatedStratifiedKFold', ([], {'n_splits': '(2)', 'n_repeats': '(2)'}), '(n_splits=2, n_repeats=2)\n', (3602, 3627), False, 'from sklearn.model_selection import RepeatedStratifiedKFold\n'), ((2276, 2287), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (2284, 2287), True, 'import numpy as np\n'), ((2297, 2309), 'numpy.isnan', 'np.isnan', (['yt'], {}), '(yt)\n', (2305, 2309), True, 'import numpy as np\n'), ((8574, 8604), 'numpy.abs', 'np.abs', (['pca.components_[i - 1]'], {}), '(pca.components_[i - 1])\n', (8580, 8604), True, 'import numpy as np\n')] |
from Textures import VOICE_MODULATION, DIALOGBOX_READOUT, NAMIKO, FRAMEBORDER
__author__ = "<NAME>"
__copyright__ = "Copyright 2007, Cobra Project"
__credits__ = ["<NAME>"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Demo"
from numpy import linspace
from random import randint
try:
import pygame
from pygame import freetype
except ImportError:
print("\n<Pygame> library is missing on your system."
"\nTry: \n C:\\pip install pygame on a window command prompt.")
raise SystemExit
class DialogBox(pygame.sprite.Sprite):
"""
Create a dialog box and display text vertically.
You can display a dialog box with a variable alpha transparency values through your game
The dialog box can be move around adjusting its coordinates.
e.g :
masako = DialogBox(gl_=GL, location_=(-DIALOG.get_width(), 50),
speed_=15, layer_=-3, voice_=True, scan_=True)
"""
images = None
character = None
containers = None
active = False
inventory = []
text = []
FONT = None
# Voice modulation representation
voice_modulation = None
# random char animation in the background
readout = None
scan_image = None
def __new__(cls,
gl_,
location_: tuple,
speed_: int = 30,
layer_: int = -3,
voice_: bool=True,
scan_: bool=True,
start_: int=0,
direction_: str = 'RIGHT',
text_color_=pygame.Color(149, 119, 236, 245),
fadein_=100,
fadeout_=1000,
*args, **kwargs):
# return if an instance already exist.
if DialogBox.active is None:
return
else:
return super().__new__(cls, *args, **kwargs)
def __init__(self,
gl_, # global variable
location_: tuple, # position to display the dialog box (x, y)
speed_: int = 15, # Refreshing time 15ms
layer_: int = 0, # Layer to display the dialog box, choose carefully otherwise the dialog box
# might be invisible (underneath other sprites)
voice_: bool=True, # Create a voice sprite fx (voice modulation effect)
scan_: bool=True, # scan effect (lateral scanning fx)
start_: int =0, # Frame number when the frame is triggered.
direction_: str='RIGHT', # moving direction
text_color_=pygame.Color(149, 119, 236, 245), # Text color
fadein_=100, # fade in effect starting frame
fadeout_=1000, # fadout fx starting frame number
):
assert isinstance(location_, tuple), \
'Expecting tuple for argument location_ got %s ' % type(location_)
assert isinstance(speed_, int), \
'Expecting int for argument speed_ got %s ' % type(speed_)
assert isinstance(layer_, int), \
'Expecting int for argument layer_ got %s ' % type(layer_)
assert DialogBox.images is not None, '\n[-]DialogBox.images must be initialised.'
assert DialogBox.voice_modulation is not None, '\n[-]DialogBox.voice_modulation must be initialised.'
assert isinstance(DialogBox.images, pygame.Surface), '\n[-]DialogBox.images must be a pygame.Surface type.'
# assert DialogBox.character is not None, '\n[-]DialogBox.character must be initialised.'
assert DialogBox.containers is not None, '\n[-]DialogBox.containers must be initialised.'
assert DialogBox.FONT is not None, '\n[-]DialogBox.Font must be initialised.'
# assert len(DialogBox.inventory) == 0, ' \n[-]DialogBox.inventory is not empty.'
assert DialogBox.readout is not None, '\n[-]DialogBox.readout must be initialised.'
pygame.sprite.Sprite.__init__(self, self.containers)
self.gl = gl_
self.text = DialogBox.text
self.image = DialogBox.images.copy()
self.image_copy = self.image.copy()
self.location = location_
self.rect = self.image.get_rect(topleft=(self.location[0], self.location[1]))
self.direction = direction_
self.text_origin = 150
self.dt = 0
self.index = 0
DialogBox.active = True
self.timing = speed_
self.max_width, self.max_height = self.image.get_size()
DialogBox.FONT.antialiased = True
# Voice modulation representation index
self.voice_module_index = 0
self.readout_index = 0
self.scan_background_surface = self.scan_image
self.scan_background_surface = pygame.transform.smoothscale(
self.scan_background_surface, (60, self.max_height - 15))
self.scan_index = 0
self.character = DialogBox.character
self.voice = voice_
self.scan = scan_
self.count = 0
# Frame number when the dialog box is active.
# Nothing will happen before frame 100
self.start_dialog_box = start_
self.text_color = text_color_
# dialog box start at frame self.start_dialog_box and zero indicate no delay
self.start_moving = self.start_dialog_box + 0
# stop moving 200 frames after self.start_dialog_box
self.stop_moving = self.start_dialog_box + 200
self.acceleration = linspace(12, 0, self.stop_moving)
self.move_counter = 0
diff = self.stop_moving - self.start_moving
# Fade in alpha values ( 0 -> 255) for diff values
self.fade_in = linspace(0, 255, diff)
self.fade_in_counter = 0
# Fade out alpha values ( 255 -> 0) for diff values
self.fade_out = linspace(255, 0, diff) # fade out
# Frame number when the fading out effect is starting
self.start_fadeout = fadeout_
self.start_fadein = fadein_
self.fade_out_counter = 0
DialogBox.inventory.append(self)
def move_right(self) -> None:
"""
Move the dialog box toward the display (left to right)
self.start_moving variable determine when the dialog box is starting moving
self.stop_moving variable is the opposite.
The velocity is control via the variable self.acceleration (deceleration), you can
change the values for exponential deceleration if you wish.
:return: None
"""
if self.rect.left < 10:
if self.start_moving < self.gl.FRAME < self.stop_moving:
self.rect.move_ip(self.acceleration[self.move_counter % len(self.acceleration) - 1], 0)
self.move_counter += 1
# Continue pushing the dialog box if not
# fully visible after 100 frames (low fps)
else:
if self.gl.FRAME > self.stop_moving:
self.rect.move_ip(2, 0)
def move_left(self) -> None:
"""
Move the dialog box toward the display (left to right)
self.start_moving variable determine when the dialog box is starting moving
self.stop_moving variable is the opposite.
The velocity is control via the variable self.acceleration (deceleration), you can
change the values for exponential deceleration if you wish.
:return: None
"""
if self.rect.right > self.gl.SCREENRECT.w - 10:
if self.start_moving < self.gl.FRAME < self.stop_moving:
self.rect.move_ip(-self.acceleration[self.move_counter % len(self.acceleration) - 1], 0)
self.move_counter += 1
# Continue pushing the dialog box if not
# fully visible after 100 frames (low fps)
else:
if self.gl.FRAME > self.stop_moving:
self.rect.move_ip(-2, 0)
def alpha_in(self) -> None:
"""
Create a fade-in effect for the dialog box, start with the min alpha values 0 and
fade in to the max value 255
self.start_moving variable determine the fade in starting frame number
self.stop_moving variable is the opposite (stop the effect)
:return: None
"""
if self.fade_in_counter < len(self.fade_in) - 1:
if self.start_fadein < self.gl.FRAME < self.stop_moving:
self.image.set_alpha(self.fade_in[self.fade_in_counter % len(self.fade_in) - 1])
self.fade_in_counter += 1
else:
self.image.set_alpha(255)
def alpha_out(self) -> None:
"""
Create a fading out effect of the dialog box.
Start the effect when frame number is over self.start_fadeout (adjust the variable if necessary)
Start with the highest value 255 and fade toward 0.
When the alpha value 0 is reached, the sprite is killed (removed from all groups)
:return: None
"""
if self.gl.FRAME > self.start_fadeout:
self.image.set_alpha(self.fade_out[self.fade_out_counter])
self.fade_out_counter += 1
if self.fade_out_counter > len(self.fade_out) - 1:
self.destroy()
@staticmethod
def destroy():
for instance in DialogBox.inventory:
DialogBox.inventory.remove(instance)
if hasattr(instance, 'kill'):
instance.kill()
def display_text(self, image_):
"""
Scroll the dialogs vertically (moving up)
You can control the text color (RGBA), adjust fgcolor=pygame.Color(149, 119, 236, 245),
text style : here freetype.STYLE_STRONG
text size : size=16
variable y is increment with 25 every lines (vertical spacing)
:param image_: Correspond to the dialog box image (most likely to be self.image)
It is necessary to create a copy of self.image prior passing it as an
positional argument in the display_text such as self.display_text(self.image.copy())
If omitted, the dialog box image will draw over the previous change and so on.
:return: pygame.Surface
"""
if isinstance(self.text, list):
if len(self.text) != 0:
x = 120
y = self.text_origin
for sentence in self.text:
if y > 10:
DialogBox.FONT.render_to(
image_, (x, y), sentence, fgcolor=self.text_color,
style=freetype.STYLE_STRONG, size=16)
y += 25
self.text_origin -= 0.2
return image_
def update(self) -> None:
if self.gl.FRAME > self.start_dialog_box:
if self.dt > self.timing:
self.image = self.image_copy.copy()
# self.rect = self.image.get_rect(topleft=(self.location[0], self.location[1]))
if self.character is not None and isinstance(self.character, list):
self.image.blit(self.character[self.index], (8, 20), special_flags=pygame.BLEND_RGB_ADD)
# skip the last sprite from the sprite list,
# last sprite is the glitch effect
if self.index < len(self.character) - 2:
if self.count > 20:
self.index += 1
self.count = 0
else:
self.index = 0
# display the glitch
if randint(0, 100) >= 98:
self.image.blit(self.character[-1], (10, 20))
if self.scan:
if self.scan_index < self.max_width - 65:
# scan effect speed 4 pixels / frame
self.scan_index += 4
else:
self.scan_index = 15
self.image.blit(
DialogBox.readout[int(self.readout_index)
% len(DialogBox.readout) - 1], (100, 0),
special_flags=pygame.BLEND_RGBA_ADD)
if self.voice:
self.image.blit(
DialogBox.voice_modulation[int(self.voice_module_index)
% len(DialogBox.voice_modulation) - 1], (0, 160),
special_flags=pygame.BLEND_RGBA_ADD)
# scan effect
if self.scan:
self.image.blit(self.scan_background_surface, (self.scan_index, 12),
special_flags=pygame.BLEND_RGB_ADD)
if self.scan_index < self.max_width:
self.scan_index += 0.1
else:
self.scan_index = 0
self.voice_module_index += 0.2
self.readout_index += 1
self.dt = 0
self.image = self.display_text(self.image.copy())
self.image.set_colorkey((0, 0, 0, 0), pygame.RLEACCEL)
self.alpha_in()
if self.gl.FRAME > self.start_fadeout:
self.alpha_out()
if self.direction is 'RIGHT':
self.move_right()
else:
self.move_left()
self.count += 1
self.dt += self.gl.TIME_PASSED_SECONDS
class GL:
    """Minimal global-state holder consumed by DialogBox (shared frame
    counter); the __main__ demo below also attaches TIME_PASSED_SECONDS and
    SCREENRECT attributes to it at runtime."""
    FRAME = 0
if __name__ == '__main__':
    # Demo / manual test: initialise pygame, build two DialogBox instances
    # with different art and directions, then run a 60-fps event loop.
    pygame.init()
    freetype.init(cache_size=64, resolution=72)
    SCREENRECT = pygame.Rect(0, 0, 800, 1024)
    screen = pygame.display.set_mode(SCREENRECT.size, pygame.HWSURFACE, 32)
    BACKGROUND = pygame.image.load('Assets\\background.jpg').convert()
    BACKGROUND = pygame.transform.scale(BACKGROUND, (SCREENRECT.size))
    BACKGROUND.set_alpha(None)
    # FONT = freetype.Font(os.path.join('Assets\\Fonts\\', 'Gtek Technology.ttf'), size=12)
    # print(pygame.font.get_fonts(), pygame.font.match_font('bitstreamverasans'))
    # NOTE(review): hard-coded Windows font path; will fail on other OSes
    FONT = freetype.Font('C:\\Windows\\Fonts\\Arial.ttf')
    FONT.antialiased = False
    clock = pygame.time.Clock()
    screen.blit(BACKGROUND, (0, 0))
    sprite_group = pygame.sprite.Group()
    All = pygame.sprite.RenderUpdates()
    class Player:
        # Stand-in player object for the demo; only needs a truthy alive()
        def __init__(self):
            pass
        def alive(self):
            return True
    player = Player()
    # Compose the dialog background: FRAMEBORDER (art imported elsewhere in
    # this file) with a dark translucent fill inset by 10 px
    FRAMESURFACE = pygame.Surface((FRAMEBORDER.get_width() - 20, FRAMEBORDER.get_height() - 20),
                                   pygame.RLEACCEL).convert()
    FRAMESURFACE.fill((10, 10, 18, 200))
    FRAMEBORDER.blit(FRAMESURFACE, (10, 15))
    DIALOG = FRAMEBORDER
    del FRAMEBORDER, FRAMESURFACE
    # Class-level assets shared by every DialogBox instance
    DialogBox.containers = sprite_group, All
    DialogBox.images = DIALOG
    DialogBox.character = NAMIKO
    DialogBox.voice_modulation = VOICE_MODULATION
    DialogBox.readout = DIALOGBOX_READOUT
    DialogBox.FONT = FONT
    DialogBox.text = ["Protect the transport and reach out ", "Altera the green planet outside the", "asteroid belt.",
                      "There are huge asteroids ahead, focus ", "and dodge them carefully.", "Have fun and good luck.",
                      " ", "Over and out!", "Masako"]
    # NOTE(review): 'im' appears unused below
    im = pygame.image.load("Assets\\icon_glareFx_blue.png").convert()
    DialogBox.scan_image = pygame.image.load("Assets\\icon_glareFx_blue.png").convert()
    DialogBox.scan_image.set_colorkey((0, 0, 0, 0), pygame.RLEACCEL)
    TIME_PASSED_SECONDS = 0
    FRAME = 0
    GL.TIME_PASSED_SECONDS = TIME_PASSED_SECONDS
    GL.FRAME = FRAME
    GL.SCREENRECT = SCREENRECT
    # First dialog box: starts fully off-screen left and slides in rightward
    masako = DialogBox(gl_=GL, location_=(-DIALOG.get_width(), 50),
                       speed_=15, layer_=-3, voice_=True, scan_=True, direction_='RIGHT',
                       text_color_=pygame.Color(149, 119, 236, 245), fadein_=500, fadeout_=1000)
    cobra = pygame.image.load('Assets\\Cobra.png').convert()
    cobra.set_colorkey((0, 0, 0, 0), pygame.RLEACCEL)
    cobra = pygame.transform.smoothscale(cobra, (100, 170))
    # Second dialog box: swapped class assets (portrait, text, smaller frame,
    # red scan image); starts off-screen right and slides in leftward
    DialogBox.character = [cobra, cobra]
    DialogBox.text = ["Don't worry, it won't take long", "before I wreck everything.", " "]
    DialogBox.images = pygame.transform.smoothscale(DIALOG, (400, 200))
    DialogBox.scan_image = pygame.image.load("Assets\\icon_glareFx_red.png").convert()
    DialogBox.scan_image.set_colorkey((0, 0, 0, 0), pygame.RLEACCEL)
    cob = DialogBox(gl_=GL, location_=(SCREENRECT.w + DialogBox.images.get_width(), 500),
                    speed_=15, layer_=-3, voice_=True, scan_=True, start_=500, direction_='LEFT',
                    text_color_=pygame.Color(249, 254, 56, 245), fadein_=500, fadeout_=1100)
    STOP_GAME = False
    # Main loop: pump events, update and draw the sprites, cap at 60 fps and
    # feed the elapsed time back into GL for the DialogBox timers.
    while not STOP_GAME:
        for event in pygame.event.get():  # User did something
            keys = pygame.key.get_pressed()
            if event.type == pygame.QUIT:
                print('Quitting')
                STOP_GAME = True
            if keys[pygame.K_SPACE]:
                pass
            if keys[pygame.K_ESCAPE]:
                STOP_GAME = True
        # screen.fill((0,0,0))
        screen.blit(BACKGROUND, (0, 0))
        All.update()
        All.draw(screen)
        # dirty = All.draw(screen)
        # pygame.display.update(dirty)
        TIME_PASSED_SECONDS = clock.tick(60)
        GL.TIME_PASSED_SECONDS = TIME_PASSED_SECONDS
        pygame.display.flip()
        FRAME += 1
        GL.FRAME = FRAME
        # print(clock.get_fps())
    pygame.quit()
| [
"pygame.init",
"pygame.quit",
"pygame.sprite.RenderUpdates",
"pygame.transform.scale",
"Textures.FRAMEBORDER.blit",
"pygame.display.set_mode",
"pygame.transform.smoothscale",
"pygame.display.flip",
"numpy.linspace",
"pygame.freetype.init",
"pygame.image.load",
"pygame.Rect",
"random.randint"... | [((13983, 13996), 'pygame.init', 'pygame.init', ([], {}), '()\n', (13994, 13996), False, 'import pygame\n'), ((14002, 14045), 'pygame.freetype.init', 'freetype.init', ([], {'cache_size': '(64)', 'resolution': '(72)'}), '(cache_size=64, resolution=72)\n', (14015, 14045), False, 'from pygame import freetype\n'), ((14064, 14092), 'pygame.Rect', 'pygame.Rect', (['(0)', '(0)', '(800)', '(1024)'], {}), '(0, 0, 800, 1024)\n', (14075, 14092), False, 'import pygame\n'), ((14107, 14169), 'pygame.display.set_mode', 'pygame.display.set_mode', (['SCREENRECT.size', 'pygame.HWSURFACE', '(32)'], {}), '(SCREENRECT.size, pygame.HWSURFACE, 32)\n', (14130, 14169), False, 'import pygame\n'), ((14260, 14311), 'pygame.transform.scale', 'pygame.transform.scale', (['BACKGROUND', 'SCREENRECT.size'], {}), '(BACKGROUND, SCREENRECT.size)\n', (14282, 14311), False, 'import pygame\n'), ((14534, 14580), 'pygame.freetype.Font', 'freetype.Font', (['"""C:\\\\Windows\\\\Fonts\\\\Arial.ttf"""'], {}), "('C:\\\\Windows\\\\Fonts\\\\Arial.ttf')\n", (14547, 14580), False, 'from pygame import freetype\n'), ((14624, 14643), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (14641, 14643), False, 'import pygame\n'), ((14701, 14722), 'pygame.sprite.Group', 'pygame.sprite.Group', ([], {}), '()\n', (14720, 14722), False, 'import pygame\n'), ((14734, 14763), 'pygame.sprite.RenderUpdates', 'pygame.sprite.RenderUpdates', ([], {}), '()\n', (14761, 14763), False, 'import pygame\n'), ((15123, 15163), 'Textures.FRAMEBORDER.blit', 'FRAMEBORDER.blit', (['FRAMESURFACE', '(10, 15)'], {}), '(FRAMESURFACE, (10, 15))\n', (15139, 15163), False, 'from Textures import VOICE_MODULATION, DIALOGBOX_READOUT, NAMIKO, FRAMEBORDER\n'), ((16527, 16574), 'pygame.transform.smoothscale', 'pygame.transform.smoothscale', (['cobra', '(100, 170)'], {}), '(cobra, (100, 170))\n', (16555, 16574), False, 'import pygame\n'), ((16734, 16782), 'pygame.transform.smoothscale', 'pygame.transform.smoothscale', (['DIALOG', 
'(400, 200)'], {}), '(DIALOG, (400, 200))\n', (16762, 16782), False, 'import pygame\n'), ((18065, 18078), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (18076, 18078), False, 'import pygame\n'), ((1640, 1672), 'pygame.Color', 'pygame.Color', (['(149)', '(119)', '(236)', '(245)'], {}), '(149, 119, 236, 245)\n', (1652, 1672), False, 'import pygame\n'), ((2775, 2807), 'pygame.Color', 'pygame.Color', (['(149)', '(119)', '(236)', '(245)'], {}), '(149, 119, 236, 245)\n', (2787, 2807), False, 'import pygame\n'), ((4159, 4211), 'pygame.sprite.Sprite.__init__', 'pygame.sprite.Sprite.__init__', (['self', 'self.containers'], {}), '(self, self.containers)\n', (4188, 4211), False, 'import pygame\n'), ((4985, 5076), 'pygame.transform.smoothscale', 'pygame.transform.smoothscale', (['self.scan_background_surface', '(60, self.max_height - 15)'], {}), '(self.scan_background_surface, (60, self.\n max_height - 15))\n', (5013, 5076), False, 'import pygame\n'), ((5711, 5744), 'numpy.linspace', 'linspace', (['(12)', '(0)', 'self.stop_moving'], {}), '(12, 0, self.stop_moving)\n', (5719, 5744), False, 'from numpy import linspace\n'), ((5913, 5935), 'numpy.linspace', 'linspace', (['(0)', '(255)', 'diff'], {}), '(0, 255, diff)\n', (5921, 5935), False, 'from numpy import linspace\n'), ((6057, 6079), 'numpy.linspace', 'linspace', (['(255)', '(0)', 'diff'], {}), '(255, 0, diff)\n', (6065, 6079), False, 'from numpy import linspace\n'), ((17304, 17322), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (17320, 17322), False, 'import pygame\n'), ((17954, 17975), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (17973, 17975), False, 'import pygame\n'), ((14188, 14231), 'pygame.image.load', 'pygame.image.load', (['"""Assets\\\\background.jpg"""'], {}), "('Assets\\\\background.jpg')\n", (14205, 14231), False, 'import pygame\n'), ((15765, 15815), 'pygame.image.load', 'pygame.image.load', (['"""Assets\\\\icon_glareFx_blue.png"""'], {}), "('Assets\\\\icon_glareFx_blue.png')\n", 
(15782, 15815), False, 'import pygame\n'), ((15854, 15904), 'pygame.image.load', 'pygame.image.load', (['"""Assets\\\\icon_glareFx_blue.png"""'], {}), "('Assets\\\\icon_glareFx_blue.png')\n", (15871, 15904), False, 'import pygame\n'), ((16333, 16365), 'pygame.Color', 'pygame.Color', (['(149)', '(119)', '(236)', '(245)'], {}), '(149, 119, 236, 245)\n', (16345, 16365), False, 'import pygame\n'), ((16410, 16448), 'pygame.image.load', 'pygame.image.load', (['"""Assets\\\\Cobra.png"""'], {}), "('Assets\\\\Cobra.png')\n", (16427, 16448), False, 'import pygame\n'), ((16811, 16860), 'pygame.image.load', 'pygame.image.load', (['"""Assets\\\\icon_glareFx_red.png"""'], {}), "('Assets\\\\icon_glareFx_red.png')\n", (16828, 16860), False, 'import pygame\n'), ((17166, 17197), 'pygame.Color', 'pygame.Color', (['(249)', '(254)', '(56)', '(245)'], {}), '(249, 254, 56, 245)\n', (17178, 17197), False, 'import pygame\n'), ((17366, 17390), 'pygame.key.get_pressed', 'pygame.key.get_pressed', ([], {}), '()\n', (17388, 17390), False, 'import pygame\n'), ((11954, 11969), 'random.randint', 'randint', (['(0)', '(100)'], {}), '(0, 100)\n', (11961, 11969), False, 'from random import randint\n'), ((14952, 14975), 'Textures.FRAMEBORDER.get_width', 'FRAMEBORDER.get_width', ([], {}), '()\n', (14973, 14975), False, 'from Textures import VOICE_MODULATION, DIALOGBOX_READOUT, NAMIKO, FRAMEBORDER\n'), ((14982, 15006), 'Textures.FRAMEBORDER.get_height', 'FRAMEBORDER.get_height', ([], {}), '()\n', (15004, 15006), False, 'from Textures import VOICE_MODULATION, DIALOGBOX_READOUT, NAMIKO, FRAMEBORDER\n')] |
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements strategies for creating arguments for functions that follow numpy's
`Generalized Universal Function API <https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_.
"""
from __future__ import absolute_import, division, print_function
import string
from collections import defaultdict
import numpy as np
import numpy.lib.function_base as npfb
from hypothesis.errors import InvalidArgument
from hypothesis.extra.numpy import arrays, order_check
from hypothesis.internal.validation import check_type, check_valid_bound
from hypothesis.strategies import SearchStrategy, builds, composite, fixed_dictionaries, integers, just, tuples
__all__ = ["gufunc_args", "gufunc_arg_shapes"]
# Should not ever need to broadcast beyond this, but should be able to set it
# as high as 32 before breaking assumptions in numpy.
GLOBAL_DIMS_MAX = 12
# Key used in min_side and max_side to indicate min/max on broadcasted dims,
# building sentinel class so we have clean __repr__.
class _BcastDimType(object):
def __repr__(self):
return "BCAST_DIM"
BCAST_DIM = _BcastDimType()
# Value used in default dict for max side if variable not specified
DEFAULT_MAX_SIDE = 5
# This uses "private" function of numpy, but it does the job. It throws a
# pretty readable exception for invalid input, we don't need to add anything.
parse_gufunc_signature = npfb._parse_gufunc_signature
def _weird_digits(ss):
"""In Python 3, some weird unicode characters pass `isdigit` but are not
0-9 characters. This function detects those cases.
"""
weird = set(cc for cc in ss if cc.isdigit() and (cc not in string.digits))
return weird
def _check_set_like(arg, name=""):
"""Validate input can be searched like a `set`."""
try:
0 in arg
except TypeError:
raise InvalidArgument("Expected set-like but got %s=%r (type=%s)" % (name, arg, type(arg).__name__))
def _check_valid_size_interval(min_size, max_size, name, floor=0):
    """Validate that (min_size, max_size) is a usable interval for the
    integers strategy and for array shapes: both bounds are valid integers
    and floor <= min_size <= max_size."""
    # Same validation the integers strategy performs internally
    for bound in (min_size, max_size):
        check_valid_bound(bound, name)
    # order_check also rejects None and enforces floor <= min <= max
    order_check(name, floor, min_size, max_size)
def _order_check_min_max(min_dict, max_dict):
    """Check that every min/max side pair — the defaults and each explicit
    per-dimension entry — forms a valid interval for integers strategies
    and array shapes."""
    _check_valid_size_interval(min_dict.default_factory(), max_dict.default_factory(), "side default")
    explicit_keys = set(min_dict) | set(max_dict)
    for key in explicit_keys:
        _check_valid_size_interval(min_dict[key], max_dict[key], "side %s" % key)
def _int_or_dict(x, default_val):
"""Pre-process cases where argument `x` can be `int`, `dict`, or
`defaultdict`. In all cases, build a `defaultdict` and return it.
"""
# case 1: x already defaultdict, leave it be, pass thru
if isinstance(x, defaultdict):
return x
check_type(int, default_val, "default value")
try:
# case 2: x is or can be converted to dict
D = defaultdict(lambda: default_val, x)
except Exception:
# case 3: x is int => make a const dict
check_type(int, x, "constant value")
D = defaultdict(lambda: x)
# case 4: if can't be converted to dict or int, then exception raised
return D
@composite
def _tuple_of_arrays(draw, shapes, dtype, elements, unique=False):
    """Strategy to generate a tuple of ndarrays with specified shapes.
    Parameters
    ----------
    shapes : list of tuples of int
        List of tuples where each tuple is the shape of an argument. A
        `SearchStrategy` for list of tuples is also supported.
    dtype : list-like of dtype
        List of numpy `dtype` for each argument. These can be either strings
        (``'int64'``), type (``np.int64``), or numpy `dtype`
        (``np.dtype('int64')``). Built in Python types (`int`, `float`, etc)
        also work. A single `dtype` can be supplied for all arguments.
    elements : list-like of strategy
        Strategies to fill in array elements on a per argument basis. One can
        also specify a single strategy
        (e.g., :func:`~hypothesis.strategies.floats`)
        and have it applied to all arguments.
    unique : list-like of bool
        Boolean flag to specify if all elements in an array must be unique.
        One can also specify a single boolean to apply it to all arguments.
    Returns
    -------
    res : tuple of ndarrays
        Resulting ndarrays with shape of `shapes` and elements from `elements`.
    """
    # Shapes may themselves be a strategy (e.g. from gufunc_arg_shapes)
    if isinstance(shapes, SearchStrategy):
        shapes = draw(shapes)
    n = len(shapes)
    # Need this since broadcast_to does not like vars of type type
    if isinstance(dtype, type):
        dtype = [dtype]
    # Broadcast the per-argument settings so a single scalar value applies
    # to all n arguments uniformly.
    dtype = np.broadcast_to(dtype, (n,))
    elements = np.broadcast_to(elements, (n,))
    unique = np.broadcast_to(unique, (n,))
    # This could somewhat easily be done using builds and avoid need for
    # composite if shape is always given and not strategy. Otherwise, we need
    # to chain strategies and probably not worth the effort.
    res = tuple(draw(arrays(dd, ss, elements=ee, unique=uu)) for dd, ss, ee, uu in zip(dtype, shapes, elements, unique))
    return res
def _signature_map(map_dict, parsed_sig):
"""Map values found in parsed gufunc signature.
Parameters
----------
map_dict : dict of str to int
Mapping from `str` dimension names to `int`. All strings in
`parsed_sig` must have entries in `map_dict`.
parsed_sig : list-like of tuples of str
gufunc signature that has already been parsed, e.g., using
`parse_gufunc_signature`.
Returns
-------
shapes : list of tuples of int
list of tuples where each tuple is the shape of an argument.
"""
shapes = [tuple(map_dict[k] for k in arg) for arg in parsed_sig]
return shapes
def _gufunc_arg_shapes(parsed_sig, min_side, max_side):
    """Build a strategy that draws per-argument core shapes consistent with
    a parsed gufunc signature.

    Parameters
    ----------
    parsed_sig : list-like of tuples of str
        Signature already parsed, e.g., by `parse_gufunc_signature`.
    min_side : defaultdict of str to int
        Lower bound for each dimension appearing in `parsed_sig`.
    max_side : defaultdict of str to int
        Upper bound for each dimension appearing in `parsed_sig`.

    Returns
    -------
    shapes : list of tuples of int
        One shape tuple per argument (as drawn by the returned strategy).
    """
    assert min_side.default_factory() <= max_side.default_factory()
    explicit_keys = set(min_side.keys()) | set(max_side.keys())
    assert all(min_side[kk] <= max_side[kk] for kk in explicit_keys)
    # Every dimension label in the signature, including numeric constants
    all_dims = {label for arg in parsed_sig for label in arg}
    # Numeric labels become fixed sizes; named labels draw from [min, max].
    # Weird unicode digits that fool isdigit were already rejected during
    # signature validation.
    dim_st = {}
    for label in all_dims:
        if label.isdigit():
            dim_st[label] = just(int(label))
        else:
            dim_st[label] = integers(min_value=min_side[label], max_value=max_side[label])
    # Draw an int per dimension, then substitute into the signature
    return builds(_signature_map, map_dict=fixed_dictionaries(dim_st), parsed_sig=just(parsed_sig))
def _append_bcast_dims(core_dims, b_dims, set_to_1, n_extra_per_arg):
"""Add extra broadcast dimensions to core dimensions of array shapes.
Parameters
----------
core_dims : list of tuples of int
list of tuples where each tuple is the core shape of an argument. It
has length `n_args`.
b_dims : ndarray of shape (max_dims_extra,)
Must be of `int` dtype and >= 0. Extra dimensions to pre-pend for
roadcasting.
set_to_1 : ndarray of shape (n_args, max_dims_extra)
Must be of `bool` dtype. Which extra dimensions get set to 1 for
broadcasting.
n_extra_per_arg : like-like of shape (n_args,)
Elements must be of int type. Must be in [0, max_dims_extra], how many
extra dimensions to pre-pend to each argument.
Returns
-------
shapes : list of tuples of int
list of tuples where each tuple is the shape of an argument. Extra
dimensions for broadcasting will be present in the shapes. It has
length `n_args`.
"""
# Build 2D array with extra dimensions
# e.g., extra_dims = [[2 5], [2 5]]
extra_dims = np.tile(b_dims, (len(core_dims), 1))
# e.g., extra_dims = [[1 5], [2 5]]
extra_dims[set_to_1] = 1 # This may be outside [min_side, max_side]
# Get full dimensions (core+extra), will chop some on left randomly
# e.g., shapes = [(5, 1, 3), (2, 5, 3, 1)]
# We use pp[len(pp) - nn:] instead of pp[-nn:] since that doesn't handle
# corner case with nn=0 correctly (seems like an oversight of py slicing).
# Call tolist() before tuple to ensure native int type.
shapes = [tuple(pp[len(pp) - nn :].tolist()) + ss for ss, pp, nn in zip(core_dims, extra_dims, n_extra_per_arg)]
return shapes
def gufunc_arg_shapes(signature, excluded=(), min_side=0, max_side=5, max_dims_extra=0):
    """Strategy to generate the shape of ndarrays for arguments to a function
    consistent with its signature with extra dimensions to test broadcasting.
    Parameters
    ----------
    signature : str
        Signature for shapes to be compatible with. Expects string in format
        of numpy generalized universal function signature, e.g.,
        `'(m,n),(n)->(m)'` for vectorized matrix-vector multiplication.
    excluded : set(int)
        Set-like of integers representing the positional for which the function
        will not be vectorized. Uses same format as :obj:`numpy.vectorize`.
    min_side : int or dict
        Minimum size of any side of the arrays. It is good to test the corner
        cases of 0 or 1 sized dimensions when applicable, but if not, a min
        size can be supplied here. Minimums can be provided on a per-dimension
        basis using a dict, e.g. ``min_side={'n': 2}``. One can use, e.g.,
        ``min_side={hypothesis_gufunc.gufunc.BCAST_DIM: 2}`` to limit the size
        of the broadcasted dimensions.
    max_side : int or dict
        Maximum size of any side of the arrays. This can usually be kept small
        and still find most corner cases in testing. Dictionaries can be
        supplied as with `min_side`.
    max_dims_extra : int
        Maximum number of extra dimensions that can be appended on left of
        arrays for broadcasting. This should be kept small as the memory used
        grows exponentially with extra dimensions. By default, no extra
        dimensions are added.
    Returns
    -------
    shapes : list(tuple(int))
        list of tuples where each tuple is the shape of an argument. Extra
        dimensions for broadcasting will be present in the shapes.
    Examples
    --------
    .. code-block:: pycon
      >>> from hypothesis_gufunc.gufunc import BCAST_DIM
      >>> gufunc_arg_shapes('(m,n),(n)->(m)',
                            min_side={'m': 1, 'n': 2}, max_side=3).example()
      [(2, 3), (3,)]
      >>> gufunc_arg_shapes('(m,n),(n)->(m)', max_side=9,
                            min_side={'m': 1, 'n': 2, BCAST_DIM: 5},
                            max_dims_extra=3).example()
      [(6, 6, 7), (6, 7)]
      >>> gufunc_arg_shapes('(m,n),(n)->(m)', excluded=(0,),
                            max_side=20, max_dims_extra=3).example()
      [(11, 13), (1, 1, 1, 13)]
    """
    # Validate/normalize all user-facing arguments up front
    _check_set_like(excluded, name="excluded")
    min_side = _int_or_dict(min_side, 0)
    max_side = _int_or_dict(max_side, DEFAULT_MAX_SIDE)
    _order_check_min_max(min_side, max_side)
    check_type(int, max_dims_extra, "extra dims")
    order_check("extra dims", 0, max_dims_extra, GLOBAL_DIMS_MAX)
    # Validate that the signature contains digits we can parse
    weird_sig_digits = _weird_digits(signature)
    if len(weird_sig_digits) > 0:
        raise InvalidArgument("signature %s contains invalid digits: %s" % (signature, "".join(weird_sig_digits)))
    # Parse out the signature: e.g., parses to [('n', 'm'), ('m', 'p')]
    parsed_sig, _ = parse_gufunc_signature(signature)
    # Get core shapes before broadcasted dimensions
    shapes_st = _gufunc_arg_shapes(parsed_sig, min_side=min_side, max_side=max_side)
    # Skip this broadcasting craziness if we don't want extra dims:
    if max_dims_extra == 0:
        return shapes_st
    # We could use tuples instead without creating type ambiguity since
    # max_dims_extra > 0 and avoid calling arrays, but prob ok like this.
    bcast_dim_st = integers(min_value=min_side[BCAST_DIM], max_value=max_side[BCAST_DIM])
    # Candidate broadcast sizes and the mask of positions degenerated to 1
    extra_dims_st = arrays(np.intp, (max_dims_extra,), elements=bcast_dim_st)
    set_to_1_st = arrays(np.bool_, (len(parsed_sig), max_dims_extra))
    # Excluded args get no extra dims; others are capped so the total rank
    # stays within GLOBAL_DIMS_MAX.
    # np.clip will convert to np int but we don't really care.
    max_extra_per_arg = [
        0 if nn in excluded else np.clip(GLOBAL_DIMS_MAX - len(ss), 0, max_dims_extra)
        for nn, ss in enumerate(parsed_sig)
    ]
    extra_per_arg_st = tuples(*[integers(min_value=0, max_value=mm) for mm in max_extra_per_arg])
    return builds(_append_bcast_dims, shapes_st, extra_dims_st, set_to_1_st, extra_per_arg_st)
def gufunc_args(signature, dtype, elements, unique=False, excluded=(), min_side=0, max_side=5, max_dims_extra=0):
    """Strategy to generate a tuple of ndarrays as arguments for a function,
    consistent with its gufunc signature and including extra broadcast
    dimensions.

    Parameters
    ----------
    signature : str
        Signature for shapes to be compatible with, in numpy generalized
        universal function format, e.g. `'(m,n),(n)->(m)'` for vectorized
        matrix-vector multiplication.
    dtype : list(:class:`numpy:numpy.dtype`)
        Per-argument numpy `dtype`; strings (``'int64'``), types
        (``np.int64``), numpy `dtype` objects, or builtin Python types all
        work. A single `dtype` may be given for all arguments.
    elements : list
        Per-argument strategies that fill in the array elements. A single
        strategy (e.g., :func:`~hypothesis.strategies.floats`) may be given
        for all arguments.
    unique : list(bool)
        Per-argument flag requiring all elements of that array to be unique.
        A single boolean may be given for all arguments.
    excluded : set(int)
        Positional arguments for which the function will not be vectorized;
        same format as :obj:`numpy.vectorize`.
    min_side : int or dict
        Minimum size of any array side. Per-dimension minimums may be given
        as a dict, e.g. ``min_side={'n': 2}``; use the
        ``hypothesis_gufunc.gufunc.BCAST_DIM`` key to bound the broadcast
        dimensions.
    max_side : int or dict
        Maximum size of any array side; dicts are accepted as with
        `min_side`. Small values usually suffice to find corner cases.
    max_dims_extra : int
        Maximum number of extra dimensions prepended on the left for
        broadcasting. Keep small: memory grows exponentially with extra
        dimensions. Defaults to none.

    Returns
    -------
    res : tuple(:class:`numpy:numpy.ndarray`)
        ndarrays whose shapes are consistent with `signature` (including
        broadcast dimensions) and whose elements come from `elements`.

    Examples
    --------
    .. code-block:: pycon
      >>> from hypothesis_gufunc.gufunc import BCAST_DIM
      >>> from hypothesis.strategies import integers, booleans
      >>> gufunc_args('(m,n),(n)->(m)',
                      dtype=np.int_, elements=integers(0, 9), max_side=3,
                      min_side={'m': 1, 'n': 2, BCAST_DIM: 3}).example()
      (array([[9, 8, 1],
              [1, 7, 1]]), array([5, 6, 5]))
      >>> gufunc_args('(m,n),(n)->(m)', dtype=['bool', 'int32'],
                      elements=[booleans(), integers(0, 100)],
                      unique=[False, True], max_dims_extra=3).example()
      (array([[[[[ True,  True,  True,  True,  True],
                 [False,  True,  True,  True, False]]]]], dtype=bool),
       array([67, 43,  0, 34, 66], dtype=int32))
    """
    # Draw shapes first, then fill each shape with drawn array contents
    shapes_strategy = gufunc_arg_shapes(
        signature,
        excluded=excluded,
        min_side=min_side,
        max_side=max_side,
        max_dims_extra=max_dims_extra,
    )
    return _tuple_of_arrays(shapes_strategy, dtype=dtype, elements=elements, unique=unique)
| [
"hypothesis.extra.numpy.order_check",
"hypothesis.strategies.builds",
"hypothesis.strategies.fixed_dictionaries",
"hypothesis.strategies.integers",
"hypothesis.extra.numpy.arrays",
"hypothesis.strategies.just",
"collections.defaultdict",
"hypothesis.internal.validation.check_valid_bound",
"numpy.bro... | [((2688, 2721), 'hypothesis.internal.validation.check_valid_bound', 'check_valid_bound', (['min_size', 'name'], {}), '(min_size, name)\n', (2705, 2721), False, 'from hypothesis.internal.validation import check_type, check_valid_bound\n'), ((2726, 2759), 'hypothesis.internal.validation.check_valid_bound', 'check_valid_bound', (['max_size', 'name'], {}), '(max_size, name)\n', (2743, 2759), False, 'from hypothesis.internal.validation import check_type, check_valid_bound\n'), ((2764, 2808), 'hypothesis.extra.numpy.order_check', 'order_check', (['name', 'floor', 'min_size', 'max_size'], {}), '(name, floor, min_size, max_size)\n', (2775, 2808), False, 'from hypothesis.extra.numpy import arrays, order_check\n'), ((3503, 3548), 'hypothesis.internal.validation.check_type', 'check_type', (['int', 'default_val', '"""default value"""'], {}), "(int, default_val, 'default value')\n", (3513, 3548), False, 'from hypothesis.internal.validation import check_type, check_valid_bound\n'), ((5370, 5398), 'numpy.broadcast_to', 'np.broadcast_to', (['dtype', '(n,)'], {}), '(dtype, (n,))\n', (5385, 5398), True, 'import numpy as np\n'), ((5415, 5446), 'numpy.broadcast_to', 'np.broadcast_to', (['elements', '(n,)'], {}), '(elements, (n,))\n', (5430, 5446), True, 'import numpy as np\n'), ((5460, 5489), 'numpy.broadcast_to', 'np.broadcast_to', (['unique', '(n,)'], {}), '(unique, (n,))\n', (5475, 5489), True, 'import numpy as np\n'), ((12394, 12439), 'hypothesis.internal.validation.check_type', 'check_type', (['int', 'max_dims_extra', '"""extra dims"""'], {}), "(int, max_dims_extra, 'extra dims')\n", (12404, 12439), False, 'from hypothesis.internal.validation import check_type, check_valid_bound\n'), ((12444, 12505), 'hypothesis.extra.numpy.order_check', 'order_check', (['"""extra dims"""', '(0)', 'max_dims_extra', 'GLOBAL_DIMS_MAX'], {}), "('extra dims', 0, max_dims_extra, GLOBAL_DIMS_MAX)\n", (12455, 12505), False, 'from hypothesis.extra.numpy import arrays, order_check\n'), 
((13320, 13390), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': 'min_side[BCAST_DIM]', 'max_value': 'max_side[BCAST_DIM]'}), '(min_value=min_side[BCAST_DIM], max_value=max_side[BCAST_DIM])\n', (13328, 13390), False, 'from hypothesis.strategies import SearchStrategy, builds, composite, fixed_dictionaries, integers, just, tuples\n'), ((13411, 13468), 'hypothesis.extra.numpy.arrays', 'arrays', (['np.intp', '(max_dims_extra,)'], {'elements': 'bcast_dim_st'}), '(np.intp, (max_dims_extra,), elements=bcast_dim_st)\n', (13417, 13468), False, 'from hypothesis.extra.numpy import arrays, order_check\n'), ((13877, 13964), 'hypothesis.strategies.builds', 'builds', (['_append_bcast_dims', 'shapes_st', 'extra_dims_st', 'set_to_1_st', 'extra_per_arg_st'], {}), '(_append_bcast_dims, shapes_st, extra_dims_st, set_to_1_st,\n extra_per_arg_st)\n', (13883, 13964), False, 'from hypothesis.strategies import SearchStrategy, builds, composite, fixed_dictionaries, integers, just, tuples\n'), ((3621, 3657), 'collections.defaultdict', 'defaultdict', (['(lambda : default_val)', 'x'], {}), '(lambda : default_val, x)\n', (3632, 3657), False, 'from collections import defaultdict\n'), ((3735, 3771), 'hypothesis.internal.validation.check_type', 'check_type', (['int', 'x', '"""constant value"""'], {}), "(int, x, 'constant value')\n", (3745, 3771), False, 'from hypothesis.internal.validation import check_type, check_valid_bound\n'), ((3784, 3807), 'collections.defaultdict', 'defaultdict', (['(lambda : x)'], {}), '(lambda : x)\n', (3795, 3807), False, 'from collections import defaultdict\n'), ((7695, 7749), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': 'min_side[k]', 'max_value': 'max_side[k]'}), '(min_value=min_side[k], max_value=max_side[k])\n', (7703, 7749), False, 'from hypothesis.strategies import SearchStrategy, builds, composite, fixed_dictionaries, integers, just, tuples\n'), ((7902, 7932), 'hypothesis.strategies.fixed_dictionaries', 'fixed_dictionaries', 
(['dim_map_st'], {}), '(dim_map_st)\n', (7920, 7932), False, 'from hypothesis.strategies import SearchStrategy, builds, composite, fixed_dictionaries, integers, just, tuples\n'), ((7945, 7961), 'hypothesis.strategies.just', 'just', (['parsed_sig'], {}), '(parsed_sig)\n', (7949, 7961), False, 'from hypothesis.strategies import SearchStrategy, builds, composite, fixed_dictionaries, integers, just, tuples\n'), ((5724, 5762), 'hypothesis.extra.numpy.arrays', 'arrays', (['dd', 'ss'], {'elements': 'ee', 'unique': 'uu'}), '(dd, ss, elements=ee, unique=uu)\n', (5730, 5762), False, 'from hypothesis.extra.numpy import arrays, order_check\n'), ((13799, 13834), 'hypothesis.strategies.integers', 'integers', ([], {'min_value': '(0)', 'max_value': 'mm'}), '(min_value=0, max_value=mm)\n', (13807, 13834), False, 'from hypothesis.strategies import SearchStrategy, builds, composite, fixed_dictionaries, integers, just, tuples\n')] |
#!/usr/bin/python
#
# Author: <NAME> (<EMAIL>)
# Date: 19th May 2016
# Fusion Tables API:
# https://developers.google.com/resources/api-libraries/documentation/fusiontables/v2/python/latest/index.html
"""
Includes functions to integrate with Google Fusion Tables. The results and implementation is based on the API
provided by the Google Fusion Tables API:
https://developers.google.com/resources/api-libraries/documentation/fusiontables/v2/python/latest/index.html
"""
import csv
import os
import threading
import traceback
import logging
import numpy as np
from area import area
from lxml import etree
from googleapiclient.http import MediaIoBaseUpload
from geojson import FeatureCollection
from pykml.factory import KML_ElementMaker as KML
from colorker.security import CredentialManager
from colorker.settings import STORAGE
logger = logging.getLogger('worker')
def create_table(name, description, columns, data=None, share_with=None, admin=None, user_settings=None):
    """
    Creates a fusion table for the given data and returns the table id.
    :param str name: Name of the fusion table to create
    :param str description: Description of the table to be created
    :param columns: List of dictionaries having properties name and type
    :type columns: list(dict)
    :param data: List of dictionaries (optional)
    :type data: list(dict)
    :param share_with: Single email addreess string or a List of user email addresses (gmail only)
                        to share the created fusion table
    :type share_with: str or list(str)
    :param str admin: email address of the administrator who should have edit access to the created fusion table
    :param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.
                               If one is not provided, then this method must be invoked by an EngineThread
                               which defines the settings
    :rtype: str
    :return: the table id of the created fusion table
    """
    ft_service = CredentialManager.get_fusion_tables_service(user_settings)
    drive_service = CredentialManager.get_drive_service(user_settings)
    # converting column type to fusion table supported type
    for column in columns:
        column["type"] = str(column["type"]).upper()
        column["type"] = "NUMBER" if column["type"] in ["INTEGER", "FLOAT", "NUMBER"] \
            else "DATETIME" if column["type"] in ["TIMESTAMP", "DATETIME", "DATE"] \
            else "LOCATION" if column["type"] == "LOCATION" \
            else "STRING"
    body = dict(name=name, description=description, attribution="Created by Columbus Workflow Engine",
                attributionLink="http://www.columbus.cs.colostate.edu", columns=columns, isExportable=True)
    table = ft_service.table()
    result = table.insert(body=body).execute(num_retries=3)
    table_id = result["tableId"]
    logger.info("table created with id - " + table_id)
    permissions = drive_service.permissions()
    # give write access to the admin for all the created fusion tables
    if admin is not None:
        permissions.create(fileId=table_id, body={"emailAddress": admin, "type": "user", "role": "writer"},
                           sendNotificationEmail=False).execute(num_retries=3)
    # anyone with the link may read the table
    permissions.create(fileId=table_id,
                       body={"type": "anyone", "role": "reader", "allowFileDiscovery": False}).execute(num_retries=3)
    if share_with is not None:
        if isinstance(share_with, list):
            for user_email in share_with:
                if user_email.endswith("gmail.com"):
                    logger.info("setting drive permissions for user - " + user_email)
                    permissions.create(fileId=table_id,
                                       body={"emailAddress": user_email, "type": "user", "role": "reader"},
                                       sendNotificationEmail=False).execute(num_retries=3)
        if isinstance(share_with, str) and share_with.endswith("gmail.com"):
            logger.info("setting drive permissions for user - " + share_with)
            permissions.create(fileId=table_id,
                               body={"emailAddress": share_with, "type": "user", "role": "reader"},
                               sendNotificationEmail=False).execute(num_retries=3)
    if data is not None:
        # stage the rows in a temporary CSV file, then bulk-import it
        keys = [column["name"] for column in columns]
        if user_settings is None:
            user_settings = threading.current_thread().settings
        temp_dir_path = user_settings.get(STORAGE.TEMPORARY.LOCAL, None)
        if not os.path.exists(temp_dir_path):
            os.makedirs(temp_dir_path)
        filename = temp_dir_path + str(table_id) + ".csv"
        with open(filename, 'wb') as upload_file:
            dict_writer = csv.DictWriter(upload_file, keys)
            dict_writer.writeheader()
            dict_writer.writerows(data)
        logger.info("created temporary file for upload. making call to import rows.")
        # fix: the read handle was previously never closed (leaked file descriptor);
        # keep it open only for the duration of the import call
        with open(filename, 'rb') as upload_fd:
            media_body = MediaIoBaseUpload(fd=upload_fd, mimetype="application/octet-stream")
            result = table.importRows(tableId=table_id, media_body=media_body, startLine=1, isStrict=True,
                                      encoding="UTF-8", delimiter=",").execute(num_retries=3)
        logger.info("imported - " + str(result["numRowsReceived"]) + " rows")
    return table_id
def create_ft_from_ftc(name, description, ftc, parties=None, admin=None, user_settings=None):
    """
    Creates a fusion table from a GeoJSON-style FeatureCollection and returns the table id.

    Each feature's geometry is serialized to a KML string and stored in a special
    "x__geometry__x" LOCATION column so the table can be rendered by the Maps API.
    Returns None when ``ftc`` is not a FeatureCollection carrying a ``columns`` dict.

    :param str name: Name of the fusion table to create
    :param str description: Description of the table to be created
    :param ftc: feature collection with a "columns" dict mapping field name to type
    :type ftc: geojson.FeatureCollection
    :param parties: user email address(es) to share the created table with (gmail only)
    :type parties: str or list(str)
    :param str admin: email address of the administrator who should have edit access
    :param dict user_settings: optional, credentials settings; if omitted the caller
                               must be an EngineThread which defines the settings
    :rtype: str or None
    :return: the table id of the created fusion table, or None for unusable input
    """
    if isinstance(ftc, FeatureCollection) and ftc.get("columns", None) and isinstance(ftc["columns"], dict):
        # sort fields for a deterministic column order
        fields = sorted(ftc["columns"].keys())
        columns = [{"name": str(field), "type": str(ftc["columns"][field])} for field in fields]
        columns.append(
            {"name": "x__geometry__x", "type": "LOCATION"}) # special property to access fusion table from maps API
        data = []
        for feature in ftc["features"]:
            if feature["type"] == "Feature":
                ft_prop = feature["properties"]
                # translate each supported GeoJSON geometry into its KML counterpart;
                # coordinates are emitted as "lon,lat" pairs
                if feature["geometry"]["type"] == "Point":
                    point = feature["geometry"]["coordinates"]
                    location = KML.Point(KML.coordinates(str(point[0]) + "," + str(point[1])))
                    ft_prop["x__geometry__x"] = etree.tostring(location)
                elif feature["geometry"]["type"] == "MultiPoint":
                    multipoint = feature["geometry"]["coordinates"]
                    geometries = [KML.Point(KML.coordinates(str(point[0]) + "," + str(point[1]))) for point in
                                  multipoint]
                    location = KML.MultiGeometry()
                    for geometry in geometries:
                        location.append(geometry)
                    ft_prop["x__geometry__x"] = etree.tostring(location)
                elif feature["geometry"]["type"] == "Polygon":
                    # GeoJSON convention: ring 0 is the outer boundary, the rest are holes
                    polygon = feature["geometry"]["coordinates"]
                    location = KML.Polygon()
                    for index in range(len(polygon)):
                        if index == 0:
                            location.append(KML.outerBoundaryIs(KML.LinearRing(KML.coordinates(
                                " ".join([str(point[0]) + "," + str(point[1]) for point in polygon[index]])))))
                        else:
                            location.append(KML.innerBoundaryIs(KML.LinearRing(KML.coordinates(
                                " ".join([str(point[0]) + "," + str(point[1]) for point in polygon[index]])))))
                    ft_prop["x__geometry__x"] = etree.tostring(location)
                elif feature["geometry"]["type"] == "MultiPolygon":
                    multipolygon = feature["geometry"]["coordinates"]
                    location = KML.MultiGeometry()
                    for polygon in multipolygon:
                        kml = KML.Polygon()
                        for index in range(len(polygon)):
                            if index == 0:
                                kml.append(KML.outerBoundaryIs(KML.LinearRing(KML.coordinates(
                                    " ".join([str(point[0]) + "," + str(point[1]) for point in polygon[index]])))))
                            else:
                                kml.append(KML.innerBoundaryIs(KML.LinearRing(KML.coordinates(
                                    " ".join([str(point[0]) + "," + str(point[1]) for point in polygon[index]])))))
                        location.append(kml)
                    ft_prop["x__geometry__x"] = etree.tostring(location)
                elif feature["geometry"]["type"] == "LineString":
                    linestring = feature["geometry"]["coordinates"]
                    location = KML.LineString(
                        KML.coordinates(" ".join([str(point[0]) + "," + str(point[1]) for point in linestring])))
                    ft_prop["x__geometry__x"] = etree.tostring(location)
                elif feature["geometry"]["type"] == "MultiLineString":
                    multilinestring = feature["geometry"]["coordinates"]
                    location = KML.MultiGeometry()
                    for linestring in multilinestring:
                        location.append(KML.LineString(
                            KML.coordinates(" ".join([str(point[0]) + "," + str(point[1]) for point in linestring]))))
                    ft_prop["x__geometry__x"] = etree.tostring(location)
                # coerce unicode keys/values to plain byte strings (legacy Python 2 convention)
                str_prop = {}
                for key in ft_prop.keys():
                    str_prop[str(key) if isinstance(key, unicode) else key] = str(ft_prop[key]) if isinstance(
                        ft_prop[key], unicode) else ft_prop[key]
                data.append(str_prop)
        return create_table(name=name, description=description, columns=columns, data=data, share_with=parties,
                            admin=admin, user_settings=user_settings)
    return None
def delete_table(table_id, user_settings=None):
    """
    Deletes one or more fusion tables
    :param str table_id: identifier of the fusion table, or several identifiers
                         separated by commas
    :param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.
                               If one is not provided, then this method must be invoked by an EngineThread
                               which defines the settings
    :raises BaseException: Any exception resulting from this operation
    """
    try:
        # fix: build the service client once instead of once per table id
        ft_service = CredentialManager.get_fusion_tables_service(user_settings)
        table = ft_service.table()
        for key in str(table_id).split(','):
            table.delete(tableId=key).execute(num_retries=3)
    except BaseException:
        logger.error(traceback.format_exc())
        raise
def read_table(table_id, user_settings=None):
    """
    Reads a fusion table and returns its contents as a list of dictionaries,
    one per row, keyed by column name.
    :param str table_id: identifier of the fusion table
    :param dict user_settings: optional, A dictionary of settings specifying credentials for appropriate services.
                               If one is not provided, then this method must be invoked by an EngineThread
                               which defines the settings
    :raises BaseException: Any exception resulting from this operation
    """
    try:
        service = CredentialManager.get_fusion_tables_service(user_settings)
        response = service.query().sql(sql='SELECT * FROM ' + str(table_id), hdrs=False).execute(num_retries=3)
        headers = [str(column) for column in response['columns']]

        def _plain(cell):
            # coerce unicode cells to plain byte strings (legacy Python 2 convention)
            return str(cell) if isinstance(cell, unicode) else cell

        return [{headers[idx]: _plain(cell) for idx, cell in enumerate(row)}
                for row in response['rows']]
    except BaseException as e:
        logger.error(traceback.format_exc())
        raise e
def _largest_outer_boundary(feature):
    """Return the outer-boundary vertices (list of lon/lat dicts) of a Polygon,
    or of the largest-area member of a MultiPolygon; [] for any other type."""
    if feature["type"] == "Polygon":
        # GeoJSON convention: ring 0 is the outer boundary
        outer = feature["coordinates"][0]
    elif feature["type"] == "MultiPolygon":
        sizes = [area({"type": "Polygon", "coordinates": boundary})
                 for boundary in feature["coordinates"]]
        outer = feature["coordinates"][np.argmax(np.array(sizes))][0]
    else:
        return []
    return [dict(lon=vertex[0], lat=vertex[1]) for vertex in outer]


def get_polygons_from_ft(table_id, name_attr, geometry_attr, user_settings=None):
    """
    Reads a fusion table and returns, per row, the outer boundary of its
    (largest) polygon as a dict with keys ``name`` and ``geometry``.

    Only the first/largest polygon's outer boundary is extracted; rows whose
    geometry yields no polygon vertices are skipped.

    :param str table_id: identifier of the fusion table
    :param str name_attr: column holding the feature name
    :param str geometry_attr: column holding the GeoJSON geometry
    :param dict user_settings: optional credentials settings (see read_table)
    :rtype: list(dict)
    """
    rows = read_table(table_id=table_id, user_settings=user_settings)
    polygons = []
    for row in rows:
        feature = row[geometry_attr]
        if 'type' not in feature:
            feature = feature['geometry']
        if feature["type"] == "GeometryCollection":
            # pick the polygonal member with the largest area; non-polygonal
            # members score 0 and are only chosen if no polygon exists
            geometries = feature['geometries']
            scores = [area(g) if g["type"] in ["Polygon", "MultiPolygon"] else 0
                      for g in geometries]
            feature = geometries[np.argmax(np.array(scores))]
        vertices = _largest_outer_boundary(feature)
        if len(vertices) > 0:
            polygons.append(dict(name=row[name_attr], geometry=vertices))
    return polygons
| [
"logging.getLogger",
"os.path.exists",
"csv.DictWriter",
"traceback.format_exc",
"pykml.factory.KML_ElementMaker.MultiGeometry",
"threading.current_thread",
"os.makedirs",
"pykml.factory.KML_ElementMaker.Polygon",
"colorker.security.CredentialManager.get_fusion_tables_service",
"googleapiclient.ht... | [((844, 871), 'logging.getLogger', 'logging.getLogger', (['"""worker"""'], {}), "('worker')\n", (861, 871), False, 'import logging\n'), ((2041, 2099), 'colorker.security.CredentialManager.get_fusion_tables_service', 'CredentialManager.get_fusion_tables_service', (['user_settings'], {}), '(user_settings)\n', (2084, 2099), False, 'from colorker.security import CredentialManager\n'), ((2120, 2170), 'colorker.security.CredentialManager.get_drive_service', 'CredentialManager.get_drive_service', (['user_settings'], {}), '(user_settings)\n', (2155, 2170), False, 'from colorker.security import CredentialManager\n'), ((5075, 5143), 'googleapiclient.http.MediaIoBaseUpload', 'MediaIoBaseUpload', ([], {'fd': 'upload_fd', 'mimetype': '"""application/octet-stream"""'}), "(fd=upload_fd, mimetype='application/octet-stream')\n", (5092, 5143), False, 'from googleapiclient.http import MediaIoBaseUpload\n'), ((11397, 11455), 'colorker.security.CredentialManager.get_fusion_tables_service', 'CredentialManager.get_fusion_tables_service', (['user_settings'], {}), '(user_settings)\n', (11440, 11455), False, 'from colorker.security import CredentialManager\n'), ((4611, 4640), 'os.path.exists', 'os.path.exists', (['temp_dir_path'], {}), '(temp_dir_path)\n', (4625, 4640), False, 'import os\n'), ((4654, 4680), 'os.makedirs', 'os.makedirs', (['temp_dir_path'], {}), '(temp_dir_path)\n', (4665, 4680), False, 'import os\n'), ((4815, 4848), 'csv.DictWriter', 'csv.DictWriter', (['upload_file', 'keys'], {}), '(upload_file, keys)\n', (4829, 4848), False, 'import csv\n'), ((10573, 10631), 'colorker.security.CredentialManager.get_fusion_tables_service', 'CredentialManager.get_fusion_tables_service', (['user_settings'], {}), '(user_settings)\n', (10616, 10631), False, 'from colorker.security import CredentialManager\n'), ((4487, 4513), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (4511, 4513), False, 'import threading\n'), ((10784, 10806), 
'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10804, 10806), False, 'import traceback\n'), ((12023, 12045), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (12043, 12045), False, 'import traceback\n'), ((6341, 6365), 'lxml.etree.tostring', 'etree.tostring', (['location'], {}), '(location)\n', (6355, 6365), False, 'from lxml import etree\n'), ((12940, 12961), 'numpy.array', 'np.array', (['max_polygon'], {}), '(max_polygon)\n', (12948, 12961), True, 'import numpy as np\n'), ((6688, 6707), 'pykml.factory.KML_ElementMaker.MultiGeometry', 'KML.MultiGeometry', ([], {}), '()\n', (6705, 6707), True, 'from pykml.factory import KML_ElementMaker as KML\n'), ((6854, 6878), 'lxml.etree.tostring', 'etree.tostring', (['location'], {}), '(location)\n', (6868, 6878), False, 'from lxml import etree\n'), ((12858, 12908), 'area.area', 'area', (["{'type': 'Polygon', 'coordinates': boundary}"], {}), "({'type': 'Polygon', 'coordinates': boundary})\n", (12862, 12908), False, 'from area import area\n'), ((13460, 13481), 'numpy.array', 'np.array', (['max_polygon'], {}), '(max_polygon)\n', (13468, 13481), True, 'import numpy as np\n'), ((7038, 7051), 'pykml.factory.KML_ElementMaker.Polygon', 'KML.Polygon', ([], {}), '()\n', (7049, 7051), True, 'from pykml.factory import KML_ElementMaker as KML\n'), ((7639, 7663), 'lxml.etree.tostring', 'etree.tostring', (['location'], {}), '(location)\n', (7653, 7663), False, 'from lxml import etree\n'), ((7833, 7852), 'pykml.factory.KML_ElementMaker.MultiGeometry', 'KML.MultiGeometry', ([], {}), '()\n', (7850, 7852), True, 'from pykml.factory import KML_ElementMaker as KML\n'), ((8596, 8620), 'lxml.etree.tostring', 'etree.tostring', (['location'], {}), '(location)\n', (8610, 8620), False, 'from lxml import etree\n'), ((13350, 13364), 'area.area', 'area', (['geometry'], {}), '(geometry)\n', (13354, 13364), False, 'from area import area\n'), ((14018, 14039), 'numpy.array', 'np.array', (['max_polygon'], {}), 
'(max_polygon)\n', (14026, 14039), True, 'import numpy as np\n'), ((7932, 7945), 'pykml.factory.KML_ElementMaker.Polygon', 'KML.Polygon', ([], {}), '()\n', (7943, 7945), True, 'from pykml.factory import KML_ElementMaker as KML\n'), ((8964, 8988), 'lxml.etree.tostring', 'etree.tostring', (['location'], {}), '(location)\n', (8978, 8988), False, 'from lxml import etree\n'), ((13932, 13982), 'area.area', 'area', (["{'type': 'Polygon', 'coordinates': boundary}"], {}), "({'type': 'Polygon', 'coordinates': boundary})\n", (13936, 13982), False, 'from area import area\n'), ((9164, 9183), 'pykml.factory.KML_ElementMaker.MultiGeometry', 'KML.MultiGeometry', ([], {}), '()\n', (9181, 9183), True, 'from pykml.factory import KML_ElementMaker as KML\n'), ((9462, 9486), 'lxml.etree.tostring', 'etree.tostring', (['location'], {}), '(location)\n', (9476, 9486), False, 'from lxml import etree\n')] |
# general libraries
import warnings
import numpy as np
# image processing libraries
from scipy import ndimage
from skimage.transform import radon
from skimage.measure.fit import _dynamic_max_trials
from ..generic.test_tools import \
construct_phase_plane, cross_spectrum_to_coordinate_list
from ..generic.data_tools import gradient_descent, secant
from ..preprocessing.shadow_transforms import pca
from .matching_tools_frequency_filters import \
raised_cosine, thresh_masking, normalize_power_spectrum, \
make_fourier_grid
from .matching_tools_frequency_metrics import local_coherence
def phase_jac(Q, m, W=np.array([]),
              F1=np.array([]), F2=np.array([]), rank=2): # wip
    """
    Jacobian of the phase-plane residual with respect to the displacement.

    Parameters
    ----------
    Q : numpy.array, size=(_,_), dtype=complex
        cross spectrum, either as a square matrix or as a coordinate list
        with the complex value in the last column
    m : numpy.array, size=(2,1), dtype=float
        displacement estimate, in pixel coordinate system
    W : numpy.array, size=(m,n), dtype=float | boolean
        weigthing matrix, in a range of 0...1; defaults to all ones
    F1 : np,array, size=(m,n), dtype=integer
        coordinate of the first axis from the Fourier spectrum.
    F2 : np,array, size=(m,n), dtype=integer
        coordinate of the second axis from the Fourier spectrum
    rank : integer, optional
        exponent of the residual metric; rank=2 uses the closed-form
        cosine/sine expression, any other value goes through
        construct_phase_plane. The default is 2.

    Returns
    -------
    dQdm : numpy.array, size=(m*n,2)
        Jacobian of phase estimate, one row per spectrum sample

    """
    # metric system:         Fourier-based flip
    #        y               +------><------+
    #        ^               |              |
    #        |               |              |
    #        |               v              v
    # <------+-------> x
    #        |               ^              ^
    #        |               |              |
    #        v               +------><------+
    #
    # indexing   |           indexing    ^ y
    # system 'ij'|           system 'xy' |
    #            |                       |
    #            |       i               |       x
    #    --------+-------->      --------+-------->
    #            |                       |
    #            |                       |
    #            | j                     |
    #            v                       |
    assert type(Q)==np.ndarray, ("please provide an array")
    if Q.shape[0]==Q.shape[1]: # if Q is a cross-spectral matrix
        if W.size==0: # if W is not given
            W = np.ones((Q.shape[0], Q.shape[1]), dtype=float)
        if F1.size==0:
            F1,F2 = make_fourier_grid(Q, indexing='ij')
    else: # list format: columns are F1, F2, complex cross-spectrum value
        F1,F2 = Q[:,0], Q[:,1]
        Q = Q[:,-1]
        if W.size==0:
            W = np.ones_like(Q, dtype=float)
    if rank==2: # default: residual of Q against the phase plane exp(i(F1*m0+F2*m1))
        dXY = 1 - np.multiply(np.real(Q), +np.cos(F1*m[0]+F2*m[1])) \
            - np.multiply(np.imag(Q), -np.sin(F1*m[0]+F2*m[1]))
    else:
        C_hat = construct_phase_plane(Q, m[0], m[1], indexing='ij')
        QC = Q-C_hat # convert complex vector difference to metric
        dXY = np.abs(np.multiply(W, QC)**rank)
    # chain rule: weight the residual by the frequency coordinates
    dQdm = np.array([np.multiply(2*W.flatten()*F1.flatten(),dXY.flatten()), \
                     np.multiply(2*W.flatten()*F2.flatten(),dXY.flatten())]).T
    return dQdm
def phase_secant(data, W=None, x_0=None): # wip
    """get phase plane of cross-spectrum through secant

    find slope of the phase plane through secant method (or Newton's method)
    in multiple dimensions it is known as the Broyden's method.

    Parameters
    ----------
    data : numpy.array, size=(m,n), dtype=complex
        normalized cross spectrum
        or numpy.array, size=(m*n,3), dtype=complex
        coordinate list with complex cross-sprectum at last collumn
    W : numpy.array, size=(m,n), dtype=boolean, optional
        index of data that is correct
        or numpy.array, size=(m*n,1), dtype=boolean
        list with classification of correct data; defaults to an empty selection
    x_0 : numpy.array, size=(2,), dtype=float, optional
        initial displacement estimate, defaults to the origin

    Returns
    -------
    di,dj : float
        sub-pixel displacement

    See Also
    --------
    phase_gradient_descend

    References
    ----------
    .. [1] <NAME>. "A class of methods for solving nonlinear simultaneous
       equations" Mathematics and computation. vol.19(92) pp.577--593, 1965.

    Example
    -------
    >>> import numpy as np
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**5, max_range=1)
    >>> Q = phase_corr(im1, im2)
    >>> di,dj,_,_ = phase_secant(Q)

    >>> assert(np.isclose(ti, di, atol=.2))
    >>> assert(np.isclose(tj, dj, atol=.2))
    """
    assert type(data)==np.ndarray, ("please provide an array")
    # fix: the former np.array defaults were single module-level objects shared
    # by every call; the solver receives x_0 and may update it, so a fresh
    # array is created per call instead
    if W is None:
        W = np.array([])
    if x_0 is None:
        x_0 = np.zeros(2)
    data = cross_spectrum_to_coordinate_list(data, W)
    J = phase_jac(data, x_0)  # Jacobian at the initial estimate
    x_hat,_ = secant(data[:,:-1], data[:,-1], J, x_0, \
                     n_iters=10)
    # factor 2: same spectrum-to-pixel scaling as phase_lsq / phase_pca
    di,dj = 2*x_hat[0], 2*x_hat[1]
    return di,dj
def phase_gradient_descend(data, W=None, x_0=None): # wip
    """get phase plane of cross-spectrum through gradient descent

    find slope of the phase plane through iterative gradient descent

    Parameters
    ----------
    data : numpy.array, size=(m,n), dtype=complex
        normalized cross spectrum
    data : numpy.array, size=(m*n,3), dtype=complex
        coordinate list with complex cross-sprectum at last collumn
    W : numpy.array, size=(m,n), dtype=boolean, optional
        index of data that is correct
    W : numpy.array, size=(m*n,1), dtype=boolean
        list with classification of correct data; defaults to an empty selection
    x_0 : numpy.array, size=(2,), dtype=float, optional
        initial displacement estimate, defaults to the origin

    Returns
    -------
    di,dj : float
        sub-pixel displacement

    See Also
    --------
    phase_lsq

    Example
    -------
    >>> import numpy as np
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**5, max_range=1)
    >>> Q = phase_corr(im1, im2)
    >>> di,dj,_,_ = phase_gradient_descend(Q)

    >>> assert(np.isclose(ti, di, atol=.2))
    >>> assert(np.isclose(tj, dj, atol=.2))
    """
    assert type(data)==np.ndarray, ("please provide an array")
    # fix: the former np.array defaults were single module-level objects shared
    # by every call; create fresh arrays per call so an in-place update inside
    # the solver cannot corrupt later invocations
    if W is None:
        W = np.array([])
    if x_0 is None:
        x_0 = np.zeros(2)
    assert type(W)==np.ndarray, ("please provide an array")
    data = cross_spectrum_to_coordinate_list(data, W)
    x_hat,_ = gradient_descent(data[:,:-1], data[:,-1], x_0, \
                               learning_rate=1, n_iters=50)
    # note the axis swap when mapping the solution back to (di,dj)
    di,dj = x_hat[1], x_hat[0]
    return di,dj
def phase_tpss(Q, W, m, p=1e-4, l=4, j=5, n=3): #wip
    """get phase plane of cross-spectrum through two point step size iteration

    find slope of the phase plane through
    two point step size for phase correlation minimization

    Parameters
    ----------
    Q : numpy.array, size=(_,_), dtype=complex
        cross spectrum
    W : numpy.array, size=(_,_), dtype=float
        weighting matrix, updated between refinement rounds
    m : numpy.array, size=(2,1)
        initial displacement estimate
    p : float, default=1e-4
        closing error threshold
    l : integer, default=4
        number of refinements in iteration
    j : integer, default=5
        number of sub routines during an estimation
    n : integer, default=3
        mask convergence factor

    Returns
    -------
    m : numpy.array, size=(2,1)
        sub-pixel displacement (sign-flipped at the end)
    snr: float
        signal-to-noise ratio
        NOTE(review): currently hard-coded to 0; the original estimate is
        commented out below — confirm before relying on this value

    See Also
    --------
    phase_svd, phase_radon, phase_difference, phase_jac

    References
    ----------
    .. [1] Barzilai & Borwein. "Two-point step size gradient methods", IMA
       journal of numerical analysis. vol.8 pp.141--148, 1988.
    .. [2] Leprince, et al. "Automatic and precise orthorectification,
       coregistration, and subpixel correlation of satellite images,
       application to ground deformation measurements", IEEE Transactions on
       geoscience and remote sensing vol.45(6) pp.1529-1558, 2007.
    """
    m = np.squeeze(m)
    s = 1.  # step-size scaling (alternatives tried: 1.25, .5, 2.)
    Q = normalize_power_spectrum(Q)
    #W = W/np.sum(W) # normalize weights
    Fx,Fy = make_fourier_grid(Q)

    # initialize with a slightly perturbed copy so the first secant
    # differences dm, dg below are non-zero
    m_min = m.copy().ravel()
    m_min += np.array([-.1, -.1])

    J_min = phase_jac(Q, m_min, W=W)
    g_min = np.sum(J_min, axis=0)

    #print('di:{:+.4f}'.format(m[0])+' dj:{:+.4f}'.format(m[1]))
    for i in range(l):  # outer refinement rounds; W is re-weighted per round
        k = 1
        while True:  # inner Barzilai-Borwein iteration, at most j steps
            J = phase_jac(Q, m, W=W)
            g = np.sum(J, axis=0)

            # secant differences of the estimate and the gradient
            dm,dg = m - m_min, g - g_min
            #alpha = np.dot(dm,dg)/np.dot(dg,dg)
            alpha = np.dot(dm,dm)/(s*np.dot(dm,dg))  # BB step length

            if (np.all(np.abs(m - m_min)<=p)) or (k>=j):
                break

            # update the previous iterate before stepping
            m_min, g_min = np.copy(m), np.copy(g)
            #if i ==0:
            m -= alpha*dg
            #else:
            #    m -= alpha*dg
            print('di:{:+.4f}'.format(m[0])+' dj:{:+.4f}'.format(m[1]))
            k += 1

        # optimize weighting matrix: down-weight samples that deviate from
        # the current phase-plane model C
        #phi = np.abs(QC*np.conjugate(QC))/2
        C = 1j*-np.sin(Fx*m[1] + Fy*m[0])
        C += np.cos(Fx*m[1] + Fy*m[0])
        QC = (Q-C)**2 # np.abs(Q-C)#np.abs(Q-C)
        dXY = np.abs(np.multiply(W, QC))
        W = W*(1-(dXY/4))**n
    # alternative weight update and snr estimate kept for reference:
    #        phi = np.multiply(2*W,\
    #                          (1 - np.multiply(np.real(Q),
    #                                           +np.cos(Fx*m[1] + Fy*m[0])) - \
    #                           np.multiply(np.imag(Q),
    #                                       -np.sin(Fx*m[1] + Fy*m[0]))))
    #        W = np.multiply(W, (1-(phi/4))**n)
    #    snr = 1 - (np.sum(phi)/(4*np.sum(W)))
    snr = 0
    m = -1*m
    return (m, snr)
def phase_slope_1d(t, rad=.1):
    """ estimate the slope and intercept for one-dimensional signal

    Fits a line to the unwrapped phase angles of the central band of *t*
    via least squares.

    Parameters
    ----------
    t : numpy.array, size=(m,1), dtype=complex
        angle values.
    rad : float, range=(0.0,0.5)
        radial inclusion, seen from the center

    Returns
    -------
    x_hat : numpy.array, size=(2,1)
        estimated slope and intercept.

    See also
    --------
    phase_svd
    """
    assert type(t)==np.ndarray, ("please provide an array")
    # indices of the central band: fraction `rad` on either side of the middle
    n_tot = len(t)
    lo = np.ceil((0.5 - rad) * n_tot)
    hi = np.ceil((0.5 + rad) * n_tot) + 1
    band = np.arange(lo, hi).astype(int)
    # unwrap the phase so the fit is not disturbed by 2*pi jumps
    phase = np.unwrap(np.angle(t[band]), axis=0)
    # design matrix: [index-1, 1] columns for slope and intercept
    design = np.column_stack((band - 1, np.ones(len(band))))
    return np.linalg.lstsq(design, phase, rcond=None)[0]
def phase_svd(Q, W, rad=0.1):
    """get phase plane of cross-spectrum through single value decomposition

    find slope of the phase plane through
    single value decomposition

    Parameters
    ----------
    Q : numpy.array, size=(m,n), dtype=complex
        cross spectrum
    W : numpy.array, size=(m,n), dtype=float
        weigthing matrix
    rad : float, range=(0.0,0.5)
        radial inclusion, seen from the center

    Returns
    -------
    di,dj : float
        sub-pixel displacement; (0, 0) when the decomposition does not converge

    See Also
    --------
    phase_tpss, phase_radon, phase_difference

    References
    ----------
    .. [1] <NAME>. "A subspace identification extension to the phase
       correlation method", IEEE transactions on medical imaging, vol. 22(2)
       pp.277-280, 2003.

    Example
    -------
    >>> import numpy as np
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**5, max_range=1)
    >>> Q = phase_corr(im1, im2)
    >>> di,dj,_,_ = phase_svd(Q)

    >>> assert(np.isclose(ti, di, atol=.2))
    >>> assert(np.isclose(tj, dj, atol=.2))
    """
    assert type(Q)==np.ndarray, ("please provide an array")
    assert type(W)==np.ndarray, ("please provide an array")
    rad = np.minimum(rad, 0.5)
    (m,n) = Q.shape
    # center the zero frequency so the band selection in phase_slope_1d works
    Q,W = np.fft.fftshift(Q), np.fft.fftshift(W)

    # decompose axis
    n_elements = 1
    try:
        u,s,v = np.linalg.svd(W*Q) # singular-value decomposition
    except np.linalg.LinAlgError:
        # fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and genuine programming errors; only SVD non-convergence is expected
        return 0, 0

    sig = np.zeros((m,n))
    sig[:m,:m] = np.diag(s)
    sig = sig[:,:n_elements] # select first element only
    # v = v[:n_elements,:]
    # reconstruct
    # b = u.dot(sig.dot(v))

    # project onto the dominant singular vectors of each axis
    t_m = np.transpose(v).dot(sig)
    t_n = u.dot(sig)# transform

    d_n = phase_slope_1d(t_n, rad)
    d_m = phase_slope_1d(t_m, rad)

    # convert slope in radians-per-sample to a displacement in pixels
    di = -d_n[0][0]*n / (2*np.pi)
    dj = -d_m[0][0]*m / (2*np.pi)
    return di, dj
def phase_difference_1d(Q, W=np.array([]), axis=0):
    """get displacement from phase plane along one axis through differencing

    find slope of the phase plane through
    local difference of the phase angles

    Parameters
    ----------
    Q : numpy.array, size=(m,n), dtype=complex
        normalized cross spectrum
    W : numpy.array, size=(m,n), dtype=boolean
        weigthing matrix; when empty, a coherence-based selection of the
        best 10% of the samples is derived below
    axis : integer, default=0
        axis along which the displacement is estimated; axis=0 transposes
        Q first so the differencing below always runs along the rows

    Returns
    -------
    dj : float
        sub-pixel displacement

    See Also
    --------
    phase_tpss, phase_svd, phase_difference

    References
    ----------
    .. [1] <NAME>. "A fast and accurate frequency estimator", IEEE
       transactions on acoustics, speech and signal processing, vol.37(12)
       pp.1987-1990, 1989.
    """
    assert type(Q)==np.ndarray, ("please provide an array")
    assert type(W)==np.ndarray, ("please provide an array")
    if axis==0:
        Q = np.transpose(Q)
    m,n = Q.shape

    #estimate period
    # NOTE(review): np.roll with a tuple shift and no axis rolls the
    # *flattened* array by sum(shift); this differs from np.roll(Q, 1, axis=1)
    # only at the first column of each row — confirm which was intended
    Q_dj = np.roll(Q, (0,1))

    Q_diff = np.multiply(np.conj(Q),Q_dj)

    # phase difference of neighbouring samples, normalized to [-1, 1)
    Delta_dj = np.angle(Q_diff)/np.pi
    if W.size==0:
        # find coherent data
        Qn = normalize_power_spectrum(Q)
        C = local_coherence(Qn, ds=1)
        C = np.minimum(C, np.roll(C, (0,1)))
        W = C>np.quantile(C,0.9) # get the best 10%

    # median of the selected differences, scaled to a pixel displacement
    dj = np.median(Delta_dj[W])*(m//2)
    return dj
def phase_difference(Q, W=np.array([])):
    """get displacement from phase plane through neighbouring vector difference

    Estimates both displacement components by running the one-dimensional
    phase-difference estimator along each axis in turn.

    Parameters
    ----------
    Q : numpy.array, size=(m,n), dtype=complex
        normalized cross spectrum
    W : numpy.array, size=(m,n), dtype=boolean
        weigthing matrix

    Returns
    -------
    di,dj : float
        sub-pixel displacement

    See Also
    --------
    phase_tpss, phase_svd, phase_difference_1d

    References
    ----------
    .. [1] <NAME>. "A fast and accurate frequency estimator", IEEE
       transactions on acoustics, speech and signal processing, vol.37(12)
       pp.1987-1990, 1989.

    Example
    -------
    >>> import numpy as np
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**4, max_range=1)
    >>> Q = phase_corr(im1, im2)
    >>> di,dj,_,_ = phase_difference(Q)

    >>> assert(np.isclose(ti, di, atol=.2))
    >>> assert(np.isclose(tj, dj, atol=.2))
    """
    assert type(Q)==np.ndarray, ("please provide an array")
    assert type(W)==np.ndarray, ("please provide an array")
    # one estimate per axis: axis=0 yields the row shift, axis=1 the column shift
    shift_i = phase_difference_1d(Q, W, axis=0)
    shift_j = phase_difference_1d(Q, W, axis=1)
    return shift_i, shift_j
def phase_lsq(data, W=np.array([])):
    """get phase plane of cross-spectrum through least squares plane fitting

    find slope of the phase plane through least squares estimation

    Parameters
    ----------
    data : numpy.array, size=(m,n), dtype=complex
        normalized cross spectrum
        or numpy.array, size=(m*n,3), dtype=complex
        coordinate list with complex cross-sprectum at last
    W : numpy.array, size=(m,n), dtype=boolean
        index of data that is correct
        or numpy.array, size=(m*n,1), dtype=boolean
        list with classification of correct data

    Returns
    -------
    di,dj : float
        sub-pixel displacement

    See Also
    --------
    phase_pca, phase_ransac, phase_hough

    Example
    -------
    >>> import numpy as np
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**5, max_range=1)
    >>> Q = phase_corr(im1, im2)
    >>> di,dj,_,_ = phase_lsq(Q)

    >>> assert(np.isclose(ti, di, atol=.2))
    >>> assert(np.isclose(tj, dj, atol=.2))
    """
    assert type(data)==np.ndarray, ("please provide an array")
    assert type(W)==np.ndarray, ("please provide an array")
    data = cross_spectrum_to_coordinate_list(data, W)
    A,y = data[:,:-1], data[:,-1]
    # fix: the previous implementation formed the normal equations A^T A and
    # then built a pseudoinverse via another explicit inversion, squaring the
    # condition number twice. np.linalg.lstsq solves the same least-squares
    # problem directly and yields an identical solution for full-rank A.
    plane_normal = np.linalg.lstsq(A, y, rcond=None)[0]

    # factor 2: same spectrum-to-pixel scaling as phase_pca
    di = 2*plane_normal[0]
    dj = 2*plane_normal[1]
    return di, dj
def phase_pca(data, W=np.array([])):
    """get phase plane of cross-spectrum through principle component analysis

    The direction of smallest variance of the coordinate list is the normal
    of the phase plane; its tilt gives the displacement.

    Parameters
    ----------
    data : numpy.array, size=(m,n), dtype=complex
        normalized cross spectrum
        or numpy.array, size=(m*n,3), dtype=complex
        coordinate list with complex cross-sprectum at last
    W : numpy.array, size=(m,n), dtype=boolean
        index of data that is correct
        or numpy.array, size=(m*n,1), dtype=boolean
        list with classification of correct data

    Returns
    -------
    di,dj : float
        sub-pixel displacement

    See Also
    --------
    phase_lsq

    Example
    -------
    >>> import numpy as np
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**5, max_range=1)
    >>> Q = phase_corr(im1, im2)
    >>> di,dj,_,_ = phase_pca(Q)

    >>> assert(np.isclose(ti, di, atol=.2))
    >>> assert(np.isclose(tj, dj, atol=.2))
    """
    assert type(data)==np.ndarray, ("please provide an array")
    assert type(W)==np.ndarray, ("please provide an array")
    samples = cross_spectrum_to_coordinate_list(data, W)
    eigen_vecs, eigen_vals = pca(samples)
    # the eigenvector with the smallest eigenvalue is the plane normal
    normal = eigen_vecs[:, np.argmin(eigen_vals)]
    di = -2 * normal[0] / normal[-1]
    dj = -2 * normal[1] / normal[-1]
    return di, dj
# PCA is sensative to data contamination, see
# Hubert, Rousseeuw, <NAME>. 2008
# High-breakdown robust multivariate methods
def phase_weighted_pca(Q, W): #todo
    """get phase plane of cross-spectrum through principle component analysis

    find slope of the phase plane through
    principle component analysis

    Parameters
    ----------
    Q : numpy.array, size=(m,n), dtype=complex
        normalized cross spectrum
        or numpy.array, size=(m*n,3), dtype=complex
        coordinate list with complex cross-sprectum at last
    W : numpy.array, size=(m,n), dtype=float
        weighting matrix

    Returns
    -------
    di,dj : float
        sub-pixel displacement; (0, 0) when the eigen decomposition fails

    See Also
    --------
    phase_lsq

    Example
    -------
    >>> import numpy as np
    >>> from ..generic.test_tools import create_sample_image_pair

    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**5, max_range=1)
    >>> Q = phase_corr(im1, im2)
    >>> W = gaussian_mask(Q)
    >>> di,dj,_,_ = phase_weighted_pca(Q, W)

    >>> assert(np.isclose(ti, di, atol=.2))
    >>> assert(np.isclose(tj, dj, atol=.2))
    """
    assert type(Q)==np.ndarray, ('please provide an array')
    assert type(W)==np.ndarray, ('please provide an array')
    data = cross_spectrum_to_coordinate_list(Q)
    weights = W.flatten()
    # NOTE(review): the weights only rescale the covariance globally; a truly
    # weighted PCA would weight each sample's contribution — confirm intent
    covar = np.dot(data.T, data)
    covar /= np.dot(weights.T, weights)
    try:
        eigen_vals, eigen_vecs = np.linalg.eigh(covar)
    except np.linalg.LinAlgError:
        # fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and genuine programming errors; only non-convergence is expected here
        return 0, 0
    e3 = eigen_vecs[:,np.argmin(eigen_vals)] # normal vector of the phase plane
    di = (-2*e3[0]/e3[-1])
    dj = (-2*e3[1]/e3[-1])
    return di, dj
# from skimage.measure
def ransac(data, model_class, min_samples, residual_threshold,
           is_data_valid=None, is_model_valid=None,
           max_trials=100, stop_sample_num=np.inf, stop_residuals_sum=0,
           stop_probability=1, random_state=None, initial_inliers=None,
           params_bounds=0):
    """Fit a model to data with the RANSAC (random sample consensus) algorithm.
    RANSAC is an iterative algorithm for the robust estimation of parameters
    from a subset of inliers from the complete data set. Each iteration
    performs the following tasks:
    1. Select `min_samples` random samples from the original data and check
       whether the set of data is valid (see `is_data_valid`).
    2. Estimate a model to the random subset
       (`model_cls.estimate(*data[random_subset]`) and check whether the
       estimated model is valid (see `is_model_valid`).
    3. Classify all data as inliers or outliers by calculating the residuals
       to the estimated model (`model_cls.residuals(*data)`) - all data samples
       with residuals smaller than the `residual_threshold` are considered as
       inliers.
    4. Save estimated model as best model if number of inlier samples is
       maximal. In case the current estimated model has the same number of
       inliers, it is only considered as the best model if it has less sum of
       residuals.
    These steps are performed either a maximum number of times or until one of
    the special stop criteria are met. The final model is estimated using all
    inlier samples of the previously determined best model.

    Parameters
    ----------
    data : [list, tuple of] (N, ...) array
        Data set to which the model is fitted, where N is the number of data
        points and the remaining dimension are depending on model requirements.
        If the model class requires multiple input data arrays (e.g. source and
        destination coordinates of ``skimage.transform.AffineTransform``),
        they can be optionally passed as tuple or list. Note, that in this case
        the functions ``estimate(*data)``, ``residuals(*data)``,
        ``is_model_valid(model, *random_data)`` and
        ``is_data_valid(*random_data)`` must all take each data array as
        separate arguments.
    model_class : object
        Object with the following object methods:
        * ``success = estimate(*data)``
        * ``residuals(*data)``
        where `success` indicates whether the model estimation succeeded
        (`True` or `None` for success, `False` for failure).
    min_samples : int in range (0, N)
        The minimum number of data points to fit a model to.
    residual_threshold : float larger than 0
        Maximum distance for a data point to be classified as an inlier.
    is_data_valid : function, optional
        This function is called with the randomly selected data before the
        model is fitted to it: `is_data_valid(*random_data)`.
    is_model_valid : function, optional
        This function is called with the estimated model and the randomly
        selected data: `is_model_valid(model, *random_data)`, .
    max_trials : int, optional
        Maximum number of iterations for random sample selection.
    stop_sample_num : int, optional
        Stop iteration if at least this number of inliers are found.
    stop_residuals_sum : float, optional
        Stop iteration if sum of residuals is less than or equal to this
        threshold.
    stop_probability : float in range [0, 1], optional
        RANSAC iteration stops if at least one outlier-free set of the
        training data is sampled with ``probability >= stop_probability``,
        depending on the current best model's inlier ratio and the number
        of trials. This requires to generate at least N samples (trials):
            N >= log(1 - probability) / log(1 - e**m)
        where the probability (confidence) is typically set to a high value
        such as 0.99, e is the current fraction of inliers w.r.t. the
        total number of samples, and m is the min_samples value.
    random_state : {None, int, `numpy.random.Generator`}, optional
        If `random_state` is None the `numpy.random.Generator` singleton is
        used.
        If `random_state` is an int, a new ``Generator`` instance is used,
        seeded with `random_state`.
        If `random_state` is already a ``Generator`` instance then that
        instance is used.
    initial_inliers : array-like of bool, shape (N,), optional
        Initial samples selection for model estimation
    params_bounds : float, optional
        passed through to ``model_class.estimate`` as ``params_bound``;
        bound of the model's parameter search space.

    Returns
    -------
    model : object
        Best model with largest consensus set.
    inliers : (N, ) array
        Boolean mask of inliers classified as ``True``.

    References
    ----------
    .. [1] "RANSAC", Wikipedia, https://en.wikipedia.org/wiki/RANSAC

    Examples
    --------
    Generate ellipse data without tilt and add noise:
    >>> t = np.linspace(0, 2 * np.pi, 50)
    >>> xc, yc = 20, 30
    >>> a, b = 5, 10
    >>> x = xc + a * np.cos(t)
    >>> y = yc + b * np.sin(t)
    >>> data = np.column_stack([x, y])
    >>> rng = np.random.default_rng(203560)  # do not copy this value
    >>> data += rng.normal(size=data.shape)
    Add some faulty data:
    >>> data[0] = (100, 100)
    >>> data[1] = (110, 120)
    >>> data[2] = (120, 130)
    >>> data[3] = (140, 130)
    Estimate ellipse model using all available data:
    >>> model = EllipseModel()
    >>> model.estimate(data)
    True
    >>> np.round(model.params)  # doctest: +SKIP
    array([ 72.,  75.,  77.,  14.,   1.])
    Estimate ellipse model using RANSAC:
    >>> ransac_model, inliers = ransac(data, EllipseModel, 20, 3, max_trials=50)
    >>> abs(np.round(ransac_model.params))
    array([20., 30., 10.,  6.,  2.])
    >>> inliers  # doctest: +SKIP
    array([False, False, False, False,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True], dtype=bool)
    >>> sum(inliers) > 40
    True
    RANSAC can be used to robustly estimate a geometric transformation. In this section,
    we also show how to use a proportion of the total samples, rather than an absolute number.
    >>> from skimage.transform import SimilarityTransform
    >>> rng = np.random.default_rng()
    >>> src = 100 * rng.random((50, 2))
    >>> model0 = SimilarityTransform(scale=0.5, rotation=1,
    ...                              translation=(10, 20))
    >>> dst = model0(src)
    >>> dst[0] = (10000, 10000)
    >>> dst[1] = (-100, 100)
    >>> dst[2] = (50, 50)
    >>> ratio = 0.5  # use half of the samples
    >>> min_samples = int(ratio * len(src))
    >>> model, inliers = ransac((src, dst), SimilarityTransform, min_samples,
    ...                         10,
    ...                         initial_inliers=np.ones(len(src), dtype=bool))
    >>> inliers  # doctest: +SKIP
    array([False, False, False,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True,  True,  True,  True,  True,
            True,  True,  True,  True,  True])
    """
    best_model = None
    best_inlier_num = 0
    best_inlier_residuals_sum = np.inf
    best_inliers = None
    random_state = np.random.default_rng(random_state)

    # in case data is not a pair of input and output, make it a tuple
    if not isinstance(data, (tuple, list)):
        data = (data, )
    num_samples = len(data[0])

    if not (0 < min_samples < num_samples):
        raise ValueError("`min_samples` must be in range (0, <number-of-samples>)")
    if residual_threshold < 0:
        raise ValueError("`residual_threshold` must be greater than zero")
    if max_trials < 0:
        raise ValueError("`max_trials` must be greater than zero")
    if not (0 <= stop_probability <= 1):
        raise ValueError("`stop_probability` must be in range [0, 1]")
    if initial_inliers is not None and len(initial_inliers) != num_samples:
        raise ValueError("RANSAC received a vector of initial inliers (length %i)"
                         " that didn't match the number of samples (%i)."
                         " The vector of initial inliers should have the same length"
                         " as the number of samples and contain only True (this sample"
                         " is an initial inlier) and False (this one isn't) values."
                         % (len(initial_inliers), num_samples))

    # for the first run use initial guess of inliers
    spl_idxs = (initial_inliers if initial_inliers is not None
                else random_state.choice(num_samples, min_samples, replace=False))

    for num_trials in range(max_trials):
        # do sample selection according data pairs
        samples = [d[spl_idxs] for d in data]
        # for next iteration choose random sample set and be sure that
        # no samples repeat
        spl_idxs = random_state.choice(num_samples, min_samples, replace=False)

        # optional check if random sample set is valid
        if is_data_valid is not None and not is_data_valid(*samples):
            continue

        # estimate model for current random sample set; here ``estimate``
        # may return several candidate parameter sets (phase wrapping)
        sample_model = model_class()
        success = sample_model.estimate(*samples, params_bound=params_bounds)

        # loop through potential solutions
        for potential_param in np.arange(success):
            potential_model = model_class()
            potential_model.params = sample_model.params[potential_param][0:min_samples]

            # optional check if estimated model is valid
            # (bug fix: `is_model_valid` used to be accepted but ignored)
            if (is_model_valid is not None
                    and not is_model_valid(potential_model, *samples)):
                continue

            potential_model_residuals = np.abs(potential_model.residuals(*data))

            # consensus set / inliers
            potential_model_inliers = potential_model_residuals < \
                residual_threshold
            potential_model_residuals_sum = np.sum(potential_model_residuals**2)

            # choose as new best model if number of inliers is maximal
            potential_inlier_num = np.sum(potential_model_inliers)
            if (
                # more inliers
                potential_inlier_num > best_inlier_num
                # same number of inliers but less "error" in terms of residuals
                or (potential_inlier_num == best_inlier_num
                    and potential_model_residuals_sum < best_inlier_residuals_sum)
            ):
                best_model = potential_model
                best_inlier_num = potential_inlier_num
                best_inlier_residuals_sum = potential_model_residuals_sum
                best_inliers = potential_model_inliers

        dynamic_max_trials = _dynamic_max_trials(best_inlier_num,
                                                 num_samples,
                                                 min_samples,
                                                 stop_probability)
        if (best_inlier_num >= stop_sample_num
                or best_inlier_residuals_sum <= stop_residuals_sum
                or num_trials >= dynamic_max_trials):
            break

    # NOTE(review): unlike skimage's ransac, the best model is NOT
    # re-estimated on all inliers here -- confirm this is intentional
    if best_inliers is None or not np.any(best_inliers):
        best_model = None
        best_inliers = None
        # bug fix: was `warnings(...)` which calls the module object and
        # raised TypeError instead of emitting the warning
        warnings.warn("No inliers found. Model not fitted")
    return best_model, best_inliers
class BaseModel(object):
    """Minimal base class for RANSAC-compatible estimators.

    Subclasses populate ``params`` in their ``estimate`` methods;
    a fresh instance carries no estimate.
    """
    def __init__(self):
        # nothing estimated yet
        self.params = None
class PlaneModel(BaseModel):
    """Least squares estimator for phase plane.

    The functional model is a plane through the origin::

        z = x * dx + y * dy

    This estimator minimizes the squared distances from all points to the
    plane, independent of distance.
    """

    def estimate(self, data):
        """Estimate plane from data using least squares.

        Parameters
        ----------
        data : (N, 3) array
            N points with ``(x, y)`` coordinates of vector, respectively.

        Returns
        -------
        success : int
            number of candidate parameter sets stored (always 1)

        Raises
        ------
        ValueError
            when fewer than two points are supplied (under-determined)
        """
        if data.shape[0] < 2:  # under-determined
            raise ValueError('At least two vectors needed.')
        # well determined: ordinary least squares on the (x, y) columns
        slope = np.linalg.lstsq(data[:, 0:2], data[:, -1], rcond=None)[0]
        self.params = (slope[0], slope[1])
        return 1

    def residuals(self, data):
        """Determine residuals of data to model.

        For each point the shortest distance to the plane is returned.

        Parameters
        ----------
        data : (N, 3) array
            N points with x, y coordinates and z values, respectively.

        Returns
        -------
        residuals : (N, ) array
            Residual for each data point.
        """
        coeff = self.params
        fitted = data[:, 0]*coeff[0] + data[:, 1]*coeff[1]
        return np.abs(data[:, -1] - fitted)

    def predict_xy(self, xy, params=None):
        """Predict vector using the estimated heading.

        Parameters
        ----------
        xy : numpy.array
            x,y-coordinates.
        params : (2, ) array, optional
            Optional custom parameter set.

        Returns
        -------
        Q_hat : numpy.array
            Predicted plane height at x,y-coordinates.
        """
        coeff = self.params if params is None else params
        if xy.ndim < 2:
            # single coordinate pair
            return xy[0]*coeff[0] + xy[1]*coeff[1]
        # one row per coordinate pair
        return xy[:, 0]*coeff[0] + xy[:, 1]*coeff[1]
class SawtoothModel(BaseModel):
    """Least squares estimator for phase sawtooth.

    Like :class:`PlaneModel` but aware of phase wrapping: observations are
    only known modulo one cycle, so multiple integer cycle offsets are
    tried, each yielding a candidate parameter set.
    """
    def estimate(self, data, params_bound=0):
        """Estimate plane from data using least squares.

        Parameters
        ----------
        data : (N, 3) array
            N points with ``(x, y)`` coordinates and wrapped phase values.
        params_bound : float
            bound of the parameter space; when non-zero, all integer cycle
            offsets in [-params_bound, +params_bound] are added to the
            phase observations to account for wrapping

        Returns
        -------
        success : int
            number of candidate parameter sets stored in ``self.params``

        Raises
        ------
        ValueError
            when fewer than two points are supplied (under-determined)
        """
        if data.shape[0] < 2:  # under-determined
            raise ValueError('At least two vectors needed.')
        if params_bound != 0:
            # all integer combinations of cycle offsets for the two samples
            param_cycle = np.mgrid[-params_bound:+params_bound+1,
                                    -params_bound:+params_bound+1]
            # bug fix: was param_cycle[:,:,0]/[:,:,1] which mixed both
            # mgrid planes instead of flattening each plane separately
            cycle_0 = param_cycle[0].flatten()
            cycle_1 = param_cycle[1].flatten()
            x_stack = np.zeros((cycle_0.size, 2))
            # bug fix: was `for val,idx in enumerate(cycle_0)` which used
            # the cycle *value* (possibly negative) as an array index
            for idx in range(cycle_0.size):
                # unwrap the two phase observations by the candidate cycles
                # NOTE(review): assumes data has exactly two rows
                # (min_samples=2 in phase_ransac) -- confirm
                y = np.array([data[0,-1] + cycle_0[idx],
                              data[1,-1] + cycle_1[idx]])
                x_hat = np.linalg.lstsq(data[:,0:2], y, rcond=None)[0]
                x_stack[idx,0] = x_hat[0]
                x_stack[idx,1] = x_hat[1]
        else:
            x_hat = np.linalg.lstsq(data[:,0:2], data[:,-1], rcond=None)[0]
            # bug fix: keep a (1, 2) stack so tuple(map(tuple, ...)) below
            # does not fail on scalars, and exactly one candidate is reported
            x_stack = x_hat[np.newaxis, :]
        self.params = tuple(map(tuple, x_stack))
        return x_stack.shape[0]

    def residuals(self, data):
        """Determine residuals of data to model.

        For each point the shortest (wrapped) distance to the plane is
        returned.  NOTE(review): expects ``self.params`` to be a single flat
        ``(dx, dy)`` pair, as assigned by ``ransac`` -- not the stack of
        candidates produced by ``estimate``.

        Parameters
        ----------
        data : (N, 3) array
            N points with x, y coordinates and z values, respectively.

        Returns
        -------
        residuals : (N, ) array
            Residual for each data point.
        """
        x_hat = self.params
        Q_hat = data[:,0]*x_hat[0] + data[:,1]*x_hat[1]
        Q_hat = np.remainder(Q_hat+.5,1)-.5 # phase wrapping
        residuals = np.abs(data[:,-1] - Q_hat)
        return residuals

    def predict_xy(self, xy, params=None):
        """Predict vector using the estimated heading.

        Parameters
        ----------
        xy : numpy.array
            x,y-coordinates.
        params : (2, ) array, optional
            Optional custom parameter set.

        Returns
        -------
        Q_hat : numpy.array
            Predicted (wrapped) plane height at x,y-coordinates.
        """
        if params is None:
            params = self.params
        x_hat = params
        if xy.ndim<2:
            Q_hat = xy[0]*x_hat[0] + xy[1]*x_hat[1]
            Q_hat = np.remainder(Q_hat+.5,1)-.5
        else:
            Q_hat = xy[:,0]*x_hat[0] + xy[:,1]*x_hat[1]
            Q_hat = np.remainder(Q_hat+.5,1)-.5
        return Q_hat
def phase_ransac(data, max_displacement=0, precision_threshold=.05):
    """robustly fit plane using RANSAC algorithm
    find slope of the phase plane through
    random sampling and consensus
    Parameters
    ----------
    data : numpy.array, size=(m,n), dtype=complex
        normalized cross spectrum
        or numpy.array, size=(m*n,3), dtype=complex
        coordinate list with complex cross-sprectum at last
    max_displacement : integer, default=0
        bound of the displacement search space handed to the sawtooth
        model; when 0 and a square cross-spectral matrix is given, half
        the matrix size is used instead
    precision_threshold : float, default=.05
        residual threshold (in cycles) below which a sample counts as
        an inlier
    Returns
    -------
    di : float
        sub-pixel displacement along vertical axis
    dj : float
        sub-pixel displacement along horizontal axis
    See Also
    --------
    phase_lsq, phase_svd, phase_hough, phase_pca
    References
    ----------
    .. [1] <NAME>. "Random sample consensus: a paradigm for model
       fitting with applications to image analysis and automated cartography"
       Communications of the ACM vol.24(6) pp.381-395, 1981.
    .. [2] Tong et al. "A novel subpixel phase correlation method using
       singular value decomposition and unified random sample consensus" IEEE
       transactions on geoscience and remote sensing vol.53(8) pp.4143-4156,
       2015.
    Example
    -------
    >>> import numpy as np
    >>> from ..generic.test_tools import create_sample_image_pair
    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**5, max_range=1)
    >>> Q = phase_corr(im1, im2)
    >>> di,dj = phase_ransac(Q)
    >>> assert(np.isclose(ti, di, atol=.2))
    >>> assert(np.isclose(tj, dj, atol=.2))
    """
    assert type(data)==np.ndarray, ('please provide an array')
    # what type of data? either list of coordinates or a cross-spectral matrix
    if data.shape[0]==data.shape[1]:
        (m,n) = data.shape
        Fx,Fy = make_fourier_grid(np.zeros((m,n)))
        Fx,Fy = np.fft.fftshift(Fx), np.fft.fftshift(Fy)
        # wrapped phase of the cross-spectrum, in cycles (-.5 ... +.5)
        Q = np.fft.fftshift(np.angle(data) / (2*np.pi))
        if max_displacement==0:
            max_displacement = m//2
        # build coordinate list: (row frequency, column frequency, phase)
        data = np.vstack((Fy.flatten(),
                          Fx.flatten(),
                          Q.flatten() )).T
    ransac_model, inliers = ransac(data, SawtoothModel, #PlaneModel,
                                   min_samples=int(2),
                                   residual_threshold=precision_threshold,
                                   max_trials=int(1e3),
                                   params_bounds=max_displacement)
    # IN = np.reshape(inliers, (m,n)) # what data is within error bounds
    di = ransac_model.params[0]
    dj = ransac_model.params[1]
    return di, dj
def phase_hough(data, max_displacement=64, param_spacing=1, sample_fraction=1.,
                W=np.array([])):
    """robustly estimate the phase plane slope through a Hough transform
    every selected sample votes for the displacement parameters that best
    explain its wrapped phase; the voting-space cell with the strongest
    response gives the displacement estimate
    Parameters
    ----------
    data : numpy.array, size=(m,n), dtype=complex
        normalized cross spectrum
        or numpy.array, size=(m*n,3), dtype=float
        coordinate list with the wrapped phase at last
    max_displacement : integer, default=64
        half-extent of the displacement search space
        NOTE(review): 64 doubles as a sentinel replaced by half the matrix
        size, so an explicit 64 is silently overridden -- confirm intent
    param_spacing : float, default=1
        grid spacing of the voting space
    sample_fraction : float, default=1.
        fraction of the samples that is allowed to vote
    W : numpy.array, optional
        when given, the highest-weighted samples are selected instead of a
        random subset
        NOTE(review): mutable default argument; harmless here since W is
        only read, never mutated
    Returns
    -------
    di_hough, dj_hough : float
        displacement estimate along vertical and horizontal axis
    """
    assert type(data)==np.ndarray, ('please provide an array')
    # what type of data? either list of coordinates or a cross-spectral matrix
    if data.shape[0]==data.shape[1]:
        (m,n) = data.shape
        Fx,Fy = make_fourier_grid(np.zeros((m,n)))
        Fx,Fy = np.fft.fftshift(Fx), np.fft.fftshift(Fy)
        # wrapped phase of the cross-spectrum, in cycles (-.5 ... +.5)
        Q = np.fft.fftshift(np.angle(data) / (2*np.pi))
        if max_displacement==64:
            max_displacement = m//2
        # coordinate list: (column frequency, row frequency, phase)
        data = np.vstack((Fx.flatten(),
                          Fy.flatten(),
                          Q.flatten() )).T
    # create voting space
    (dj,di) = np.meshgrid(np.arange(-max_displacement, \
                                    +max_displacement + param_spacing, \
                                    param_spacing),
                          np.arange(-max_displacement, \
                                    +max_displacement + param_spacing, \
                                    param_spacing))
    # create population that can vote
    sample_size = data.shape[0]
    if sample_fraction>=1:
        # everybody votes
        idx = np.arange(0, sample_size)
    elif W.size==0: # sample random from collection
        idx = np.random.choice(sample_size,
                               np.round(sample_size*sample_fraction).astype(np.int32),
                               replace=False)
    else: # use weights to select sampling
        idx = np.flip(np.argsort(data[:,-1]))
        idx = idx[0:np.round(sample_size*sample_fraction).astype(np.int32)]
    #vote = np.zeros_like(di, dtype=np.int32)
    vote = np.zeros_like(di, dtype=np.float32)
    for counter in idx:
        # wrapped misfit of this sample for every candidate displacement
        angle_diff = data[counter,-1] - \
            (np.remainder((data[counter,0]*dj + data[counter,1]*di)+.5,1)-.5)
        vote += 1 / (1 + (angle_diff/(.1*np.std(angle_diff)))**2) # cauchy weighting
        # hard threshold
        # vote += (np.abs(angle_diff) <= .1).astype(np.int32)
    # the cell with the most votes is the displacement estimate
    (i_hough,j_hough) = np.unravel_index(np.argmax(vote), di.shape)
    di_hough,dj_hough = di[i_hough,j_hough], dj[i_hough,j_hough]
    return di_hough, dj_hough
def phase_radon(Q, coord_system='ij'):
    """get direction and magnitude from phase plane through Radon transform
    find slope of the phase plane through
    single value decomposition
    Parameters
    ----------
    Q : numpy.array, size=(m,n), dtype=complex
        cross spectrum; NOTE(review): modified in place (masked to a
        circular domain) -- callers reusing Q should pass a copy
    coord_system : {'ij', other}, default='ij'
        when 'ij' the displacement is converted to image coordinates
        (di, dj); for any other value the polar estimate (theta, rho)
        is returned directly
    Returns
    -------
    theta,rho : float
        magnitude and direction of displacement
        (or di, dj when ``coord_system=='ij'``)
    See Also
    --------
    phase_tpss, phase_svd, phase_difference
    References
    ----------
    .. [1] Balci & Foroosh. "Subpixel registration directly from the phase
       difference" EURASIP journal on advances in signal processing, pp.1-11,
       2006.
    Example
    -------
    >>> import numpy as np
    >>> from ..generic.test_tools import create_sample_image_pair
    >>> im1,im2,ti,tj,_ = create_sample_image_pair(d=2**5, max_range=1)
    >>> Q = phase_corr(im1, im2)
    >>> di,dj = phase_radon(Q)
    >>> assert(np.isclose(ti, di, atol=.2))
    >>> assert(np.isclose(tj, dj, atol=.2))
    """
    assert type(Q)==np.ndarray, ('please provide an array')
    (m, n) = Q.shape
    half = m // 2
    Q = np.fft.fftshift(Q)

    # estimate direction, through the radon transform
    W = np.fft.fftshift(raised_cosine(Q, beta=1e-5)).astype(bool)
    Q[~W] = 0 # make circular domain
    theta = np.linspace(0., 180., max(m,n), endpoint=False)
    R = radon(np.angle(Q), theta) # sinogram
    #plt.imshow(R[:half,:]), plt.show()
    #plt.imshow(np.flipud(R[half:,:])), plt.show()
    # fold the sinogram onto itself; the direction of the phase plane
    # gives the strongest combined response over all projection angles
    R_fold = np.abs(np.multiply(R[:half,:], R[half:,:]))
    radon_score = np.sum(R_fold, axis=0)
    score_idx = np.argmax(radon_score)
    theta = theta[score_idx]
    del R_fold, radon_score, score_idx
    # peaks can also be seen
    # plt.plot(R[:,score_idx]), plt.show()

    # estimate magnitude: rotate so the displacement lies along one axis,
    # then measure the phase ramp along that axis
    Q_rot = ndimage.rotate(Q, -theta,
                           axes=(1, 0), reshape=False, output=None,
                           order=3, mode='constant')
    rho = np.abs(phase_difference_1d(Q_rot, axis=1))

    if coord_system=='ij':
        # convert polar (theta, rho) to image displacements
        di,dj = np.sin(np.radians(theta))*rho, -np.cos(np.radians(theta))*rho
        return di, dj
    else: # do polar coordinates
        return theta, rho
#def phase_binairy_stripe():
# Notes
# -----
# [1] Zuo et al. "Registration method for infrared images under conditions
# of fixed-pattern noise", Optics communications, vol.285 pp.2293-2302,
# 2012.
# """
| [
"numpy.radians",
"numpy.random.default_rng",
"numpy.argsort",
"numpy.array",
"numpy.sin",
"scipy.ndimage.rotate",
"numpy.imag",
"numpy.arange",
"numpy.multiply",
"numpy.real",
"numpy.dot",
"numpy.linalg.lstsq",
"numpy.linalg.eigh",
"numpy.argmin",
"numpy.remainder",
"numpy.round",
"n... | [((622, 634), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (630, 634), True, 'import numpy as np\n'), ((653, 665), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (661, 665), True, 'import numpy as np\n'), ((670, 682), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (678, 682), True, 'import numpy as np\n'), ((3186, 3198), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3194, 3198), True, 'import numpy as np\n'), ((3204, 3215), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (3212, 3215), True, 'import numpy as np\n'), ((4836, 4848), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4844, 4848), True, 'import numpy as np\n'), ((4854, 4865), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4862, 4865), True, 'import numpy as np\n'), ((7638, 7651), 'numpy.squeeze', 'np.squeeze', (['m'], {}), '(m)\n', (7648, 7651), True, 'import numpy as np\n'), ((7852, 7874), 'numpy.array', 'np.array', (['[-0.1, -0.1]'], {}), '([-0.1, -0.1])\n', (7860, 7874), True, 'import numpy as np\n'), ((7923, 7944), 'numpy.sum', 'np.sum', (['J_min'], {'axis': '(0)'}), '(J_min, axis=0)\n', (7929, 7944), True, 'import numpy as np\n'), ((11415, 11435), 'numpy.minimum', 'np.minimum', (['rad', '(0.5)'], {}), '(rad, 0.5)\n', (11425, 11435), True, 'import numpy as np\n'), ((11663, 11679), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (11671, 11679), True, 'import numpy as np\n'), ((11696, 11706), 'numpy.diag', 'np.diag', (['s'], {}), '(s)\n', (11703, 11706), True, 'import numpy as np\n'), ((12092, 12104), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (12100, 12104), True, 'import numpy as np\n'), ((13026, 13044), 'numpy.roll', 'np.roll', (['Q', '(0, 1)'], {}), '(Q, (0, 1))\n', (13033, 13044), True, 'import numpy as np\n'), ((13431, 13443), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (13439, 13443), True, 'import numpy as np\n'), ((14742, 14754), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14750, 14754), True, 'import numpy as np\n'), 
((16330, 16342), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (16338, 16342), True, 'import numpy as np\n'), ((19179, 19199), 'numpy.dot', 'np.dot', (['data.T', 'data'], {}), '(data.T, data)\n', (19185, 19199), True, 'import numpy as np\n'), ((19213, 19239), 'numpy.dot', 'np.dot', (['weights.T', 'weights'], {}), '(weights.T, weights)\n', (19219, 19239), True, 'import numpy as np\n'), ((27221, 27256), 'numpy.random.default_rng', 'np.random.default_rng', (['random_state'], {}), '(random_state)\n', (27242, 27256), True, 'import numpy as np\n'), ((39519, 39531), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (39527, 39531), True, 'import numpy as np\n'), ((41079, 41114), 'numpy.zeros_like', 'np.zeros_like', (['di'], {'dtype': 'np.float32'}), '(di, dtype=np.float32)\n', (41092, 41114), True, 'import numpy as np\n'), ((42716, 42734), 'numpy.fft.fftshift', 'np.fft.fftshift', (['Q'], {}), '(Q)\n', (42731, 42734), True, 'import numpy as np\n'), ((43167, 43189), 'numpy.sum', 'np.sum', (['R_fold'], {'axis': '(0)'}), '(R_fold, axis=0)\n', (43173, 43189), True, 'import numpy as np\n'), ((43206, 43228), 'numpy.argmax', 'np.argmax', (['radon_score'], {}), '(radon_score)\n', (43215, 43228), True, 'import numpy as np\n'), ((43408, 43504), 'scipy.ndimage.rotate', 'ndimage.rotate', (['Q', '(-theta)'], {'axes': '(1, 0)', 'reshape': '(False)', 'output': 'None', 'order': '(3)', 'mode': '"""constant"""'}), "(Q, -theta, axes=(1, 0), reshape=False, output=None, order=3,\n mode='constant')\n", (43422, 43504), False, 'from scipy import ndimage\n'), ((8789, 8818), 'numpy.cos', 'np.cos', (['(Fx * m[1] + Fy * m[0])'], {}), '(Fx * m[1] + Fy * m[0])\n', (8795, 8818), True, 'import numpy as np\n'), ((9978, 9998), 'numpy.angle', 'np.angle', (['t[idx_sub]'], {}), '(t[idx_sub])\n', (9986, 9998), True, 'import numpy as np\n'), ((10091, 10128), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'y_ang'], {'rcond': 'None'}), '(A, y_ang, rcond=None)\n', (10106, 10128), True, 'import numpy as 
np\n'), ((11466, 11484), 'numpy.fft.fftshift', 'np.fft.fftshift', (['Q'], {}), '(Q)\n', (11481, 11484), True, 'import numpy as np\n'), ((11486, 11504), 'numpy.fft.fftshift', 'np.fft.fftshift', (['W'], {}), '(W)\n', (11501, 11504), True, 'import numpy as np\n'), ((11571, 11591), 'numpy.linalg.svd', 'np.linalg.svd', (['(W * Q)'], {}), '(W * Q)\n', (11584, 11591), True, 'import numpy as np\n'), ((12959, 12974), 'numpy.transpose', 'np.transpose', (['Q'], {}), '(Q)\n', (12971, 12974), True, 'import numpy as np\n'), ((13070, 13080), 'numpy.conj', 'np.conj', (['Q'], {}), '(Q)\n', (13077, 13080), True, 'import numpy as np\n'), ((13103, 13119), 'numpy.angle', 'np.angle', (['Q_diff'], {}), '(Q_diff)\n', (13111, 13119), True, 'import numpy as np\n'), ((13360, 13382), 'numpy.median', 'np.median', (['Delta_dj[W]'], {}), '(Delta_dj[W])\n', (13369, 13382), True, 'import numpy as np\n'), ((19316, 19337), 'numpy.linalg.eigh', 'np.linalg.eigh', (['covar'], {}), '(covar)\n', (19330, 19337), True, 'import numpy as np\n'), ((29324, 29342), 'numpy.arange', 'np.arange', (['success'], {}), '(success)\n', (29333, 29342), True, 'import numpy as np\n'), ((31158, 31204), 'warnings', 'warnings', (['"""No inliers found. Model not fitted"""'], {}), "('No inliers found. 
Model not fitted')\n", (31166, 31204), False, 'import warnings\n'), ((32809, 32836), 'numpy.abs', 'np.abs', (['(data[:, -1] - Q_hat)'], {}), '(data[:, -1] - Q_hat)\n', (32815, 32836), True, 'import numpy as np\n'), ((36091, 36118), 'numpy.abs', 'np.abs', (['(data[:, -1] - Q_hat)'], {}), '(data[:, -1] - Q_hat)\n', (36097, 36118), True, 'import numpy as np\n'), ((40151, 40229), 'numpy.arange', 'np.arange', (['(-max_displacement)', '(+max_displacement + param_spacing)', 'param_spacing'], {}), '(-max_displacement, +max_displacement + param_spacing, param_spacing)\n', (40160, 40229), True, 'import numpy as np\n'), ((40333, 40411), 'numpy.arange', 'np.arange', (['(-max_displacement)', '(+max_displacement + param_spacing)', 'param_spacing'], {}), '(-max_displacement, +max_displacement + param_spacing, param_spacing)\n', (40342, 40411), True, 'import numpy as np\n'), ((40601, 40626), 'numpy.arange', 'np.arange', (['(0)', 'sample_size'], {}), '(0, sample_size)\n', (40610, 40626), True, 'import numpy as np\n'), ((41475, 41490), 'numpy.argmax', 'np.argmax', (['vote'], {}), '(vote)\n', (41484, 41490), True, 'import numpy as np\n'), ((42968, 42979), 'numpy.angle', 'np.angle', (['Q'], {}), '(Q)\n', (42976, 42979), True, 'import numpy as np\n'), ((43112, 43149), 'numpy.multiply', 'np.multiply', (['R[:half, :]', 'R[half:, :]'], {}), '(R[:half, :], R[half:, :])\n', (43123, 43149), True, 'import numpy as np\n'), ((2365, 2411), 'numpy.ones', 'np.ones', (['(Q.shape[0], Q.shape[1])'], {'dtype': 'float'}), '((Q.shape[0], Q.shape[1]), dtype=float)\n', (2372, 2411), True, 'import numpy as np\n'), ((2604, 2632), 'numpy.ones_like', 'np.ones_like', (['Q'], {'dtype': 'float'}), '(Q, dtype=float)\n', (2616, 2632), True, 'import numpy as np\n'), ((8122, 8139), 'numpy.sum', 'np.sum', (['J'], {'axis': '(0)'}), '(J, axis=0)\n', (8128, 8139), True, 'import numpy as np\n'), ((8884, 8902), 'numpy.multiply', 'np.multiply', (['W', 'QC'], {}), '(W, QC)\n', (8895, 8902), True, 'import numpy as np\n'), 
((11847, 11862), 'numpy.transpose', 'np.transpose', (['v'], {}), '(v)\n', (11859, 11862), True, 'import numpy as np\n'), ((13278, 13296), 'numpy.roll', 'np.roll', (['C', '(0, 1)'], {}), '(C, (0, 1))\n', (13285, 13296), True, 'import numpy as np\n'), ((13312, 13331), 'numpy.quantile', 'np.quantile', (['C', '(0.9)'], {}), '(C, 0.9)\n', (13323, 13331), True, 'import numpy as np\n'), ((17622, 17643), 'numpy.argmin', 'np.argmin', (['eigen_vals'], {}), '(eigen_vals)\n', (17631, 17643), True, 'import numpy as np\n'), ((29745, 29783), 'numpy.sum', 'np.sum', (['(potential_model_residuals ** 2)'], {}), '(potential_model_residuals ** 2)\n', (29751, 29783), True, 'import numpy as np\n'), ((29889, 29920), 'numpy.sum', 'np.sum', (['potential_model_inliers'], {}), '(potential_model_inliers)\n', (29895, 29920), True, 'import numpy as np\n'), ((36026, 36054), 'numpy.remainder', 'np.remainder', (['(Q_hat + 0.5)', '(1)'], {}), '(Q_hat + 0.5, 1)\n', (36038, 36054), True, 'import numpy as np\n'), ((38620, 38636), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (38628, 38636), True, 'import numpy as np\n'), ((38653, 38672), 'numpy.fft.fftshift', 'np.fft.fftshift', (['Fx'], {}), '(Fx)\n', (38668, 38672), True, 'import numpy as np\n'), ((38674, 38693), 'numpy.fft.fftshift', 'np.fft.fftshift', (['Fy'], {}), '(Fy)\n', (38689, 38693), True, 'import numpy as np\n'), ((39775, 39791), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (39783, 39791), True, 'import numpy as np\n'), ((39808, 39827), 'numpy.fft.fftshift', 'np.fft.fftshift', (['Fx'], {}), '(Fx)\n', (39823, 39827), True, 'import numpy as np\n'), ((39829, 39848), 'numpy.fft.fftshift', 'np.fft.fftshift', (['Fy'], {}), '(Fy)\n', (39844, 39848), True, 'import numpy as np\n'), ((2756, 2766), 'numpy.imag', 'np.imag', (['Q'], {}), '(Q)\n', (2763, 2766), True, 'import numpy as np\n'), ((2960, 2978), 'numpy.multiply', 'np.multiply', (['W', 'QC'], {}), '(W, QC)\n', (2971, 2978), True, 'import numpy as np\n'), ((8277, 8291), 
'numpy.dot', 'np.dot', (['dm', 'dm'], {}), '(dm, dm)\n', (8283, 8291), True, 'import numpy as np\n'), ((8438, 8448), 'numpy.copy', 'np.copy', (['m'], {}), '(m)\n', (8445, 8448), True, 'import numpy as np\n'), ((8450, 8460), 'numpy.copy', 'np.copy', (['g'], {}), '(g)\n', (8457, 8460), True, 'import numpy as np\n'), ((8750, 8779), 'numpy.sin', 'np.sin', (['(Fx * m[1] + Fy * m[0])'], {}), '(Fx * m[1] + Fy * m[0])\n', (8756, 8779), True, 'import numpy as np\n'), ((10026, 10051), 'numpy.transpose', 'np.transpose', (['(idx_sub - 1)'], {}), '(idx_sub - 1)\n', (10038, 10051), True, 'import numpy as np\n'), ((19364, 19385), 'numpy.argmin', 'np.argmin', (['eigen_vals'], {}), '(eigen_vals)\n', (19373, 19385), True, 'import numpy as np\n'), ((30528, 30613), 'skimage.measure.fit._dynamic_max_trials', '_dynamic_max_trials', (['best_inlier_num', 'num_samples', 'min_samples', 'stop_probability'], {}), '(best_inlier_num, num_samples, min_samples, stop_probability\n )\n', (30547, 30613), False, 'from skimage.measure.fit import _dynamic_max_trials\n'), ((32040, 32094), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['data[:, 0:2]', 'data[:, -1]'], {'rcond': 'None'}), '(data[:, 0:2], data[:, -1], rcond=None)\n', (32055, 32094), True, 'import numpy as np\n'), ((34483, 34510), 'numpy.zeros', 'np.zeros', (['(cycle_0.size, 2)'], {}), '((cycle_0.size, 2))\n', (34491, 34510), True, 'import numpy as np\n'), ((36721, 36749), 'numpy.remainder', 'np.remainder', (['(Q_hat + 0.5)', '(1)'], {}), '(Q_hat + 0.5, 1)\n', (36733, 36749), True, 'import numpy as np\n'), ((36839, 36867), 'numpy.remainder', 'np.remainder', (['(Q_hat + 0.5)', '(1)'], {}), '(Q_hat + 0.5, 1)\n', (36851, 36867), True, 'import numpy as np\n'), ((38722, 38736), 'numpy.angle', 'np.angle', (['data'], {}), '(data)\n', (38730, 38736), True, 'import numpy as np\n'), ((39877, 39891), 'numpy.angle', 'np.angle', (['data'], {}), '(data)\n', (39885, 39891), True, 'import numpy as np\n'), ((40921, 40944), 'numpy.argsort', 'np.argsort', 
(['data[:, -1]'], {}), '(data[:, -1])\n', (40931, 40944), True, 'import numpy as np\n'), ((41194, 41262), 'numpy.remainder', 'np.remainder', (['(data[counter, 0] * dj + data[counter, 1] * di + 0.5)', '(1)'], {}), '(data[counter, 0] * dj + data[counter, 1] * di + 0.5, 1)\n', (41206, 41262), True, 'import numpy as np\n'), ((2690, 2700), 'numpy.real', 'np.real', (['Q'], {}), '(Q)\n', (2697, 2700), True, 'import numpy as np\n'), ((2769, 2798), 'numpy.sin', 'np.sin', (['(F1 * m[0] + F2 * m[1])'], {}), '(F1 * m[0] + F2 * m[1])\n', (2775, 2798), True, 'import numpy as np\n'), ((8294, 8308), 'numpy.dot', 'np.dot', (['dm', 'dg'], {}), '(dm, dg)\n', (8300, 8308), True, 'import numpy as np\n'), ((34587, 34653), 'numpy.array', 'np.array', (['[data[0, -1] + cycle_0[idx], data[1, -1] + cycle_1[idx]]'], {}), '([data[0, -1] + cycle_0[idx], data[1, -1] + cycle_1[idx]])\n', (34595, 34653), True, 'import numpy as np\n'), ((35285, 35339), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['data[:, 0:2]', 'data[:, -1]'], {'rcond': 'None'}), '(data[:, 0:2], data[:, -1], rcond=None)\n', (35300, 35339), True, 'import numpy as np\n'), ((43662, 43679), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (43672, 43679), True, 'import numpy as np\n'), ((2703, 2732), 'numpy.cos', 'np.cos', (['(F1 * m[0] + F2 * m[1])'], {}), '(F1 * m[0] + F2 * m[1])\n', (2709, 2732), True, 'import numpy as np\n'), ((8333, 8350), 'numpy.abs', 'np.abs', (['(m - m_min)'], {}), '(m - m_min)\n', (8339, 8350), True, 'import numpy as np\n'), ((34716, 34760), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['data[:, 0:2]', 'y'], {'rcond': 'None'}), '(data[:, 0:2], y, rcond=None)\n', (34731, 34760), True, 'import numpy as np\n'), ((40754, 40793), 'numpy.round', 'np.round', (['(sample_size * sample_fraction)'], {}), '(sample_size * sample_fraction)\n', (40762, 40793), True, 'import numpy as np\n'), ((43694, 43711), 'numpy.radians', 'np.radians', (['theta'], {}), '(theta)\n', (43704, 43711), True, 'import numpy as np\n'), 
((40965, 41004), 'numpy.round', 'np.round', (['(sample_size * sample_fraction)'], {}), '(sample_size * sample_fraction)\n', (40973, 41004), True, 'import numpy as np\n'), ((41301, 41319), 'numpy.std', 'np.std', (['angle_diff'], {}), '(angle_diff)\n', (41307, 41319), True, 'import numpy as np\n')] |
"""
Plots mooring records.
Specific to a request from <NAME>, 2020_11.
"""
# setup
import netCDF4 as nc
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.dates as mdates
from datetime import datetime
import os; import sys
sys.path.append(os.path.abspath('../alpha'))
import Lfun
import zfun
Ldir = Lfun.Lstart()
indir0 = Ldir['LOo'] + 'moor/'
if False:
# choose the mooring extraction to plot
item = Lfun.choose_item(indir0)
indir = indir0 + item + '/'
infile = Lfun.choose_item(indir, tag='.nc')
else:
mname = 'cas6_v3_lo8b_2019.05.01_2019.06.30'
fname = 'Lahr1_hourly.nc'
fn = indir0 + mname + '/' + fname
ds = nc.Dataset(fn)
# time
ot_vec = ds['ocean_time'][:].data
mdate_vec = Lfun.modtime_to_mdate_vec(ot_vec)
mdt = mdates.num2date(mdate_vec) # list of datetimes of data
# space
zr = ds['z_rho'][:]
Zr = zr.mean(axis=0)
# NOTE: velocities are packed (t,z)
# so we transpose when plotting using pcolormesh
u = ds['u'][:]
v = ds['v'][:]
if True:
# Godin filter
ulp = zfun.filt_godin_mat(u)
vlp = zfun.filt_godin_mat(v)
scl = .2
else:
# Shorter time Hanning Filter
ulp = zfun.filt_hanning_mat(u, n=6)
vlp = zfun.filt_hanning_mat(v, n=6)
scl = .1
up = u - ulp
vp = v - vlp
# rotate
Up = up[:,0]
Vp = vp[:,0]
th = 0.5 * np.arctan2(2*np.nanmean(Up*Vp),(np.nanvar(Up)-np.nanvar(Vp)))
cth = np.cos(th)
sth = np.sin(th)
urp = cth*up + sth*vp
vrp = cth*vp - sth*up
# plotting
plt.close('all')
fs=14
plt.rc('font', size=fs)
fig = plt.figure(figsize=(18,10))
cmap = 'coolwarm'
dt0 = datetime(2019,5,26)
dt1 = datetime(2019,6,5)
ax = fig.add_subplot(211)
cs = ax.pcolormesh(mdt,Zr, urp.T, cmap=cmap, vmin = -scl, vmax=scl)
fig.colorbar(cs, ax=ax)
ax.set_xlim(dt0,dt1)
ax.grid(True)
#
ax.text(.05,.1,r'High-Passed Velocity [$m\ s^{-1}$] along $%d^{\circ}$ ($0^{\circ}=East$)' % (np.rad2deg(th)),
transform=ax.transAxes)
#
ax.set_xticklabels([])
ax.set_ylabel('Z [m]')
ax.set_title(mname + ' ' + fname)
ax = fig.add_subplot(212)
cs = ax.pcolormesh(mdt,Zr, vrp.T, cmap=cmap, vmin = -scl, vmax=scl)
fig.colorbar(cs, ax=ax)
ax.set_xlim(dt0,dt1)
ax.grid(True)
#
ax.text(.05,.1,r'High-Passed Velocity [$m\ s^{-1}$] normal to $%d^{\circ}$' % (np.rad2deg(th)),
transform=ax.transAxes)
#
ax.xaxis.set_major_locator(mdates.DayLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y.%m.%d'))
ax.xaxis.set_tick_params(labelrotation=25)
ax.set_xlabel('Date [UTC]')
ax.set_ylabel('Z [m]')
plt.show()
plt.rcdefaults()
| [
"numpy.nanmean",
"matplotlib.dates.DayLocator",
"numpy.sin",
"datetime.datetime",
"netCDF4.Dataset",
"matplotlib.pyplot.close",
"numpy.rad2deg",
"matplotlib.dates.num2date",
"Lfun.modtime_to_mdate_vec",
"matplotlib.dates.DateFormatter",
"zfun.filt_hanning_mat",
"numpy.cos",
"matplotlib.pyplo... | [((323, 336), 'Lfun.Lstart', 'Lfun.Lstart', ([], {}), '()\n', (334, 336), False, 'import Lfun\n'), ((664, 678), 'netCDF4.Dataset', 'nc.Dataset', (['fn'], {}), '(fn)\n', (674, 678), True, 'import netCDF4 as nc\n'), ((733, 766), 'Lfun.modtime_to_mdate_vec', 'Lfun.modtime_to_mdate_vec', (['ot_vec'], {}), '(ot_vec)\n', (758, 766), False, 'import Lfun\n'), ((773, 799), 'matplotlib.dates.num2date', 'mdates.num2date', (['mdate_vec'], {}), '(mdate_vec)\n', (788, 799), True, 'import matplotlib.dates as mdates\n'), ((1383, 1393), 'numpy.cos', 'np.cos', (['th'], {}), '(th)\n', (1389, 1393), True, 'import numpy as np\n'), ((1400, 1410), 'numpy.sin', 'np.sin', (['th'], {}), '(th)\n', (1406, 1410), True, 'import numpy as np\n'), ((1467, 1483), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (1476, 1483), True, 'import matplotlib.pyplot as plt\n'), ((1490, 1513), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'fs'}), "('font', size=fs)\n", (1496, 1513), True, 'import matplotlib.pyplot as plt\n'), ((1520, 1548), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(18, 10)'}), '(figsize=(18, 10))\n', (1530, 1548), True, 'import matplotlib.pyplot as plt\n'), ((1574, 1595), 'datetime.datetime', 'datetime', (['(2019)', '(5)', '(26)'], {}), '(2019, 5, 26)\n', (1582, 1595), False, 'from datetime import datetime\n'), ((1600, 1620), 'datetime.datetime', 'datetime', (['(2019)', '(6)', '(5)'], {}), '(2019, 6, 5)\n', (1608, 1620), False, 'from datetime import datetime\n'), ((2484, 2494), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2492, 2494), True, 'import matplotlib.pyplot as plt\n'), ((2495, 2511), 'matplotlib.pyplot.rcdefaults', 'plt.rcdefaults', ([], {}), '()\n', (2509, 2511), True, 'import matplotlib.pyplot as plt\n'), ((260, 287), 'os.path.abspath', 'os.path.abspath', (['"""../alpha"""'], {}), "('../alpha')\n", (275, 287), False, 'import os\n'), ((434, 458), 'Lfun.choose_item', 'Lfun.choose_item', 
(['indir0'], {}), '(indir0)\n', (450, 458), False, 'import Lfun\n'), ((504, 538), 'Lfun.choose_item', 'Lfun.choose_item', (['indir'], {'tag': '""".nc"""'}), "(indir, tag='.nc')\n", (520, 538), False, 'import Lfun\n'), ((1034, 1056), 'zfun.filt_godin_mat', 'zfun.filt_godin_mat', (['u'], {}), '(u)\n', (1053, 1056), False, 'import zfun\n'), ((1067, 1089), 'zfun.filt_godin_mat', 'zfun.filt_godin_mat', (['v'], {}), '(v)\n', (1086, 1089), False, 'import zfun\n'), ((1153, 1182), 'zfun.filt_hanning_mat', 'zfun.filt_hanning_mat', (['u'], {'n': '(6)'}), '(u, n=6)\n', (1174, 1182), False, 'import zfun\n'), ((1193, 1222), 'zfun.filt_hanning_mat', 'zfun.filt_hanning_mat', (['v'], {'n': '(6)'}), '(v, n=6)\n', (1214, 1222), False, 'import zfun\n'), ((2305, 2324), 'matplotlib.dates.DayLocator', 'mdates.DayLocator', ([], {}), '()\n', (2322, 2324), True, 'import matplotlib.dates as mdates\n'), ((2355, 2387), 'matplotlib.dates.DateFormatter', 'mdates.DateFormatter', (['"""%Y.%m.%d"""'], {}), "('%Y.%m.%d')\n", (2375, 2387), True, 'import matplotlib.dates as mdates\n'), ((1869, 1883), 'numpy.rad2deg', 'np.rad2deg', (['th'], {}), '(th)\n', (1879, 1883), True, 'import numpy as np\n'), ((2231, 2245), 'numpy.rad2deg', 'np.rad2deg', (['th'], {}), '(th)\n', (2241, 2245), True, 'import numpy as np\n'), ((1328, 1347), 'numpy.nanmean', 'np.nanmean', (['(Up * Vp)'], {}), '(Up * Vp)\n', (1338, 1347), True, 'import numpy as np\n'), ((1347, 1360), 'numpy.nanvar', 'np.nanvar', (['Up'], {}), '(Up)\n', (1356, 1360), True, 'import numpy as np\n'), ((1361, 1374), 'numpy.nanvar', 'np.nanvar', (['Vp'], {}), '(Vp)\n', (1370, 1374), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Filename: field.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import numpy as np
import fitsio
import scipy.optimize as optimize
import matplotlib.pyplot as plt
import ortools.sat.python.cp_model as cp_model
import kaiju
import kaiju.robotGrid
# import observesim.robot as robot
__all__ = ['Design']
"""Design module class.
Dependencies:
numpy
fitsio
matplotlib
roboscheduler
kaiju
"""
# Record layout used when exporting/importing target lists as FITS tables.
# np.str_ replaces the np.unicode_ alias (removed in NumPy 2.0); both
# describe fixed-width 30-character unicode ('U30') fields.
_target_array_dtype = np.dtype([('ra', np.float64),
                                ('dec', np.float64),
                                ('catalogid', np.int64),
                                ('category', np.str_, 30),
                                ('program', np.str_, 30),
                                ('fiberType', np.str_, 30),
                                ('priority', np.int32),
                                ('within', np.int32)])

# Positioner arm lengths (mm); used only for drawing robots in plot().
alphaLen = 7.4
betaLen = 15
class DesignBase(object):
    """Design base class
    Parameters:
    ----------
    racen : np.float64
        boresight RA, J2000 deg
    deccen : np.float64
        boresight Dec, J2000 deg
    pa : np.float32
        position angle of field (deg E of N)
    observatory : str
        observatory field observed from, 'apo' or 'lco' (default 'apo')
    Attributes:
    ----------
    racen : np.float64
        boresight RA, J2000 deg
    deccen : np.float64
        boresight Dec, J2000 deg
    pa : np.float32
        position angle of field (deg E of N)
    observatory : str
        observatory field observed from ('apo' or 'lco')
    robotgrid : RobotGrid object
        instance of RobotGrid
    xVert : ndarray of np.float32
        x positions of vertices of hexagon bounding the field (mm)
    yVert : ndarray of np.float32
        y positions of vertices of hexagon bounding the field (mm)
    raVert : ndarray of np.float32
        RA positions of vertices of hexagon bounding the field (deg J2000)
    decVert : ndarray of np.float32
        Dec positions of vertices of hexagon bounding the field (deg J2000)
    ntarget : int or np.int32
        number of targets
    target_array : ndarray
        ndarray with target info, exact format varies
    target_ra : ndarray of np.float64
        RA of targets, J2000 deg
    target_dec : ndarray of np.float64
        Dec of targets, J2000 deg
    target_x : ndarray of np.float64
        x positions of targets, mm
    target_y : ndarray of np.float64
        y positions of targets, mm
    target_within : ndarray of np.int32
        1 if target is within the robot hexagon, 0 otherwise
    target_priority : ndarray of np.int32
        priorities of targets (lower is considered first)
    target_program : ndarray of strings
        program of targets
    target_category : ndarray of strings
        category of targets ('APOGEE_SKY', 'APOGEE_STANDARD',
        'BOSS_SKY', 'BOSS_STANDARD', 'SCIENCE')
    target_catalogid : ndarray of np.int64
        unique catalogid for each target
    target_assigned : ndarray of np.int32
        (ntarget) array of 0 or 1, indicating whether target is assigned
    target_assignments : ndarray of np.int32
        (ntarget) array of positionerid for each target
    assignment : ndarray of np.int32
        (npositioner) array of catalogid for each positioner
    """
    def __init__(self, racen=None, deccen=None, pa=0.,
                 observatory='apo'):
        self.stepSize = 1  # for kaiju
        self.collisionBuffer = 2.0  # for kaiju
        self.robotgrid = self._robotGrid()
        # Two-way maps between kaiju's robotID and a dense 0-based index,
        # used to index the assignments array.
        self.robotID2indx = dict()
        self.indx2RobotID = dict()
        for i, k in enumerate(self.robotgrid.robotDict):
            self.robotID2indx[k] = i
            self.indx2RobotID[i] = k
        self.racen = racen
        self.deccen = deccen
        self.pa = pa  # assume deg E of N
        self.observatory = observatory
        # Only compute field vertices once a boresight has been given
        # (identity check; the original compared with "!= None" and "&").
        if((self.racen is not None) and
           (self.deccen is not None)):
            self.set_vertices()
        self.assignments = None
        self.target_assigned = None
        self.target_assignments = None
        self.target_incadence = None
        self.nsky_apogee = 20
        self.nstandard_apogee = 20
        self.nsky_boss = 50
        self.nstandard_boss = 20
        return

    def _arrayify(self, quantity=None, dtype=np.float64):
        """Cast quantity as ndarray of numpy.float64"""
        try:
            length = len(quantity)
        except TypeError:
            length = 1
        return np.zeros(length, dtype=dtype) + quantity

    def _robotGrid(self):
        """Return a RobotGrid instance, with all robots folded (beta=180)"""
        rg = kaiju.robotGrid.RobotGridFilledHex(collisionBuffer=self.collisionBuffer)
        for k in rg.robotDict.keys():
            rg.robotDict[k].setAlphaBeta(0., 180.)
        return(rg)

    def set_vertices(self):
        """Set vertices bounding the field

        Takes the 6 outermost robots, extends their radial positions by
        the maximum arm reach, and sorts the resulting hexagon vertices
        by angle; the 7th vertex closes the polygon.
        """
        maxReach = self.robotgrid.robotDict[1].getMaxReach()
        xPos = np.array([self.robotgrid.robotDict[r].xPos
                         for r in self.robotgrid.robotDict])
        yPos = np.array([self.robotgrid.robotDict[r].yPos
                         for r in self.robotgrid.robotDict])
        rPos = np.sqrt(xPos**2 + yPos**2)
        ivert = np.argsort(rPos)[-6:]
        xVert = np.zeros(6, dtype=np.float64)
        yVert = np.zeros(6, dtype=np.float64)
        for i, cvert in enumerate(ivert):
            rOut = rPos[cvert] + maxReach
            xVert[i] = xPos[cvert] * rOut / rPos[cvert]
            yVert[i] = yPos[cvert] * rOut / rPos[cvert]
        thVert = np.arctan2(yVert, xVert)
        isort = np.argsort(thVert)
        self.xVert = np.zeros(7, dtype=np.float64)
        self.yVert = np.zeros(7, dtype=np.float64)
        self.xVert[0:6] = xVert[isort]
        self.yVert[0:6] = yVert[isort]
        self.xVert[6] = self.xVert[0]
        self.yVert[6] = self.yVert[0]
        self.raVert, self.decVert = self.xy2radec(self.xVert, self.yVert)
        return

    def set_assignments(self):
        """Convert robotgrid assignments to array
        Notes:
        ------
        Sets attributes assignments and target_assignments
        (both use -1 for "unassigned")
        """
        self.assignments = np.zeros(len(self.robotgrid.robotDict),
                                    dtype=np.int32) - 1
        self.target_assignments = np.zeros(self.ntarget,
                                           dtype=np.int32) - 1
        for robotID in self.robotgrid.robotDict:
            irobot = self.robotID2indx[robotID]
            if(self.robotgrid.robotDict[robotID].isAssigned()):
                catalogid = self.robotgrid.robotDict[robotID].assignedTargetID
                self.assignments[irobot] = catalogid
                tindx = self.catalogid2indx[catalogid]
                self.target_assignments[tindx] = robotID
        return

    def radec2xy(self, ra=None, dec=None):
        """Convert RA/Dec (J2000 deg) to focal plane x/y (mm)

        Uses a gnomonic-style projection about the boresight with a
        constant plate scale per observatory (mm/deg); approximate.
        """
        # Yikes!
        if(self.observatory == 'apo'):
            scale = 218.
        if(self.observatory == 'lco'):
            scale = 329.
        # From <NAME>. 17
        deccen_rad = self.deccen * np.pi / 180.
        racen_rad = self.racen * np.pi / 180.
        dec_rad = dec * np.pi / 180.
        ra_rad = ra * np.pi / 180.
        # Direction cosines of the target relative to the boresight
        x = (np.cos(deccen_rad) * np.sin(dec_rad) -
             np.sin(deccen_rad) * np.cos(dec_rad) *
             np.cos(ra_rad - racen_rad))
        y = np.cos(dec_rad) * np.sin(ra_rad - racen_rad)
        z = (np.sin(deccen_rad) * np.sin(dec_rad) +
             np.cos(deccen_rad) * np.cos(dec_rad) *
             np.cos(ra_rad - racen_rad))
        d_rad = np.arctan2(np.sqrt(x**2 + y**2), z)
        pay = np.sin(ra_rad - racen_rad)
        pax = (np.cos(deccen_rad) * np.tan(dec_rad) -
               np.sin(deccen_rad) * np.cos(ra_rad - racen_rad))
        pa_rad = np.arctan2(pay, pax)  # I think E of N?
        pa_rad = pa_rad - self.pa * np.pi / 180.
        x = d_rad * 180. / np.pi * scale * np.sin(pa_rad)
        y = d_rad * 180. / np.pi * scale * np.cos(pa_rad)
        return(x, y)

    def _min_xy_diff(self, radec, xt, yt):
        """Squared distance between radec2xy(radec) and (xt, yt); for fitting"""
        x, y = self.radec2xy(ra=radec[0], dec=radec[1])
        resid2 = (x - xt)**2 + (y - yt)**2
        return(resid2)

    def xy2radec(self, x=None, y=None):
        """Convert focal plane x/y (mm) to RA/Dec (J2000 deg)

        Numerically inverts radec2xy() per point, starting from a
        linear estimate.
        """
        # This doesn't handle poles well
        # Yikes!
        if(self.observatory == 'apo'):
            scale = 218.
        if(self.observatory == 'lco'):
            scale = 329.
        xa = self._arrayify(x, dtype=np.float64)
        ya = self._arrayify(y, dtype=np.float64)
        # Linear starting guess for the minimizer
        rast = self.racen - xa / scale / np.cos(self.deccen * np.pi / 180.)
        decst = self.deccen + ya / scale
        ra = np.zeros(len(xa), dtype=np.float64)
        dec = np.zeros(len(xa), dtype=np.float64)
        for i in np.arange(len(xa)):
            res = optimize.minimize(self._min_xy_diff, [rast[i], decst[i]],
                                    (xa[i], ya[i]))
            ra[i] = res.x[0]
            dec[i] = res.x[1]
        return(ra, dec)

    def _targets_fromarray_robotgrid(self):
        """Add all targets to the robot grid with their fiber type"""
        for itarget in np.arange(self.ntarget, dtype=np.int32):
            if(self.target_apogee[itarget]):
                fiberType = kaiju.ApogeeFiber
            else:
                fiberType = kaiju.BossFiber
            self.robotgrid.addTarget(targetID=self.target_catalogid[itarget],
                                     x=self.target_x[itarget],
                                     y=self.target_y[itarget],
                                     priority=self.target_priority[itarget],
                                     fiberType=fiberType)
        return

    def _targets_fromarray_within(self):
        """Flag targets reachable by at least one robot"""
        # builtin bool: np.bool was a deprecated alias removed in NumPy 1.24
        self.target_within = np.zeros(self.ntarget, dtype=bool)
        for tid, t in self.robotgrid.targetDict.items():
            itarget = self.catalogid2indx[tid]
            self.target_within[itarget] = len(t.validRobotIDs) > 0
        return

    def targets_fromarray(self, target_array=None):
        """Read targets from an ndarray
        Parameters:
        ----------
        target_array : ndarray
            ndarray with columns below
        Notes:
        ------
        Required columns of array:
         'ra', 'dec' should be np.float64
         'catalogid' should be np.int64
         'category' should be str or bytes
         'fiberType' should be 'APOGEE' or 'BOSS'
        Optional columns of array:
         'priority'
         'category'
         'program'
        """
        # Copy over data from array
        self.target_array = target_array
        self.ntarget = len(self.target_array)
        self.target_ra = self.target_array['ra']
        if(not isinstance(self.target_ra[0], np.float64)):
            print("WARNING: TARGET_RA NOT 64-bit")
        self.target_dec = self.target_array['dec']
        if(not isinstance(self.target_dec[0], np.float64)):
            print("WARNING: TARGET_DEC NOT 64-bit")
        self.target_catalogid = self.target_array['catalogid']
        # Optional data
        if('priority' in self.target_array.dtype.names):
            self.target_priority = self.target_array['priority']
        else:
            self.target_priority = np.ones(self.ntarget, dtype=np.int32)
        if('category' in self.target_array.dtype.names):
            self.target_category = np.array(
                [c.strip() for c in self.target_array['category']])
        else:
            self.target_category = np.array(['SCIENCE'] * self.ntarget)
        if('program' in self.target_array.dtype.names):
            self.target_program = np.array(
                [c.strip() for c in self.target_array['program']])
        else:
            self.target_program = np.array(['PROGRAM'] * self.ntarget)
        # Build dictionary for catalogid2indx
        self.catalogid2indx = dict()
        for itarget in np.arange(self.ntarget, dtype=np.int32):
            self.catalogid2indx[self.target_catalogid[itarget]] = itarget
        self.target_x, self.target_y = self.radec2xy(self.target_ra,
                                                     self.target_dec)
        self.target_apogee = np.array(self.target_array['fiberType'] ==
                                      'APOGEE')
        self.target_boss = np.array(self.target_array['fiberType'] ==
                                    'BOSS')
        self._targets_fromarray_robotgrid()
        self._targets_fromarray_within()
        return

    def targets_fromfits(self, filename=None):
        """Read targets from a FITS file
        Parameters:
        ----------
        filename : str
            FITS file name, for file with columns listed below
        Notes:
        ------
        Required columns of array:
         'ra', 'dec' should be np.float64
         'catalogid' should be np.int64
         'category' should be str or bytes
         'fiberType' should be 'APOGEE' or 'BOSS'
        Optional columns of array:
         'priority'
         'category'
         'program'
        """
        target_array = fitsio.read(filename)
        self.targets_fromarray(target_array)
        return

    def fromfits(self, filename=None):
        """Read design from a FITS file
        Parameters:
        ----------
        filename : str
            FITS file name, where HDU 2 has array of assignments
        """
        hdr = fitsio.read_header(filename, ext=1)
        self.racen = np.float64(hdr['RACEN'])
        self.deccen = np.float64(hdr['DECCEN'])
        self.pa = np.float32(hdr['PA'])
        self.observatory = hdr['OBS']
        self.targets_fromfits(filename)
        f = fitsio.FITS(filename)
        if(len(f) > 2):
            self.assignments = fitsio.read(filename, ext=2)
            # NOTE(review): set_target_assignments is not defined in this
            # class (only set_assignments is); confirm it exists in a
            # subclass or elsewhere in the module.
            self.set_target_assignments()
        return

    def targets_toarray(self):
        """Write targets to an ndarray
        Returns:
        -------
        target_array : ndarray
            Array of targets, with columns:
                'ra', 'dec' (np.float64)
                'catalogid' (np.int64)
                'fiberType' (np.int32)
                'within' (np.int32)
                'priority' (np.int32)
                'category' ('a30')
                'program' ('a30')
        """
        target_array = np.zeros(self.ntarget, dtype=_target_array_dtype)
        target_array['ra'] = self.target_ra
        target_array['dec'] = self.target_dec
        target_array['catalogid'] = self.target_catalogid
        target_array['category'] = self.target_category
        target_array['program'] = self.target_program
        target_array['fiberType'] = ['BOSS' if c else 'APOGEE'
                                     for c in self.target_boss]
        target_array['priority'] = self.target_priority
        target_array['within'] = self.target_within
        return(target_array)

    def tofits(self, filename=None, clobber=True):
        """Write targets to a FITS file
        Parameters:
        ----------
        filename : str
            file name to write to
        clobber : boolean
            if True overwrite file, otherwise add an extension
        Notes:
        -----
        Writes header keywords:
            RACEN
            DECCEN
            PA
        Tables has columns:
            'ra', 'dec' (np.float64)
            'pa' (np.float32)
            'cadence', 'type' (np.unicode_)
            'priority' (np.int32)
            'category' (np.unicode_)
            'program' (np.unicode_)
            'fiberType' (np.unicode_)
        """
        hdr = dict()
        hdr['RACEN'] = self.racen
        hdr['DECCEN'] = self.deccen
        hdr['PA'] = self.pa
        hdr['OBS'] = self.observatory
        tarray = self.targets_toarray()
        fitsio.write(filename, tarray, header=hdr, clobber=clobber)
        if(self.assignments is not None):
            fitsio.write(filename, self.assignments, clobber=False)
        return

    def plot_robot(self, robot, color=None):
        """Draw one robot's alpha (thin) and beta (thick) arms"""
        xr = robot.xPos
        yr = robot.yPos
        xa = xr + alphaLen * np.cos(robot.alpha /
                                    180. * np.pi)
        ya = yr + alphaLen * np.sin(robot.alpha /
                                    180. * np.pi)
        xb = xa + betaLen * np.cos((robot.alpha + robot.beta) /
                                   180. * np.pi)
        yb = ya + betaLen * np.sin((robot.alpha + robot.beta) /
                                   180. * np.pi)
        plt.plot(np.array([xr, xa]), np.array([yr, ya]),
                 color=color, alpha=0.5)
        plt.plot(np.array([xa, xb]), np.array([ya, yb]),
                 color=color, linewidth=3)

    def plot(self, robotID=False, catalogid=False):
        """Plot assignments of robots to targets for field """
        target_programs = np.sort(np.unique(self.target_program))
        colors = ['black', 'green', 'blue', 'cyan', 'purple', 'red',
                  'magenta', 'grey']
        if(self.assignments is not None):
            target_got = np.zeros(self.ntarget, dtype=np.int32)
            target_robotid = np.zeros(self.ntarget, dtype=np.int32)
            iassigned = np.where(self.assignments >= 0)[0]
            itarget = np.array([self.catalogid2indx[x] for x in
                                self.assignments[iassigned]])
            target_got[itarget] = 1
            target_robotid[itarget] = self.target_assignments[itarget]
            for indx in np.arange(len(target_programs)):
                itarget = np.where((target_got > 0) &
                                    (self.target_program ==
                                     target_programs[indx]))[0]
                plt.scatter(self.target_x[itarget],
                            self.target_y[itarget], s=4)
                icolor = indx % len(colors)
                for i in itarget:
                    robot = self.robotgrid.robotDict[target_robotid[i]]
                    self.plot_robot(robot, color=colors[icolor])
        # All targets, color-coded by program
        for indx in np.arange(len(target_programs)):
            itarget = np.where(self.target_program ==
                               target_programs[indx])[0]
            icolor = indx % len(colors)
            plt.scatter(self.target_x[itarget],
                        self.target_y[itarget], s=2, color=colors[icolor])
        xcen = np.array([self.robotgrid.robotDict[r].xPos
                         for r in self.robotgrid.robotDict],
                        dtype=np.float32)
        ycen = np.array([self.robotgrid.robotDict[r].yPos
                         for r in self.robotgrid.robotDict],
                        dtype=np.float32)
        robotid = np.array([str(r)
                            for r in self.robotgrid.robotDict])
        plt.scatter(xcen, ycen, s=6, color='grey', label='Used robot')
        if(robotID):
            for cx, cy, cr in zip(xcen, ycen, robotid):
                plt.text(cx, cy, cr, color='grey', fontsize=8,
                         clip_on=True)
        if(catalogid):
            for cx, cy, ct in zip(self.target_x, self.target_y,
                                  self.target_catalogid):
                plt.text(cx, cy, ct, fontsize=8, clip_on=True)
        # Guard: previously this section ran unconditionally and raised a
        # TypeError if plot() was called before any assignment was made.
        if(self.assignments is not None):
            used = (self.assignments >= 0)
            inot = np.where(~used)[0]
            plt.scatter(xcen[inot], ycen[inot], s=20, color='grey',
                        label='Unused robot')
            for i in robotid[inot]:
                self.plot_robot(self.robotgrid.robotDict[int(i)],
                                color='grey')
        plt.xlim([-370., 370.])
        plt.ylim([-370., 370.])
        plt.legend()
class DesignGreedy(DesignBase):
    """Design class using a greedy, priority-ordered assignment.

    Targets are considered in increasing priority value (lower first,
    ties broken randomly); each target takes the first reachable robot
    that is free and does not collide with already-assigned robots.
    """
    def __init__(self, racen=None, deccen=None, pa=0.,
                 observatory='apo'):
        super().__init__(racen=racen, deccen=deccen, pa=pa,
                         observatory=observatory)
        return

    def assign(self):
        """Greedily assign robots to targets and record the result"""
        # Sort by priority, and randomly for ties: shuffle first, then
        # stable-sort so the shuffled order survives within equal priorities
        iorder = np.arange(self.ntarget, dtype=np.int32)
        np.random.shuffle(iorder)
        isort = iorder[np.argsort(self.target_priority[iorder])]
        count = 0
        for i in isort:
            catalogid = self.target_catalogid[i]
            t = self.robotgrid.targetDict[catalogid]
            for robotID in t.validRobotIDs:
                if(self.robotgrid.robotDict[robotID].isAssigned() == False):
                    self.robotgrid.assignRobot2Target(robotID, catalogid)
                    if(self.robotgrid.isCollidedWithAssigned(robotID) == True):
                        # Collision: back this robot off and try the next
                        self.robotgrid.decollideRobot(robotID)
                    else:
                        count = count + 1
                        # Bug fix: stop at the first successful robot;
                        # previously the loop kept going and could assign
                        # the same target to several robots.
                        break
        # Final pass: move any still-colliding unassigned robots clear
        for robotID in self.robotgrid.robotDict:
            if(self.robotgrid.isCollidedWithAssigned(robotID) == True):
                if(self.robotgrid.robotDict[robotID].isAssigned()):
                    print("INCONSISTENCY.")
                self.robotgrid.decollideRobot(robotID)
        self.set_assignments()
class DesignOptimize(DesignBase):
    """Design class that maximizes the number of assigned targets
    using the OR-tools CP-SAT solver, with optional pairwise
    collision constraints."""
    def __init__(self, racen=None, deccen=None, pa=0.,
                 observatory='apo'):
        super().__init__(racen=racen, deccen=deccen, pa=pa,
                         observatory=observatory)
        return
    def assign(self, check_collisions=True):
        """Assigns using CP-SAT to optimize number of targets
        Parameters
        ----------
        check_collisions : boolean
            whether to add collision constraints (default True)
        Notes
        -----
        Assigns the robots in the robotGrid object attribute "robotgrid"
        Sets ndarray attributes "assignments" and "target_assignments"
        """
        rg = self.robotgrid
        # Initialize Model
        model = cp_model.CpModel()
        # Add variables; one for each robot-target pair
        # Make a dictionary to organize them as wwrt[robotID][catalogid],
        # and one to organize them as wwtr[catalogid][robotID], and
        # also a flattened list
        wwrt = dict()
        wwtr = dict()
        for robotID in rg.robotDict:
            r = rg.robotDict[robotID]
            for catalogid in r.validTargetIDs:
                name = 'ww[{r}][{c}]'.format(r=robotID, c=catalogid)
                if(catalogid not in wwtr):
                    wwtr[catalogid] = dict()
                if(robotID not in wwrt):
                    wwrt[robotID] = dict()
                # Boolean variable: robot robotID observes target catalogid
                wwrt[robotID][catalogid] = model.NewBoolVar(name)
                wwtr[catalogid][robotID] = wwrt[robotID][catalogid]
        ww_list = [wwrt[y][x] for y in wwrt for x in wwrt[y]]
        # Constrain only one target per robot
        wwsum_robot = dict()
        for robotID in wwrt:
            rlist = [wwrt[robotID][c] for c in wwrt[robotID]]
            wwsum_robot[robotID] = cp_model.LinearExpr.Sum(rlist)
            model.Add(wwsum_robot[robotID] <= 1)
        # Constrain only one robot per target
        wwsum_target = dict()
        for catalogid in wwtr:
            tlist = [wwtr[catalogid][r] for r in wwtr[catalogid]]
            wwsum_target[catalogid] = cp_model.LinearExpr.Sum(tlist)
            model.Add(wwsum_target[catalogid] <= 1)
        # Do not allow collisions
        if(check_collisions):
            # Find potention collisions
            # Brute-force pre-pass: physically try every (robot, target)
            # pairing against each neighbor's pairings and record which
            # combinations collide.
            collisions = []
            for robotID1 in rg.robotDict:
                r1 = rg.robotDict[robotID1]
                for catalogid1 in r1.validTargetIDs:
                    rg.assignRobot2Target(robotID1, catalogid1)
                    for robotID2 in r1.robotNeighbors:
                        r2 = rg.robotDict[robotID2]
                        for catalogid2 in r2.validTargetIDs:
                            if(catalogid1 != catalogid2):
                                rg.assignRobot2Target(robotID2, catalogid2)
                                if(rg.isCollidedWithAssigned(robotID1)):
                                    collisions.append((robotID1,
                                                       catalogid1,
                                                       robotID2,
                                                       catalogid2))
                            # NOTE(review): the placement of the two
                            # homeRobot() calls below looks suspect —
                            # robotID1 is homed inside the neighbor loop,
                            # so later neighbors are checked against a
                            # homed robot1. Verify intended indentation.
                            rg.homeRobot(robotID2)
                        rg.homeRobot(robotID1)
            # Now add constraint that collisions can't occur
            for robotID1, catalogid1, robotID2, catalogid2 in collisions:
                ww1 = wwrt[robotID1][catalogid1]
                ww2 = wwrt[robotID2][catalogid2]
                # At most one member of each colliding pair may be chosen
                tmp_collision = cp_model.LinearExpr.Sum([ww1, ww2])
                model.Add(tmp_collision <= 1)
        # Maximize the total sum
        wwsum_all = cp_model.LinearExpr.Sum(ww_list)
        model.Maximize(wwsum_all)
        model.AddDecisionStrategy(ww_list,
                                  cp_model.CHOOSE_FIRST,
                                  cp_model.SELECT_MAX_VALUE)
        solver = cp_model.CpSolver()
        solver.parameters.num_search_workers = 16
        status = solver.Solve(model)
        if status == cp_model.OPTIMAL:
            print('Count: {ov}'.format(ov=solver.ObjectiveValue()))
            # Apply the solver's choices back onto the robot grid;
            # collisions here should be impossible if constraints held
            for robotID in wwrt:
                for catalogid in wwrt[robotID]:
                    assigned = solver.Value(wwrt[robotID][catalogid])
                    if(assigned):
                        rg.assignRobot2Target(robotID, catalogid)
                        if(rg.isCollidedWithAssigned(robotID)):
                            print("Unexpected collision occurred:")
                            print(" r1, c1: {r} {c}".format(r=robotID,
                                                            c=catalogid))
                            rids = rg.robotColliders(robotID)
                            for rid in rids:
                                cid = rg.robotDict[rid].assignedTargetID
                                print(" ro, co: {r} {c}".format(r=rid,
                                                                c=cid))
        # Move any unassigned, colliding robots out of the way
        for robotID in rg.robotDict:
            if(rg.robotDict[robotID].isAssigned() is False):
                if(rg.isCollided(robotID)):
                    rg.decollideRobot(robotID)
        self.set_assignments()
class DesignOptimalFast(DesignBase):
    """Test class. Not actually faster."""
    # Alternative CP-SAT formulation: one integer variable per robot whose
    # value is the chosen catalogid (or a unique negative sentinel for
    # "unassigned"), with an AllDifferent constraint.
    def __init__(self, racen=None, deccen=None, pa=0.,
                 observatory='apo'):
        super().__init__(racen=racen, deccen=deccen, pa=pa,
                         observatory=observatory)
        return
    def assign(self):
        # NOTE(review): unlike the other classes, this method does not
        # write the solution back to robotgrid or call set_assignments();
        # it only reports the objective. Confirm whether the tail of this
        # method was truncated.
        # Initialize Model
        model = cp_model.CpModel()
        # Add variables; one for each robot; store as a
        # dictionary but also as a list.
        ww = dict()
        assigned = dict()
        for robotID in self.robotgrid.robotDict:
            r = self.robotgrid.robotDict[robotID]
            tlist = [int(x) for x in r.validTargetIDs] # allowed targets
            tlist = [int(- robotID - 1)] + tlist # unassigned -> robotID (unique)
            dom = cp_model.Domain.FromValues(tlist)
            name = 'ww[{r}]'.format(r=robotID)
            ww[robotID] = model.NewIntVarFromDomain(dom, name)
            name = 'assigned[{r}]'.format(r=robotID)
            assigned[robotID] = model.NewBoolVar(name)
            # Link the indicator to the sign of the chosen value:
            # catalogids are >= 0, the "unassigned" sentinel is < 0
            model.Add(ww[robotID] >= 0).OnlyEnforceIf(assigned[robotID])
            model.Add(ww[robotID] < 0).OnlyEnforceIf(assigned[robotID].Not())
        ww_list = [ww[robotID] for robotID in ww]
        assigned_list = [assigned[robotID] for robotID in assigned]
        # Constrain only one robot per target (i.e. you can't set two
        # robots to the same target). Every robot has the option
        # of not being assigned (i.e. of "- robotID - 1", which is unique)
        model.AddAllDifferent(ww_list)
        # Maximize the total sum
        assigned_sum = cp_model.LinearExpr.Sum(assigned_list)
        model.Maximize(assigned_sum)
        # Search for values in decreasing order.
        model.AddDecisionStrategy(assigned_list,
                                  cp_model.CHOOSE_FIRST,
                                  cp_model.SELECT_MAX_VALUE)
        solver = cp_model.CpSolver()
        status = solver.Solve(model)
        print(solver.StatusName(status=status))
        if status == cp_model.OPTIMAL:
            print('Count: {ov}'.format(ov=solver.ObjectiveValue()))
| [
"numpy.sqrt",
"numpy.argsort",
"numpy.array",
"numpy.arctan2",
"numpy.sin",
"numpy.arange",
"kaiju.robotGrid.RobotGridFilledHex",
"numpy.where",
"numpy.float64",
"fitsio.read",
"fitsio.FITS",
"fitsio.read_header",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.ylim",
"numpy.dtype",
"o... | [((504, 733), 'numpy.dtype', 'np.dtype', (["[('ra', np.float64), ('dec', np.float64), ('catalogid', np.int64), (\n 'category', np.unicode_, 30), ('program', np.unicode_, 30), (\n 'fiberType', np.unicode_, 30), ('priority', np.int32), ('within', np.int32)\n ]"], {}), "([('ra', np.float64), ('dec', np.float64), ('catalogid', np.int64),\n ('category', np.unicode_, 30), ('program', np.unicode_, 30), (\n 'fiberType', np.unicode_, 30), ('priority', np.int32), ('within', np.\n int32)])\n", (512, 733), True, 'import numpy as np\n'), ((4673, 4745), 'kaiju.robotGrid.RobotGridFilledHex', 'kaiju.robotGrid.RobotGridFilledHex', ([], {'collisionBuffer': 'self.collisionBuffer'}), '(collisionBuffer=self.collisionBuffer)\n', (4707, 4745), False, 'import kaiju\n'), ((5005, 5083), 'numpy.array', 'np.array', (['[self.robotgrid.robotDict[r].xPos for r in self.robotgrid.robotDict]'], {}), '([self.robotgrid.robotDict[r].xPos for r in self.robotgrid.robotDict])\n', (5013, 5083), True, 'import numpy as np\n'), ((5124, 5202), 'numpy.array', 'np.array', (['[self.robotgrid.robotDict[r].yPos for r in self.robotgrid.robotDict]'], {}), '([self.robotgrid.robotDict[r].yPos for r in self.robotgrid.robotDict])\n', (5132, 5202), True, 'import numpy as np\n'), ((5243, 5273), 'numpy.sqrt', 'np.sqrt', (['(xPos ** 2 + yPos ** 2)'], {}), '(xPos ** 2 + yPos ** 2)\n', (5250, 5273), True, 'import numpy as np\n'), ((5324, 5353), 'numpy.zeros', 'np.zeros', (['(6)'], {'dtype': 'np.float64'}), '(6, dtype=np.float64)\n', (5332, 5353), True, 'import numpy as np\n'), ((5370, 5399), 'numpy.zeros', 'np.zeros', (['(6)'], {'dtype': 'np.float64'}), '(6, dtype=np.float64)\n', (5378, 5399), True, 'import numpy as np\n'), ((5613, 5637), 'numpy.arctan2', 'np.arctan2', (['yVert', 'xVert'], {}), '(yVert, xVert)\n', (5623, 5637), True, 'import numpy as np\n'), ((5654, 5672), 'numpy.argsort', 'np.argsort', (['thVert'], {}), '(thVert)\n', (5664, 5672), True, 'import numpy as np\n'), ((5694, 5723), 'numpy.zeros', 
'np.zeros', (['(7)'], {'dtype': 'np.float64'}), '(7, dtype=np.float64)\n', (5702, 5723), True, 'import numpy as np\n'), ((5745, 5774), 'numpy.zeros', 'np.zeros', (['(7)'], {'dtype': 'np.float64'}), '(7, dtype=np.float64)\n', (5753, 5774), True, 'import numpy as np\n'), ((7655, 7681), 'numpy.sin', 'np.sin', (['(ra_rad - racen_rad)'], {}), '(ra_rad - racen_rad)\n', (7661, 7681), True, 'import numpy as np\n'), ((7817, 7837), 'numpy.arctan2', 'np.arctan2', (['pay', 'pax'], {}), '(pay, pax)\n', (7827, 7837), True, 'import numpy as np\n'), ((9110, 9149), 'numpy.arange', 'np.arange', (['self.ntarget'], {'dtype': 'np.int32'}), '(self.ntarget, dtype=np.int32)\n', (9119, 9149), True, 'import numpy as np\n'), ((9729, 9766), 'numpy.zeros', 'np.zeros', (['self.ntarget'], {'dtype': 'np.bool'}), '(self.ntarget, dtype=np.bool)\n', (9737, 9766), True, 'import numpy as np\n'), ((11840, 11879), 'numpy.arange', 'np.arange', (['self.ntarget'], {'dtype': 'np.int32'}), '(self.ntarget, dtype=np.int32)\n', (11849, 11879), True, 'import numpy as np\n'), ((12125, 12177), 'numpy.array', 'np.array', (["(self.target_array['fiberType'] == 'APOGEE')"], {}), "(self.target_array['fiberType'] == 'APOGEE')\n", (12133, 12177), True, 'import numpy as np\n'), ((12243, 12293), 'numpy.array', 'np.array', (["(self.target_array['fiberType'] == 'BOSS')"], {}), "(self.target_array['fiberType'] == 'BOSS')\n", (12251, 12293), True, 'import numpy as np\n'), ((13012, 13033), 'fitsio.read', 'fitsio.read', (['filename'], {}), '(filename)\n', (13023, 13033), False, 'import fitsio\n'), ((13321, 13356), 'fitsio.read_header', 'fitsio.read_header', (['filename'], {'ext': '(1)'}), '(filename, ext=1)\n', (13339, 13356), False, 'import fitsio\n'), ((13378, 13402), 'numpy.float64', 'np.float64', (["hdr['RACEN']"], {}), "(hdr['RACEN'])\n", (13388, 13402), True, 'import numpy as np\n'), ((13425, 13450), 'numpy.float64', 'np.float64', (["hdr['DECCEN']"], {}), "(hdr['DECCEN'])\n", (13435, 13450), True, 'import numpy as np\n'), 
((13469, 13490), 'numpy.float32', 'np.float32', (["hdr['PA']"], {}), "(hdr['PA'])\n", (13479, 13490), True, 'import numpy as np\n'), ((13581, 13602), 'fitsio.FITS', 'fitsio.FITS', (['filename'], {}), '(filename)\n', (13592, 13602), False, 'import fitsio\n'), ((14200, 14249), 'numpy.zeros', 'np.zeros', (['self.ntarget'], {'dtype': '_target_array_dtype'}), '(self.ntarget, dtype=_target_array_dtype)\n', (14208, 14249), True, 'import numpy as np\n'), ((15663, 15722), 'fitsio.write', 'fitsio.write', (['filename', 'tarray'], {'header': 'hdr', 'clobber': 'clobber'}), '(filename, tarray, header=hdr, clobber=clobber)\n', (15675, 15722), False, 'import fitsio\n'), ((18225, 18326), 'numpy.array', 'np.array', (['[self.robotgrid.robotDict[r].xPos for r in self.robotgrid.robotDict]'], {'dtype': 'np.float32'}), '([self.robotgrid.robotDict[r].xPos for r in self.robotgrid.\n robotDict], dtype=np.float32)\n', (18233, 18326), True, 'import numpy as np\n'), ((18386, 18487), 'numpy.array', 'np.array', (['[self.robotgrid.robotDict[r].yPos for r in self.robotgrid.robotDict]'], {'dtype': 'np.float32'}), '([self.robotgrid.robotDict[r].yPos for r in self.robotgrid.\n robotDict], dtype=np.float32)\n', (18394, 18487), True, 'import numpy as np\n'), ((18639, 18701), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xcen', 'ycen'], {'s': '(6)', 'color': '"""grey"""', 'label': '"""Used robot"""'}), "(xcen, ycen, s=6, color='grey', label='Used robot')\n", (18650, 18701), True, 'import matplotlib.pyplot as plt\n'), ((19182, 19259), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xcen[inot]', 'ycen[inot]'], {'s': '(20)', 'color': '"""grey"""', 'label': '"""Unused robot"""'}), "(xcen[inot], ycen[inot], s=20, color='grey', label='Unused robot')\n", (19193, 19259), True, 'import matplotlib.pyplot as plt\n'), ((19425, 19450), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-370.0, 370.0]'], {}), '([-370.0, 370.0])\n', (19433, 19450), True, 'import matplotlib.pyplot as plt\n'), ((19457, 19482), 
'matplotlib.pyplot.ylim', 'plt.ylim', (['[-370.0, 370.0]'], {}), '([-370.0, 370.0])\n', (19465, 19482), True, 'import matplotlib.pyplot as plt\n'), ((19489, 19501), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19499, 19501), True, 'import matplotlib.pyplot as plt\n'), ((19844, 19883), 'numpy.arange', 'np.arange', (['self.ntarget'], {'dtype': 'np.int32'}), '(self.ntarget, dtype=np.int32)\n', (19853, 19883), True, 'import numpy as np\n'), ((19892, 19917), 'numpy.random.shuffle', 'np.random.shuffle', (['iorder'], {}), '(iorder)\n', (19909, 19917), True, 'import numpy as np\n'), ((21606, 21624), 'ortools.sat.python.cp_model.CpModel', 'cp_model.CpModel', ([], {}), '()\n', (21622, 21624), True, 'import ortools.sat.python.cp_model as cp_model\n'), ((24513, 24545), 'ortools.sat.python.cp_model.LinearExpr.Sum', 'cp_model.LinearExpr.Sum', (['ww_list'], {}), '(ww_list)\n', (24536, 24545), True, 'import ortools.sat.python.cp_model as cp_model\n'), ((24760, 24779), 'ortools.sat.python.cp_model.CpSolver', 'cp_model.CpSolver', ([], {}), '()\n', (24777, 24779), True, 'import ortools.sat.python.cp_model as cp_model\n'), ((26431, 26449), 'ortools.sat.python.cp_model.CpModel', 'cp_model.CpModel', ([], {}), '()\n', (26447, 26449), True, 'import ortools.sat.python.cp_model as cp_model\n'), ((27697, 27735), 'ortools.sat.python.cp_model.LinearExpr.Sum', 'cp_model.LinearExpr.Sum', (['assigned_list'], {}), '(assigned_list)\n', (27720, 27735), True, 'import ortools.sat.python.cp_model as cp_model\n'), ((28008, 28027), 'ortools.sat.python.cp_model.CpSolver', 'cp_model.CpSolver', ([], {}), '()\n', (28025, 28027), True, 'import ortools.sat.python.cp_model as cp_model\n'), ((4550, 4579), 'numpy.zeros', 'np.zeros', (['length'], {'dtype': 'dtype'}), '(length, dtype=dtype)\n', (4558, 4579), True, 'import numpy as np\n'), ((5286, 5302), 'numpy.argsort', 'np.argsort', (['rPos'], {}), '(rPos)\n', (5296, 5302), True, 'import numpy as np\n'), ((6352, 6390), 'numpy.zeros', 'np.zeros', 
(['self.ntarget'], {'dtype': 'np.int32'}), '(self.ntarget, dtype=np.int32)\n', (6360, 6390), True, 'import numpy as np\n'), ((7398, 7413), 'numpy.cos', 'np.cos', (['dec_rad'], {}), '(dec_rad)\n', (7404, 7413), True, 'import numpy as np\n'), ((7416, 7442), 'numpy.sin', 'np.sin', (['(ra_rad - racen_rad)'], {}), '(ra_rad - racen_rad)\n', (7422, 7442), True, 'import numpy as np\n'), ((7615, 7639), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (7622, 7639), True, 'import numpy as np\n'), ((7951, 7965), 'numpy.sin', 'np.sin', (['pa_rad'], {}), '(pa_rad)\n', (7957, 7965), True, 'import numpy as np\n'), ((8009, 8023), 'numpy.cos', 'np.cos', (['pa_rad'], {}), '(pa_rad)\n', (8015, 8023), True, 'import numpy as np\n'), ((8808, 8881), 'scipy.optimize.minimize', 'optimize.minimize', (['self._min_xy_diff', '[rast[i], decst[i]]', '(xa[i], ya[i])'], {}), '(self._min_xy_diff, [rast[i], decst[i]], (xa[i], ya[i]))\n', (8825, 8881), True, 'import scipy.optimize as optimize\n'), ((11187, 11224), 'numpy.ones', 'np.ones', (['self.ntarget'], {'dtype': 'np.int32'}), '(self.ntarget, dtype=np.int32)\n', (11194, 11224), True, 'import numpy as np\n'), ((11444, 11480), 'numpy.array', 'np.array', (["(['SCIENCE'] * self.ntarget)"], {}), "(['SCIENCE'] * self.ntarget)\n", (11452, 11480), True, 'import numpy as np\n'), ((11696, 11732), 'numpy.array', 'np.array', (["(['PROGRAM'] * self.ntarget)"], {}), "(['PROGRAM'] * self.ntarget)\n", (11704, 11732), True, 'import numpy as np\n'), ((13658, 13686), 'fitsio.read', 'fitsio.read', (['filename'], {'ext': '(2)'}), '(filename, ext=2)\n', (13669, 13686), False, 'import fitsio\n'), ((15777, 15832), 'fitsio.write', 'fitsio.write', (['filename', 'self.assignments'], {'clobber': '(False)'}), '(filename, self.assignments, clobber=False)\n', (15789, 15832), False, 'import fitsio\n'), ((16385, 16403), 'numpy.array', 'np.array', (['[xr, xa]'], {}), '([xr, xa])\n', (16393, 16403), True, 'import numpy as np\n'), ((16405, 16423), 
'numpy.array', 'np.array', (['[yr, ya]'], {}), '([yr, ya])\n', (16413, 16423), True, 'import numpy as np\n'), ((16483, 16501), 'numpy.array', 'np.array', (['[xa, xb]'], {}), '([xa, xb])\n', (16491, 16501), True, 'import numpy as np\n'), ((16503, 16521), 'numpy.array', 'np.array', (['[ya, yb]'], {}), '([ya, yb])\n', (16511, 16521), True, 'import numpy as np\n'), ((16716, 16746), 'numpy.unique', 'np.unique', (['self.target_program'], {}), '(self.target_program)\n', (16725, 16746), True, 'import numpy as np\n'), ((16923, 16961), 'numpy.zeros', 'np.zeros', (['self.ntarget'], {'dtype': 'np.int32'}), '(self.ntarget, dtype=np.int32)\n', (16931, 16961), True, 'import numpy as np\n'), ((16991, 17029), 'numpy.zeros', 'np.zeros', (['self.ntarget'], {'dtype': 'np.int32'}), '(self.ntarget, dtype=np.int32)\n', (16999, 17029), True, 'import numpy as np\n'), ((17111, 17182), 'numpy.array', 'np.array', (['[self.catalogid2indx[x] for x in self.assignments[iassigned]]'], {}), '([self.catalogid2indx[x] for x in self.assignments[iassigned]])\n', (17119, 17182), True, 'import numpy as np\n'), ((18098, 18189), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.target_x[itarget]', 'self.target_y[itarget]'], {'s': '(2)', 'color': 'colors[icolor]'}), '(self.target_x[itarget], self.target_y[itarget], s=2, color=\n colors[icolor])\n', (18109, 18189), True, 'import matplotlib.pyplot as plt\n'), ((19147, 19170), 'numpy.where', 'np.where', (['(used == False)'], {}), '(used == False)\n', (19155, 19170), True, 'import numpy as np\n'), ((19941, 19981), 'numpy.argsort', 'np.argsort', (['self.target_priority[iorder]'], {}), '(self.target_priority[iorder])\n', (19951, 19981), True, 'import numpy as np\n'), ((22661, 22691), 'ortools.sat.python.cp_model.LinearExpr.Sum', 'cp_model.LinearExpr.Sum', (['rlist'], {}), '(rlist)\n', (22684, 22691), True, 'import ortools.sat.python.cp_model as cp_model\n'), ((22953, 22983), 'ortools.sat.python.cp_model.LinearExpr.Sum', 'cp_model.LinearExpr.Sum', (['tlist'], 
{}), '(tlist)\n', (22976, 22983), True, 'import ortools.sat.python.cp_model as cp_model\n'), ((26868, 26901), 'ortools.sat.python.cp_model.Domain.FromValues', 'cp_model.Domain.FromValues', (['tlist'], {}), '(tlist)\n', (26894, 26901), True, 'import ortools.sat.python.cp_model as cp_model\n'), ((7254, 7272), 'numpy.cos', 'np.cos', (['deccen_rad'], {}), '(deccen_rad)\n', (7260, 7272), True, 'import numpy as np\n'), ((7275, 7290), 'numpy.sin', 'np.sin', (['dec_rad'], {}), '(dec_rad)\n', (7281, 7290), True, 'import numpy as np\n'), ((7358, 7384), 'numpy.cos', 'np.cos', (['(ra_rad - racen_rad)'], {}), '(ra_rad - racen_rad)\n', (7364, 7384), True, 'import numpy as np\n'), ((7456, 7474), 'numpy.sin', 'np.sin', (['deccen_rad'], {}), '(deccen_rad)\n', (7462, 7474), True, 'import numpy as np\n'), ((7477, 7492), 'numpy.sin', 'np.sin', (['dec_rad'], {}), '(dec_rad)\n', (7483, 7492), True, 'import numpy as np\n'), ((7560, 7586), 'numpy.cos', 'np.cos', (['(ra_rad - racen_rad)'], {}), '(ra_rad - racen_rad)\n', (7566, 7586), True, 'import numpy as np\n'), ((7697, 7715), 'numpy.cos', 'np.cos', (['deccen_rad'], {}), '(deccen_rad)\n', (7703, 7715), True, 'import numpy as np\n'), ((7718, 7733), 'numpy.tan', 'np.tan', (['dec_rad'], {}), '(dec_rad)\n', (7724, 7733), True, 'import numpy as np\n'), ((7751, 7769), 'numpy.sin', 'np.sin', (['deccen_rad'], {}), '(deccen_rad)\n', (7757, 7769), True, 'import numpy as np\n'), ((7772, 7798), 'numpy.cos', 'np.cos', (['(ra_rad - racen_rad)'], {}), '(ra_rad - racen_rad)\n', (7778, 7798), True, 'import numpy as np\n'), ((8578, 8613), 'numpy.cos', 'np.cos', (['(self.deccen * np.pi / 180.0)'], {}), '(self.deccen * np.pi / 180.0)\n', (8584, 8613), True, 'import numpy as np\n'), ((15971, 16006), 'numpy.cos', 'np.cos', (['(robot.alpha / 180.0 * np.pi)'], {}), '(robot.alpha / 180.0 * np.pi)\n', (15977, 16006), True, 'import numpy as np\n'), ((16071, 16106), 'numpy.sin', 'np.sin', (['(robot.alpha / 180.0 * np.pi)'], {}), '(robot.alpha / 180.0 * np.pi)\n', 
(16077, 16106), True, 'import numpy as np\n'), ((16170, 16220), 'numpy.cos', 'np.cos', (['((robot.alpha + robot.beta) / 180.0 * np.pi)'], {}), '((robot.alpha + robot.beta) / 180.0 * np.pi)\n', (16176, 16220), True, 'import numpy as np\n'), ((16283, 16333), 'numpy.sin', 'np.sin', (['((robot.alpha + robot.beta) / 180.0 * np.pi)'], {}), '((robot.alpha + robot.beta) / 180.0 * np.pi)\n', (16289, 16333), True, 'import numpy as np\n'), ((17054, 17085), 'numpy.where', 'np.where', (['(self.assignments >= 0)'], {}), '(self.assignments >= 0)\n', (17062, 17085), True, 'import numpy as np\n'), ((17572, 17636), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.target_x[itarget]', 'self.target_y[itarget]'], {'s': '(4)'}), '(self.target_x[itarget], self.target_y[itarget], s=4)\n', (17583, 17636), True, 'import matplotlib.pyplot as plt\n'), ((17957, 18011), 'numpy.where', 'np.where', (['(self.target_program == target_programs[indx])'], {}), '(self.target_program == target_programs[indx])\n', (17965, 18011), True, 'import numpy as np\n'), ((18796, 18856), 'matplotlib.pyplot.text', 'plt.text', (['cx', 'cy', 'cr'], {'color': '"""grey"""', 'fontsize': '(8)', 'clip_on': '(True)'}), "(cx, cy, cr, color='grey', fontsize=8, clip_on=True)\n", (18804, 18856), True, 'import matplotlib.pyplot as plt\n'), ((19044, 19090), 'matplotlib.pyplot.text', 'plt.text', (['cx', 'cy', 'ct'], {'fontsize': '(8)', 'clip_on': '(True)'}), '(cx, cy, ct, fontsize=8, clip_on=True)\n', (19052, 19090), True, 'import matplotlib.pyplot as plt\n'), ((24377, 24412), 'ortools.sat.python.cp_model.LinearExpr.Sum', 'cp_model.LinearExpr.Sum', (['[ww1, ww2]'], {}), '([ww1, ww2])\n', (24400, 24412), True, 'import ortools.sat.python.cp_model as cp_model\n'), ((7306, 7324), 'numpy.sin', 'np.sin', (['deccen_rad'], {}), '(deccen_rad)\n', (7312, 7324), True, 'import numpy as np\n'), ((7327, 7342), 'numpy.cos', 'np.cos', (['dec_rad'], {}), '(dec_rad)\n', (7333, 7342), True, 'import numpy as np\n'), ((7508, 7526), 'numpy.cos', 
'np.cos', (['deccen_rad'], {}), '(deccen_rad)\n', (7514, 7526), True, 'import numpy as np\n'), ((7529, 7544), 'numpy.cos', 'np.cos', (['dec_rad'], {}), '(dec_rad)\n', (7535, 7544), True, 'import numpy as np\n'), ((17405, 17480), 'numpy.where', 'np.where', (['((target_got > 0) & (self.target_program == target_programs[indx]))'], {}), '((target_got > 0) & (self.target_program == target_programs[indx]))\n', (17413, 17480), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
from pathlib import Path
from typing import Dict, List, Union
from collections import OrderedDict
from pathos.multiprocessing import ThreadPool as Pool
from tqdm import tqdm
from src.utils import remap_label, get_type_instances
from .metrics import PQ, AJI, AJI_plus, DICE2, split_and_merge
class Benchmarker:
    """Runs instance-segmentation benchmarking metrics over a dataset."""

    def compute_metrics(
            self,
            true_pred: List[np.ndarray]
        ) -> Dict[str, float]:
        """
        Computes metrics for one (inst_map, gt_mask) pair.
        If GT does not contain any nuclear objects, returns None

        Args:
        -----------
            true_pred (List[np.ndarray]):
                Sample name in true_pred[0], ground truth annotations
                in true_pred[1] and corresponding predicted instance
                map in true_pred[2]

        Returns:
        -----------
            A Dict[str, float] of the metrics, or None for an empty GT
        """
        name = true_pred[0]
        true = true_pred[1]
        pred = true_pred[2]

        # Skip empty GTs (only background present). Previously the metric
        # variables were referenced without being assigned in this case,
        # raising a NameError instead of returning None as documented
        # (downstream code filters the Nones out).
        if len(np.unique(true)) <= 1:
            return None

        true = remap_label(true)
        pred = remap_label(pred)

        pq = PQ(true, pred)
        aji = AJI(true, pred)
        aji_p = AJI_plus(true, pred)
        dice2 = DICE2(true, pred)
        splits, merges = split_and_merge(true, pred)

        result = {
            "name": name,
            "AJI": aji,
            "AJI_plus": aji_p,
            "DICE2": dice2,
            "PQ": pq["pq"],
            "SQ": pq["sq"],
            "DQ": pq["dq"],
            "inst_recall": pq["recall"],
            "inst_precision": pq["precision"],
            "splits": splits,
            "merges": merges
        }

        return result

    def benchmark_insts(
            self,
            inst_maps: Dict[str, np.ndarray],
            gt_masks: Dict[str, np.ndarray],
            pattern_list: List[str]=None,
            save_dir: Union[str, Path]=None,
            prefix: str=""
        ) -> pd.DataFrame:
        """
        Run benchmarking metrics for instance maps for all of the files
        in the dataset. Note that the inst_maps and gt_masks need to
        share exact same keys and be sorted so that they align when
        computing metrics.

        Args:
        -----------
            inst_maps (OrderedDict[str, np.ndarray]):
                A dict of file_name:inst_map key vals in order
            gt_masks (OrderedDict[str, np.ndarray]):
                A dict of file_name:gt_inst_map key vals in order
            pattern_list (List[str], default=None):
                A list of patterns contained in the gt_mask and inst_map
                names. Averages for the masks containing these patterns
                will be added to the result df.
            save_dir (str or Path):
                directory where to save the result .csv
            prefix (str, default=""):
                adds a prefix to the .csv file name

        Returns:
        ----------
            a pandas dataframe of the metrics. Samples are rows and
            metrics are columns:
            _____________________
            |sample|PQ|SQ|DQ|AJI|
            |img1  |.5|.4|.6|.6 |
            |img2  |.5|.4|.6|.6 |
        """
        assert isinstance(inst_maps, dict), (
            f"inst_maps: {type(inst_maps)} is not a dict of inst_maps"
        )
        # Fixed error message: this check is about gt_masks, not inst_maps.
        assert isinstance(gt_masks, dict), (
            f"gt_masks: {type(gt_masks)} is not a dict of gt_masks"
        )

        # Sort by file name so inst maps and gt masks align pairwise
        inst_maps = OrderedDict(sorted(inst_maps.items()))
        gt_masks = OrderedDict(sorted(gt_masks.items()))

        assert inst_maps.keys() == gt_masks.keys(), (
            "inst_maps have different names as gt masks. insts: ",
            f"{inst_maps.keys()}. gt's: {gt_masks.keys()}"
        )

        masks = list(
            zip(inst_maps.keys(), gt_masks.values(), inst_maps.values())
        )

        metrics = []
        with Pool() as pool:
            for x in tqdm(
                pool.imap_unordered(self.compute_metrics, masks),
                total=len(masks),
                desc="Running metrics"
            ):
                metrics.append(x)

        # drop Nones if no nuclei are found in an image
        metrics = [metric for metric in metrics if metric]
        score_df = pd.DataFrame.from_records(metrics)
        score_df = score_df.set_index("name").sort_index()
        score_df.loc["averages_for_the_set"] = score_df.mean(axis=0)

        # Add averages to the df of files which contain patterns
        if pattern_list is not None:
            pattern_avgs = {
                f"{p}_avg": score_df[score_df.index.str.contains(f"{p}")].mean(axis=0)
                for p in pattern_list
            }
            score_df = pd.concat(
                [score_df, pd.DataFrame(pattern_avgs).transpose()]
            )

        # Save results to .csv
        if save_dir is not None:
            save_dir = Path(save_dir)
            score_df.to_csv(Path(save_dir / f"{prefix}_inst_benchmark.csv"))

        return score_df

    def benchmark_per_type(
            self,
            inst_maps: Dict[str, np.ndarray],
            type_maps: Dict[str, np.ndarray],
            gt_mask_insts: Dict[str, np.ndarray],
            gt_mask_types: Dict[str, np.ndarray],
            classes: Dict[str, int],
            pattern_list: List[str]=None,
            save_dir: Union[str, Path]=None,
            prefix: str=""
        ) -> pd.DataFrame:
        """
        Run benchmarking metrics per class type for all of the files in
        the dataset. Note that the inst_maps and gt_masks need to share
        exact same keys and be sorted so that they align when computing
        metrics.

        Args:
        -----------
            inst_maps (Dict[str, np.ndarray]):
                A dict of file_name:inst_map key vals in order
            type_maps (Dict[str, np.ndarray]):
                A dict of file_name:panoptic_map key vals in order
            gt_mask_insts (Dict[str, np.ndarray]):
                A dict of file_name:gt_inst_map key vals in order
            gt_mask_types (Dict[str, np.ndarray]):
                A dict of file_name:gt_panoptic_map key vals in order
            classes (Dict[str, int]):
                The class dict e.g. {bg: 0, immune: 1, epithel: 2}.
                background must be 0 class
            pattern_list (List[str], default=None):
                A list of patterns contained in the gt_mask and inst_map
                names. Averages for the masks containing these patterns
                will be added to the result df.
            save_dir (str or Path):
                directory where to save the result .csv
            prefix (str, default=""):
                adds a prefix to the .csv file name

        Returns:
        -----------
            a pandas dataframe of the metrics. Samples are rows and
            metrics are columns:
            __________________________
            |sample     |PQ|SQ|DQ|AJI|
            |img1_type1 |.5|.4|.6|.6 |
            |img1_type2 |.5|.4|.6|.6 |
            |img2_type1 |.5|.4|.6|.6 |
            |img2_type2 |.5|.4|.6|.6 |
        """
        # Fixed error messages: each assert now names the argument it checks.
        assert isinstance(inst_maps, dict), (
            f"inst_maps: {type(inst_maps)} is not a dict of inst_maps"
        )
        assert isinstance(type_maps, dict), (
            f"type_maps: {type(type_maps)} is not a dict of panoptic_maps"
        )
        assert isinstance(gt_mask_insts, dict), (
            f"gt_mask_insts: {type(gt_mask_insts)} is not a dict of inst_maps"
        )
        assert isinstance(gt_mask_types, dict), (
            f"gt_mask_types: {type(gt_mask_types)} is not a dict of panoptic_maps"
        )

        # sort by name so all four dicts align pairwise
        inst_maps = OrderedDict(sorted(inst_maps.items()))
        type_maps = OrderedDict(sorted(type_maps.items()))
        gt_mask_insts = OrderedDict(sorted(gt_mask_insts.items()))
        gt_mask_types = OrderedDict(sorted(gt_mask_types.items()))

        assert inst_maps.keys() == gt_mask_insts.keys(), (
            "inst_maps have different names as gt masks. insts: ",
            f"{inst_maps.keys()}. gt's: {gt_mask_insts.keys()}"
        )

        # Loop masks per class (skip the background class at index 0)
        df_total = pd.DataFrame()
        for c, ix in list(classes.items())[1:]:
            gts_per_class = [
                get_type_instances(i, t, ix)
                for i, t in zip(gt_mask_insts.values(), gt_mask_types.values())
            ]
            insts_per_class = [
                get_type_instances(i, t, ix)
                for i, t in zip(inst_maps.values(), type_maps.values())
            ]
            masks = list(zip(inst_maps.keys(), gts_per_class, insts_per_class))

            metrics = []
            with Pool() as pool:
                for x in tqdm(
                    pool.imap_unordered(self.compute_metrics, masks),
                    total=len(masks), desc=f"Running metrics for {c}"
                ):
                    metrics.append(x)

            # drop Nones if no classes are found in an image
            metrics = [metric for metric in metrics if metric]
            score_df = pd.DataFrame.from_records(metrics)
            score_df = score_df.set_index("name").sort_index()
            score_df.loc[f"{c}_avg_for_the_set"] = score_df.mean(axis=0)

            # Add averages to the df of files which contain patterns i
            # in the pattern list
            if pattern_list is not None:
                pattern_avgs = {
                    f"{c}_{p}_avg": score_df[score_df.index.str.contains(f"{p}")].mean(axis=0)
                    for p in pattern_list
                }
                score_df = pd.concat([score_df, pd.DataFrame(pattern_avgs).transpose()])

            df_total = pd.concat([df_total, score_df])

        # Save results to .csv
        if save_dir is not None:
            save_dir = Path(save_dir)
            df_total.to_csv(Path(save_dir / f"{prefix}_type_benchmark.csv"))

        return df_total
| [
"pandas.DataFrame.from_records",
"numpy.unique",
"pathlib.Path",
"src.utils.remap_label",
"pathos.multiprocessing.ThreadPool",
"src.utils.get_type_instances",
"pandas.DataFrame",
"pandas.concat"
] | [((4430, 4464), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['metrics'], {}), '(metrics)\n', (4455, 4464), True, 'import pandas as pd\n'), ((8377, 8391), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8389, 8391), True, 'import pandas as pd\n'), ((1074, 1091), 'src.utils.remap_label', 'remap_label', (['true'], {}), '(true)\n', (1085, 1091), False, 'from src.utils import remap_label, get_type_instances\n'), ((1111, 1128), 'src.utils.remap_label', 'remap_label', (['pred'], {}), '(pred)\n', (1122, 1128), False, 'from src.utils import remap_label, get_type_instances\n'), ((4052, 4058), 'pathos.multiprocessing.ThreadPool', 'Pool', ([], {}), '()\n', (4056, 4058), True, 'from pathos.multiprocessing import ThreadPool as Pool\n'), ((5068, 5082), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (5072, 5082), False, 'from pathlib import Path\n'), ((9313, 9347), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['metrics'], {}), '(metrics)\n', (9338, 9347), True, 'import pandas as pd\n'), ((9933, 9964), 'pandas.concat', 'pd.concat', (['[df_total, score_df]'], {}), '([df_total, score_df])\n', (9942, 9964), True, 'import pandas as pd\n'), ((10053, 10067), 'pathlib.Path', 'Path', (['save_dir'], {}), '(save_dir)\n', (10057, 10067), False, 'from pathlib import Path\n'), ((1033, 1048), 'numpy.unique', 'np.unique', (['true'], {}), '(true)\n', (1042, 1048), True, 'import numpy as np\n'), ((5111, 5158), 'pathlib.Path', 'Path', (["(save_dir / f'{prefix}_inst_benchmark.csv')"], {}), "(save_dir / f'{prefix}_inst_benchmark.csv')\n", (5115, 5158), False, 'from pathlib import Path\n'), ((8496, 8524), 'src.utils.get_type_instances', 'get_type_instances', (['i', 't', 'ix'], {}), '(i, t, ix)\n', (8514, 8524), False, 'from src.utils import remap_label, get_type_instances\n'), ((8668, 8696), 'src.utils.get_type_instances', 'get_type_instances', (['i', 't', 'ix'], {}), '(i, t, ix)\n', (8686, 8696), False, 'from src.utils import remap_label, 
get_type_instances\n'), ((8908, 8914), 'pathos.multiprocessing.ThreadPool', 'Pool', ([], {}), '()\n', (8912, 8914), True, 'from pathos.multiprocessing import ThreadPool as Pool\n'), ((10096, 10143), 'pathlib.Path', 'Path', (["(save_dir / f'{prefix}_type_benchmark.csv')"], {}), "(save_dir / f'{prefix}_type_benchmark.csv')\n", (10100, 10143), False, 'from pathlib import Path\n'), ((4926, 4952), 'pandas.DataFrame', 'pd.DataFrame', (['pattern_avgs'], {}), '(pattern_avgs)\n', (4938, 4952), True, 'import pandas as pd\n'), ((9868, 9894), 'pandas.DataFrame', 'pd.DataFrame', (['pattern_avgs'], {}), '(pattern_avgs)\n', (9880, 9894), True, 'import pandas as pd\n')] |
import numpy as np

from sympy import sin, Abs

from devito import (Constant, Grid, Inc, Operator, Function, SubDomain, Eq,
                    SubDimension, ConditionalDimension, switchconfig)
from devito.tools import memoized_meth
__all__ = ['Model']
def initialize_damp(damp, nbpml, spacing, mask=False):
    """
    Initialise damping field with an absorbing PML layer.

    Parameters
    ----------
    damp : Function
        The damping field for absorbing boundary condition.
    nbpml : int
        Number of points in the damping layer.
    spacing :
        Grid spacing coefficient.
    mask : bool, optional
        whether the dampening is a mask or layer.
        mask => 1 inside the domain and decreases in the layer
        not mask => 0 inside the domain and increase in the layer
    """
    coeff = 1.5 * np.log(1.0 / 0.001) / (40)

    # With a mask profile the interior is initialised to 1 first.
    equations = [Eq(damp, 1.0)] if mask else []

    def taper(distance):
        # Damping profile: linear ramp minus a sinusoidal correction;
        # sign is flipped when the field acts as a mask.
        profile = coeff * (distance - sin(2*np.pi*distance)/(2*np.pi))
        return -profile if mask else profile

    for dim in damp.dimensions:
        # Absorbing strip at the lower (left) end of this dimension
        left = SubDimension.left(name='abc_%s_l' % dim.name, parent=dim,
                                 thickness=nbpml)
        dist_l = Abs((nbpml - (left - dim.symbolic_min) + 1) / float(nbpml))
        equations.append(Inc(damp.subs({dim: left}), taper(dist_l)/dim.spacing))

        # Absorbing strip at the upper (right) end of this dimension
        right = SubDimension.right(name='abc_%s_r' % dim.name, parent=dim,
                                   thickness=nbpml)
        dist_r = Abs((nbpml - (dim.symbolic_max - right) + 1) / float(nbpml))
        equations.append(Inc(damp.subs({dim: right}), taper(dist_r)/dim.spacing))

    # TODO: Figure out why yask doesn't like it with dse/dle
    Operator(equations, name='initdamp', dse='noop', dle='noop')()
def initialize_function(function, data, nbpml):
    """
    Initialize a `Function` with the given ``data``. ``data``
    does *not* include the PML layers for the absorbing boundary conditions;
    these are added via padding by this function.

    Parameters
    ----------
    function : Function
        The initialised object.
    data : ndarray
        The data array used for initialisation.
    nbpml : int
        Number of PML layers for boundary damping.
    """
    # Copy the payload into the interior, leaving the PML strips untouched.
    interior = (slice(nbpml, -nbpml),) * function.grid.dim
    function.data[interior] = data

    # Pad each PML strip by replicating the first/last interior slice.
    padding_eqs = []
    for dim in function.dimensions:
        lower = SubDimension.left(name='abc_%s_l' % dim.name, parent=dim,
                                 thickness=nbpml)
        padding_eqs.append(
            Eq(function.subs({dim: lower}), function.subs({dim: nbpml})))

        upper = SubDimension.right(name='abc_%s_r' % dim.name, parent=dim,
                                  thickness=nbpml)
        padding_eqs.append(
            Eq(function.subs({dim: upper}),
               function.subs({dim: dim.symbolic_max - nbpml})))

    # TODO: Figure out why yask doesn't like it with dse/dle
    Operator(padding_eqs, name='padfunc', dse='noop', dle='noop')()
class PhysicalDomain(SubDomain):
    """Interior SubDomain excluding the nbpml-wide absorbing strips."""

    name = 'phydomain'

    def __init__(self, nbpml):
        super(PhysicalDomain, self).__init__()
        self.nbpml = nbpml

    def define(self, dimensions):
        # Every dimension keeps its middle part, trimming nbpml points
        # on each side. The spec tuple is immutable, so it can be shared.
        spec = ('middle', self.nbpml, self.nbpml)
        return dict.fromkeys(dimensions, spec)
class GenericModel(object):
    """
    General model class with common properties.

    Builds the computational `Grid` (physical domain padded by ``nbpml``
    absorbing-layer points on every side) and the damping field ``damp``.
    """
    def __init__(self, origin, spacing, shape, space_order, nbpml=20,
                 dtype=np.float32, subdomains=(), damp_mask=True):
        self.shape = shape
        self.nbpml = int(nbpml)
        self.origin = tuple([dtype(o) for o in origin])

        # Origin of the computational domain with PML to inject/interpolate
        # at the correct index
        origin_pml = tuple([dtype(o - s*nbpml) for o, s in zip(origin, spacing)])
        phydomain = PhysicalDomain(self.nbpml)
        subdomains = subdomains + (phydomain, )

        shape_pml = np.array(shape) + 2 * self.nbpml
        # Physical extent is calculated per cell, so shape - 1
        extent = tuple(np.array(spacing) * (shape_pml - 1))
        self.grid = Grid(extent=extent, shape=shape_pml, origin=origin_pml, dtype=dtype,
                         subdomains=subdomains)

        # Create dampening field as symbol `damp`
        self.damp = Function(name="damp", grid=self.grid)
        initialize_damp(self.damp, self.nbpml, self.spacing, mask=damp_mask)
        self._physical_parameters = ['damp']

    def physical_params(self, **kwargs):
        """
        Return all set physical parameters and update to input values if
        provided. ``dm`` is dropped unless ``is_born=True`` is passed.
        """
        is_born = kwargs.pop('is_born', False)
        known = [getattr(self, i) for i in self.physical_parameters]
        # NOTE: renamed from `dict`, which shadowed the builtin.
        params = {i.name: kwargs.get(i.name, i) or i for i in known}
        if not is_born and 'dm' in params.keys():
            params.pop('dm')
        return params

    def _gen_phys_param(self, field, name, space_order, is_param=False,
                        default_value=0, init_empty=False):
        """
        Create a physical-parameter symbol named ``name``: a gridded
        `Function` for array inputs (padded into the PML via
        ``initialize_function``) or a scalar `Constant` otherwise.
        Returns ``default_value`` when ``field`` is None and no empty
        field is requested.
        """
        if field is None and not init_empty:
            return default_value
        if isinstance(field, np.ndarray) or init_empty:
            function = Function(name=name, grid=self.grid, space_order=space_order,
                                parameter=is_param)
            if not init_empty:
                # Fixed: previously `name is 'rho'` compared string identity,
                # which is fragile and a SyntaxWarning in modern Python.
                if name == 'rho':
                    # rho is stored as its reciprocal (buoyancy)
                    initialize_function(function, 1/field, self.nbpml)
                else:
                    initialize_function(function, field, self.nbpml)
        else:
            # Scalar parameter. `Constant` was previously used without
            # being imported from devito, raising NameError here.
            function = Constant(name=name, value=field)
        self._physical_parameters.append(name)
        return function

    @property
    def physical_parameters(self):
        """Names of the physical parameters set on this model."""
        return tuple(self._physical_parameters)

    @property
    def dim(self):
        """
        Spatial dimension of the problem and model domain.
        """
        return self.grid.dim

    @property
    def spacing(self):
        """
        Grid spacing for all fields in the physical model.
        """
        return self.grid.spacing

    @property
    def space_dimensions(self):
        """
        Spatial dimensions of the grid
        """
        return self.grid.dimensions

    @property
    def spacing_map(self):
        """
        Map between spacing symbols and their values for each `SpaceDimension`.
        """
        subs = self.grid.spacing_map
        subs[self.grid.time_dim.spacing] = self.critical_dt
        return subs

    @property
    def dtype(self):
        """
        Data type for all associated data objects.
        """
        return self.grid.dtype

    @property
    def domain_size(self):
        """
        Physical size of the domain as determined by shape and spacing
        """
        return tuple((d-1) * s for d, s in zip(self.shape, self.spacing))
class Model(GenericModel):
    """
    The physical model used in seismic inversion processes.

    Parameters
    ----------
    origin : tuple of floats
        Origin of the model in m as a tuple in (x,y,z) order.
    spacing : tuple of floats
        Grid size in m as a Tuple in (x,y,z) order.
    shape : tuple of int
        Number of grid points size in (x,y,z) order.
    space_order : int
        Order of the spatial stencil discretisation.
    vp : array_like or float
        Velocity in km/s.
    nbpml : int, optional
        The number of PML layers for boundary damping.
    dtype : np.float32 or np.float64
        Defaults to 32.
    epsilon : array_like or float, optional
        Thomsen epsilon parameter (0<epsilon<1).
    delta : array_like or float
        Thomsen delta parameter (0<delta<1), delta<epsilon.
    theta : array_like or float
        Tilt angle in radian.
    phi : array_like or float
        Asymuth angle in radian.

    The `Model` provides two symbolic data objects for the
    creation of seismic wave propagation operators:

    m : array_like or float
        The square slowness of the wave.
    damp : Function
        The damping field for absorbing boundary condition.
    """
    def __init__(self, origin, spacing, shape, space_order, vp=None, nbpml=20,
                 dtype=np.float32, epsilon=None, delta=None, theta=None, phi=None,
                 subdomains=(), dm=None, rho=None, **kwargs):
        super(Model, self).__init__(origin, spacing, shape, space_order, nbpml, dtype,
                                    subdomains)

        # When init_tti is set, parameter fields are allocated empty.
        tti_empty = kwargs.get('init_tti', False)
        self._dt = kwargs.get('dt', None)

        # Create square slowness of the wave as symbol `m`
        self._vp = self._gen_phys_param(vp, 'vp', space_order, init_empty=tti_empty)
        self.rho = self._gen_phys_param(rho, 'rho', space_order, init_empty=tti_empty,
                                        default_value=1.0)

        # Evaluate np.max(vp) lazily: `kwargs.get('max_vp', np.max(vp))`
        # computed the default eagerly, which breaks when vp is None
        # (e.g. init_tti=True with an explicit max_vp supplied).
        if 'max_vp' in kwargs:
            self._max_vp = kwargs['max_vp']
        else:
            self._max_vp = np.max(vp)

        # Additional parameter fields for TTI operators
        self.epsilon = self._gen_phys_param(epsilon, 'epsilon', space_order,
                                            init_empty=tti_empty)
        self.scale = kwargs.get('scale',
                                1 if epsilon is None
                                else np.sqrt(1 + 2 * np.max(epsilon)))
        self.delta = self._gen_phys_param(delta, 'delta', space_order,
                                          init_empty=tti_empty)
        self.theta = self._gen_phys_param(theta, 'theta', space_order,
                                          init_empty=tti_empty)
        self.phi = self._gen_phys_param(phi, 'phi', space_order,
                                        init_empty=tti_empty)
        self.dm = self._gen_phys_param(dm, 'dm', space_order, init_empty=tti_empty)

    @property
    def critical_dt(self):
        """
        Critical computational time step value from the CFL condition.
        """
        # For a fixed time order this number decreases as the space order increases.
        #
        # The CFL condtion is then given by
        # dt <= coeff * h / (max(velocity))
        coeff = 0.38 if len(self.shape) == 3 else 0.42
        dt = self.dtype(coeff * np.min(self.spacing) / (self.scale*self._max_vp))
        # Truncate to three decimals to keep the step reproducible
        return self.dtype("%.3f" % dt)

    @property
    def vp(self):
        """
        `numpy.ndarray` holding the model velocity in km/s.

        Notes
        -----
        Updating the velocity field also updates the square slowness
        ``self.m``. However, only ``self.m`` should be used in seismic
        operators, since it is of type `Function`.
        """
        return self._vp

    @vp.setter
    def vp(self, vp):
        """
        Set a new velocity model and update square slowness.

        Parameters
        ----------
        vp : float or array
            New velocity in km/s.
        """
        # Update the square slowness according to new value
        if isinstance(vp, np.ndarray):
            if vp.shape == self.vp.shape:
                self.vp.data[:] = vp[:]
            else:
                # Shapes differ: re-pad the new field into the PML layers
                initialize_function(self._vp, vp, self.nbpml)
        else:
            self._vp.data = vp
        self._max_vp = np.max(vp)

    @property
    def m(self):
        """Square slowness, derived from the velocity."""
        return 1 / (self.vp * self.vp)

    @memoized_meth
    def subgrid(self, factor=1):
        """
        Return a grid decimated by `factor` along every dimension,
        built on `ConditionalDimension`s of the model grid. Memoized,
        so repeated calls with the same factor share one grid.
        """
        sub_dim = []
        for dim in self.grid.dimensions:
            sub_dim += [ConditionalDimension(dim.name + 'sub', parent=dim, factor=factor)]

        grid = Grid(shape=tuple([i//factor for i in self.shape]),
                    comm=self.grid.distributor.comm,
                    extent=self.grid.extent,
                    dimensions=tuple(sub_dim))
        return grid
| [
"sympy.sin",
"devito.SubDimension.left",
"devito.ConditionalDimension",
"devito.Function",
"devito.Operator",
"numpy.log",
"numpy.max",
"numpy.array",
"devito.Eq",
"numpy.min",
"devito.SubDimension.right",
"devito.Grid"
] | [((955, 1025), 'devito.SubDimension.left', 'SubDimension.left', ([], {'name': "('abc_%s_l' % d.name)", 'parent': 'd', 'thickness': 'nbpml'}), "(name='abc_%s_l' % d.name, parent=d, thickness=nbpml)\n", (972, 1025), False, 'from devito import Grid, Inc, Operator, Function, SubDomain, Eq, SubDimension, ConditionalDimension, switchconfig\n'), ((1321, 1392), 'devito.SubDimension.right', 'SubDimension.right', ([], {'name': "('abc_%s_r' % d.name)", 'parent': 'd', 'thickness': 'nbpml'}), "(name='abc_%s_r' % d.name, parent=d, thickness=nbpml)\n", (1339, 1392), False, 'from devito import Grid, Inc, Operator, Function, SubDomain, Eq, SubDimension, ConditionalDimension, switchconfig\n'), ((1723, 1777), 'devito.Operator', 'Operator', (['eqs'], {'name': '"""initdamp"""', 'dse': '"""noop"""', 'dle': '"""noop"""'}), "(eqs, name='initdamp', dse='noop', dle='noop')\n", (1731, 1777), False, 'from devito import Grid, Inc, Operator, Function, SubDomain, Eq, SubDimension, ConditionalDimension, switchconfig\n'), ((2430, 2500), 'devito.SubDimension.left', 'SubDimension.left', ([], {'name': "('abc_%s_l' % d.name)", 'parent': 'd', 'thickness': 'nbpml'}), "(name='abc_%s_l' % d.name, parent=d, thickness=nbpml)\n", (2447, 2500), False, 'from devito import Grid, Inc, Operator, Function, SubDomain, Eq, SubDimension, ConditionalDimension, switchconfig\n'), ((2651, 2722), 'devito.SubDimension.right', 'SubDimension.right', ([], {'name': "('abc_%s_r' % d.name)", 'parent': 'd', 'thickness': 'nbpml'}), "(name='abc_%s_r' % d.name, parent=d, thickness=nbpml)\n", (2669, 2722), False, 'from devito import Grid, Inc, Operator, Function, SubDomain, Eq, SubDimension, ConditionalDimension, switchconfig\n'), ((2941, 2994), 'devito.Operator', 'Operator', (['eqs'], {'name': '"""padfunc"""', 'dse': '"""noop"""', 'dle': '"""noop"""'}), "(eqs, name='padfunc', dse='noop', dle='noop')\n", (2949, 2994), False, 'from devito import Grid, Inc, Operator, Function, SubDomain, Eq, SubDimension, ConditionalDimension, 
switchconfig\n'), ((4098, 4193), 'devito.Grid', 'Grid', ([], {'extent': 'extent', 'shape': 'shape_pml', 'origin': 'origin_pml', 'dtype': 'dtype', 'subdomains': 'subdomains'}), '(extent=extent, shape=shape_pml, origin=origin_pml, dtype=dtype,\n subdomains=subdomains)\n', (4102, 4193), False, 'from devito import Grid, Inc, Operator, Function, SubDomain, Eq, SubDimension, ConditionalDimension, switchconfig\n'), ((4286, 4323), 'devito.Function', 'Function', ([], {'name': '"""damp"""', 'grid': 'self.grid'}), "(name='damp', grid=self.grid)\n", (4294, 4323), False, 'from devito import Grid, Inc, Operator, Function, SubDomain, Eq, SubDimension, ConditionalDimension, switchconfig\n'), ((10830, 10840), 'numpy.max', 'np.max', (['vp'], {}), '(vp)\n', (10836, 10840), True, 'import numpy as np\n'), ((824, 843), 'numpy.log', 'np.log', (['(1.0 / 0.001)'], {}), '(1.0 / 0.001)\n', (830, 843), True, 'import numpy as np\n'), ((863, 876), 'devito.Eq', 'Eq', (['damp', '(1.0)'], {}), '(damp, 1.0)\n', (865, 876), False, 'from devito import Grid, Inc, Operator, Function, SubDomain, Eq, SubDimension, ConditionalDimension, switchconfig\n'), ((3922, 3937), 'numpy.array', 'np.array', (['shape'], {}), '(shape)\n', (3930, 3937), True, 'import numpy as np\n'), ((5162, 5247), 'devito.Function', 'Function', ([], {'name': 'name', 'grid': 'self.grid', 'space_order': 'space_order', 'parameter': 'is_param'}), '(name=name, grid=self.grid, space_order=space_order, parameter=is_param\n )\n', (5170, 5247), False, 'from devito import Grid, Inc, Operator, Function, SubDomain, Eq, SubDimension, ConditionalDimension, switchconfig\n'), ((8791, 8801), 'numpy.max', 'np.max', (['vp'], {}), '(vp)\n', (8797, 8801), True, 'import numpy as np\n'), ((4041, 4058), 'numpy.array', 'np.array', (['spacing'], {}), '(spacing)\n', (4049, 4058), True, 'import numpy as np\n'), ((11051, 11116), 'devito.ConditionalDimension', 'ConditionalDimension', (["(dim.name + 'sub')"], {'parent': 'dim', 'factor': 'factor'}), "(dim.name + 
'sub', parent=dim, factor=factor)\n", (11071, 11116), False, 'from devito import Grid, Inc, Operator, Function, SubDomain, Eq, SubDimension, ConditionalDimension, switchconfig\n'), ((1166, 1186), 'sympy.sin', 'sin', (['(2 * np.pi * pos)'], {}), '(2 * np.pi * pos)\n', (1169, 1186), False, 'from sympy import sin, Abs\n'), ((1534, 1554), 'sympy.sin', 'sin', (['(2 * np.pi * pos)'], {}), '(2 * np.pi * pos)\n', (1537, 1554), False, 'from sympy import sin, Abs\n'), ((9826, 9846), 'numpy.min', 'np.min', (['self.spacing'], {}), '(self.spacing)\n', (9832, 9846), True, 'import numpy as np\n'), ((9042, 9057), 'numpy.max', 'np.max', (['epsilon'], {}), '(epsilon)\n', (9048, 9057), True, 'import numpy as np\n')] |
#!/people/chen423/sw/anaconda3/bin/python
# Compute AR (atmospheric-river) moisture-source diagnostics for one
# year/month of the HIST scenario and store the results as a .mat file.
import numpy as np
import xarray as xr
import scipy.io as sio
import sys
scenario = 'HIST'  # scenario tag used in all input/output file names
year = int(sys.argv[1])   # year to process (first CLI argument)
month = int(sys.argv[2])  # month to process (second CLI argument)
para_b = int(10)  # border width (grid cells) trimmed from each edge of the 450x450 domain
def compute_moisture_intensity(in_ARtag, in_uIVT, in_ET, ref_mask):
    """Accumulate moisture fluxes inside an AR (atmospheric river) footprint.

    Parameters
    ----------
    in_ARtag : 2D array
        Binary AR mask (1 inside the AR footprint).
    in_uIVT : 2D array
        Zonal IVT field; only the first column (domain boundary) is used.
    in_ET : 2D array
        Evapotranspiration field.
    ref_mask : 2D array
        Land mask; cells with ``ref_mask == 0`` are treated as ocean.

    Returns
    -------
    tuple
        ``(sub_ocean_grids, ET_total, uIVT_total)`` -- the number of
        AR-tagged ocean grid cells, the area-integrated ET over those
        cells, and the boundary-integrated uIVT.

    Notes
    -----
    The original implementation also computed an ``out_ratio``
    (ET_total / (ET_total + uIVT_total)) that was never returned; that
    dead code has been removed without changing the return value.
    """
    # uIVT through the first-column boundary of AR-tagged cells, scaled by
    # the 6000 m grid spacing and 86400 s/day (flux -> daily total).
    uIVT_total = in_uIVT[:, 0][in_ARtag[:, 0] == 1].sum() * 6000 * 86400
    # ET over AR-tagged ocean cells, scaled by the 6 km x 6 km cell area.
    ET_total = in_ET[(in_ARtag == 1) & (ref_mask == 0)].sum() * 6000 * 6000
    sub_ocean_grids = ((in_ARtag == 1) & (ref_mask == 0)).sum()
    return sub_ocean_grids, ET_total, uIVT_total
# Static reference grid with the WRF land mask.
# NOTE(review): mask semantics inferred from the (ref_mask==0) ocean test in
# compute_moisture_intensity -- confirm against the NetCDF metadata.
reffile = '/pic/projects/hyperion/chen423/data/papers/AR-SST/data/ref/WRF_latlon.nc'
landmask = xr.open_dataset(reffile).LANDMASK.values[para_b:(450-para_b),para_b:(450-para_b)]
# Input directories: evapotranspiration, integrated vapor transport, AR tags.
ETdir = '/pic/projects/next_gen_idf/chen423/data/WRF_hist/raw/ET_by_year/'
uIVTdir = '/pic/projects/next_gen_idf/chen423/data/WRF_hist/diagnosic_vars.correct_SST/organized/monthly/'
ARdir = '/pic/projects/hyperion/chen423/data/papers/AR-SST/data/%s/AR_tagged/Gershunov/SERDP6km_adj/' % (scenario)
ETfile = ETdir + 'WRF_NARR.%s.SFCEVP.%d.%d.nc' % (scenario, year, month)
uIVTfile = uIVTdir + 'WRF_NARR.%s.uIVT.%d.%d.nc' % (scenario, year, month)
ARfile = ARdir + 'WRF_ARtag_adj.%s.Gershunov.%d.%d.ARp85.nc' % (scenario, year, month)
# Load one month of each field, trimming para_b cells from every edge.
ETdata = xr.open_dataset(ETfile).SFCEVP.values[:,para_b:(450-para_b),para_b:(450-para_b)]
uIVTdata = xr.open_dataset(uIVTfile).uIVT.values[:,para_b:(450-para_b),para_b:(450-para_b)]
ARtag = xr.open_dataset(ARfile).AR_tag.values[:,para_b:(450-para_b),para_b:(450-para_b)]
nt = ARtag.shape[0]  # number of AR-tag time steps in this month
# Per-time-step outputs: AR ocean cell count, total ET, total boundary uIVT.
array_grids = np.zeros(nt)
array_ET = np.zeros(nt)
array_uIVT = np.zeros(nt)
for t in np.arange(nt):
    # ET is indexed with floor(t/4): presumably daily ET against 6-hourly
    # AR/uIVT fields -- TODO confirm the temporal resolutions.
    array_grids[t], array_ET[t], array_uIVT[t] = compute_moisture_intensity(ARtag[t], uIVTdata[t], ETdata[int(np.floor(t/4))], landmask)
outfile = '/pic/projects/hyperion/chen423/data/papers/AR-SST/data/HIST/moisture/ETratio.%s.ARp85.%d.%d.mat' % (scenario, year, month)
sio.savemat(outfile, {'array_grids':array_grids, 'array_ET':array_ET, 'array_uIVT':array_uIVT})
| [
"scipy.io.savemat",
"numpy.floor",
"numpy.zeros",
"xarray.open_dataset",
"numpy.arange"
] | [((1654, 1666), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (1662, 1666), True, 'import numpy as np\n'), ((1678, 1690), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (1686, 1690), True, 'import numpy as np\n'), ((1704, 1716), 'numpy.zeros', 'np.zeros', (['nt'], {}), '(nt)\n', (1712, 1716), True, 'import numpy as np\n'), ((1726, 1739), 'numpy.arange', 'np.arange', (['nt'], {}), '(nt)\n', (1735, 1739), True, 'import numpy as np\n'), ((2017, 2119), 'scipy.io.savemat', 'sio.savemat', (['outfile', "{'array_grids': array_grids, 'array_ET': array_ET, 'array_uIVT': array_uIVT}"], {}), "(outfile, {'array_grids': array_grids, 'array_ET': array_ET,\n 'array_uIVT': array_uIVT})\n", (2028, 2119), True, 'import scipy.io as sio\n'), ((730, 754), 'xarray.open_dataset', 'xr.open_dataset', (['reffile'], {}), '(reffile)\n', (745, 754), True, 'import xarray as xr\n'), ((1357, 1380), 'xarray.open_dataset', 'xr.open_dataset', (['ETfile'], {}), '(ETfile)\n', (1372, 1380), True, 'import xarray as xr\n'), ((1449, 1474), 'xarray.open_dataset', 'xr.open_dataset', (['uIVTfile'], {}), '(uIVTfile)\n', (1464, 1474), True, 'import xarray as xr\n'), ((1538, 1561), 'xarray.open_dataset', 'xr.open_dataset', (['ARfile'], {}), '(ARfile)\n', (1553, 1561), True, 'import xarray as xr\n'), ((1851, 1866), 'numpy.floor', 'np.floor', (['(t / 4)'], {}), '(t / 4)\n', (1859, 1866), True, 'import numpy as np\n')] |
import random
import math
import csv
import pickle
import os
import numpy as np
from .helpers.transforms import is_valid_vertical_offset, deg2rad
# Directory of this module; the test-case files live in a subfolder next to it.
controller_dir = os.path.dirname(os.path.realpath(__file__))
TEST_CASES_PKL = os.path.join(controller_dir, "test_cases/test_cases.pkl")  # pickled TestCases object
TEST_CASES_CSV = os.path.join(controller_dir, "test_cases/test_cases.csv")  # human-readable mirror
def sample_from_range(range):
    """Draw one uniform sample between the smallest and largest value of `range`.

    NOTE(review): the parameter shadows the builtin ``range``; kept as-is so
    keyword callers are not broken.
    """
    low, high = min(range), max(range)
    return np.random.uniform(low, high)
def sample_sign():
    """Return -1 or +1 with equal probability."""
    return random.choice((1, -1))
def complete_vec_to_length(var1, var2, length):
    """Return the third component so that (var1, var2, result) has norm `length`.

    Raises ValueError (from math.sqrt) when length**2 < var1**2 + var2**2.
    """
    remainder = length ** 2 - var1 ** 2 - var2 ** 2
    return math.sqrt(remainder)
def get_vector_with_length(length, x_error_min, x_error_max, y_error_min, y_error_max, z_error_min, z_error_max):
    """Rejection-sample a 3-vector [x, y, z] with Euclidean norm `length`.

    x and z are drawn with random sign and clipped to their bounds; y is
    solved from Pythagoras and sign-flipped at random.  Draws whose square
    root is undefined or whose y violates its bounds are rejected.
    NOTE(review): if the bounds are inconsistent with `length`, this loop
    never terminates.
    """
    while True:
        # sample two values from range and get other one from pythagoras
        x = np.clip(sample_from_range([0, sample_sign() * length]), x_error_min, x_error_max)
        z = np.clip(sample_from_range([0, sample_sign() * length]), z_error_min, z_error_max)
        try:
            y = sample_sign() * complete_vec_to_length(x, z, length)
            if y < y_error_min or y > y_error_max:
                continue
            return [x, y, z]
        except ValueError:  # this combination doesn't work
            continue
def gen_object(object_type):
    """Create a random object of the requested type.

    Raises ValueError for any type other than "sphere", "cylinder" or "box".
    """
    if object_type == "sphere":
        return RandomSphere()
    if object_type == "cylinder":
        return RandomCylinder()
    if object_type == "box":
        return RandomBox()
    raise ValueError("Unsupported object name.")
def gen_valid_wrist_error_from_l2(object, trans_l2_error, rot_l2_error, hparams):
    """Sample a wrist error with the given l2 norms that keeps the hand above ground.

    Keeps drawing until the vertical offset is compatible with the object height.
    """
    while True:
        candidate = RandomWristErrorFromL2(trans_l2_error, rot_l2_error, hparams)
        # Reject draws that would crash the hand into the ground.
        if is_valid_vertical_offset(candidate.y, object.get_height()):
            return candidate
def gen_valid_wrist_error_obj_combination_from_ranges(object_type, hparams):
    """Sample an (object, wrist_error) pair whose vertical offset is valid.

    Re-draws both the wrist error and the object until the combination does
    not crash the hand into the ground.
    """
    def _axis_range(axis):
        # Pull [min, max] for one error axis out of the flat hparams dict.
        return [hparams[axis + "_error_min"], hparams[axis + "_error_max"]]

    def _draw_error():
        return RandomWristErrorFromRanges(
            _axis_range("x"), _axis_range("y"), _axis_range("z"),
            _axis_range("roll"), _axis_range("pitch"), _axis_range("yaw"),
        )

    object = gen_object(object_type)
    wrist_error = _draw_error()
    while not is_valid_vertical_offset(wrist_error.y, object.get_height()):
        wrist_error = _draw_error()
        object = gen_object(object_type)
    return object, wrist_error
class Sphere:
    """Graspable sphere primitive defined by radius, mass and an inertia scale."""

    def __init__(self, radius, mass, inertia_scaling_factor):
        self.radius = radius
        self.mass = mass
        self.inertia_scaling_factor = inertia_scaling_factor
        self.name = f"sphere_{radius}"
        self.type = "sphere"

    def get_csv_data(self):
        """Return this sphere's geometry/physics fields for one CSV row."""
        return {
            "dimension_1": self.radius,
            "dimension_2": 0,
            "dimension_3": 0,
            "mass": self.mass,
            "inertia_scaling_factor": self.inertia_scaling_factor,
        }

    def get_height(self):
        """Height of the object when resting on the ground (the diameter)."""
        return 2 * self.radius
class Cylinder:
    """Graspable cylinder primitive defined by radius, length, mass and an inertia scale."""

    def __init__(self, radius, length, mass, inertia_scaling_factor):
        self.radius = radius
        self.length = length
        self.mass = mass
        self.inertia_scaling_factor = inertia_scaling_factor
        self.name = f"cylinder_{radius}_{length}"
        self.type = "cylinder"

    def get_csv_data(self):
        """Return this cylinder's geometry/physics fields for one CSV row."""
        return {
            "dimension_1": self.radius,
            "dimension_2": self.length,
            "dimension_3": 0,
            "mass": self.mass,
            "inertia_scaling_factor": self.inertia_scaling_factor,
        }

    def get_height(self):
        """Height of the object when standing upright (its length)."""
        return self.length
class Box:
    """Graspable box primitive defined by its x/y/z extents, mass and an inertia scale."""

    def __init__(self, x, y, z, mass, inertia_scaling_factor):
        self.x = x
        self.y = y
        self.z = z
        self.mass = mass
        self.inertia_scaling_factor = inertia_scaling_factor
        self.name = f"box_{x}_{y}_{z}"
        self.type = "box"

    def get_csv_data(self):
        """Return this box's geometry/physics fields for one CSV row."""
        return {
            "dimension_1": self.x,
            "dimension_2": self.y,
            "dimension_3": self.z,
            "mass": self.mass,
            "inertia_scaling_factor": self.inertia_scaling_factor,
        }

    def get_height(self):
        """Height of the object when resting on the ground (its z extent)."""
        return self.z
class RandomSphere(Sphere):
    """Sphere whose radius and mass are drawn uniformly from the given ranges."""

    def __init__(self, radius_range=[0.065, 0.08], mass_range=[0.1, 0.4], inertia_scaling_factor=10):
        radius = sample_from_range(radius_range)
        mass = sample_from_range(mass_range)
        super().__init__(radius, mass, inertia_scaling_factor)
class RandomCylinder(Cylinder):
    """Cylinder whose radius, length and mass are drawn uniformly from the given ranges."""

    def __init__(self, radius_range=[0.03, 0.05], length_range=[0.13, 0.23], mass_range=[0.1, 0.4], inertia_scaling_factor=10):
        radius = sample_from_range(radius_range)
        length = sample_from_range(length_range)
        mass = sample_from_range(mass_range)
        super().__init__(radius, length, mass, inertia_scaling_factor)
class RandomBox(Box):
    """Box whose extents and mass are drawn uniformly from the given ranges."""

    def __init__(self, x_range=[0.04, 0.10], y_range=[0.04, 0.10], z_range=[0.13, 0.23], mass_range=[0.1, 0.4], inertia_scaling_factor=10):
        x = sample_from_range(x_range)
        y = sample_from_range(y_range)
        z = sample_from_range(z_range)
        mass = sample_from_range(mass_range)
        super().__init__(x, y, z, mass, inertia_scaling_factor)
class RandomWristError:
    """Container for a 6-DOF wrist pose error, initialized to zero.

    Subclasses fill in the translational (x, y, z) and rotational
    (roll, pitch, yaw) components.
    """

    def __init__(self):
        self.x = self.y = self.z = 0
        self.roll = self.pitch = self.yaw = 0
class RandomWristErrorFromL2(RandomWristError):
    """Wrist error whose translational and rotational parts have prescribed l2 norms."""

    def __init__(self, trans_l2_error, rot_l2_error, hparams):
        super().__init__()
        # Translational part: a 3-vector of norm trans_l2_error within the per-axis bounds.
        self.x, self.y, self.z = get_vector_with_length(
            trans_l2_error,
            hparams["x_error_min"],
            hparams["x_error_max"],
            hparams["y_error_min"],
            hparams["y_error_max"],
            hparams["z_error_min"],
            hparams["z_error_max"],
        )
        # Rotational part: a 3-vector of norm rot_l2_error within the per-axis bounds.
        self.roll, self.pitch, self.yaw = get_vector_with_length(
            rot_l2_error,
            hparams["roll_error_min"],
            hparams["roll_error_max"],
            hparams["pitch_error_min"],
            hparams["pitch_error_max"],
            hparams["yaw_error_min"],
            hparams["yaw_error_max"],
        )
class RandomWristErrorFromRanges(RandomWristError):
    """Wrist error with every component drawn independently from its own range."""

    def __init__(self, x_range, y_range, z_range, roll_range, pitch_range, yaw_range):
        super().__init__()
        # Draw order (x, y, z, roll, pitch, yaw) preserved for RNG reproducibility.
        components = [
            sample_from_range(r)
            for r in (x_range, y_range, z_range, roll_range, pitch_range, yaw_range)
        ]
        self.x, self.y, self.z, self.roll, self.pitch, self.yaw = components
class TestCase:
    """Base class for one grasping experiment configuration.

    Holds the object to grasp, the l2 norms of the wrist pose error, and
    the concrete per-axis wrist error.  Subclasses override the defaults.
    """

    def __init__(self, hparams):
        # Default parameters (overridden by child classes).
        self.object = RandomSphere()
        self.trans_l2_error = 0
        self.rot_l2_error = 0
        self.wrist_error = RandomWristErrorFromL2(0, 0, hparams)

    def get_csv_data(self):
        """Flatten this test case into one CSV row (column name -> value)."""
        err = self.wrist_error
        row = {
            "object_type": self.object.type,
            "trans_l2_error": self.trans_l2_error,
            "rot_l2_error": self.rot_l2_error,
            "wrist_x": err.x,
            "wrist_y": err.y,
            "wrist_z": err.z,
            "wrist_roll": err.roll,
            "wrist_pitch": err.pitch,
            "wrist_yaw": err.yaw,
        }
        row.update(self.object.get_csv_data())
        return row

    def get_csv_header(self):
        """Column names for the CSV file, in row order."""
        return list(self.get_csv_data())
class TestCaseFromObjectAndL2Errors(TestCase):
    """Test case for a known object and prescribed l2 wrist-error norms."""

    def __init__(self, object, trans_l2_error, rot_l2_error, hparams):
        super().__init__(hparams)
        self.object = object
        self.trans_l2_error = trans_l2_error
        self.rot_l2_error = rot_l2_error
        # Draw a concrete per-axis wrist error consistent with the requested norms.
        self.wrist_error = gen_valid_wrist_error_from_l2(object, trans_l2_error, rot_l2_error, hparams)
class TestCaseFromRanges(TestCase):
    """Test case where both the object and the wrist error are drawn from ranges."""

    def __init__(self, hparams):
        super().__init__(hparams)
        shape = random.choice(["sphere", "cylinder", "box"])
        self.object, self.wrist_error = gen_valid_wrist_error_obj_combination_from_ranges(shape, hparams)
        # Record the resulting l2 norms for bookkeeping/analysis.
        err = self.wrist_error
        self.trans_l2_error = np.linalg.norm([err.x, err.y, err.z])
        self.rot_l2_error = np.linalg.norm([err.roll, err.pitch, err.yaw])
class TestCases:
    """Full benchmark: every object type x repetitions x paired l2 error levels."""

    def __init__(self, hparams, num_exp_per_obj=10):
        self.test_cases = []
        self.trans_l2_errors = np.arange(0, 8) / 100  # 0 to 7 cm
        self.rot_l2_errors = np.arange(0, 8) * 2  # 0, 2, ... , 14 deg
        for object_type in ["sphere", "cylinder", "box"]:
            for _ in range(num_exp_per_obj):
                object = gen_object(object_type)
                # Translational and rotational error levels are paired index-wise.
                for trans_err, rot_err in zip(self.trans_l2_errors, self.rot_l2_errors):
                    self.test_cases.append(
                        TestCaseFromObjectAndL2Errors(object, trans_err, deg2rad(rot_err), hparams)
                    )
def generate_test_cases(hparams):
    """Generate the benchmark test cases and persist them as pickle and CSV."""
    cases = TestCases(hparams)
    # Pickle is the canonical on-disk representation used by test().
    with open(TEST_CASES_PKL, "wb") as pkl_file:
        pickle.dump(cases, pkl_file)
    # The CSV is a human-readable mirror of the same data.
    with open(TEST_CASES_CSV, "w") as csv_file:
        writer = csv.DictWriter(csv_file, fieldnames=cases.test_cases[0].get_csv_header())
        writer.writeheader()
        writer.writerows(case.get_csv_data() for case in cases.test_cases)
def test(model, env, log_path, log_name, deterministic=False, all_test_cases=True, trans_l2_errors=[0.07]):
    """Run the stored test cases through `model` in `env` and log the outcomes.

    One CSV row per test case (its configuration plus the lifting/holding
    success metrics) is written to ``<log_path>/<log_name>.csv``.
    NOTE(review): ``trans_l2_errors=[0.07]`` is a mutable default argument;
    it is never mutated here, but a tuple would be safer.
    """
    # load test cases from disk
    with open(TEST_CASES_PKL, "rb") as file:
        t = pickle.load(file)
    metrics = ["sustained_lifting", "sustained_holding"]
    # create output csv file
    path = os.path.join(log_path, log_name + ".csv")
    with open(path, "w") as file:
        fieldnames = t.test_cases[0].get_csv_header()
        for metric in metrics:
            fieldnames.append(metric)
        writer = csv.DictWriter(file, fieldnames=fieldnames)
        writer.writeheader()
        # running on subset of test cases: select only the interesting test cases with large wrist errors
        if not all_test_cases:
            t.test_cases = [x for x in t.test_cases if x.trans_l2_error in trans_l2_errors]
            # NOTE(review): hard-coded indices assume a specific size/ordering of
            # the filtered list -- confirm they still select the intended cases.
            t.test_cases = [t.test_cases[4], t.test_cases[10], t.test_cases[21]]
        for test_case in t.test_cases:
            obs = env.reset(test_case)
            while True:
                action, state = model.predict(obs, deterministic=deterministic)
                obs, reward, done, info = env.step(action)
                if done:
                    # save experiment outcome
                    data = test_case.get_csv_data()
                    outcome = {metric: info[metric] for metric in metrics}
                    data.update(outcome)
                    writer.writerows([data])
                    file.flush()
                    break
| [
"csv.DictWriter",
"random.choice",
"pickle.dump",
"os.path.join",
"pickle.load",
"os.path.realpath",
"numpy.linalg.norm",
"numpy.arange"
] | [((227, 284), 'os.path.join', 'os.path.join', (['controller_dir', '"""test_cases/test_cases.pkl"""'], {}), "(controller_dir, 'test_cases/test_cases.pkl')\n", (239, 284), False, 'import os\n'), ((302, 359), 'os.path.join', 'os.path.join', (['controller_dir', '"""test_cases/test_cases.csv"""'], {}), "(controller_dir, 'test_cases/test_cases.csv')\n", (314, 359), False, 'import os\n'), ((182, 208), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (198, 208), False, 'import os\n'), ((477, 499), 'random.choice', 'random.choice', (['[-1, 1]'], {}), '([-1, 1])\n', (490, 499), False, 'import random\n'), ((10811, 10852), 'os.path.join', 'os.path.join', (['log_path', "(log_name + '.csv')"], {}), "(log_path, log_name + '.csv')\n", (10823, 10852), False, 'import os\n'), ((9033, 9077), 'random.choice', 'random.choice', (["['sphere', 'cylinder', 'box']"], {}), "(['sphere', 'cylinder', 'box'])\n", (9046, 9077), False, 'import random\n'), ((9220, 9296), 'numpy.linalg.norm', 'np.linalg.norm', (['[self.wrist_error.x, self.wrist_error.y, self.wrist_error.z]'], {}), '([self.wrist_error.x, self.wrist_error.y, self.wrist_error.z])\n', (9234, 9296), True, 'import numpy as np\n'), ((9325, 9415), 'numpy.linalg.norm', 'np.linalg.norm', (['[self.wrist_error.roll, self.wrist_error.pitch, self.wrist_error.yaw]'], {}), '([self.wrist_error.roll, self.wrist_error.pitch, self.\n wrist_error.yaw])\n', (9339, 9415), True, 'import numpy as np\n'), ((10203, 10223), 'pickle.dump', 'pickle.dump', (['t', 'file'], {}), '(t, file)\n', (10214, 10223), False, 'import pickle\n'), ((10694, 10711), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (10705, 10711), False, 'import pickle\n'), ((11027, 11070), 'csv.DictWriter', 'csv.DictWriter', (['file'], {'fieldnames': 'fieldnames'}), '(file, fieldnames=fieldnames)\n', (11041, 11070), False, 'import csv\n'), ((9596, 9611), 'numpy.arange', 'np.arange', (['(0)', '(8)'], {}), '(0, 8)\n', (9605, 9611), True, 'import numpy as 
np\n'), ((9660, 9675), 'numpy.arange', 'np.arange', (['(0)', '(8)'], {}), '(0, 8)\n', (9669, 9675), True, 'import numpy as np\n')] |
"""
=======================================================
Reconstruction with Constrained Spherical Deconvolution
=======================================================
This example shows how to use Constrained Spherical Deconvolution (CSD)
introduced by Tournier et al. [Tournier2007]_.
This method is mainly useful with datasets with gradient directions acquired on
a spherical grid.
The basic idea with this method is that if we could estimate the response function of a
single fiber then we could deconvolve the measured signal and obtain the underlying
fiber distribution.
Lets first load the data. We will use a dataset with 10 b0s and 150 non-b0s with b-value 2000.
"""
import numpy as np
from dipy.data import fetch_stanford_hardi, read_stanford_hardi
fetch_stanford_hardi()
img, gtab = read_stanford_hardi()
"""
You can verify the b-values of the datasets by looking at the attribute `gtab.bvals`.
In CSD there is an important pre-processing step: the estimation of the fiber response function. In order to
do this we look for voxel with very anisotropic configurations. For example here we use an ROI (20x20x20) at the center
of the volume and store the signal values for the voxels with FA values higher than 0.7. Of course, if we haven't
precalculated FA we need to fit a Tensor model to the datasets. Which is what we do here.
"""
from dipy.reconst.dti import TensorModel
data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)
affine = img.get_affine()
zooms = img.get_header().get_zooms()[:3]
mask = data[..., 0] > 50
tenmodel = TensorModel(gtab)
ci, cj, ck = np.array(data.shape[:3]) / 2
w = 10
roi = data[ci - w: ci + w,
cj - w: cj + w,
ck - w: ck + w]
tenfit = tenmodel.fit(roi)
from dipy.reconst.dti import fractional_anisotropy
FA = fractional_anisotropy(tenfit.evals)
FA[np.isnan(FA)] = 0
indices = np.where(FA > 0.7)
lambdas = tenfit.evals[indices][:, :2]
"""
Using `gtab.b0s_mask()` we can find all the S0 volumes (which correspond to b-values equal 0) in the dataset.
"""
S0s = roi[indices][:, np.nonzero(gtab.b0s_mask)[0]]
"""
The response function in this example consists of a prolate tensor created
by averaging the highest and second highest eigenvalues. We also include the
average S0s.
"""
S0 = np.mean(S0s)
l01 = np.mean(lambdas, axis=0)
evals = np.array([l01[0], l01[1], l01[1]])
response = (evals, S0)
"""
Now we are ready to import the CSD model and fit the datasets.
"""
from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
csd_model = ConstrainedSphericalDeconvModel(gtab, response)
"""
For illustration purposes we will fit only a slice of the datasets.
"""
data_small = data[20:50, 55:85, 38:39]
csd_fit = csd_model.fit(data_small)
"""
Show the CSD-based ODFs also known as FODFs (fiber ODFs).
"""
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
csd_odf = csd_fit.odf(sphere)
from dipy.viz import fvtk
r = fvtk.ren()
"""
Here we visualize only a 30x30 region.
"""
fodf_spheres = fvtk.sphere_funcs(csd_odf, sphere, scale=1.3, norm=False)
fvtk.add(r, fodf_spheres)
print('Saving illustration as csd_odfs.png')
fvtk.record(r, n_frames=1, out_path='csd_odfs.png', size=(600, 600))
"""
.. figure:: csd_odfs.png
:align: center
**CSD ODFs**.
.. [Tournier2007] <NAME>, <NAME> and <NAME>, "Robust determination of the fibre orientation distribution in diffusion MRI: Non-negativity constrained super-resolved spherical deconvolution", Neuroimage, vol. 35, no. 4, pp. 1459-1472, 2007.
.. include:: ../links_names.inc
"""
| [
"numpy.mean",
"dipy.data.fetch_stanford_hardi",
"dipy.reconst.dti.fractional_anisotropy",
"dipy.reconst.dti.TensorModel",
"dipy.viz.fvtk.sphere_funcs",
"numpy.where",
"dipy.data.get_sphere",
"numpy.array",
"dipy.viz.fvtk.add",
"numpy.isnan",
"numpy.nonzero",
"dipy.reconst.csdeconv.ConstrainedS... | [((770, 792), 'dipy.data.fetch_stanford_hardi', 'fetch_stanford_hardi', ([], {}), '()\n', (790, 792), False, 'from dipy.data import fetch_stanford_hardi, read_stanford_hardi\n'), ((805, 826), 'dipy.data.read_stanford_hardi', 'read_stanford_hardi', ([], {}), '()\n', (824, 826), False, 'from dipy.data import fetch_stanford_hardi, read_stanford_hardi\n'), ((1577, 1594), 'dipy.reconst.dti.TensorModel', 'TensorModel', (['gtab'], {}), '(gtab)\n', (1588, 1594), False, 'from dipy.reconst.dti import TensorModel\n'), ((1814, 1849), 'dipy.reconst.dti.fractional_anisotropy', 'fractional_anisotropy', (['tenfit.evals'], {}), '(tenfit.evals)\n', (1835, 1849), False, 'from dipy.reconst.dti import fractional_anisotropy\n'), ((1882, 1900), 'numpy.where', 'np.where', (['(FA > 0.7)'], {}), '(FA > 0.7)\n', (1890, 1900), True, 'import numpy as np\n'), ((2295, 2307), 'numpy.mean', 'np.mean', (['S0s'], {}), '(S0s)\n', (2302, 2307), True, 'import numpy as np\n'), ((2315, 2339), 'numpy.mean', 'np.mean', (['lambdas'], {'axis': '(0)'}), '(lambdas, axis=0)\n', (2322, 2339), True, 'import numpy as np\n'), ((2349, 2383), 'numpy.array', 'np.array', (['[l01[0], l01[1], l01[1]]'], {}), '([l01[0], l01[1], l01[1]])\n', (2357, 2383), True, 'import numpy as np\n'), ((2560, 2607), 'dipy.reconst.csdeconv.ConstrainedSphericalDeconvModel', 'ConstrainedSphericalDeconvModel', (['gtab', 'response'], {}), '(gtab, response)\n', (2591, 2607), False, 'from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel\n'), ((2873, 2899), 'dipy.data.get_sphere', 'get_sphere', (['"""symmetric724"""'], {}), "('symmetric724')\n", (2883, 2899), False, 'from dipy.data import get_sphere\n'), ((2963, 2973), 'dipy.viz.fvtk.ren', 'fvtk.ren', ([], {}), '()\n', (2971, 2973), False, 'from dipy.viz import fvtk\n'), ((3038, 3095), 'dipy.viz.fvtk.sphere_funcs', 'fvtk.sphere_funcs', (['csd_odf', 'sphere'], {'scale': '(1.3)', 'norm': '(False)'}), '(csd_odf, sphere, scale=1.3, norm=False)\n', 
(3055, 3095), False, 'from dipy.viz import fvtk\n'), ((3097, 3122), 'dipy.viz.fvtk.add', 'fvtk.add', (['r', 'fodf_spheres'], {}), '(r, fodf_spheres)\n', (3105, 3122), False, 'from dipy.viz import fvtk\n'), ((3169, 3237), 'dipy.viz.fvtk.record', 'fvtk.record', (['r'], {'n_frames': '(1)', 'out_path': '"""csd_odfs.png"""', 'size': '(600, 600)'}), "(r, n_frames=1, out_path='csd_odfs.png', size=(600, 600))\n", (3180, 3237), False, 'from dipy.viz import fvtk\n'), ((1609, 1633), 'numpy.array', 'np.array', (['data.shape[:3]'], {}), '(data.shape[:3])\n', (1617, 1633), True, 'import numpy as np\n'), ((1853, 1865), 'numpy.isnan', 'np.isnan', (['FA'], {}), '(FA)\n', (1861, 1865), True, 'import numpy as np\n'), ((2083, 2108), 'numpy.nonzero', 'np.nonzero', (['gtab.b0s_mask'], {}), '(gtab.b0s_mask)\n', (2093, 2108), True, 'import numpy as np\n')] |
######################################################################
# #
# Copyright 2009-2019 <NAME>. #
# This file is part of gdspy, distributed under the terms of the #
# Boost Software License - Version 1.0. See the accompanying #
# LICENSE file or <http://www.boost.org/LICENSE_1_0.txt> #
# #
######################################################################
"""
Curve class.
"""
import numpy
from gdspy import _func_bezier, _hobby, _zero
class Curve(object):
    """
    Generation of curves loosely based on SVG paths.
    Short summary of available methods:
    ====== =============================
    Method Primitive
    ====== =============================
    L/l Line segments
    H/h Horizontal line segments
    V/v Vertical line segments
    C/c Cubic Bezier curve
    S/s Smooth cubic Bezier curve
    Q/q Quadratic Bezier curve
    T/t Smooth quadratic Bezier curve
    B/b General degree Bezier curve
    I/i Smooth interpolating curve
    arc Elliptical arc
    ====== =============================
    The uppercase version of the methods considers that all coordinates
    are absolute, whereas the lowercase considers that they are relative
    to the current end point of the curve.
    Parameters
    ----------
    x : number
        X-coordinate of the starting point of the curve. If this is a
        complex number, the value of `y` is ignored and the starting
        point becomes ``(x.real, x.imag)``.
    y : number
        Y-coordinate of the starting point of the curve.
    tolerance : number
        Tolerance used to calculate a polygonal approximation to the
        curve.
    Notes
    -----
    In all methods of this class that accept coordinate pairs, a single
    complex number can be passed to be split into its real and imaginary
    parts.
    This feature can be useful in expressing coordinates in polar form.
    All commands follow the SVG 2 specification, except for elliptical
    arcs and smooth interpolating curves, which are inspired by the
    Metapost syntax.
    Examples
    --------
    >>> curve = gdspy.Curve(3, 4).H(1).q(0.5, 1, 2j).L(2 + 3j, 2, 2)
    >>> pol = gdspy.Polygon(curve.get_points())
    """
    # points: accumulated polygon vertices; tol: squared tolerance;
    # last_c / last_q: end control point of the previous cubic / quadratic
    # Bezier, used by the smooth variants (S/s, T/t) for reflection.
    __slots__ = "points", "tol", "last_c", "last_q"
    def __init__(self, x, y=0, tolerance=0.01):
        self.last_c = self.last_q = None
        # Tolerance is stored squared so squared distances can be compared
        # without computing square roots.
        self.tol = tolerance ** 2
        if isinstance(x, complex):
            # A complex starting point encodes (real, imag) coordinates.
            self.points = [numpy.array((x.real, x.imag))]
        else:
            self.points = [numpy.array((x, y))]
def get_points(self):
"""
Get the polygonal points that approximate this curve.
Returns
-------
out : Numpy array[N, 2]
Vertices of the polygon.
"""
delta = (self.points[-1] - self.points[0]) ** 2
if delta[0] + delta[1] < self.tol:
return numpy.array(self.points[:-1])
return numpy.array(self.points)
def L(self, *xy):
"""
Add straight line segments to the curve.
Parameters
----------
xy : numbers
Endpoint coordinates of the line segments.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = self.last_q = None
i = 0
while i < len(xy):
if isinstance(xy[i], complex):
self.points.append(numpy.array((xy[i].real, xy[i].imag)))
i += 1
else:
self.points.append(numpy.array((xy[i], xy[i + 1])))
i += 2
return self
def l(self, *xy):
"""
Add straight line segments to the curve.
Parameters
----------
xy : numbers
Endpoint coordinates of the line segments relative to the
current end point.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = self.last_q = None
o = self.points[-1]
i = 0
while i < len(xy):
if isinstance(xy[i], complex):
self.points.append(o + numpy.array((xy[i].real, xy[i].imag)))
i += 1
else:
self.points.append(o + numpy.array((xy[i], xy[i + 1])))
i += 2
return self
def H(self, *x):
"""
Add horizontal line segments to the curve.
Parameters
----------
x : numbers
Endpoint x-coordinates of the line segments.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = self.last_q = None
y0 = self.points[-1][1]
self.points.extend(numpy.array((xx, y0)) for xx in x)
return self
def h(self, *x):
"""
Add horizontal line segments to the curve.
Parameters
----------
x : numbers
Endpoint x-coordinates of the line segments relative to the
current end point.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = self.last_q = None
x0, y0 = self.points[-1]
self.points.extend(numpy.array((x0 + xx, y0)) for xx in x)
return self
def V(self, *y):
"""
Add vertical line segments to the curve.
Parameters
----------
y : numbers
Endpoint y-coordinates of the line segments.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = self.last_q = None
x0 = self.points[-1][0]
self.points.extend(numpy.array((x0, yy)) for yy in y)
return self
def v(self, *y):
"""
Add vertical line segments to the curve.
Parameters
----------
y : numbers
Endpoint y-coordinates of the line segments relative to the
current end point.
Returns
-------
out : `Curve`
This curve.
"""
self.last_c = self.last_q = None
x0, y0 = self.points[-1]
self.points.extend(numpy.array((x0, y0 + yy)) for yy in y)
return self
    def arc(self, radius, initial_angle, final_angle, rotation=0):
        """
        Add an elliptical arc to the curve.
        Parameters
        ----------
        radius : number, array-like[2]
            Arc radius. An elliptical arc can be created by passing an
            array with 2 radii.
        initial_angle : number
            Initial angle of the arc (in *radians*).
        final_angle : number
            Final angle of the arc (in *radians*).
        rotation : number
            Rotation of the axis of the ellipse.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        self.last_c = self.last_q = None
        if hasattr(radius, "__iter__"):
            rx, ry = radius
            # Use the larger radius for the point-count estimate below.
            radius = max(radius)
        else:
            rx = ry = radius
        full_angle = abs(final_angle - initial_angle)
        # Sample count chosen so the chord deviation stays within tolerance;
        # self.tol is the squared tolerance, hence self.tol ** 0.5.
        number_of_points = max(
            3,
            1
            + int(0.5 * full_angle / numpy.arccos(1 - self.tol ** 0.5 / radius) + 0.5),
        )
        angles = numpy.linspace(
            initial_angle - rotation, final_angle - rotation, number_of_points
        )
        pts = numpy.vstack((rx * numpy.cos(angles), ry * numpy.sin(angles))).T
        if rotation != 0:
            # Rotate the sampled points by the ellipse axis rotation.
            rot = numpy.empty_like(pts)
            c = numpy.cos(rotation)
            s = numpy.sin(rotation)
            rot[:, 0] = pts[:, 0] * c - pts[:, 1] * s
            rot[:, 1] = pts[:, 0] * s + pts[:, 1] * c
        else:
            rot = pts
        # Translate the arc so its first sample lands on the current end
        # point, then append all remaining samples (first one is skipped).
        pts = rot[1:] - rot[0] + self.points[-1]
        self.points.extend(xy for xy in pts)
        return self
    def C(self, *xy):
        """
        Add cubic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. Each set of 3 pairs are interpreted as
            the control point at the beginning of the curve, the control
            point at the end of the curve and the endpoint of the curve.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        self.last_q = None
        i = 0
        while i < len(xy):
            # ctrl[0] is the current end point; ctrl[1..3] come from the
            # arguments (each either one complex or two real numbers).
            ctrl = numpy.empty((4, 2))
            ctrl[0] = self.points[-1]
            for j in range(1, 4):
                if isinstance(xy[i], complex):
                    ctrl[j, 0] = xy[i].real
                    ctrl[j, 1] = xy[i].imag
                    i += 1
                else:
                    ctrl[j, 0] = xy[i]
                    ctrl[j, 1] = xy[i + 1]
                    i += 2
            f = _func_bezier(ctrl)
            # Adaptive refinement: split each parameter interval while the
            # midpoint of the chord deviates from the curve by more than the
            # (squared) tolerance.
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            # fu[0] is the current end point, already stored.
            self.points.extend(xy for xy in fu[1:])
            # Remember the end control point so S/s can reflect it.
            self.last_c = ctrl[2]
        return self
    def c(self, *xy):
        """
        Add cubic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. Each set of 3 pairs are interpreted as
            the control point at the beginning of the curve, the control
            point at the end of the curve and the endpoint of the curve.
            All coordinates are relative to the current end point.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        self.last_q = None
        # NOTE: (x0, y0) is fixed at entry, so every segment's relative
        # coordinates are measured from the end point at call time.
        x0, y0 = self.points[-1]
        i = 0
        while i < len(xy):
            ctrl = numpy.empty((4, 2))
            ctrl[0, 0] = x0
            ctrl[0, 1] = y0
            for j in range(1, 4):
                if isinstance(xy[i], complex):
                    ctrl[j, 0] = x0 + xy[i].real
                    ctrl[j, 1] = y0 + xy[i].imag
                    i += 1
                else:
                    ctrl[j, 0] = x0 + xy[i]
                    ctrl[j, 1] = y0 + xy[i + 1]
                    i += 2
            f = _func_bezier(ctrl)
            # Adaptive refinement: split intervals until the chord midpoint
            # deviation drops below the (squared) tolerance.
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            self.points.extend(xy for xy in fu[1:])
            # Remember the end control point so S/s can reflect it.
            self.last_c = ctrl[2]
        return self
    def S(self, *xy):
        """
        Add smooth cubic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. Each set of 2 pairs are interpreted as
            the control point at the end of the curve and the endpoint
            of the curve. The control point at the beginning of the
            curve is assumed to be the reflection of the control point
            at the end of the last curve relative to the starting point
            of the curve. If the previous curve is not a cubic Bezier,
            the control point is coincident with the starting point.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        self.last_q = None
        if self.last_c is None:
            # No previous cubic: the first control point degenerates to the
            # starting point (reflection of itself).
            self.last_c = self.points[-1]
        i = 0
        while i < len(xy):
            ctrl = numpy.empty((4, 2))
            ctrl[0] = self.points[-1]
            # First control point is the reflection of the previous cubic's
            # end control point about the current end point.
            ctrl[1] = 2 * ctrl[0] - self.last_c
            for j in range(2, 4):
                if isinstance(xy[i], complex):
                    ctrl[j, 0] = xy[i].real
                    ctrl[j, 1] = xy[i].imag
                    i += 1
                else:
                    ctrl[j, 0] = xy[i]
                    ctrl[j, 1] = xy[i + 1]
                    i += 2
            f = _func_bezier(ctrl)
            # Adaptive refinement against the (squared) tolerance.
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            self.points.extend(xy for xy in fu[1:])
            # Update the reflection reference for the next smooth segment.
            self.last_c = ctrl[2]
        return self
    def s(self, *xy):
        """
        Add smooth cubic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. Each set of 2 pairs are interpreted as
            the control point at the end of the curve and the endpoint
            of the curve. The control point at the beginning of the
            curve is assumed to be the reflection of the control point
            at the end of the last curve relative to the starting point
            of the curve. If the previous curve is not a cubic Bezier,
            the control point is coincident with the starting point.
            All coordinates are relative to the current end point.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A cubic segment invalidates any stored quadratic control point.
        self.last_q = None
        if self.last_c is None:
            self.last_c = self.points[-1]
        # Reference point for all relative coordinates (fixed for the
        # whole call, matching the absolute-coordinate sibling `S`).
        x0, y0 = self.points[-1]
        i = 0
        while i < len(xy):
            ctrl = numpy.empty((4, 2))
            ctrl[0, 0] = x0
            ctrl[0, 1] = y0
            # Reflect the previous control point for a smooth join.
            ctrl[1] = 2 * ctrl[0] - self.last_c
            for j in range(2, 4):
                # Complex input encodes one (x, y) pair per value.
                if isinstance(xy[i], complex):
                    ctrl[j, 0] = x0 + xy[i].real
                    ctrl[j, 1] = y0 + xy[i].imag
                    i += 1
                else:
                    ctrl[j, 0] = x0 + xy[i]
                    ctrl[j, 1] = y0 + xy[i + 1]
                    i += 2
            f = _func_bezier(ctrl)
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            # Adaptive subdivision against the squared tolerance self.tol.
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            # fu[0] duplicates the current endpoint, so skip it.
            self.points.extend(xy for xy in fu[1:])
            self.last_c = ctrl[2]
        return self
    def Q(self, *xy):
        """
        Add quadratic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. Each set of 2 pairs are interpreted as
            the control point and the endpoint of the curve.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A quadratic segment invalidates any stored cubic control point.
        self.last_c = None
        i = 0
        while i < len(xy):
            ctrl = numpy.empty((3, 2))
            ctrl[0] = self.points[-1]
            for j in range(1, 3):
                # Complex input encodes one (x, y) pair per value.
                if isinstance(xy[i], complex):
                    ctrl[j, 0] = xy[i].real
                    ctrl[j, 1] = xy[i].imag
                    i += 1
                else:
                    ctrl[j, 0] = xy[i]
                    ctrl[j, 1] = xy[i + 1]
                    i += 2
            f = _func_bezier(ctrl)
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            # Adaptive subdivision against the squared tolerance self.tol.
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            # fu[0] duplicates the current endpoint, so skip it.
            self.points.extend(xy for xy in fu[1:])
            # Remember the control point for smooth continuation via T/t.
            self.last_q = ctrl[1]
        return self
    def q(self, *xy):
        """
        Add quadratic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. Each set of 2 pairs are interpreted as
            the control point and the endpoint of the curve.
            All coordinates are relative to the current end point.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A quadratic segment invalidates any stored cubic control point.
        self.last_c = None
        # Reference point for all relative coordinates.
        x0, y0 = self.points[-1]
        i = 0
        while i < len(xy):
            ctrl = numpy.empty((3, 2))
            ctrl[0, 0] = x0
            ctrl[0, 1] = y0
            for j in range(1, 3):
                # Complex input encodes one (x, y) pair per value.
                if isinstance(xy[i], complex):
                    ctrl[j, 0] = x0 + xy[i].real
                    ctrl[j, 1] = y0 + xy[i].imag
                    i += 1
                else:
                    ctrl[j, 0] = x0 + xy[i]
                    ctrl[j, 1] = y0 + xy[i + 1]
                    i += 2
            f = _func_bezier(ctrl)
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            # Adaptive subdivision against the squared tolerance self.tol.
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            # fu[0] duplicates the current endpoint, so skip it.
            self.points.extend(xy for xy in fu[1:])
            # Remember the control point for smooth continuation via T/t.
            self.last_q = ctrl[1]
        return self
    def T(self, *xy):
        """
        Add smooth quadratic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinates of the endpoints of the curves. The control
            point is assumed to be the reflection of the control point
            of the last curve relative to the starting point of the
            curve. If the previous curve is not a quadratic Bezier,
            the control point is coincident with the starting point.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A quadratic segment invalidates any stored cubic control point.
        self.last_c = None
        if self.last_q is None:
            # No previous quadratic: the reflected control point
            # degenerates to the current endpoint.
            self.last_q = self.points[-1]
        i = 0
        while i < len(xy):
            ctrl = numpy.empty((3, 2))
            ctrl[0] = self.points[-1]
            # Reflect the previous quadratic control point for smoothness.
            ctrl[1] = 2 * ctrl[0] - self.last_q
            # Only the endpoint is supplied; complex input encodes (x, y).
            if isinstance(xy[i], complex):
                ctrl[2, 0] = xy[i].real
                ctrl[2, 1] = xy[i].imag
                i += 1
            else:
                ctrl[2, 0] = xy[i]
                ctrl[2, 1] = xy[i + 1]
                i += 2
            f = _func_bezier(ctrl)
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            # Adaptive subdivision against the squared tolerance self.tol.
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            # fu[0] duplicates the current endpoint, so skip it.
            self.points.extend(xy for xy in fu[1:])
            self.last_q = ctrl[1]
        return self
    def t(self, *xy):
        """
        Add smooth quadratic Bezier curves to the curve.
        Parameters
        ----------
        xy : numbers
            Coordinates of the endpoints of the curves. The control
            point is assumed to be the reflection of the control point
            of the last curve relative to the starting point of the
            curve. If the previous curve is not a quadratic Bezier,
            the control point is coincident with the starting point.
            All coordinates are relative to the current end point.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A quadratic segment invalidates any stored cubic control point.
        self.last_c = None
        if self.last_q is None:
            self.last_q = self.points[-1]
        # Reference point for all relative coordinates.
        x0, y0 = self.points[-1]
        i = 0
        while i < len(xy):
            ctrl = numpy.empty((3, 2))
            ctrl[0, 0] = x0
            ctrl[0, 1] = y0
            # Reflect the previous quadratic control point for smoothness.
            ctrl[1] = 2 * ctrl[0] - self.last_q
            # Only the endpoint is supplied; complex input encodes (x, y).
            if isinstance(xy[i], complex):
                ctrl[2, 0] = x0 + xy[i].real
                ctrl[2, 1] = y0 + xy[i].imag
                i += 1
            else:
                ctrl[2, 0] = x0 + xy[i]
                ctrl[2, 1] = y0 + xy[i + 1]
                i += 2
            f = _func_bezier(ctrl)
            uu = [0, 0.2, 0.5, 0.8, 1]
            fu = [f(u) for u in uu]
            iu = 1
            # Adaptive subdivision against the squared tolerance self.tol.
            while iu < len(fu):
                test_u = 0.5 * (uu[iu - 1] + uu[iu])
                test_pt = f(test_u)
                test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
                if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                    uu.insert(iu, test_u)
                    fu.insert(iu, test_pt)
                else:
                    iu += 1
            # fu[0] duplicates the current endpoint, so skip it.
            self.points.extend(xy for xy in fu[1:])
            self.last_q = ctrl[1]
        return self
    def B(self, *xy):
        """
        Add a general degree Bezier curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. The last coordinate is the endpoint of
            curve and all other are control points.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A general Bezier offers no stored control point for smooth
        # continuation, so both are cleared.
        self.last_c = self.last_q = None
        i = 0
        ctrl = [self.points[-1]]
        while i < len(xy):
            # Complex input encodes one (x, y) pair per value.
            if isinstance(xy[i], complex):
                ctrl.append((xy[i].real, xy[i].imag))
                i += 1
            else:
                ctrl.append((xy[i], xy[i + 1]))
                i += 2
        ctrl = numpy.array(ctrl)
        f = _func_bezier(ctrl)
        # Initial parameter samples: one more than the number of control
        # points, remapped by sign(u)*|u|**0.8 so samples cluster toward
        # both endpoints.
        uu = numpy.linspace(-1, 1, ctrl.shape[0] + 1)
        uu = list(0.5 * (1 + numpy.sign(uu) * numpy.abs(uu) ** 0.8))
        fu = [f(u) for u in uu]
        iu = 1
        # Adaptive subdivision against the squared tolerance self.tol.
        while iu < len(fu):
            test_u = 0.5 * (uu[iu - 1] + uu[iu])
            test_pt = f(test_u)
            test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
            if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                uu.insert(iu, test_u)
                fu.insert(iu, test_pt)
            else:
                iu += 1
        # fu[0] duplicates the current endpoint, so skip it.
        self.points.extend(xy for xy in fu[1:])
        return self
    def b(self, *xy):
        """
        Add a general degree Bezier curve.
        Parameters
        ----------
        xy : numbers
            Coordinate pairs. The last coordinate is the endpoint of
            curve and all other are control points. All coordinates are
            relative to the current end point.
        Returns
        -------
        out : `Curve`
            This curve.
        """
        # A general Bezier offers no stored control point for smooth
        # continuation, so both are cleared.
        self.last_c = self.last_q = None
        # Reference point for all relative coordinates.
        x0, y0 = self.points[-1]
        i = 0
        ctrl = [self.points[-1]]
        while i < len(xy):
            # Complex input encodes one (x, y) pair per value.
            if isinstance(xy[i], complex):
                ctrl.append((x0 + xy[i].real, y0 + xy[i].imag))
                i += 1
            else:
                ctrl.append((x0 + xy[i], y0 + xy[i + 1]))
                i += 2
        ctrl = numpy.array(ctrl)
        f = _func_bezier(ctrl)
        # Initial samples clustered toward the endpoints via the
        # sign(u)*|u|**0.8 remapping of a uniform grid.
        uu = numpy.linspace(-1, 1, ctrl.shape[0] + 1)
        uu = list(0.5 * (1 + numpy.sign(uu) * numpy.abs(uu) ** 0.8))
        fu = [f(u) for u in uu]
        iu = 1
        # Adaptive subdivision against the squared tolerance self.tol.
        while iu < len(fu):
            test_u = 0.5 * (uu[iu - 1] + uu[iu])
            test_pt = f(test_u)
            test_err = 0.5 * (fu[iu - 1] + fu[iu]) - test_pt
            if test_err[0] ** 2 + test_err[1] ** 2 > self.tol:
                uu.insert(iu, test_u)
                fu.insert(iu, test_pt)
            else:
                iu += 1
        # fu[0] duplicates the current endpoint, so skip it.
        self.points.extend(xy for xy in fu[1:])
        return self
    def I(
        self,
        points,
        angles=None,
        curl_start=1,
        curl_end=1,
        t_in=1,
        t_out=1,
        cycle=False,
    ):
        """
        Add a smooth interpolating curve through the given points.
        Uses the Hobby algorithm [1]_ to calculate a smooth
        interpolating curve made of cubic Bezier segments between each
        pair of points.
        Parameters
        ----------
        points : array-like[N][2]
            Vertices in the interpolating curve.
        angles : array-like[N + 1] or None
            Tangent angles at each point (in *radians*). Any angles
            defined as None are automatically calculated.
        curl_start : number
            Ratio between the mock curvatures at the first point and at
            its neighbor. A value of 1 renders the first segment a good
            approximation for a circular arc. A value of 0 will better
            approximate a straight segment. It has no effect for closed
            curves or when an angle is defined for the first point.
        curl_end : number
            Ratio between the mock curvatures at the last point and at
            its neighbor. It has no effect for closed curves or when an
            angle is defined for the first point.
        t_in : number or array-like[N + 1]
            Tension parameter when arriving at each point. One value
            per point or a single value used for all points.
        t_out : number or array-like[N + 1]
            Tension parameter when leaving each point. One value per
            point or a single value used for all points.
        cycle : bool
            If True, calculates control points for a closed curve,
            with an additional segment connecting the first and last
            points.
        Returns
        -------
        out : `Curve`
            This curve.
        Examples
        --------
        >>> c1 = gdspy.Curve(0, 1).I([(1, 1), (2, 1), (1, 0)])
        >>> c2 = gdspy.Curve(0, 2).I([(1, 2), (2, 2), (1, 1)],
        ...                          cycle=True)
        >>> ps = gdspy.PolygonSet([c1.get_points(), c2.get_points()])
        References
        ----------
        .. [1] <NAME>. *Discrete Comput. Geom.* (1986) 1: 123.
           `DOI: 10.1007/BF02187690
           <https://doi.org/10.1007/BF02187690>`_
        """
        # Prepend the current endpoint so the interpolation starts there.
        pts = numpy.vstack((self.points[-1:], points))
        # Per-segment Bezier control points from the Hobby algorithm.
        cta, ctb = _hobby(pts, angles, curl_start, curl_end, t_in, t_out, cycle)
        args = []
        # Flatten into the argument layout expected by C():
        # (ctrl1_x, ctrl1_y, ctrl2_x, ctrl2_y, end_x, end_y) per segment.
        args.extend(
            x
            for i in range(pts.shape[0] - 1)
            for x in [
                cta[i, 0],
                cta[i, 1],
                ctb[i, 0],
                ctb[i, 1],
                pts[i + 1, 0],
                pts[i + 1, 1],
            ]
        )
        if cycle:
            # Closing segment back to the first point.
            args.extend(
                [cta[-1, 0], cta[-1, 1], ctb[-1, 0], ctb[-1, 1], pts[0, 0], pts[0, 1]]
            )
        return self.C(*args)
    def i(
        self,
        points,
        angles=None,
        curl_start=1,
        curl_end=1,
        t_in=1,
        t_out=1,
        cycle=False,
    ):
        """
        Add a smooth interpolating curve through the given points.
        Uses the Hobby algorithm [1]_ to calculate a smooth
        interpolating curve made of cubic Bezier segments between each
        pair of points.
        Parameters
        ----------
        points : array-like[N][2]
            Vertices in the interpolating curve (relative to the current
            endpoint).
        angles : array-like[N + 1] or None
            Tangent angles at each point (in *radians*). Any angles
            defined as None are automatically calculated.
        curl_start : number
            Ratio between the mock curvatures at the first point and at
            its neighbor. A value of 1 renders the first segment a good
            approximation for a circular arc. A value of 0 will better
            approximate a straight segment. It has no effect for closed
            curves or when an angle is defined for the first point.
        curl_end : number
            Ratio between the mock curvatures at the last point and at
            its neighbor. It has no effect for closed curves or when an
            angle is defined for the first point.
        t_in : number or array-like[N + 1]
            Tension parameter when arriving at each point. One value
            per point or a single value used for all points.
        t_out : number or array-like[N + 1]
            Tension parameter when leaving each point. One value per
            point or a single value used for all points.
        cycle : bool
            If True, calculates control points for a closed curve,
            with an additional segment connecting the first and last
            points.
        Returns
        -------
        out : `Curve`
            This curve.
        Examples
        --------
        >>> c1 = gdspy.Curve(0, 1).i([(1, 0), (2, 0), (1, -1)])
        >>> c2 = gdspy.Curve(0, 2).i([(1, 0), (2, 0), (1, -1)],
        ...                          cycle=True)
        >>> ps = gdspy.PolygonSet([c1.get_points(), c2.get_points()])
        References
        ----------
        .. [1] <NAME>. *Discrete Comput. Geom.* (1986) 1: 123.
           `DOI: 10.1007/BF02187690
           <https://doi.org/10.1007/BF02187690>`_
        """
        # Prepend a zero offset, then shift all relative points by the
        # current endpoint so the interpolation starts there.
        pts = numpy.vstack((_zero.reshape((1, 2)), points)) + self.points[-1]
        # Per-segment Bezier control points from the Hobby algorithm.
        cta, ctb = _hobby(pts, angles, curl_start, curl_end, t_in, t_out, cycle)
        args = []
        # Flatten into the argument layout expected by C():
        # (ctrl1_x, ctrl1_y, ctrl2_x, ctrl2_y, end_x, end_y) per segment.
        args.extend(
            x
            for i in range(pts.shape[0] - 1)
            for x in [
                cta[i, 0],
                cta[i, 1],
                ctb[i, 0],
                ctb[i, 1],
                pts[i + 1, 0],
                pts[i + 1, 1],
            ]
        )
        if cycle:
            # Closing segment back to the first point.
            args.extend(
                [cta[-1, 0], cta[-1, 1], ctb[-1, 0], ctb[-1, 1], pts[0, 0], pts[0, 1]]
            )
        return self.C(*args)
| [
"gdspy._hobby",
"numpy.abs",
"numpy.arccos",
"gdspy._zero.reshape",
"gdspy._func_bezier",
"numpy.array",
"numpy.linspace",
"numpy.empty_like",
"numpy.vstack",
"numpy.cos",
"numpy.empty",
"numpy.sin",
"numpy.sign"
] | [((3112, 3136), 'numpy.array', 'numpy.array', (['self.points'], {}), '(self.points)\n', (3123, 3136), False, 'import numpy\n'), ((7440, 7526), 'numpy.linspace', 'numpy.linspace', (['(initial_angle - rotation)', '(final_angle - rotation)', 'number_of_points'], {}), '(initial_angle - rotation, final_angle - rotation,\n number_of_points)\n', (7454, 7526), False, 'import numpy\n'), ((22626, 22643), 'numpy.array', 'numpy.array', (['ctrl'], {}), '(ctrl)\n', (22637, 22643), False, 'import numpy\n'), ((22656, 22674), 'gdspy._func_bezier', '_func_bezier', (['ctrl'], {}), '(ctrl)\n', (22668, 22674), False, 'from gdspy import _func_bezier, _hobby, _zero\n'), ((22688, 22728), 'numpy.linspace', 'numpy.linspace', (['(-1)', '(1)', '(ctrl.shape[0] + 1)'], {}), '(-1, 1, ctrl.shape[0] + 1)\n', (22702, 22728), False, 'import numpy\n'), ((24076, 24093), 'numpy.array', 'numpy.array', (['ctrl'], {}), '(ctrl)\n', (24087, 24093), False, 'import numpy\n'), ((24106, 24124), 'gdspy._func_bezier', '_func_bezier', (['ctrl'], {}), '(ctrl)\n', (24118, 24124), False, 'from gdspy import _func_bezier, _hobby, _zero\n'), ((24138, 24178), 'numpy.linspace', 'numpy.linspace', (['(-1)', '(1)', '(ctrl.shape[0] + 1)'], {}), '(-1, 1, ctrl.shape[0] + 1)\n', (24152, 24178), False, 'import numpy\n'), ((27110, 27150), 'numpy.vstack', 'numpy.vstack', (['(self.points[-1:], points)'], {}), '((self.points[-1:], points))\n', (27122, 27150), False, 'import numpy\n'), ((27170, 27231), 'gdspy._hobby', '_hobby', (['pts', 'angles', 'curl_start', 'curl_end', 't_in', 't_out', 'cycle'], {}), '(pts, angles, curl_start, curl_end, t_in, t_out, cycle)\n', (27176, 27231), False, 'from gdspy import _func_bezier, _hobby, _zero\n'), ((30247, 30308), 'gdspy._hobby', '_hobby', (['pts', 'angles', 'curl_start', 'curl_end', 't_in', 't_out', 'cycle'], {}), '(pts, angles, curl_start, curl_end, t_in, t_out, cycle)\n', (30253, 30308), False, 'from gdspy import _func_bezier, _hobby, _zero\n'), ((3067, 3096), 'numpy.array', 
'numpy.array', (['self.points[:-1]'], {}), '(self.points[:-1])\n', (3078, 3096), False, 'import numpy\n'), ((7668, 7689), 'numpy.empty_like', 'numpy.empty_like', (['pts'], {}), '(pts)\n', (7684, 7689), False, 'import numpy\n'), ((7706, 7725), 'numpy.cos', 'numpy.cos', (['rotation'], {}), '(rotation)\n', (7715, 7725), False, 'import numpy\n'), ((7742, 7761), 'numpy.sin', 'numpy.sin', (['rotation'], {}), '(rotation)\n', (7751, 7761), False, 'import numpy\n'), ((8554, 8573), 'numpy.empty', 'numpy.empty', (['(4, 2)'], {}), '((4, 2))\n', (8565, 8573), False, 'import numpy\n'), ((8955, 8973), 'gdspy._func_bezier', '_func_bezier', (['ctrl'], {}), '(ctrl)\n', (8967, 8973), False, 'from gdspy import _func_bezier, _hobby, _zero\n'), ((10192, 10211), 'numpy.empty', 'numpy.empty', (['(4, 2)'], {}), '((4, 2))\n', (10203, 10211), False, 'import numpy\n'), ((10631, 10649), 'gdspy._func_bezier', '_func_bezier', (['ctrl'], {}), '(ctrl)\n', (10643, 10649), False, 'from gdspy import _func_bezier, _hobby, _zero\n'), ((12126, 12145), 'numpy.empty', 'numpy.empty', (['(4, 2)'], {}), '((4, 2))\n', (12137, 12145), False, 'import numpy\n'), ((12575, 12593), 'gdspy._func_bezier', '_func_bezier', (['ctrl'], {}), '(ctrl)\n', (12587, 12593), False, 'from gdspy import _func_bezier, _hobby, _zero\n'), ((14174, 14193), 'numpy.empty', 'numpy.empty', (['(4, 2)'], {}), '((4, 2))\n', (14185, 14193), False, 'import numpy\n'), ((14661, 14679), 'gdspy._func_bezier', '_func_bezier', (['ctrl'], {}), '(ctrl)\n', (14673, 14679), False, 'from gdspy import _func_bezier, _hobby, _zero\n'), ((15721, 15740), 'numpy.empty', 'numpy.empty', (['(3, 2)'], {}), '((3, 2))\n', (15732, 15740), False, 'import numpy\n'), ((16122, 16140), 'gdspy._func_bezier', '_func_bezier', (['ctrl'], {}), '(ctrl)\n', (16134, 16140), False, 'from gdspy import _func_bezier, _hobby, _zero\n'), ((17278, 17297), 'numpy.empty', 'numpy.empty', (['(3, 2)'], {}), '((3, 2))\n', (17289, 17297), False, 'import numpy\n'), ((17717, 17735), 
'gdspy._func_bezier', '_func_bezier', (['ctrl'], {}), '(ctrl)\n', (17729, 17735), False, 'from gdspy import _func_bezier, _hobby, _zero\n'), ((19069, 19088), 'numpy.empty', 'numpy.empty', (['(3, 2)'], {}), '((3, 2))\n', (19080, 19088), False, 'import numpy\n'), ((19452, 19470), 'gdspy._func_bezier', '_func_bezier', (['ctrl'], {}), '(ctrl)\n', (19464, 19470), False, 'from gdspy import _func_bezier, _hobby, _zero\n'), ((20908, 20927), 'numpy.empty', 'numpy.empty', (['(3, 2)'], {}), '((3, 2))\n', (20919, 20927), False, 'import numpy\n'), ((21329, 21347), 'gdspy._func_bezier', '_func_bezier', (['ctrl'], {}), '(ctrl)\n', (21341, 21347), False, 'from gdspy import _func_bezier, _hobby, _zero\n'), ((2641, 2670), 'numpy.array', 'numpy.array', (['(x.real, x.imag)'], {}), '((x.real, x.imag))\n', (2652, 2670), False, 'import numpy\n'), ((2713, 2732), 'numpy.array', 'numpy.array', (['(x, y)'], {}), '((x, y))\n', (2724, 2732), False, 'import numpy\n'), ((4893, 4914), 'numpy.array', 'numpy.array', (['(xx, y0)'], {}), '((xx, y0))\n', (4904, 4914), False, 'import numpy\n'), ((5387, 5413), 'numpy.array', 'numpy.array', (['(x0 + xx, y0)'], {}), '((x0 + xx, y0))\n', (5398, 5413), False, 'import numpy\n'), ((5837, 5858), 'numpy.array', 'numpy.array', (['(x0, yy)'], {}), '((x0, yy))\n', (5848, 5858), False, 'import numpy\n'), ((6329, 6355), 'numpy.array', 'numpy.array', (['(x0, y0 + yy)'], {}), '((x0, y0 + yy))\n', (6340, 6355), False, 'import numpy\n'), ((3587, 3624), 'numpy.array', 'numpy.array', (['(xy[i].real, xy[i].imag)'], {}), '((xy[i].real, xy[i].imag))\n', (3598, 3624), False, 'import numpy\n'), ((3702, 3733), 'numpy.array', 'numpy.array', (['(xy[i], xy[i + 1])'], {}), '((xy[i], xy[i + 1]))\n', (3713, 3733), False, 'import numpy\n'), ((30178, 30199), 'gdspy._zero.reshape', '_zero.reshape', (['(1, 2)'], {}), '((1, 2))\n', (30191, 30199), False, 'from gdspy import _func_bezier, _hobby, _zero\n'), ((4306, 4343), 'numpy.array', 'numpy.array', (['(xy[i].real, xy[i].imag)'], {}), 
'((xy[i].real, xy[i].imag))\n', (4317, 4343), False, 'import numpy\n'), ((4425, 4456), 'numpy.array', 'numpy.array', (['(xy[i], xy[i + 1])'], {}), '((xy[i], xy[i + 1]))\n', (4436, 4456), False, 'import numpy\n'), ((7578, 7595), 'numpy.cos', 'numpy.cos', (['angles'], {}), '(angles)\n', (7587, 7595), False, 'import numpy\n'), ((7602, 7619), 'numpy.sin', 'numpy.sin', (['angles'], {}), '(angles)\n', (7611, 7619), False, 'import numpy\n'), ((22758, 22772), 'numpy.sign', 'numpy.sign', (['uu'], {}), '(uu)\n', (22768, 22772), False, 'import numpy\n'), ((24208, 24222), 'numpy.sign', 'numpy.sign', (['uu'], {}), '(uu)\n', (24218, 24222), False, 'import numpy\n'), ((7362, 7404), 'numpy.arccos', 'numpy.arccos', (['(1 - self.tol ** 0.5 / radius)'], {}), '(1 - self.tol ** 0.5 / radius)\n', (7374, 7404), False, 'import numpy\n'), ((22775, 22788), 'numpy.abs', 'numpy.abs', (['uu'], {}), '(uu)\n', (22784, 22788), False, 'import numpy\n'), ((24225, 24238), 'numpy.abs', 'numpy.abs', (['uu'], {}), '(uu)\n', (24234, 24238), False, 'import numpy\n')] |
import numpy as np
import cv2
import glob
import helpers
# Source (road-plane) and destination (birds-eye) quadrilaterals for the
# fixed perspective warp used by `warp`; points are (x, y) pixel
# coordinates, presumably in a 1280x720 frame -- TODO confirm.
warping_from = np.float32([[200, 720], [604, 450], [696, 450], [1120, 720]])
warping_to = np.float32([[200, 720], [200, 0], [1120, 0], [1120, 720]])
def calibrate():
    """Calibrate the camera from the chessboard images in ``camera_cal/``.

    Detects the 9x6 inner chessboard corners on every calibration image,
    refines them to sub-pixel accuracy, and fits the camera model.

    Returns:
        tuple: ``(mtx, dist)`` -- camera matrix and distortion
        coefficients from ``cv2.calibrateCamera``.
    """
    # termination criteria for the sub-pixel corner refinement
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    # chessboard corner positions in the board's own coordinate system
    # (z = 0 plane), one row per corner
    objp = np.zeros((6*9, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
    objpoints = []  # 3d points of the board corners
    imgpoints = []  # matching 2d points in each image
    images = glob.glob('camera_cal/*.jpg')
    # NOTE: assumes at least one image is found; `gray` below would be
    # undefined otherwise.
    for fname in images:
        img = cv2.imread(fname)
        # cv2.imread returns BGR images, so convert with the BGR code
        # (the original COLOR_RGB2GRAY swapped the R/B channel weights).
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (9, 6), None)
        if ret:
            objpoints.append(objp)
            corners2 = cv2.cornerSubPix(
                gray, corners, (11, 11), (-1, -1), criteria)
            # Use the sub-pixel refined corners (the original appended
            # the unrefined `corners`, discarding the refinement).
            imgpoints.append(corners2)
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, gray.shape[::-1], None, None)
    return mtx, dist
def undistort(img, mtx, dist):
    """Remove lens distortion from an image and return a 1280x720 result.

    Args:
        img: distorted input image.
        mtx: camera matrix from calibration.
        dist: distortion coefficients from calibration.
    Returns:
        The undistorted image, cropped to the valid region of interest
        and resized back to 1280x720.
    """
    height, width = img.shape[:2]
    new_mtx, roi = cv2.getOptimalNewCameraMatrix(
        mtx, dist, (width, height), 1, (width, height))
    corrected = cv2.undistort(img, mtx, dist, None, new_mtx)
    rx, ry, rw, rh = roi
    cropped = corrected[ry:ry + rh, rx:rx + rw]
    return cv2.resize(cropped, (1280, 720))
def warp(img):
    """Apply the fixed birds-eye perspective transform.

    Args:
        img: input image.
    Returns:
        Tuple of the warped 1280x720 image and the transform matrix.
    """
    transform = cv2.getPerspectiveTransform(warping_from, warping_to)
    warped = cv2.warpPerspective(img, transform, (1280, 720))
    return warped, transform
| [
"cv2.getPerspectiveTransform",
"cv2.undistort",
"cv2.getOptimalNewCameraMatrix",
"numpy.zeros",
"cv2.warpPerspective",
"cv2.cvtColor",
"cv2.calibrateCamera",
"cv2.findChessboardCorners",
"cv2.resize",
"cv2.cornerSubPix",
"cv2.imread",
"numpy.float32",
"glob.glob"
] | [((73, 134), 'numpy.float32', 'np.float32', (['[[200, 720], [604, 450], [696, 450], [1120, 720]]'], {}), '([[200, 720], [604, 450], [696, 450], [1120, 720]])\n', (83, 134), True, 'import numpy as np\n'), ((148, 206), 'numpy.float32', 'np.float32', (['[[200, 720], [200, 0], [1120, 0], [1120, 720]]'], {}), '([[200, 720], [200, 0], [1120, 0], [1120, 720]])\n', (158, 206), True, 'import numpy as np\n'), ((316, 348), 'numpy.zeros', 'np.zeros', (['(6 * 9, 3)', 'np.float32'], {}), '((6 * 9, 3), np.float32)\n', (324, 348), True, 'import numpy as np\n'), ((452, 481), 'glob.glob', 'glob.glob', (['"""camera_cal/*.jpg"""'], {}), "('camera_cal/*.jpg')\n", (461, 481), False, 'import glob\n'), ((897, 968), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objpoints', 'imgpoints', 'gray.shape[::-1]', 'None', 'None'], {}), '(objpoints, imgpoints, gray.shape[::-1], None, None)\n', (916, 968), False, 'import cv2\n'), ((1082, 1141), 'cv2.getOptimalNewCameraMatrix', 'cv2.getOptimalNewCameraMatrix', (['mtx', 'dist', '(w, h)', '(1)', '(w, h)'], {}), '(mtx, dist, (w, h), 1, (w, h))\n', (1111, 1141), False, 'import cv2\n'), ((1169, 1218), 'cv2.undistort', 'cv2.undistort', (['img', 'mtx', 'dist', 'None', 'newcameramtx'], {}), '(img, mtx, dist, None, newcameramtx)\n', (1182, 1218), False, 'import cv2\n'), ((1251, 1305), 'cv2.resize', 'cv2.resize', (['undistorted[y:y + h, x:x + w]', '(1280, 720)'], {}), '(undistorted[y:y + h, x:x + w], (1280, 720))\n', (1261, 1305), False, 'import cv2\n'), ((1327, 1380), 'cv2.getPerspectiveTransform', 'cv2.getPerspectiveTransform', (['warping_from', 'warping_to'], {}), '(warping_from, warping_to)\n', (1354, 1380), False, 'import cv2\n'), ((522, 539), 'cv2.imread', 'cv2.imread', (['fname'], {}), '(fname)\n', (532, 539), False, 'import cv2\n'), ((555, 592), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_RGB2GRAY'], {}), '(img, cv2.COLOR_RGB2GRAY)\n', (567, 592), False, 'import cv2\n'), ((616, 661), 'cv2.findChessboardCorners', 
'cv2.findChessboardCorners', (['gray', '(9, 6)', 'None'], {}), '(gray, (9, 6), None)\n', (641, 661), False, 'import cv2\n'), ((1392, 1432), 'cv2.warpPerspective', 'cv2.warpPerspective', (['img', 'M', '(1280, 720)'], {}), '(img, M, (1280, 720))\n', (1411, 1432), False, 'import cv2\n'), ((744, 805), 'cv2.cornerSubPix', 'cv2.cornerSubPix', (['gray', 'corners', '(11, 11)', '(-1, -1)', 'criteria'], {}), '(gray, corners, (11, 11), (-1, -1), criteria)\n', (760, 805), False, 'import cv2\n')] |
import numpy as np
from collections import defaultdict
import sys
import argparse
# we rely on ordered dictionaries here
assert sys.version_info >= (3, 6)
def read_fasta(fasta_path, alphabet='ACDEFGHIKLMNPQRSTVWY-', default_index=20):
    """Read an aligned fasta file into an integer-encoded MSA.

    Args:
        fasta_path (str): path to the fasta file; sequences must all
            have the same aligned length.
        alphabet (str): characters mapped to indices 0..len(alphabet)-1.
        default_index (int): index assigned to characters outside the
            alphabet.
    Returns:
        tuple: (seq_msa, seq_weight, num_aa_type) where seq_msa is an
        (n_seqs, seq_len) int array, seq_weight the position-based
        sequence reweighting factors (sums to 1), and num_aa_type the
        alphabet size.
    """
    # read all the sequences into a dictionary keyed by header line
    seq_dict = {}
    with open(fasta_path, 'r') as file_handle:
        seq_id = None
        for line in file_handle:
            line = line.strip()
            if line.startswith(">"):
                seq_id = line
                seq_dict[seq_id] = ""
                continue
            assert seq_id is not None
            # keep only aligned residues (upper case) and gap characters
            seq_dict[seq_id] += ''.join(c for c in line
                                        if c.isupper() or c == '-')
    # map residue characters to integer indices; unknown chars fall back
    # to default_index via the defaultdict factory
    aa_index = defaultdict(lambda: default_index,
                            {aa: i for i, aa in enumerate(alphabet)})
    seq_msa = np.array([[aa_index[s] for s in seq]
                       for seq in seq_dict.values()], dtype=int)
    # position-based sequence reweighting: rare residues at a column get
    # higher weight
    seq_weight = np.zeros(seq_msa.shape)
    for j in range(seq_msa.shape[1]):
        aa_type, aa_counts = np.unique(seq_msa[:, j], return_counts=True)
        num_type = len(aa_type)
        # O(1) count lookup instead of the original O(n) list.index scan
        count_dict = dict(zip(aa_type, aa_counts))
        for i in range(seq_msa.shape[0]):
            seq_weight[i, j] = (1.0 / num_type) * (1.0 / count_dict[seq_msa[i, j]])
    tot_weight = np.sum(seq_weight)
    seq_weight = seq_weight.sum(1) / tot_weight
    return seq_msa, seq_weight, len(alphabet)
def get_seq_len(fasta_path):
    """Return the aligned length of the first sequence in a fasta file.

    Args:
        fasta_path (str): path to the fasta file.
    Returns:
        int: number of aligned characters (upper case or '-') in the
        first record.
    Raises:
        ValueError: if the file is empty or does not start with a '>'
        header line.
    """
    seq_len = 0
    first_line = True
    with open(fasta_path, 'r') as file_handle:
        for line in file_handle:
            if first_line:
                if not line.startswith(">"):
                    raise ValueError("Expect first line to start with >")
                first_line = False
                continue
            if line.startswith(">"):
                # reached the second record: the first sequence is complete
                return seq_len
            seq_len += len([c for c in line if c.isupper() or c == '-'])
    if not first_line:
        # single-record file: the first sequence ends at EOF (the
        # original incorrectly raised ValueError in this case)
        return seq_len
    raise ValueError("Could not determine sequence length")
if __name__ == '__main__':
    # Command line interface: optionally print the aligned sequence
    # length of a fasta file.
    parser = argparse.ArgumentParser()
    parser.add_argument('--print_seq_len', action='store_true')
    parser.add_argument('--fasta_path', type=str, default=None)
    args = parser.parse_args()
    if args.fasta_path is None and args.print_seq_len:
        raise ValueError("need fasta_path if printing seq_len")
    if args.print_seq_len:
        seq_len = get_seq_len(args.fasta_path)
        print(seq_len)
"numpy.unique",
"argparse.ArgumentParser",
"numpy.array",
"numpy.zeros",
"numpy.sum"
] | [((984, 1012), 'numpy.array', 'np.array', (['seq_msa'], {'dtype': 'int'}), '(seq_msa, dtype=int)\n', (992, 1012), True, 'import numpy as np\n'), ((1059, 1082), 'numpy.zeros', 'np.zeros', (['seq_msa.shape'], {}), '(seq_msa.shape)\n', (1067, 1082), True, 'import numpy as np\n'), ((1473, 1491), 'numpy.sum', 'np.sum', (['seq_weight'], {}), '(seq_weight)\n', (1479, 1491), True, 'import numpy as np\n'), ((2219, 2244), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2242, 2244), False, 'import argparse\n'), ((1150, 1194), 'numpy.unique', 'np.unique', (['seq_msa[:, j]'], {'return_counts': '(True)'}), '(seq_msa[:, j], return_counts=True)\n', (1159, 1194), True, 'import numpy as np\n')] |
import argparse
import sys
from tqdm import tqdm
import json
import subprocess
import os
import random
import numpy as np
import torch
from sklearn.metrics import (roc_auc_score, auc, precision_recall_curve,
r2_score, accuracy_score, log_loss)
# optimization goal for various metrics (whether higher or lower is better)
METRIC_DIC = {"pr_auc": "maximize",
              "roc_auc": "maximize",
              "r2": "maximize",
              "class_loss": "minimize",
              "regress_loss": "minimize",
              "mae": "minimize",
              "mse": "minimize"}
# names of all metrics supported by these scripts
METRICS = list(METRIC_DIC.keys())
# transform from chemprop syntax to our syntax for the metrics
CHEMPROP_TRANSFORM = {"auc": "roc_auc",
                      "prc-auc": "pr_auc",
                      "binary_cross_entropy": "class_loss",
                      "mse": "regress_loss"}
# metrics available in chemprop (chemprop's own naming)
CHEMPROP_METRICS = ["auc",
                    "prc-auc",
                    "rmse",
                    "mae",
                    "mse",
                    "r2",
                    "accuracy",
                    "cross_entropy",
                    "binary_cross_entropy"]
def tqdm_enum(iter):
    """
    Wrap tqdm around `enumerate`.
    Args:
        iter (iterable): an iterable (e.g. list)
    Yields:
        i (int): current index
        y: current value
    """
    # `enumerate` replaces the original hand-rolled counter; tqdm still
    # renders the progress bar over the underlying iterable.
    yield from enumerate(tqdm(iter))
def parse_args(parser, config_flag="config_file"):
    """
    Parse command line arguments, optionally overriding them with
    values loaded from a JSON config file.
    Args:
        parser (argparse.ArgumentParser): argument parser
        config_flag (str): name of the arg key
            that gives the name of the config file.
    Returns:
        args (argparse.Namespace): arguments
    """
    args = parser.parse_args()
    # if no config path was given, the command line values stand as-is
    config_path = getattr(args, config_flag, None)
    if config_path is None:
        return args
    # config values override the parsed args, but only for keys the
    # parser actually knows about
    with open(config_path, "r") as f:
        config_args = json.load(f)
    for key, val in config_args.items():
        if hasattr(args, key):
            setattr(args, key, val)
    return args
def fprint(msg):
    """
    Print a string and flush stdout immediately.
    Args:
        msg (str): string to print
    Returns:
        None
    """
    # flush=True performs the same stdout flush the original did
    # explicitly via sys.stdout.flush()
    print(msg, flush=True)
def bash_command(cmd):
    """ Run a command from the command line using subprocess.
    Args:
        cmd (str): command
    Returns:
        subprocess.Popen: handle to the spawned process; the call does
        not wait for the command to finish.
    """
    # NOTE(review): shell=True runs `cmd` through bash, so untrusted
    # input here is a shell-injection risk -- callers must only pass
    # trusted command strings.
    return subprocess.Popen(cmd, shell=True, executable='/bin/bash')
def convert_metric(metric):
    """
    Normalize a metric name to the fixed spelling used in
    various scripts.
    Args:
        metric (str): input metric
    Returns:
        metric (str): output metric
    """
    # alias table covering the chemprop spellings; anything else is
    # already canonical and passes through unchanged
    aliases = {"prc_auc": "pr_auc",
               "prc-auc": "pr_auc",
               "auc": "roc_auc",
               "roc-auc": "roc_auc"}
    return aliases.get(metric, metric)
def prepare_metric(lines, metric):
    """
    Get various metric quantities before parsing a log file.
    Args:
        lines (list[str]): lines in the log file
        metric (str): name of metric
    Returns:
        idx (int): index at which the metric score occurs
            when the given line has been split by `|`
        best_score (float): initial best score
        best_epoch (int): initial best_epoch
        optim (str): goal of the metric optimization (i.e.
            minimize or maximize.)
    Raises:
        ValueError: if no header column matches `metric`.
    """
    header_items = [i.strip() for i in lines[0].split("|")]
    metric = convert_metric(metric)
    idx = None
    if "loss" in metric:
        idx = header_items.index("Validation loss")
    else:
        # hoisted out of the loop: the metric name does not change
        sub_keys = metric.split("_")
        # match the (last) column whose name contains every part of the
        # metric name, e.g. "pr_auc" matches a "PR AUC" column
        for i, item in enumerate(header_items):
            if all([key.lower() in item.lower()
                    for key in sub_keys]):
                idx = i
    if idx is None:
        # previously this fell through to an opaque UnboundLocalError
        raise ValueError(f"Could not find metric '{metric}' in log header")
    optim = METRIC_DIC[metric]
    # initialize the running best so any real score improves on it
    if optim == "minimize":
        best_score = float("inf")
    else:
        best_score = -float("inf")
    best_epoch = -1
    return idx, best_score, best_epoch, optim
def parse_score(model_path, metric):
    """
    Find the best score and best epoch according to a given metric.
    Args:
        model_path (str): path to the training folder
        metric (str): name of metric
    Returns:
        best_score (float): best validation score
        best_epoch (str): epoch with the best validation score
    """
    # the training log is a |-separated table written by the trainer
    log_path = os.path.join(model_path, "log_human_read.csv")
    with open(log_path, "r") as f:
        lines = f.readlines()
    idx, best_score, best_epoch, optim = prepare_metric(
        lines=lines,
        metric=metric)
    for line in lines:
        splits = [i.strip() for i in line.split("|")]
        try:
            score = float(splits[idx])
        except (ValueError, IndexError):
            # skip header, separator, and malformed rows
            continue
        if any([(optim == "minimize" and score < best_score),
                (optim == "maximize" and score > best_score)]):
            best_score = score
            # NOTE(review): best_epoch is taken from the log as a string
            # (column 1), not cast to int -- confirm callers expect that
            best_epoch = splits[1]
    return best_score, best_epoch
def read_csv(path):
    """
    Read a csv into a dictionary of columns.
    Args:
        path (str): path to the csv file
    Returns:
        dic (dict): dictionary version of the file, mapping each header
            name to a list of values (int, float, or str).
    """
    with open(path, "r") as f:
        rows = f.readlines()
    header = rows[0].strip().split(",")
    dic = {name: [] for name in header}
    for row in rows[1:]:
        for name, raw in zip(header, row.strip().split(",")):
            # prefer int, then float, then fall back to the raw string
            if raw.isdigit():
                parsed = int(raw)
            else:
                try:
                    parsed = float(raw)
                except ValueError:
                    parsed = raw
            dic[name].append(parsed)
    return dic
def write_csv(path, dic):
    """
    Write a dictionary of columns to a csv file.
    Args:
        path (str): path to the csv file
        dic (dict): dictionary mapping column names to lists of values
    Returns:
        None
    """
    # columns are alphabetical except "smiles", which always goes first
    columns = sorted(dic)
    if "smiles" in columns:
        columns.remove("smiles")
        columns.insert(0, "smiles")
    rows = [",".join(columns)]
    num_rows = len(dic[columns[0]])
    for i in range(num_rows):
        rows.append(",".join(str(dic[col][i]) for col in columns))
    with open(path, "w") as f:
        f.write("\n".join(rows))
def prop_split(max_specs,
               dataset_type,
               props,
               sample_dic,
               seed):
    """
    Sample a set of smiles strings by up to a maximum number. If the
    property of interest is a binary value, try to get as many of the
    underrepresented class as possible.
    Args:
        max_specs (int): maximum number of species
        dataset_type (str): type of problem (classification or regression)
        props (list[str]): names of properties you'll be fitting
        sample_dic (dict): dictionary of the form {smiles: sub_dic} for the
            set of smiles strings, where sub_dic contains other information,
            e.g. about `props`.
        seed (int): random seed for sampling
    Returns:
        keep_smiles (list[str]): sampled smiles strings.
    """
    random.seed(seed)
    if max_specs is not None and dataset_type == "classification":
        msg = "Not implemented for multiclass"
        assert len(props) == 1, msg
        prop = props[0]
        pos_smiles = [key for key, sub_dic in sample_dic.items()
                      if sub_dic.get(prop) == 1]
        neg_smiles = [key for key, sub_dic in sample_dic.items()
                      if sub_dic.get(prop) == 0]
        # find the underrepresented and overrepresented class
        if len(pos_smiles) < len(neg_smiles):
            underrep = pos_smiles
            overrep = neg_smiles
        else:
            underrep = neg_smiles
            overrep = pos_smiles
        # if possible, keep all of the underrepresented class
        if max_specs >= 2 * len(underrep):
            random.shuffle(overrep)
            num_left = max_specs - len(underrep)
            keep_smiles = underrep + overrep[:num_left]
        # otherwise create a dataset with half of each
        else:
            random.shuffle(underrep)
            random.shuffle(overrep)
            # bug fix: take `max_specs // 2` species from the
            # underrepresented class and the remainder from the head of
            # the overrepresented class, so the total is exactly
            # `max_specs`; the old slice `overrep[max_specs // 2:]` kept
            # the *tail* of `overrep` and could return far more than
            # `max_specs` species
            keep_smiles = (underrep[:max_specs // 2]
                           + overrep[:max_specs - max_specs // 2])
    else:
        keep_smiles = list(sample_dic.keys())
        # if setting a maximum, need to shuffle in order
        # to take random smiles
        if max_specs is not None:
            random.shuffle(keep_smiles)
            keep_smiles = keep_smiles[:max_specs]
    return keep_smiles
def get_split_names(train_only,
                    val_only,
                    test_only):
    """
    Get names of dataset splits.
    Args:
        train_only (bool): only load the training set
        val_only (bool): only load the validation set
        test_only (bool): only load the test set
    Returns:
        names (list[str]): names of splits
            (train, val, and/or test) that we're
            monitoring.
    """
    flags = {"train": train_only,
             "val": val_only,
             "test": test_only}
    selected = [split for split, only in flags.items() if only]
    # At most one *_only flag may be set at a time
    if len(selected) > 1:
        raise Exception("Requested {}, which are mutually exclusive"
                        .format(", ".join(selected)))
    # No flag set means "monitor everything"
    return selected if selected else ["train", "val", "test"]
def preprocess_class(pred):
    """
    Preprocess classifier predictions. This applies,
    for example, if you train an sklearn regressor
    rather than classifier, which doesn't necessarily
    predict a value between 0 and 1.
    Args:
        pred (np.array or torch.Tensor or list): predictions
    Returns:
        pred (np.array or torch.Tensor or list): predictions
            with max 1 and min 0.
    Note:
        Array/tensor inputs are clipped in place, so the caller's
        object is modified as well.
    """
    # idiom fix: `isinstance` instead of `type(...) is list`
    to_list = isinstance(pred, list)
    if to_list:
        pred = np.array(pred)
    # make sure the min and max are 0 and 1
    pred[pred < 0] = 0
    pred[pred > 1] = 1
    if to_list:
        pred = pred.tolist()
    return pred
def apply_metric(metric, pred, actual):
    """
    Apply a metric to a set of predictions.
    Args:
        metric (str): name of metric
        pred (iterable): predicted values
        actual (iterable): actual values
    Returns:
        score (float): metric score
    Raises:
        ValueError: if `metric` is not recognized
    """
    if metric == "auc":
        pred = preprocess_class(pred)
        if max(pred) == 0:
            score = 0
        else:
            score = roc_auc_score(y_true=actual, y_score=pred)
    elif metric == "prc-auc":
        pred = preprocess_class(pred)
        if max(pred) == 0:
            score = 0
        else:
            precision, recall, _ = precision_recall_curve(
                y_true=actual, probas_pred=pred)
            score = auc(recall, precision)
    elif metric == "mse":
        score = ((np.array(pred) - np.array(actual)) ** 2).mean()
    elif metric == "rmse":
        score = ((np.array(pred) - np.array(actual)) ** 2).mean() ** 0.5
    elif metric == "mae":
        score = (abs(np.array(pred) - np.array(actual))).mean()
    elif metric == "r2":
        score = r2_score(y_true=actual, y_pred=pred)
    elif metric == "accuracy":
        np_pred = np.array(pred)
        mask = np_pred >= 0.5
        np_pred[mask] = 1
        np_pred[np.bitwise_not(mask)] = 0
        score = accuracy_score(y_true=actual, y_pred=np_pred)
    elif metric in ["cross_entropy", "binary_cross_entropy"]:
        # bug fix: this branch previously passed `np_pred`, a variable
        # only defined in the "accuracy" branch, and raised a NameError
        score = log_loss(y_true=actual, y_pred=pred)
    else:
        # previously an unknown metric raised an UnboundLocalError on
        # `score`; fail with an explicit message instead
        raise ValueError("Unknown metric: {}".format(metric))
    return score
def avg_distances(dset):
    """Compute the conformer-weighted average of all pair distances.

    Args:
        dset (nff.nn.data.Dataset): NFF dataset where all the geometries are
            different conformers for one species.
    Returns:
        all_nbrs (torch.LongTensor): unique neighbor pairs over conformers
        avg_d (torch.Tensor): weighted average distance for each pair
    """
    # Merge every conformer's neighbor list into one deduplicated set of
    # (i, j) pairs
    pair_list = [tuple(pair.tolist())
                 for nbrs in dset.props['nbr_list']
                 for pair in nbrs]
    unique_pairs = list(set(tuple(pair_list)))
    all_nbrs = torch.LongTensor([list(pair) for pair in unique_pairs])
    num_confs = len(dset)
    all_distances = torch.zeros(num_confs, all_nbrs.shape[0])
    for conf_idx, batch in enumerate(dset):
        # nxyz stores [Z, x, y, z]; drop the atomic-number column
        coords = batch["nxyz"][:, 1:]
        diff = coords[all_nbrs[:, 0]] - coords[all_nbrs[:, 1]]
        all_distances[conf_idx] = diff.pow(2).sum(1).sqrt()
    # Statistical weight of each conformer, broadcast over pairs
    weights = dset.props["weights"].reshape(-1, 1)
    avg_d = (all_distances * weights).sum(0)
    return all_nbrs, avg_d
def cat_props(props):
    """
    Merge per-batch tensor lists in a property dictionary.
    Args:
        props (dict): dictionary whose values may be lists of tensors
            (one per batch), plain tensors, or other lists
    Returns:
        new_props (dict): dictionary where each list of tensors is merged
            into a single tensor (`torch.stack` for 0-d tensors, which
            cannot be `cat`-ed, and `torch.cat` otherwise); other lists
            and plain tensors are kept as-is. Values of any other type
            are dropped, matching the original behavior.
    """
    new_props = {}
    for key, val in props.items():
        if isinstance(val, list):
            # bug fix: guard against empty lists, which previously
            # raised an IndexError on `val[0]`
            if val and isinstance(val[0], torch.Tensor):
                if len(val[0].shape) == 0:
                    # 0-d (scalar) tensors must be stacked, not catted
                    new_props[key] = torch.stack(val)
                else:
                    new_props[key] = torch.cat(val)
            else:
                new_props[key] = val
        elif isinstance(val, torch.Tensor):
            new_props[key] = val
    return new_props
def kron(a, b):
    """Return the Kronecker product of two 2-d tensors.

    The result has shape (a.size(0) * b.size(0), a.size(1) * b.size(1)).
    """
    n_rows = a.size(0) * b.size(0)
    n_cols = a.size(1) * b.size(1)
    # Outer product with axes ordered (a-row, b-row, a-col, b-col), then
    # collapsed into the standard Kronecker block layout
    return torch.einsum("ab,cd-> acbd", a, b).view(n_rows, n_cols)
def load_defaults(direc,
                  arg_path):
    """
    Load default arguments from a JSON file.
    Args:
        direc (str): directory containing the JSON file
        arg_path (str): name (or relative path) of the JSON file
    Returns:
        dict: parsed default arguments
    """
    full_path = os.path.join(direc, arg_path)
    with open(full_path, 'r') as handle:
        return json.load(handle)
def parse_args_from_json(arg_path,
                         direc):
    """Build an argparse parser from a JSON spec and parse the CLI args.

    The JSON file supplies a 'description' plus one entry per argument
    with keys such as 'type', 'help', 'default', 'choices', 'nargs' and
    'required'.
    """
    default_args = load_defaults(arg_path=arg_path,
                                 direc=direc)
    parser = argparse.ArgumentParser(
        description=default_args.pop('description'))
    # Required arguments get put in one group and optional ones in another
    # so that they're separated in `--help` . We don't actually set
    # required=True for required ones, though, because they can be given in
    # the config file instead of the command line
    required = parser.add_argument_group(('required arguments (either in '
                                          'the command line or the config '
                                          'file)'))
    optional = parser.add_argument_group('optional arguments')
    for name, info in default_args.items():
        extra = {key: info[key]
                 for key in ('default', 'choices', 'nargs')
                 if key in info}
        group = required if info.get('required', False) else optional
        # NOTE(review): `eval` on the 'type' field assumes the JSON file
        # is trusted — never feed this untrusted input
        group.add_argument(f'--{name}',
                           type=eval(info['type']),
                           help=info['help'],
                           **extra)
    return parser.parse_args()
| [
"sklearn.metrics.auc",
"sklearn.metrics.roc_auc_score",
"numpy.array",
"sklearn.metrics.log_loss",
"numpy.bitwise_not",
"sklearn.metrics.r2_score",
"argparse.ArgumentParser",
"subprocess.Popen",
"sys.stdout.flush",
"random.shuffle",
"sklearn.metrics.precision_recall_curve",
"torch.einsum",
"... | [((1393, 1403), 'tqdm.tqdm', 'tqdm', (['iter'], {}), '(iter)\n', (1397, 1403), False, 'from tqdm import tqdm\n'), ((2389, 2407), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2405, 2407), False, 'import sys\n'), ((2578, 2635), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'shell': '(True)', 'executable': '"""/bin/bash"""'}), "(cmd, shell=True, executable='/bin/bash')\n", (2594, 2635), False, 'import subprocess\n'), ((4493, 4539), 'os.path.join', 'os.path.join', (['model_path', '"""log_human_read.csv"""'], {}), "(model_path, 'log_human_read.csv')\n", (4505, 4539), False, 'import os\n'), ((7224, 7241), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7235, 7241), False, 'import random\n'), ((12330, 12371), 'torch.zeros', 'torch.zeros', (['num_confs', 'all_nbrs.shape[0]'], {}), '(num_confs, all_nbrs.shape[0])\n', (12341, 12371), False, 'import torch\n'), ((13208, 13242), 'torch.einsum', 'torch.einsum', (['"""ab,cd-> acbd"""', 'a', 'b'], {}), "('ab,cd-> acbd', a, b)\n", (13220, 13242), False, 'import torch\n'), ((13476, 13505), 'os.path.join', 'os.path.join', (['direc', 'arg_path'], {}), '(direc, arg_path)\n', (13488, 13505), False, 'import os\n'), ((13832, 13880), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (13855, 13880), False, 'import argparse\n'), ((10118, 10132), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (10126, 10132), True, 'import numpy as np\n'), ((13565, 13577), 'json.load', 'json.load', (['f'], {}), '(f)\n', (13574, 13577), False, 'import json\n'), ((2083, 2095), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2092, 2095), False, 'import json\n'), ((8021, 8044), 'random.shuffle', 'random.shuffle', (['overrep'], {}), '(overrep)\n', (8035, 8044), False, 'import random\n'), ((8232, 8256), 'random.shuffle', 'random.shuffle', (['underrep'], {}), '(underrep)\n', (8246, 8256), False, 'import random\n'), ((8269, 8292), 'random.shuffle', 
'random.shuffle', (['overrep'], {}), '(overrep)\n', (8283, 8292), False, 'import random\n'), ((8595, 8622), 'random.shuffle', 'random.shuffle', (['keep_smiles'], {}), '(keep_smiles)\n', (8609, 8622), False, 'import random\n'), ((10728, 10770), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', ([], {'y_true': 'actual', 'y_score': 'pred'}), '(y_true=actual, y_score=pred)\n', (10741, 10770), False, 'from sklearn.metrics import roc_auc_score, auc, precision_recall_curve, r2_score, accuracy_score, log_loss\n'), ((10937, 10992), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', ([], {'y_true': 'actual', 'probas_pred': 'pred'}), '(y_true=actual, probas_pred=pred)\n', (10959, 10992), False, 'from sklearn.metrics import roc_auc_score, auc, precision_recall_curve, r2_score, accuracy_score, log_loss\n'), ((11030, 11052), 'sklearn.metrics.auc', 'auc', (['recall', 'precision'], {}), '(recall, precision)\n', (11033, 11052), False, 'from sklearn.metrics import roc_auc_score, auc, precision_recall_curve, r2_score, accuracy_score, log_loss\n'), ((12935, 12951), 'torch.stack', 'torch.stack', (['val'], {}), '(val)\n', (12946, 12951), False, 'import torch\n'), ((13011, 13025), 'torch.cat', 'torch.cat', (['val'], {}), '(val)\n', (13020, 13025), False, 'import torch\n'), ((11376, 11412), 'sklearn.metrics.r2_score', 'r2_score', ([], {'y_true': 'actual', 'y_pred': 'pred'}), '(y_true=actual, y_pred=pred)\n', (11384, 11412), False, 'from sklearn.metrics import roc_auc_score, auc, precision_recall_curve, r2_score, accuracy_score, log_loss\n'), ((11097, 11111), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (11105, 11111), True, 'import numpy as np\n'), ((11114, 11130), 'numpy.array', 'np.array', (['actual'], {}), '(actual)\n', (11122, 11130), True, 'import numpy as np\n'), ((11462, 11476), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (11470, 11476), True, 'import numpy as np\n'), ((11591, 11636), 'sklearn.metrics.accuracy_score', 'accuracy_score', ([], 
{'y_true': 'actual', 'y_pred': 'np_pred'}), '(y_true=actual, y_pred=np_pred)\n', (11605, 11636), False, 'from sklearn.metrics import roc_auc_score, auc, precision_recall_curve, r2_score, accuracy_score, log_loss\n'), ((11549, 11569), 'numpy.bitwise_not', 'np.bitwise_not', (['mask'], {}), '(mask)\n', (11563, 11569), True, 'import numpy as np\n'), ((11715, 11754), 'sklearn.metrics.log_loss', 'log_loss', ([], {'y_true': 'actual', 'y_pred': 'np_pred'}), '(y_true=actual, y_pred=np_pred)\n', (11723, 11754), False, 'from sklearn.metrics import roc_auc_score, auc, precision_recall_curve, r2_score, accuracy_score, log_loss\n'), ((11190, 11204), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (11198, 11204), True, 'import numpy as np\n'), ((11207, 11223), 'numpy.array', 'np.array', (['actual'], {}), '(actual)\n', (11215, 11223), True, 'import numpy as np\n'), ((11292, 11306), 'numpy.array', 'np.array', (['pred'], {}), '(pred)\n', (11300, 11306), True, 'import numpy as np\n'), ((11309, 11325), 'numpy.array', 'np.array', (['actual'], {}), '(actual)\n', (11317, 11325), True, 'import numpy as np\n')] |
import gradio as gr
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
def load_data():
    '''
    Load the first 500 rows of the occupancy training data set.
    '''
    return pd.read_csv('data/occupancy_datatraining.txt', sep=",", nrows=500)
def preprocess_data(data_in):
    '''
    Min-max scale the numeric features; returns X and y for
    cross-validation.
    '''
    frame = data_in.copy()
    # Numeric columns to scale into [0, 1]
    feature_cols = ['Temperature', 'Humidity', 'Light', 'CO2']
    scaler = MinMaxScaler()
    scaled = scaler.fit_transform(frame[feature_cols])
    scaled_names = []
    for idx, col in enumerate(feature_cols):
        scaled_name = col + '_scaled'
        scaled_names.append(scaled_name)
        frame[scaled_name] = scaled[:, idx]
    return frame[scaled_names], frame['Occupancy']
# Load the data set once at import time and precompute the scaled feature
# matrix / target vector used by the interactive `knn` callback below.
data = load_data()
data_X, data_y = preprocess_data(data)
def knn(cv_knn, cv_slider):
    '''
    Callback wired to the interface components: run k-NN
    cross-validation and report per-fold accuracies plus their mean.
    '''
    model = KNeighborsClassifier(n_neighbors=cv_knn)
    scores = cross_val_score(model, data_X, data_y,
                             scoring='accuracy', cv=cv_slider)
    fold_scores = {str(fold): float(score)
                   for fold, score in enumerate(scores)}
    # The same dict feeds both the Label and the KeyValues outputs
    return fold_scores, fold_scores, np.mean(scores)
# Input components (number of neighbors and number of CV folds)
cv_knn = gr.inputs.Slider(minimum=1, maximum=300, step=1, default=5, label='Количество соседей')
cv_slider = gr.inputs.Slider(minimum=3, maximum=10, step=1, default=5, label='Количество фолдов')
# Output components (per-fold scores and the mean accuracy)
out_folds_label = gr.outputs.Label(type='confidences', label='Оценки по фолдам (Label)')
out_folds_kv = gr.outputs.KeyValues(label='Оценки по фолдам (KeyValues)')
out_acc = gr.outputs.Textbox(type="number", label='Усредненное значение accuracy')
# Wire the `knn` callback to the components and launch the local web app
iface = gr.Interface(
    fn=knn,
    inputs=[cv_knn, cv_slider],
    outputs=[out_folds_label, out_folds_kv, out_acc],
    title='Метод ближайших соседей')
iface.launch()
| [
"numpy.mean",
"gradio.Interface",
"pandas.read_csv",
"gradio.outputs.Label",
"sklearn.neighbors.KNeighborsClassifier",
"gradio.outputs.Textbox",
"gradio.outputs.KeyValues",
"gradio.inputs.Slider",
"sklearn.preprocessing.MinMaxScaler"
] | [((1410, 1502), 'gradio.inputs.Slider', 'gr.inputs.Slider', ([], {'minimum': '(1)', 'maximum': '(300)', 'step': '(1)', 'default': '(5)', 'label': '"""Количество соседей"""'}), "(minimum=1, maximum=300, step=1, default=5, label=\n 'Количество соседей')\n", (1426, 1502), True, 'import gradio as gr\n'), ((1510, 1600), 'gradio.inputs.Slider', 'gr.inputs.Slider', ([], {'minimum': '(3)', 'maximum': '(10)', 'step': '(1)', 'default': '(5)', 'label': '"""Количество фолдов"""'}), "(minimum=3, maximum=10, step=1, default=5, label=\n 'Количество фолдов')\n", (1526, 1600), True, 'import gradio as gr\n'), ((1637, 1707), 'gradio.outputs.Label', 'gr.outputs.Label', ([], {'type': '"""confidences"""', 'label': '"""Оценки по фолдам (Label)"""'}), "(type='confidences', label='Оценки по фолдам (Label)')\n", (1653, 1707), True, 'import gradio as gr\n'), ((1723, 1781), 'gradio.outputs.KeyValues', 'gr.outputs.KeyValues', ([], {'label': '"""Оценки по фолдам (KeyValues)"""'}), "(label='Оценки по фолдам (KeyValues)')\n", (1743, 1781), True, 'import gradio as gr\n'), ((1792, 1864), 'gradio.outputs.Textbox', 'gr.outputs.Textbox', ([], {'type': '"""number"""', 'label': '"""Усредненное значение accuracy"""'}), "(type='number', label='Усредненное значение accuracy')\n", (1810, 1864), True, 'import gradio as gr\n'), ((1874, 2009), 'gradio.Interface', 'gr.Interface', ([], {'fn': 'knn', 'inputs': '[cv_knn, cv_slider]', 'outputs': '[out_folds_label, out_folds_kv, out_acc]', 'title': '"""Метод ближайших соседей"""'}), "(fn=knn, inputs=[cv_knn, cv_slider], outputs=[out_folds_label,\n out_folds_kv, out_acc], title='Метод ближайших соседей')\n", (1886, 2009), True, 'import gradio as gr\n'), ((274, 340), 'pandas.read_csv', 'pd.read_csv', (['"""data/occupancy_datatraining.txt"""'], {'sep': '""","""', 'nrows': '(500)'}), "('data/occupancy_datatraining.txt', sep=',', nrows=500)\n", (285, 340), True, 'import pandas as pd\n'), ((642, 656), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), 
'()\n', (654, 656), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1153, 1193), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'cv_knn'}), '(n_neighbors=cv_knn)\n', (1173, 1193), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((1364, 1379), 'numpy.mean', 'np.mean', (['scores'], {}), '(scores)\n', (1371, 1379), True, 'import numpy as np\n')] |
"""
Load a part config for inclusion into whole config
"""
import numpy as np
from dlpoly.config import Atom
from dlpoly.field import Molecule
from dlpoly.utility import read_line, build_3d_rotation_matrix
class CFG(Molecule):
    ''' Load a partial configuration '''
    def __init__(self, source=None):
        Molecule.__init__(self)
        self.atoms = []
        self.nMols = 1
        if source is not None:
            self.read(source)
            self._centre_mol()
    # Species record for each atom (resolved via the parent Molecule) and
    # the Cartesian position of each atom
    atomSpecies = property(lambda self: [self.species[atom.element] for atom in self.atoms])
    atomPos = property(lambda self: [atom.pos for atom in self.atoms])
    @property
    def bounds(self):
        ''' Find the limiting boundaries of the molecule '''
        return np.asarray([np.min(np.asarray(self.atomPos), axis=0),
                           np.max(np.asarray(self.atomPos), axis=0)]).reshape(2, 3)
    def translate(self, translation):
        ''' Move all atoms by translation '''
        for atom in self.atoms:
            atom.pos += translation
    def stretch(self, stretch):
        ''' Scale all atom positions by `stretch` '''
        for atom in self.atoms:
            atom.pos *= stretch
    def rotate(self, rotation):
        ''' Perform rotation on the atoms in cell.
        `rotation` is a triple of Euler angles (alpha, beta, gamma) in degrees.
        '''
        alpha, beta, gamma = rotation
        rot = build_3d_rotation_matrix(alpha, beta, gamma, 'deg')
        self.apply_matrix_transform(rot)
    def apply_matrix_transform(self, transform: np.ndarray):
        ''' Apply a matrix transform to own atoms '''
        for atom in self.atoms:
            atom.pos = np.matmul(transform, atom.pos)
    def _centre_com(self):
        ''' Centre CoM about 0, 0, 0 '''
        centreOfMass = 0.
        for atom in self.atoms:
            centreOfMass += self.species[atom.element].mass * atom.pos
        # bug fix: np.sum over a generator is deprecated and unreliable;
        # use the builtin sum for the scalar total mass
        centreOfMass /= sum(spec.mass for spec in self.atomSpecies)
        self.translate(-centreOfMass)
    def _centre_mol(self):
        ''' Centre molecule's centroid about 0, 0, 0 '''
        positions = np.asarray(self.atomPos)
        maxPos, minPos = np.max(positions, axis=0), np.min(positions, axis=0)
        shift = (minPos + maxPos) / 2
        self.translate(-shift)
    def _read_pos(self, source, nElem):
        ''' Read in an atoms block of `nElem` lines ("element x y z") '''
        self.atoms = [None]*nElem
        for i in range(nElem):
            datum = read_line(source).split()
            # bug fix: convert coordinates to floats; without `dtype` the
            # array keeps a string dtype and breaks later arithmetic
            # (translate / rotate / bounds)
            self.atoms[i] = Atom(element=datum[0],
                                 pos=np.asarray(datum[1:4], dtype=float),
                                 index=i)
    def _read_bonds(self, source, nElem):
        ''' Read in a bonds block: `nElem` potential-class sub-blocks '''
        for _ in range(nElem):
            potClass, num = read_line(source).split()
            num = int(num)
            self._read_block(source, potClass, num)
    def read(self, source):
        ''' Read a partial config file '''
        with open(source, 'r') as sourceFile:
            # First line is the configuration name
            self.name = read_line(sourceFile)
            for line in sourceFile:
                block, num = line.strip().split()
                num = int(num)
                block = block.lower()
                if block == 'positions':
                    self._read_pos(sourceFile, num)
                elif block == 'species':
                    self._read_atoms(sourceFile, num)
                elif block == 'bonding':
                    self._read_bonds(sourceFile, num)
                else:
                    raise ValueError('Cannot read structure {}'.format(block))
| [
"dlpoly.utility.build_3d_rotation_matrix",
"numpy.asarray",
"numpy.max",
"numpy.sum",
"numpy.matmul",
"numpy.min",
"dlpoly.utility.read_line",
"dlpoly.field.Molecule.__init__"
] | [((315, 338), 'dlpoly.field.Molecule.__init__', 'Molecule.__init__', (['self'], {}), '(self)\n', (332, 338), False, 'from dlpoly.field import Molecule\n'), ((1334, 1385), 'dlpoly.utility.build_3d_rotation_matrix', 'build_3d_rotation_matrix', (['alpha', 'beta', 'gamma', '"""deg"""'], {}), "(alpha, beta, gamma, 'deg')\n", (1358, 1385), False, 'from dlpoly.utility import read_line, build_3d_rotation_matrix\n'), ((1851, 1897), 'numpy.sum', 'np.sum', (['(spec.mass for spec in self.atomSpecies)'], {}), '(spec.mass for spec in self.atomSpecies)\n', (1857, 1897), True, 'import numpy as np\n'), ((2038, 2062), 'numpy.asarray', 'np.asarray', (['self.atomPos'], {}), '(self.atomPos)\n', (2048, 2062), True, 'import numpy as np\n'), ((1598, 1628), 'numpy.matmul', 'np.matmul', (['transform', 'atom.pos'], {}), '(transform, atom.pos)\n', (1607, 1628), True, 'import numpy as np\n'), ((2088, 2110), 'numpy.max', 'np.max', (['tmpArr'], {'axis': '(0)'}), '(tmpArr, axis=0)\n', (2094, 2110), True, 'import numpy as np\n'), ((2112, 2134), 'numpy.min', 'np.min', (['tmpArr'], {'axis': '(0)'}), '(tmpArr, axis=0)\n', (2118, 2134), True, 'import numpy as np\n'), ((2870, 2891), 'dlpoly.utility.read_line', 'read_line', (['sourceFile'], {}), '(sourceFile)\n', (2879, 2891), False, 'from dlpoly.utility import read_line, build_3d_rotation_matrix\n'), ((2369, 2386), 'dlpoly.utility.read_line', 'read_line', (['source'], {}), '(source)\n', (2378, 2386), False, 'from dlpoly.utility import read_line, build_3d_rotation_matrix\n'), ((2450, 2472), 'numpy.asarray', 'np.asarray', (['datum[1:4]'], {}), '(datum[1:4])\n', (2460, 2472), True, 'import numpy as np\n'), ((2623, 2640), 'dlpoly.utility.read_line', 'read_line', (['source'], {}), '(source)\n', (2632, 2640), False, 'from dlpoly.utility import read_line, build_3d_rotation_matrix\n'), ((775, 799), 'numpy.asarray', 'np.asarray', (['self.atomPos'], {}), '(self.atomPos)\n', (785, 799), True, 'import numpy as np\n'), ((846, 870), 'numpy.asarray', 
'np.asarray', (['self.atomPos'], {}), '(self.atomPos)\n', (856, 870), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from IPython.html.widgets import interactive, IntSlider, widget, FloatText, FloatSlider, Checkbox, ToggleButtons
# Conductivity slider bounds (S/m)
sigmin = 0.01
sigmax = 0.1
sigma_0 = 0 # conductivity of the air
# Geometry (in units of coil spacing s): first-layer thickness, boom
# height and its slider maximum, and maximum plot depth
h_1 = 1.
h_boom = 0.
h_boom_max = 2.
zmax = 4.
z = np.linspace(0.,zmax,1000)
# Local sensitivity (phi) and cumulative response (R) as functions of
# normalized depth z, for vertical (v) and horizontal (h) coplanar coil
# orientations
phi_v = lambda z: (4.*z) / (4.*z**2 + 1.)**(3./2.)
phi_h = lambda z: 2 - (4.*z) / (4.*z**2 + 1.)**(1./2.)
R_v = lambda z: 1./(4.*z**2. + 1.)**(1./2.)
R_h = lambda z: (4.*z**2 + 1.)**(1./2.) - 2.*z
# Apparent conductivity of air + two-layer earth measured with the
# instrument raised to height h_boom, built from the cumulative responses
sigma_av = lambda h_boom, h_1, sigma_1, sigma_2: sigma_0*(1.-R_v(h_boom)) + sigma_1*(R_v(h_boom) - R_v(h_1+h_boom)) + sigma_2*R_v(h_1+h_boom)
sigma_ah = lambda h_boom, h_1, sigma_1, sigma_2: sigma_0*(1.-R_h(h_boom)) + sigma_1*(R_h(h_boom) - R_h(h_1+h_boom)) + sigma_2*R_h(h_1+h_boom)
def plot_ResponseFct(h_boom, h_1, sigma_1, sigma_2, orientation='vertical'):
    """Plot the layered conductivity model, the EM response function and
    their product, and annotate the resulting apparent conductivity.

    Args:
        h_boom: instrument (boom) height above the ground
        h_1: thickness of the first layer
        sigma_1: conductivity of the first layer (S/m)
        sigma_2: conductivity of the second layer (S/m)
        orientation: coil orientation, 'vertical' or 'horizontal'

    Raises:
        ValueError: if `orientation` is not recognized
    """
    sigvec = sigma_1 * np.ones(z.shape)
    sigvec[z > h_1] = sigma_2
    # bug fix: compare strings with `==`, not `is` (string identity
    # comparison is fragile and a SyntaxWarning on modern Python)
    if orientation == 'vertical':
        phi = phi_v(z + h_boom)
        sig_a = sigma_av(h_boom, h_1, sigma_1, sigma_2)
        phi_title = r'$\phi_V$'
    elif orientation == 'horizontal':
        phi = phi_h(z + h_boom)
        sig_a = sigma_ah(h_boom, h_1, sigma_1, sigma_2)
        phi_title = r'$\phi_H$'
    else:
        # previously an unknown orientation fell through and raised a
        # confusing NameError on `phi`
        raise ValueError("orientation must be 'vertical' or 'horizontal'")
    phisig = phi * sigvec
    fig, ax = plt.subplots(1, 3, figsize=(11, 6))
    fs = 13
    # left panel: the layered conductivity model
    ax[0].plot(sigvec, z, 'r', linewidth=1.5)
    ax[0].set_xlim([0., sigmax * 1.1])
    ax[0].invert_yaxis()
    ax[0].set_ylabel('z/s', fontsize=fs)
    ax[0].set_title(r'$\sigma$', fontsize=fs + 4, position=[.5, 1.02])
    ax[0].set_xlabel('Conductivity (S/m)', fontsize=fs)
    ax[0].grid(which='both', linewidth=0.6, color=[0.5, 0.5, 0.5])
    # middle panel: the depth response function
    ax[1].plot(phi, z, linewidth=1.5)
    ax[1].set_xlim([0., 2.])
    ax[1].invert_yaxis()
    ax[1].set_title('%s' % (phi_title), fontsize=fs + 4, position=[.5, 1.02])
    ax[1].set_xlabel('Response Function', fontsize=fs)
    ax[1].grid(which='both', linewidth=0.6, color=[0.5, 0.5, 0.5])
    # right panel: depth-weighted conductivity
    ax[2].plot(phisig, z, color='k', linewidth=1.5)
    ax[2].fill_betweenx(z, phisig, color='k', alpha=0.5)
    ax[2].invert_yaxis()
    ax[2].set_title(r'$\sigma \cdot$ %s' % (phi_title), fontsize=fs + 4, position=[.5, 1.02])
    ax[2].set_xlabel('Weighted Conductivity (S/m)', fontsize=fs)
    ax[2].set_xlim([0., sigmax * 2.])
    ax[2].grid(which='both', linewidth=0.6, color=[0.5, 0.5, 0.5])
    props = dict(boxstyle='round', facecolor='grey', alpha=0.3)
    # place a text box in upper left in axes coords
    textstr = r'$\sigma_a=%.3f$ S/m' % (sig_a)
    ax[2].text(sigmax * 0.9, 3.75, textstr, fontsize=fs + 2,
               verticalalignment='bottom', bbox=props)
    plt.tight_layout()
    plt.show()
    return None
def interactive_responseFct():
    """Build the ipywidgets app wrapping `plot_ResponseFct`."""
    boom_slider = FloatSlider(min=h_boom, max=h_boom_max, step=0.1,
                              value=h_boom, continuous_update=False)
    layer_slider = FloatSlider(min=0., max=zmax, value=0.1, step=0.1,
                               continuous_update=False)
    sig1_slider = FloatSlider(min=sigmin, max=sigmax, value=sigmin,
                              step=sigmin, continuous_update=False)
    sig2_slider = FloatSlider(min=sigmin, max=sigmax, value=sigmin,
                              step=sigmin, continuous_update=False)
    orient_toggle = ToggleButtons(options=['vertical', 'horizontal'])
    return interactive(plot_ResponseFct,
                       h_boom=boom_slider,
                       h_1=layer_slider,
                       sigma_1=sig1_slider,
                       sigma_2=sig2_slider,
                       orientation=orient_toggle)
if __name__ == '__main__':
    # bug fix: plot_ResponseFct requires four positional arguments
    # (h_boom, h_1, sigma_1, sigma_2); the original call passed only
    # three and raised a TypeError. Use the module-level defaults for
    # the geometry and the slider bounds for the two conductivities.
    plot_ResponseFct(h_boom, h_1, sigmin, sigmax)
| [
"numpy.ones",
"IPython.html.widgets.FloatSlider",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"IPython.html.widgets.ToggleButtons",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((287, 315), 'numpy.linspace', 'np.linspace', (['(0.0)', 'zmax', '(1000)'], {}), '(0.0, zmax, 1000)\n', (298, 315), True, 'import numpy as np\n'), ((1288, 1323), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(11, 6)'}), '(1, 3, figsize=(11, 6))\n', (1300, 1323), True, 'import matplotlib.pyplot as plt\n'), ((2613, 2631), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2629, 2631), True, 'import matplotlib.pyplot as plt\n'), ((2636, 2646), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2644, 2646), True, 'import matplotlib.pyplot as plt\n'), ((895, 911), 'numpy.ones', 'np.ones', (['z.shape'], {}), '(z.shape)\n', (902, 911), True, 'import numpy as np\n'), ((2741, 2833), 'IPython.html.widgets.FloatSlider', 'FloatSlider', ([], {'min': 'h_boom', 'max': 'h_boom_max', 'step': '(0.1)', 'value': 'h_boom', 'continuous_update': '(False)'}), '(min=h_boom, max=h_boom_max, step=0.1, value=h_boom,\n continuous_update=False)\n', (2752, 2833), False, 'from IPython.html.widgets import interactive, IntSlider, widget, FloatText, FloatSlider, Checkbox, ToggleButtons\n'), ((2861, 2937), 'IPython.html.widgets.FloatSlider', 'FloatSlider', ([], {'min': '(0.0)', 'max': 'zmax', 'value': '(0.1)', 'step': '(0.1)', 'continuous_update': '(False)'}), '(min=0.0, max=zmax, value=0.1, step=0.1, continuous_update=False)\n', (2872, 2937), False, 'from IPython.html.widgets import interactive, IntSlider, widget, FloatText, FloatSlider, Checkbox, ToggleButtons\n'), ((2967, 3058), 'IPython.html.widgets.FloatSlider', 'FloatSlider', ([], {'min': 'sigmin', 'max': 'sigmax', 'value': 'sigmin', 'step': 'sigmin', 'continuous_update': '(False)'}), '(min=sigmin, max=sigmax, value=sigmin, step=sigmin,\n continuous_update=False)\n', (2978, 3058), False, 'from IPython.html.widgets import interactive, IntSlider, widget, FloatText, FloatSlider, Checkbox, ToggleButtons\n'), ((3087, 3178), 'IPython.html.widgets.FloatSlider', 'FloatSlider', ([], {'min': 
'sigmin', 'max': 'sigmax', 'value': 'sigmin', 'step': 'sigmin', 'continuous_update': '(False)'}), '(min=sigmin, max=sigmax, value=sigmin, step=sigmin,\n continuous_update=False)\n', (3098, 3178), False, 'from IPython.html.widgets import interactive, IntSlider, widget, FloatText, FloatSlider, Checkbox, ToggleButtons\n'), ((3209, 3258), 'IPython.html.widgets.ToggleButtons', 'ToggleButtons', ([], {'options': "['vertical', 'horizontal']"}), "(options=['vertical', 'horizontal'])\n", (3222, 3258), False, 'from IPython.html.widgets import interactive, IntSlider, widget, FloatText, FloatSlider, Checkbox, ToggleButtons\n')] |
"""Mutual information using binnings.
All the functions inside this file can be compiled using Numba.
"""
import numpy as np
import logging
from frites.utils import jit
# Module-level logger shared by the functions in this file
logger = logging.getLogger('frites')
###############################################################################
###############################################################################
# LOW-LEVEL CORE FUNCTIONS
###############################################################################
###############################################################################
"""
This first part contains functions for computing the mutual information using
binning method. In particular it redefines sub-functions that can be compiled
using Numba (e.g histogram 1D and 2D).
"""
@jit("f4(f4[:])")
def entropy(x):
"""Compute the entropy of a continuous variable.
Parameters
----------
x : array_like
Distribution of probabilities of shape (N,) and of type np.float32
Returns
-------
entr : np.float32
Entropy of the distribution
"""
x_max, x_min = x.max(), x.min()
assert (x_min >= 0) and (x_max <= 1)
if x_min == x_max == 0:
return np.float32(0.)
# Take only non-zero values as log(0) = 0 :
nnz_x = x[np.nonzero(x)]
entr = -np.sum(nnz_x * np.log2(nnz_x))
return entr
@jit("i8[:](f4[:], i8)")
def histogram(x, bins):
"""Compute the histogram of a continuous row vector.
Parameters
----------
x : array_like
Vector array of shape (N,) and of type np.float32
bins : int64
Number of bins
Returns
-------
hist : array_like
Vector array of shape (bins,) and of type int64
"""
hist = np.histogram(x, bins=bins)[0]
return hist
@jit("i8[:,:](f4[:], f4[:], i8, i8)")
def histogram2d(x, y, bins_x, bins_y):
"""Histogram 2d between two continuous row vectors.
Parameters
----------
x : array_like
Vector array of shape (N,) and of type np.float32
y : array_like
Vector array of shape (N,) and of type np.float32
bins_x, bins_y : int64
Number of bins respectively for the x and y variables
Returns
-------
hist : array_like
Array of shape (bins, bins) and of type int64
"""
# x-range
x_max, x_min = x.max(), x.min()
delta_x = 1 / ((x_max - x_min) / bins_x)
# y-range
y_max, y_min = y.max(), y.min()
delta_y = 1 / ((y_max - y_min) / bins_y)
# compute histogram 2d
xy_bin = np.zeros((np.int64(bins_x), np.int64(bins_y)), dtype=np.int64)
for t in range(len(x)):
i = (x[t] - x_min) * delta_x
j = (y[t] - y_min) * delta_y
if 0 <= i < bins_x and 0 <= j < bins_y:
xy_bin[int(i), int(j)] += 1
return xy_bin
@jit("f4(f4[:], f4[:], i8, i8)")
def mi_bin(x, y, bins_x, bins_y):
"""Mutual information between two arrays I(X; Y) using binning.
Parameters
----------
x : array_like
Vector array of shape (N,) and of type np.float32
y : array_like
Vector array of shape (N,) and of type np.float32
bins_x, bins_y : int64
Number of bins respectively for the x and y variables
Returns
-------
i : np.float32
The mutual information of type float32
"""
if bins_y == 0:
bins_y = len(np.unique(y))
# compute probabilities
p_x = histogram(x, bins_x)
p_y = histogram(y, bins_y)
p_xy = histogram2d(x, y, bins_x, bins_y)
p_x = p_x / p_x.sum()
p_y = p_y / p_y.sum()
p_xy = p_xy / p_xy.sum()
# compute entropy
h_x = entropy(p_x.astype(np.float32))
h_y = entropy(p_y.astype(np.float32))
h_xy = entropy(p_xy.ravel().astype(np.float32))
# compute mutual information
i = h_x + h_y - h_xy
return i
@jit("f4(f4[:], f4[:], f4[:], i8)")
def mi_bin_ccd(x, y, z, bins):
"""Compute the conditional mutual information I(X; Y | Z) using binning.
Parameters
----------
x, y, z : array_like
Vector arrays of shape (N,) and of type np.float32
bins : int64
Number of bins
Returns
-------
cmi : np.float32
The conditional mutual information of type float32
"""
# get unique z elements
z_u = np.unique(z)
n_z = len(z_u)
# compute mi for each elements of z
pz = np.zeros((np.int64(n_z)), dtype=np.float32)
icond = np.zeros((np.int64(n_z)), dtype=np.float32)
for n_k, k in enumerate(z_u):
idx_z = z == k
pz[n_k] = idx_z.sum()
_x, _y = x[idx_z], y[idx_z]
icond[n_k] = mi_bin(_x, _y, bins, bins)
# conditional mutual information
pz /= len(z)
cmi = np.sum(pz * icond)
return cmi
###############################################################################
###############################################################################
# MID-LEVEL CORE FUNCTIONS
###############################################################################
###############################################################################
"""
This second part defines mid level core mi functions mainly to speed up
computations on arrays that have a time dimension.
"""
@jit("f4[:](f4[:,:], f4[:], i8, i8)")
def mi_bin_time(x, y, bins_x, bins_y):
    """Compute the MI between two variables across time.
    Parameters
    ----------
    x : array_like
        Array of data of shape (n_times, n_trials)
    y : array_like
        Regressor array of shape (n_trials)
    bins_x, bins_y : int64
        Number of bins respectively for the x and y variables
    Returns
    -------
    mi : array_like
        Array of mutual information of shape (n_times)
    """
    n_times, n_trials = x.shape
    mi = np.zeros((n_times), dtype=np.float32)
    # one MI estimate I(x_t; y) per time point; the regressor y is the same
    # for every time point
    for t in range(n_times):
        mi[t] = mi_bin(x[t, :], y, bins_x, bins_y)
    return mi
@jit("f4[:](f4[:,:], f4[:], f4[:], i8)")
def mi_bin_ccd_time(x, y, z, bins):
    """Compute the MI between two variables across time.
    Parameters
    ----------
    x : array_like
        Array of data of shape (n_times, n_trials)
    y : array_like
        Regressor array of shape (n_trials)
    z : array_like
        Conditional array of shape (n_trials)
    bins : int64
        Number of bins
    Returns
    -------
    cmi : array_like
        Array of conditional mutual information of shape (n_times)
    """
    n_times, n_trials = x.shape
    mi = np.zeros((n_times), dtype=np.float32)
    # one conditional MI estimate I(x_t; y | z) per time point
    for t in range(n_times):
        mi[t] = mi_bin_ccd(x[t, :], y, z, bins)
    return mi
@jit("f4[:](f4[:,:], f4[:,:], i8, i8)")
def mi_bin_conn_time(x, y, bins_x, bins_y):
    """Compute the MI between two variables of equal shapes across time.
    Parameters
    ----------
    x : array_like
        Array of data of shape (n_times, n_trials)
    y : array_like
        Regressor array of shape (n_times, n_trials)
    bins_x, bins_y : int64
        Number of bins respectively for the x and y variables
    Returns
    -------
    mi : array_like
        Array of mutual information of shape (n_times)
    """
    n_times, n_trials = x.shape
    mi = np.zeros((n_times), dtype=np.float32)
    # unlike mi_bin_time, y is also time-resolved: the MI is computed between
    # matching time slices x[t, :] and y[t, :]
    for t in range(n_times):
        mi[t] = mi_bin(x[t, :], y[t, :], bins_x, bins_y)
    return mi
| [
"logging.getLogger",
"numpy.histogram",
"numpy.int64",
"numpy.unique",
"numpy.sum",
"numpy.zeros",
"numpy.nonzero",
"frites.utils.jit",
"numpy.log2",
"numpy.float32"
] | [((181, 208), 'logging.getLogger', 'logging.getLogger', (['"""frites"""'], {}), "('frites')\n", (198, 208), False, 'import logging\n'), ((787, 803), 'frites.utils.jit', 'jit', (['"""f4(f4[:])"""'], {}), "('f4(f4[:])')\n", (790, 803), False, 'from frites.utils import jit\n'), ((1364, 1387), 'frites.utils.jit', 'jit', (['"""i8[:](f4[:], i8)"""'], {}), "('i8[:](f4[:], i8)')\n", (1367, 1387), False, 'from frites.utils import jit\n'), ((1788, 1824), 'frites.utils.jit', 'jit', (['"""i8[:,:](f4[:], f4[:], i8, i8)"""'], {}), "('i8[:,:](f4[:], f4[:], i8, i8)')\n", (1791, 1824), False, 'from frites.utils import jit\n'), ((2807, 2838), 'frites.utils.jit', 'jit', (['"""f4(f4[:], f4[:], i8, i8)"""'], {}), "('f4(f4[:], f4[:], i8, i8)')\n", (2810, 2838), False, 'from frites.utils import jit\n'), ((3818, 3852), 'frites.utils.jit', 'jit', (['"""f4(f4[:], f4[:], f4[:], i8)"""'], {}), "('f4(f4[:], f4[:], f4[:], i8)')\n", (3821, 3852), False, 'from frites.utils import jit\n'), ((5225, 5261), 'frites.utils.jit', 'jit', (['"""f4[:](f4[:,:], f4[:], i8, i8)"""'], {}), "('f4[:](f4[:,:], f4[:], i8, i8)')\n", (5228, 5261), False, 'from frites.utils import jit\n'), ((5895, 5934), 'frites.utils.jit', 'jit', (['"""f4[:](f4[:,:], f4[:], f4[:], i8)"""'], {}), "('f4[:](f4[:,:], f4[:], f4[:], i8)')\n", (5898, 5934), False, 'from frites.utils import jit\n'), ((6591, 6629), 'frites.utils.jit', 'jit', (['"""f4[:](f4[:,:], f4[:,:], i8, i8)"""'], {}), "('f4[:](f4[:,:], f4[:,:], i8, i8)')\n", (6594, 6629), False, 'from frites.utils import jit\n'), ((4267, 4279), 'numpy.unique', 'np.unique', (['z'], {}), '(z)\n', (4276, 4279), True, 'import numpy as np\n'), ((4683, 4701), 'numpy.sum', 'np.sum', (['(pz * icond)'], {}), '(pz * icond)\n', (4689, 4701), True, 'import numpy as np\n'), ((5760, 5795), 'numpy.zeros', 'np.zeros', (['n_times'], {'dtype': 'np.float32'}), '(n_times, dtype=np.float32)\n', (5768, 5795), True, 'import numpy as np\n'), ((6459, 6494), 'numpy.zeros', 'np.zeros', (['n_times'], {'dtype': 
'np.float32'}), '(n_times, dtype=np.float32)\n', (6467, 6494), True, 'import numpy as np\n'), ((7158, 7193), 'numpy.zeros', 'np.zeros', (['n_times'], {'dtype': 'np.float32'}), '(n_times, dtype=np.float32)\n', (7166, 7193), True, 'import numpy as np\n'), ((1209, 1224), 'numpy.float32', 'np.float32', (['(0.0)'], {}), '(0.0)\n', (1219, 1224), True, 'import numpy as np\n'), ((1286, 1299), 'numpy.nonzero', 'np.nonzero', (['x'], {}), '(x)\n', (1296, 1299), True, 'import numpy as np\n'), ((1739, 1765), 'numpy.histogram', 'np.histogram', (['x'], {'bins': 'bins'}), '(x, bins=bins)\n', (1751, 1765), True, 'import numpy as np\n'), ((4358, 4371), 'numpy.int64', 'np.int64', (['n_z'], {}), '(n_z)\n', (4366, 4371), True, 'import numpy as np\n'), ((4414, 4427), 'numpy.int64', 'np.int64', (['n_z'], {}), '(n_z)\n', (4422, 4427), True, 'import numpy as np\n'), ((2543, 2559), 'numpy.int64', 'np.int64', (['bins_x'], {}), '(bins_x)\n', (2551, 2559), True, 'import numpy as np\n'), ((2561, 2577), 'numpy.int64', 'np.int64', (['bins_y'], {}), '(bins_y)\n', (2569, 2577), True, 'import numpy as np\n'), ((3355, 3367), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3364, 3367), True, 'import numpy as np\n'), ((1328, 1342), 'numpy.log2', 'np.log2', (['nnz_x'], {}), '(nnz_x)\n', (1335, 1342), True, 'import numpy as np\n')] |
#!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# Apply a trained dlib shape predictor to every *.jpg image in a folder.
# For each image the predicted landmarks are drawn over the image (saved in
# the result folder) and also written to a "<name>_pred.csv" file with one
# (index, x, y) row per landmark.
#
# Usage:
#   ./train_shape_predictor.py <image_folder> <checkpoint_folder> <result_folder> <num_landmarks>
#
# The predictor is loaded from <checkpoint_folder>/predictor.dat.
import os
import sys
import glob
import matplotlib
matplotlib.use('Agg')  # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import dlib

if len(sys.argv) != 5:
    # the old message described a single-argument invocation, which did not
    # match the check above; describe the real four-argument usage
    print(
        "Expected exactly 4 arguments:\n"
        "  ./train_shape_predictor.py <image_folder> <checkpoint_folder> "
        "<result_folder> <num_landmarks>")
    sys.exit(1)  # non-zero exit status: this is a usage error
image_folder = sys.argv[1]
checkpoint_folder = sys.argv[2]
result_folder = sys.argv[3]
Num_landmarks = int(sys.argv[4])

# Load the trained shape predictor from disk.
predictor = dlib.shape_predictor(checkpoint_folder + "/predictor.dat")

from skimage import io as ioSK
import io
from contextlib import redirect_stdout
import numpy as np

if not os.path.exists(result_folder):
    os.makedirs(result_folder)

print("Showing detections and predictions on the images in the faces folder...")
for f in glob.glob(os.path.join(image_folder, "*.jpg")):
    print("Processing file: {}".format(f))
    img = ioSK.imread(f)
    # No face detector is run: the predictor is applied directly to a fixed
    # rectangle covering the whole (assumed 256x256) image.
    dets = dlib.rectangle(left=1, top=1, right=255, bottom=255)
    shape = predictor(img, dets)
    # Collect (1-based landmark index, x, y) per landmark.  The previous
    # implementation printed each dlib.point through io.StringIO +
    # redirect_stdout and re-parsed the text; dlib.point exposes the
    # coordinates directly as .x and .y.
    newLandmarks = np.zeros((Num_landmarks, 3), dtype=np.float16)
    for k in range(Num_landmarks):
        p = shape.part(k)
        newLandmarks[k, 0] = k + 1
        newLandmarks[k, 1] = p.x
        newLandmarks[k, 2] = p.y
    # Name the outputs after the input file.  os.path.basename replaces the
    # old f.rfind('\\') slicing, which only worked with Windows separators
    # (on POSIX it degraded to the last character of the path).
    FileName = os.path.basename(f)
    fig = plt.figure()
    plt.imshow(img)
    plt.scatter(newLandmarks[:, 1], newLandmarks[:, 2], marker='x', color='blue')
    fig.savefig(result_folder + '/' + FileName)
    plt.close(fig)
    np.savetxt(result_folder + '/' + FileName[:-4] + '_pred.csv',
               newLandmarks, delimiter=",", fmt='%i')
| [
"matplotlib.pyplot.imshow",
"os.path.exists",
"contextlib.redirect_stdout",
"os.makedirs",
"matplotlib.use",
"dlib.rectangle",
"os.path.join",
"dlib.shape_predictor",
"matplotlib.pyplot.close",
"skimage.io.imread",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"nu... | [((1469, 1490), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (1483, 1490), False, 'import matplotlib\n'), ((2457, 2515), 'dlib.shape_predictor', 'dlib.shape_predictor', (["(checkpoint_folder + '/predictor.dat')"], {}), "(checkpoint_folder + '/predictor.dat')\n", (2477, 2515), False, 'import dlib\n'), ((2621, 2650), 'os.path.exists', 'os.path.exists', (['result_folder'], {}), '(result_folder)\n', (2635, 2650), False, 'import os\n'), ((2656, 2682), 'os.makedirs', 'os.makedirs', (['result_folder'], {}), '(result_folder)\n', (2667, 2682), False, 'import os\n'), ((2898, 2933), 'os.path.join', 'os.path.join', (['image_folder', '"""*.jpg"""'], {}), "(image_folder, '*.jpg')\n", (2910, 2933), False, 'import os\n'), ((3023, 3037), 'skimage.io.imread', 'ioSK.imread', (['f'], {}), '(f)\n', (3034, 3037), True, 'from skimage import io as ioSK\n'), ((3274, 3326), 'dlib.rectangle', 'dlib.rectangle', ([], {'left': '(1)', 'top': '(1)', 'right': '(255)', 'bottom': '(255)'}), '(left=1, top=1, right=255, bottom=255)\n', (3288, 3326), False, 'import dlib\n'), ((3477, 3523), 'numpy.zeros', 'np.zeros', (['(Num_landmarks, 3)'], {'dtype': 'np.float16'}), '((Num_landmarks, 3), dtype=np.float16)\n', (3485, 3523), True, 'import numpy as np\n'), ((4220, 4232), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4230, 4232), True, 'import matplotlib.pyplot as plt\n'), ((4237, 4252), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (4247, 4252), True, 'import matplotlib.pyplot as plt\n'), ((4257, 4334), 'matplotlib.pyplot.scatter', 'plt.scatter', (['newLandmarks[:, 1]', 'newLandmarks[:, 2]'], {'marker': '"""x"""', 'color': '"""blue"""'}), "(newLandmarks[:, 1], newLandmarks[:, 2], marker='x', color='blue')\n", (4268, 4334), True, 'import matplotlib.pyplot as plt\n'), ((4379, 4390), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4388, 4390), True, 'import matplotlib.pyplot as plt\n'), ((4397, 4501), 'numpy.savetxt', 
'np.savetxt', (["(result_folder + '/' + FileName[:-4] + '_pred.csv')", 'newLandmarks'], {'delimiter': '""","""', 'fmt': '"""%i"""'}), "(result_folder + '/' + FileName[:-4] + '_pred.csv', newLandmarks,\n delimiter=',', fmt='%i')\n", (4407, 4501), True, 'import numpy as np\n'), ((3657, 3670), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (3668, 3670), False, 'import io\n'), ((3684, 3702), 'contextlib.redirect_stdout', 'redirect_stdout', (['h'], {}), '(h)\n', (3699, 3702), False, 'from contextlib import redirect_stdout\n')] |
import numpy as np
class Problem:
    """General linear programming optimization problem.

    Holds a linear objective vector together with optional box and linear
    constraints over ``N`` optimization variables.  All array inputs are
    stored Fortran-ordered (``np.asfortranarray``).

    Attributes (``None`` until set by the corresponding setter):
        lb, ub:               box constraint bounds, shape (N,).
        objL:                 linear objective vector, shape (N,).
        conslinA:             linear constraint matrix, shape (Nconslin, N).
        conslinlb, conslinub: linear constraint bounds, shape (Nconslin,).
        soln:                 placeholder for a solver solution.
    """

    def __init__(self, N, Nconslin=0):
        """Create a linear programming optimization problem.

        Arguments:
            N:        number of optimization variables (strictly positive).
            Nconslin: number of linear constraints (default: 0).

        Raises:
            ValueError: if an argument is not an integer or is out of range.
        """
        try:
            self.N = int(N)
        except (TypeError, ValueError) as err:
            # narrow catch instead of the previous bare ``except``; chain the
            # conversion error so the original cause is not lost
            raise ValueError("N must be an integer") from err
        if self.N <= 0:
            raise ValueError("N must be strictly positive")
        try:
            self.Nconslin = int(Nconslin)
        except (TypeError, ValueError) as err:
            raise ValueError("Nconslin was not provided or was not an integer") from err
        if self.Nconslin < 0:
            raise ValueError("Nconslin must be non-negative")
        self.lb = None
        self.ub = None
        self.objL = None
        self.conslinA = None
        self.conslinlb = None
        self.conslinub = None
        self.soln = None

    def checkSetup(self):
        """Return True when every required constraint array has been set."""
        out = (self.lb is not None and
               self.ub is not None)
        if self.Nconslin > 0:
            out = out and (self.conslinA is not None and
                           self.conslinlb is not None and
                           self.conslinub is not None)
        return out

    def consBox(self, lb, ub):
        """Set box constraints.

        Arguments:
            lb: lower bounds, array of size (N,).
            ub: upper bounds, array of size (N,).

        Raises:
            ValueError: if either array does not have shape (N,).
        """
        lb = np.asfortranarray(lb)
        ub = np.asfortranarray(ub)
        # validate before mutating self so a failed call leaves the problem
        # unchanged (the previous version stored the arrays first)
        if lb.shape != (self.N,) or ub.shape != (self.N,):
            raise ValueError("Both arrays must have size (" + str(self.N) + ",).")
        self.lb = lb
        self.ub = ub

    def consLinear(self, A, lb=None, ub=None):
        """Set linear constraints lb <= A.dot(x) <= ub.

        Arguments:
            A:  linear constraint matrix, array of size (Nconslin, N).
            lb: lower bounds, array of size (Nconslin,) (default: -inf).
            ub: upper bounds, array of size (Nconslin,) (default: zeros).

        Raises:
            ValueError: if Nconslin is 0 or an array has the wrong shape.
        """
        if self.Nconslin == 0:
            raise ValueError("cannot set linear constraints when Nconslin=0")
        A = np.asfortranarray(A)
        if A.shape != (self.Nconslin, self.N):
            raise ValueError("Argument 'A' must have size (" + str(self.Nconslin)
                             + "," + str(self.N) + ").")
        if lb is None:
            lb = -np.inf * np.ones((self.Nconslin,))
        if ub is None:
            ub = np.zeros((self.Nconslin,))
        lb = np.asfortranarray(lb)
        ub = np.asfortranarray(ub)
        if (lb.shape != (self.Nconslin,) or
                ub.shape != (self.Nconslin,)):
            raise ValueError("Bounds must have size (" + str(self.Nconslin) + ",).")
        self.conslinA = A
        self.conslinlb = lb
        self.conslinub = ub

    def objFctn(self, lin=None):
        """Set the objective function of the form L.dot(x).

        Arguments:
            lin: array of size (N,) with the linear objective terms.
                 When None, the objective is left unchanged.

        Raises:
            ValueError: if the array does not have shape (N,).
        """
        if lin is not None:
            objL = np.asfortranarray(lin)
            if objL.shape != (self.N,):
                raise ValueError("Array L must have size (" + str(self.N) + ",).")
            self.objL = objL
| [
"numpy.asfortranarray",
"numpy.zeros",
"numpy.ones"
] | [((1651, 1672), 'numpy.asfortranarray', 'np.asfortranarray', (['lb'], {}), '(lb)\n', (1668, 1672), True, 'import numpy as np\n'), ((1693, 1714), 'numpy.asfortranarray', 'np.asfortranarray', (['ub'], {}), '(ub)\n', (1710, 1714), True, 'import numpy as np\n'), ((2367, 2387), 'numpy.asfortranarray', 'np.asfortranarray', (['A'], {}), '(A)\n', (2384, 2387), True, 'import numpy as np\n'), ((2778, 2799), 'numpy.asfortranarray', 'np.asfortranarray', (['lb'], {}), '(lb)\n', (2795, 2799), True, 'import numpy as np\n'), ((2827, 2848), 'numpy.asfortranarray', 'np.asfortranarray', (['ub'], {}), '(ub)\n', (2844, 2848), True, 'import numpy as np\n'), ((2723, 2749), 'numpy.zeros', 'np.zeros', (['(self.Nconslin,)'], {}), '((self.Nconslin,))\n', (2731, 2749), True, 'import numpy as np\n'), ((3313, 3335), 'numpy.asfortranarray', 'np.asfortranarray', (['lin'], {}), '(lin)\n', (3330, 3335), True, 'import numpy as np\n'), ((2651, 2676), 'numpy.ones', 'np.ones', (['(self.Nconslin,)'], {}), '((self.Nconslin,))\n', (2658, 2676), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from scipy import linalg as la
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from cycler import cycler
#FUNCAO CONTINUA
def potv(xa, multi, lw):
	"""Smooth (tanh-based) confinement potential, scaled by ``multi``.

	The well has half-width ``lw``: deep inside the value is ~0 and far
	outside it saturates at ``multi * 1400``.
	"""
	ratio = abs(xa) / lw
	return multi * 700. * (1 + np.tanh(5000 * (-1 + ratio)))
################
#FUNCAO DESCONTINUA
def potdv(xa):
	"""Discontinuous (hard-wall) potential: 0 inside |x| < 35, 700 outside."""
	return 0 if abs(xa) < 35. else 700.
###################
##FUNCAO MASSA
def potmm(xa):
	"""Position-dependent effective mass: .043 inside |x| < 35, .120 outside."""
	return .043 if abs(xa) < 35. else .120
def mtxg(multi,m,lw):
	"""Eigenvalues of a 1D finite-difference Schroedinger Hamiltonian.

	Builds a 92x92 tridiagonal matrix on a grid spanning [-lw, lw] with
	spacing h = 2*lw/n (n = 91): the diagonal holds the potential term
	(from potv) plus the 2/h^2 stencil term, the off-diagonals hold -1/h^2,
	everything scaled by ``multi``.  Returns the sorted eigenvalues rescaled
	by hc^2 / (2*m).

	multi: overall sign/scale factor (main() passes +1 for the conduction
	       band and -1 for the hole bands); also forwarded to potv().
	m:     effective mass.
	lw:    half-width of the grid / well.
	"""
	#m=potmm(t)
	inter = lw
	n=91
	t=-inter
	h=2*inter/n
	# NOTE(review): 6.5821*10e-16 == 6.5821e-15; hbar in eV*s is ~6.582e-16
	# and main() uses 6.582*10e-16 -- confirm both the extra factor of 10
	# and the inconsistency with main() are intentional.
	hc=6.5821*10e-16
	vp = np.zeros((n+1,n+1),float)
	for w in range(n):
		# diagonal: potential + 2/h^2 from the second-difference stencil
		vp[w][w] = multi*(((potv(t,multi,lw)*2.*m)/(hc*hc))+2./(h*h))
		vp[w+1][w] = multi*(-1./(h*h))
		vp[w][w+1] = multi*(-1./(h*h))
		t+=h
	vp[n][n] = multi*(((potv(t,multi,lw)*2.*m)/(hc*hc))+2./(h*h))
	# la.eig returns complex eigenvalues; np.sort on a complex array sorts
	# lexicographically (by real part first).
	Av, Aw = la.eig(vp)
	return np.sort(Av/(2.*m/(hc*hc)))
def main():
	"""Plot GaAs band-edge dispersions -- the free case plus three quantum
	well widths (15, 10, 5) -- on four shared-y panels, and dump every
	plotted curve to ``qb.dat`` (tab-separated blocks).
	"""
	#********CONSTANTES*****
	# NOTE(review): hc = 6.582*10e-16 == 6.582e-15, while mtxg() hard-codes
	# 6.5821*10e-16 -- the two constants differ slightly; confirm intended.
	hc=6.582*10e-16
	mo = 1.66*10e-27
	mgaas = .067*mo
	mhh = .45 *mo
	mso = .154*mo
	# NOTE(review): mhh is assigned twice with the same value (.45*mo)
	mhh = .45*mo
	mlh = .082*mo
	eg = 1.424
	esp = .34
	x = np.linspace(-10,10,num=40)
	# NOTE(review): ``line`` is computed but never used below
	line = [eg/2 for xk in x]
	# legend handles (one color per band)
	red_patch = mpatches.Patch(color='#cd0000', label='Heavy-Hole2')
	b_patch = mpatches.Patch(color='#0909d2' , label='Conduction Band')
	b2_patch = mpatches.Patch(color='#1c6bf2' , label='Light-Hole')
	g_patch = mpatches.Patch(color='#228b22' , label='Heavy-Hole1')
	# free-particle parabolic dispersions for the four bands
	bc = np.array([(+ (xk*xk*hc*hc)/(2*mgaas) + eg/2) for xk in x])
	bvhh = np.array([-hc*hc*xk*xk/(2*mhh)-eg/2 for xk in x])
	bvhh2 = np.array([ -hc*hc*xk*xk/(2*mhh)-eg/2-hc*hc*xk*xk/(2*mso) for xk in x])
	spin = np.array([ -hc*hc*xk*xk/(2*mlh)-eg/2-esp for xk in x])
	f, (ax1, ax2,ax3,ax4) = plt.subplots(1, 4, sharey=True)
	lista = [ax2,ax3,ax4]
	largurapc = [15,10,5]
	# data file; one blank-line-separated block per panel
	file1 = open("qb.dat","w")
	# first panel: free case, bands drawn as filled regions
	ax1.plot(x,0*x,'k--')
	ax1.fill_between(x,bc,3,facecolor='#0909d2',interpolate=True)
	ax1.fill_between(x,bvhh,bvhh2,where=bvhh>=bvhh2,facecolor='#228b22', interpolate=True)
	ax1.fill_between(x,bvhh2,spin,where=bvhh2>=spin,facecolor='#cd0000', interpolate=True)
	ax1.fill_between(x,spin,-5,facecolor='#1c6bf2',interpolate=True)
	for i in range(len(x)):
		file1.write("%s\t%s\t%s\t%s\t%s\t\n"%(x[i],bc[i],bvhh[i],bvhh2[i],spin[i]))
	file1.write("\n\n")
	ax1.grid(True)
	ax1.set_ylabel('E')
	ax1.set_title('Livre')
	# remaining panels: one per well width, each band shifted by the 1st,
	# 11th and 21st sorted eigenvalues returned by mtxg()
	for ax in range(3):
		mtxgaas = mtxg(1,mgaas,largurapc[ax])
		bc = np.array([(mtxgaas[0].real+ (xk*xk*hc*hc)/(2*mgaas) + eg/2) for xk in x])
		bc_2 = np.array([(mtxgaas[10].real+ (xk*xk*hc*hc)/(2*mgaas) + eg/2) for xk in x])
		bc_3 = np.array([(mtxgaas[20].real+ (xk*xk*hc*hc)/(2*mgaas) + eg/2) for xk in x])
		mtxbv = mtxg(-1,mhh,largurapc[ax])
		bvhh = np.array([mtxbv[0].real-hc*hc*xk*xk/(2*mhh)-eg/2 for xk in x])
		bvhh_2 = np.array([mtxbv[10].real-hc*hc*xk*xk/(2*mhh)-eg/2 for xk in x])
		bvhh_3 = np.array([mtxbv[20].real-hc*hc*xk*xk/(2*mhh)-eg/2 for xk in x])
		# reduced mass mso*mhh/(mso+mhh) for the second heavy-hole band
		mtxbv2 = mtxg(-1,(mso*mhh)/(mso+mhh),largurapc[ax])
		bvhh2 = np.array([mtxbv2[0].real -hc*hc*xk*xk/(2*mhh)-eg/2-hc*hc*xk*xk/(2*mso) for xk in x])
		bvhh2_2 = np.array([mtxbv2[10].real -hc*hc*xk*xk/(2*mhh)-eg/2-hc*hc*xk*xk/(2*mso) for xk in x])
		bvhh2_3 = np.array([mtxbv2[20].real -hc*hc*xk*xk/(2*mhh)-eg/2-hc*hc*xk*xk/(2*mso) for xk in x])
		mtxspin = mtxg(-1,mlh,largurapc[ax])
		spin = np.array([mtxspin[0].real -hc*hc*xk*xk/(2*mlh)-eg/2-esp for xk in x])
		spin_2 = np.array([mtxspin[10].real -hc*hc*xk*xk/(2*mlh)-eg/2-esp for xk in x])
		spin_3 = np.array([mtxspin[20].real -hc*hc*xk*xk/(2*mlh)-eg/2-esp for xk in x])
		for i in range(len(x)):
			file1.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t\n"%(x[i],bc[i],bc_2[i],bc_3[i],bvhh[i],bvhh_2[i],bvhh_3[i],bvhh2[i],bvhh2_2[i],bvhh2_3[i],spin[i],spin_2[i],spin_3[i]))
		file1.write("\n\n")
		# solid / dashed / dotted distinguish the three eigenvalue levels
		lista[ax].plot(x,0*x,'k--')
		lista[ax].plot(x,bc,color='#0909d2')
		lista[ax].plot(x,bc_2,color='#0909d2',linestyle='--')
		lista[ax].plot(x,bc_3,color='#0909d2',linestyle=':')
		lista[ax].plot(x,bvhh,color='#228b22',linestyle=':')
		lista[ax].plot(x,bvhh_2,color='#228b22',linestyle='--')
		lista[ax].plot(x,bvhh_3,color='#228b22')
		lista[ax].plot(x,bvhh2,color='#cd0000',linestyle=':')
		lista[ax].plot(x,bvhh2_2,color='#cd0000',linestyle='--')
		lista[ax].plot(x,bvhh2_3,color='#cd0000')
		lista[ax].plot(x,spin,color='#1c6bf2',linestyle=':')
		lista[ax].plot(x,spin_2,color='#1c6bf2',linestyle='--')
		lista[ax].plot(x,spin_3,color='#1c6bf2')
		lista[ax].grid(True)
		lista[ax].set_title('LP='+str(largurapc[ax]))
	file1.close()
	ax4.set_ylim([-5,3])
	plt.legend(loc='lower center', bbox_to_anchor=(.0, 1.05),
          ncol=4, fancybox=True, shadow=True,handles=[b_patch,g_patch,red_patch,b2_patch])
	plt.show()
if __name__ == "__main__":
	# run the band-structure plot only when executed as a script
	main()
| [
"numpy.sort",
"numpy.tanh",
"scipy.linalg.eig",
"numpy.zeros",
"numpy.linspace",
"numpy.array",
"matplotlib.patches.Patch",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((748, 779), 'numpy.zeros', 'np.zeros', (['(n + 1, n + 1)', 'float'], {}), '((n + 1, n + 1), float)\n', (756, 779), True, 'import numpy as np\n'), ((1044, 1054), 'scipy.linalg.eig', 'la.eig', (['vp'], {}), '(vp)\n', (1050, 1054), True, 'from scipy import linalg as la\n'), ((1067, 1102), 'numpy.sort', 'np.sort', (['(Av / (2.0 * m / (hc * hc)))'], {}), '(Av / (2.0 * m / (hc * hc)))\n', (1074, 1102), True, 'import numpy as np\n'), ((1318, 1346), 'numpy.linspace', 'np.linspace', (['(-10)', '(10)'], {'num': '(40)'}), '(-10, 10, num=40)\n', (1329, 1346), True, 'import numpy as np\n'), ((1397, 1449), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""#cd0000"""', 'label': '"""Heavy-Hole2"""'}), "(color='#cd0000', label='Heavy-Hole2')\n", (1411, 1449), True, 'import matplotlib.patches as mpatches\n'), ((1465, 1521), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""#0909d2"""', 'label': '"""Conduction Band"""'}), "(color='#0909d2', label='Conduction Band')\n", (1479, 1521), True, 'import matplotlib.patches as mpatches\n'), ((1539, 1590), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""#1c6bf2"""', 'label': '"""Light-Hole"""'}), "(color='#1c6bf2', label='Light-Hole')\n", (1553, 1590), True, 'import matplotlib.patches as mpatches\n'), ((1607, 1659), 'matplotlib.patches.Patch', 'mpatches.Patch', ([], {'color': '"""#228b22"""', 'label': '"""Heavy-Hole1"""'}), "(color='#228b22', label='Heavy-Hole1')\n", (1621, 1659), True, 'import matplotlib.patches as mpatches\n'), ((1673, 1742), 'numpy.array', 'np.array', (['[(+(xk * xk * hc * hc) / (2 * mgaas) + eg / 2) for xk in x]'], {}), '([(+(xk * xk * hc * hc) / (2 * mgaas) + eg / 2) for xk in x])\n', (1681, 1742), True, 'import numpy as np\n'), ((1744, 1809), 'numpy.array', 'np.array', (['[(-hc * hc * xk * xk / (2 * mhh) - eg / 2) for xk in x]'], {}), '([(-hc * hc * xk * xk / (2 * mhh) - eg / 2) for xk in x])\n', (1752, 1809), True, 'import numpy as np\n'), ((1807, 1908), 
'numpy.array', 'np.array', (['[(-hc * hc * xk * xk / (2 * mhh) - eg / 2 - hc * hc * xk * xk / (2 * mso)) for\n xk in x]'], {}), '([(-hc * hc * xk * xk / (2 * mhh) - eg / 2 - hc * hc * xk * xk / (2 *\n mso)) for xk in x])\n', (1815, 1908), True, 'import numpy as np\n'), ((1890, 1961), 'numpy.array', 'np.array', (['[(-hc * hc * xk * xk / (2 * mlh) - eg / 2 - esp) for xk in x]'], {}), '([(-hc * hc * xk * xk / (2 * mlh) - eg / 2 - esp) for xk in x])\n', (1898, 1961), True, 'import numpy as np\n'), ((1976, 2007), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(4)'], {'sharey': '(True)'}), '(1, 4, sharey=True)\n', (1988, 2007), True, 'import matplotlib.pyplot as plt\n'), ((5114, 5262), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower center"""', 'bbox_to_anchor': '(0.0, 1.05)', 'ncol': '(4)', 'fancybox': '(True)', 'shadow': '(True)', 'handles': '[b_patch, g_patch, red_patch, b2_patch]'}), "(loc='lower center', bbox_to_anchor=(0.0, 1.05), ncol=4, fancybox\n =True, shadow=True, handles=[b_patch, g_patch, red_patch, b2_patch])\n", (5124, 5262), True, 'import matplotlib.pyplot as plt\n'), ((5269, 5279), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5277, 5279), True, 'import matplotlib.pyplot as plt\n'), ((2742, 2830), 'numpy.array', 'np.array', (['[(mtxgaas[0].real + xk * xk * hc * hc / (2 * mgaas) + eg / 2) for xk in x]'], {}), '([(mtxgaas[0].real + xk * xk * hc * hc / (2 * mgaas) + eg / 2) for\n xk in x])\n', (2750, 2830), True, 'import numpy as np\n'), ((2832, 2921), 'numpy.array', 'np.array', (['[(mtxgaas[10].real + xk * xk * hc * hc / (2 * mgaas) + eg / 2) for xk in x]'], {}), '([(mtxgaas[10].real + xk * xk * hc * hc / (2 * mgaas) + eg / 2) for\n xk in x])\n', (2840, 2921), True, 'import numpy as np\n'), ((2923, 3012), 'numpy.array', 'np.array', (['[(mtxgaas[20].real + xk * xk * hc * hc / (2 * mgaas) + eg / 2) for xk in x]'], {}), '([(mtxgaas[20].real + xk * xk * hc * hc / (2 * mgaas) + eg / 2) for\n xk in x])\n', (2931, 3012), True, 
'import numpy as np\n'), ((3058, 3143), 'numpy.array', 'np.array', (['[(mtxbv[0].real - hc * hc * xk * xk / (2 * mhh) - eg / 2) for xk in x]'], {}), '([(mtxbv[0].real - hc * hc * xk * xk / (2 * mhh) - eg / 2) for xk in x]\n )\n', (3066, 3143), True, 'import numpy as np\n'), ((3139, 3224), 'numpy.array', 'np.array', (['[(mtxbv[10].real - hc * hc * xk * xk / (2 * mhh) - eg / 2) for xk in x]'], {}), '([(mtxbv[10].real - hc * hc * xk * xk / (2 * mhh) - eg / 2) for xk in\n x])\n', (3147, 3224), True, 'import numpy as np\n'), ((3221, 3306), 'numpy.array', 'np.array', (['[(mtxbv[20].real - hc * hc * xk * xk / (2 * mhh) - eg / 2) for xk in x]'], {}), '([(mtxbv[20].real - hc * hc * xk * xk / (2 * mhh) - eg / 2) for xk in\n x])\n', (3229, 3306), True, 'import numpy as np\n'), ((3363, 3480), 'numpy.array', 'np.array', (['[(mtxbv2[0].real - hc * hc * xk * xk / (2 * mhh) - eg / 2 - hc * hc * xk *\n xk / (2 * mso)) for xk in x]'], {}), '([(mtxbv2[0].real - hc * hc * xk * xk / (2 * mhh) - eg / 2 - hc *\n hc * xk * xk / (2 * mso)) for xk in x])\n', (3371, 3480), True, 'import numpy as np\n'), ((3467, 3585), 'numpy.array', 'np.array', (['[(mtxbv2[10].real - hc * hc * xk * xk / (2 * mhh) - eg / 2 - hc * hc * xk *\n xk / (2 * mso)) for xk in x]'], {}), '([(mtxbv2[10].real - hc * hc * xk * xk / (2 * mhh) - eg / 2 - hc *\n hc * xk * xk / (2 * mso)) for xk in x])\n', (3475, 3585), True, 'import numpy as np\n'), ((3572, 3690), 'numpy.array', 'np.array', (['[(mtxbv2[20].real - hc * hc * xk * xk / (2 * mhh) - eg / 2 - hc * hc * xk *\n xk / (2 * mso)) for xk in x]'], {}), '([(mtxbv2[20].real - hc * hc * xk * xk / (2 * mhh) - eg / 2 - hc *\n hc * xk * xk / (2 * mso)) for xk in x])\n', (3580, 3690), True, 'import numpy as np\n'), ((3720, 3812), 'numpy.array', 'np.array', (['[(mtxspin[0].real - hc * hc * xk * xk / (2 * mlh) - eg / 2 - esp) for xk in x]'], {}), '([(mtxspin[0].real - hc * hc * xk * xk / (2 * mlh) - eg / 2 - esp) for\n xk in x])\n', (3728, 3812), True, 'import numpy as np\n'), 
((3808, 3901), 'numpy.array', 'np.array', (['[(mtxspin[10].real - hc * hc * xk * xk / (2 * mlh) - eg / 2 - esp) for xk in x]'], {}), '([(mtxspin[10].real - hc * hc * xk * xk / (2 * mlh) - eg / 2 - esp) for\n xk in x])\n', (3816, 3901), True, 'import numpy as np\n'), ((3897, 3990), 'numpy.array', 'np.array', (['[(mtxspin[20].real - hc * hc * xk * xk / (2 * mlh) - eg / 2 - esp) for xk in x]'], {}), '([(mtxspin[20].real - hc * hc * xk * xk / (2 * mlh) - eg / 2 - esp) for\n xk in x])\n', (3905, 3990), True, 'import numpy as np\n'), ((332, 356), 'numpy.tanh', 'np.tanh', (['(5000 * (-1 + x))'], {}), '(5000 * (-1 + x))\n', (339, 356), True, 'import numpy as np\n')] |
from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy
import numpy as np
from scipy.sparse import csr_matrix
# Shared fixture matrices for the proximity tests below:
#   B -- binary matrix (bitwise jaccard variants)
#   N -- small positive-integer matrix (numeric jaccard)
#   W -- weighted matrix whose rows end in zeros (set / weighted jaccard)
B = np.array([
	[1, 1, 1, 1],
	[1, 1, 1, 0],
	[1, 1, 0, 0],
	[1, 0, 0, 0],
])
N = np.array([
	[2, 3, 4, 2],
	[2, 3, 4, 2],
	[2, 3, 3, 2],
	[2, 1, 3, 4]
])
W = np.array([
	[4, 3, 2, 1],
	[3, 2, 1, 0],
	[2, 1, 0, 0],
	[1, 0, 0, 0],
])
def test_jaccard_scipy():
    """Numeric (scipy-style) jaccard coefficient on two small vectors."""
    left = np.array([2, 3, 4, 5])
    right = np.array([2, 3, 4, 2])
    coef = _jaccard_coef_scipy(left, right, min_support=1)
    assert coef == 0.75
def test_jaccard_binary():
    """Binary (bitwise) jaccard coefficient on two 0/1 vectors."""
    left = np.array([1, 1, 1, 1])
    right = np.array([1, 1, 1, 0])
    assert _jaccard_coef_binary(left, right, min_support=1) == 0.75
def test_jaccard_set():
    """Set-based jaccard coefficient on two small vectors."""
    left = np.array([4, 3, 2, 1])
    right = np.array([3, 2, 1, 0])
    assert _jaccard_coef_set(left, right, min_support=1) == 0.6
def test_jaccard_weighted():
    """Weighted jaccard coefficient on two small vectors."""
    left = np.array([4, 3, 2, 1])
    right = np.array([3, 2, 1, 0])
    assert _jaccard_coef_weighted_numpy(left, right, min_support=1) == 0.6
def test_pairwise_distance_numpy_scipy():
    """Dense pairwise proximity with the numeric jaccard metric."""
    expected = np.array([
        [1.00, 1.00, 0.75, 0.25],
        [1.00, 1.00, 0.75, 0.25],
        [0.75, 0.75, 1.00, 0.50],
        [0.25, 0.25, 0.50, 1.00],
    ])
    proximity = pairwise_proximity(N, metric='jaccard')
    assert np.allclose(proximity, expected)
def test_pairwise_distance_numpy_binary():
    """Dense pairwise proximity with the binary jaccard metric."""
    expected = np.array([
        [1.0, 0.75, 0.5, 0.25],
        [0.75, 1.0, 0.66666667, 0.33333333],
        [0.5, 0.66666667, 1.0, 0.5],
        [0.25, 0.33333333, 0.5, 1.0],
    ])
    proximity = pairwise_proximity(B, metric='jaccard_binary', min_support=1, verbose=True)
    assert np.allclose(proximity, expected)
def test_pairwise_distance_numpy_set():
    """Dense pairwise proximity with the set-based jaccard metric."""
    expected = np.array([
        [1.0, 0.6, 0.4, 0.2],
        [0.6, 1.0, 0.75, 0.5],
        [0.4, 0.75, 1.0, 0.66666667],
        [0.2, 0.5, 0.66666667, 1.0],
    ])
    proximity = pairwise_proximity(W, metric='jaccard_set', min_support=1)
    assert np.allclose(proximity, expected)
def test_pairwise_distance_numpy_weighted():
    """Dense pairwise proximity with the weighted jaccard metric."""
    expected = np.array([
        [1.0, 0.6, 0.3, 0.1],
        [0.6, 1.0, 0.0, 0.0],
        [0.3, 0.0, 1.0, 0.0],
        [0.1, 0.0, 0.0, 1.0],
    ])
    proximity = pairwise_proximity(W, metric='weighted_jaccard', min_support=10)
    assert np.allclose(proximity, expected)
def test_pairwise_distance_sparse_scipy():
    """Sparse-matrix pairwise proximity with the numeric jaccard metric."""
    expected = np.array([
        [1.00, 1.00, 0.75, 0.25],
        [1.00, 1.00, 0.75, 0.25],
        [0.75, 0.75, 1.00, 0.50],
        [0.25, 0.25, 0.50, 1.00],
    ])
    proximity = pairwise_proximity(csr_matrix(N), metric='jaccard', min_support=1)
    assert np.allclose(proximity.todense(), expected)
def test_pairwise_distance_sparse_binary():
    """Sparse-matrix pairwise proximity with the binary jaccard metric."""
    expected = np.array([
        [1.0, 0.75, 0.5, 0.25],
        [0.75, 1.0, 0.66666667, 0.33333333],
        [0.5, 0.66666667, 1.0, 0.5],
        [0.25, 0.33333333, 0.5, 1.0],
    ])
    proximity = pairwise_proximity(csr_matrix(B), metric='jaccard_binary', min_support=1)
    assert np.allclose(proximity.todense(), expected)
def test_pairwise_distance_sparse_set():
    """Sparse-matrix pairwise proximity with the set-based jaccard metric.

    NOTE(review): the expected values differ from the dense set test,
    presumably because zero entries are not stored in the sparse rows --
    confirm against the distanceclosure implementation.
    """
    expected = np.array([
        [1.0, 0.75, 0.5, 0.25],
        [0.75, 1.0, 0.66666667, 0.33333333],
        [0.5, 0.66666667, 1.0, 0.5],
        [0.25, 0.33333333, 0.5, 1.0],
    ])
    proximity = pairwise_proximity(csr_matrix(W), metric='jaccard_set', min_support=1)
    assert np.allclose(proximity.todense(), expected)
def test_pairwise_distance_sparse_weighted():
    """Sparse-matrix pairwise proximity with the weighted jaccard metric."""
    expected = np.array([
        [1.0, 0.6, 0.3, 0.1],
        [0.6, 1.0, 0.0, 0.0],
        [0.3, 0.0, 1.0, 0.0],
        [0.1, 0.0, 0.0, 1.0],
    ])
    proximity = pairwise_proximity(csr_matrix(W), metric='jaccard_weighted', min_support=1)
    assert np.allclose(proximity.todense(), expected)
def test_pairwise_distance_dense_my_own_metric():
    """Dense pairwise proximity accepts a user-supplied coefficient."""
    def constant_coef(u, v):
        # every off-diagonal pair gets the same proximity
        return 0.25
    expected = np.array([
        [1.0, 0.25, 0.25, 0.25],
        [0.25, 1.0, 0.25, 0.25],
        [0.25, 0.25, 1.0, 0.25],
        [0.25, 0.25, 0.25, 1.0],
    ])
    proximity = pairwise_proximity(W, metric=constant_coef, verbose=True)
    assert np.allclose(proximity, expected)
def test_pairwise_distance_sparse_my_own_metric():
    """Sparse (Scipy) implementation with a user-supplied metric callable."""
    def constant_coef(u, v):
        return 0.25
    D = pairwise_proximity(csr_matrix(W), metric=('indices', constant_coef), verbose=True)
    # Constant off-diagonal coefficient; the diagonal is always 1.
    expected = np.full((4, 4), .25, dtype=float)
    np.fill_diagonal(expected, 1.)
    assert np.allclose(D.todense(), expected)
| [
"distanceclosure.distance._jaccard_coef_set",
"distanceclosure.distance._jaccard_coef_weighted_numpy",
"numpy.isclose",
"distanceclosure.distance.pairwise_proximity",
"numpy.array",
"distanceclosure.distance._jaccard_coef_scipy",
"distanceclosure.distance._jaccard_coef_binary",
"scipy.sparse.csr_matri... | [((208, 274), 'numpy.array', 'np.array', (['[[1, 1, 1, 1], [1, 1, 1, 0], [1, 1, 0, 0], [1, 0, 0, 0]]'], {}), '([[1, 1, 1, 1], [1, 1, 1, 0], [1, 1, 0, 0], [1, 0, 0, 0]])\n', (216, 274), True, 'import numpy as np\n'), ((299, 365), 'numpy.array', 'np.array', (['[[2, 3, 4, 2], [2, 3, 4, 2], [2, 3, 3, 2], [2, 1, 3, 4]]'], {}), '([[2, 3, 4, 2], [2, 3, 4, 2], [2, 3, 3, 2], [2, 1, 3, 4]])\n', (307, 365), True, 'import numpy as np\n'), ((389, 455), 'numpy.array', 'np.array', (['[[4, 3, 2, 1], [3, 2, 1, 0], [2, 1, 0, 0], [1, 0, 0, 0]]'], {}), '([[4, 3, 2, 1], [3, 2, 1, 0], [2, 1, 0, 0], [1, 0, 0, 0]])\n', (397, 455), True, 'import numpy as np\n'), ((564, 586), 'numpy.array', 'np.array', (['[2, 3, 4, 5]'], {}), '([2, 3, 4, 5])\n', (572, 586), True, 'import numpy as np\n'), ((595, 617), 'numpy.array', 'np.array', (['[2, 3, 4, 2]'], {}), '([2, 3, 4, 2])\n', (603, 617), True, 'import numpy as np\n'), ((626, 666), 'distanceclosure.distance._jaccard_coef_scipy', '_jaccard_coef_scipy', (['u', 'v'], {'min_support': '(1)'}), '(u, v, min_support=1)\n', (645, 666), False, 'from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((775, 797), 'numpy.array', 'np.array', (['[1, 1, 1, 1]'], {}), '([1, 1, 1, 1])\n', (783, 797), True, 'import numpy as np\n'), ((806, 828), 'numpy.array', 'np.array', (['[1, 1, 1, 0]'], {}), '([1, 1, 1, 0])\n', (814, 828), True, 'import numpy as np\n'), ((837, 878), 'distanceclosure.distance._jaccard_coef_binary', '_jaccard_coef_binary', (['u', 'v'], {'min_support': '(1)'}), '(u, v, min_support=1)\n', (857, 878), False, 'from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((971, 993), 'numpy.array', 'np.array', (['[4, 3, 2, 1]'], {}), '([4, 3, 2, 1])\n', (979, 993), True, 'import numpy as np\n'), ((1002, 1024), 'numpy.array', 'np.array', 
(['[3, 2, 1, 0]'], {}), '([3, 2, 1, 0])\n', (1010, 1024), True, 'import numpy as np\n'), ((1033, 1071), 'distanceclosure.distance._jaccard_coef_set', '_jaccard_coef_set', (['u', 'v'], {'min_support': '(1)'}), '(u, v, min_support=1)\n', (1050, 1071), False, 'from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((1173, 1195), 'numpy.array', 'np.array', (['[4, 3, 2, 1]'], {}), '([4, 3, 2, 1])\n', (1181, 1195), True, 'import numpy as np\n'), ((1204, 1226), 'numpy.array', 'np.array', (['[3, 2, 1, 0]'], {}), '([3, 2, 1, 0])\n', (1212, 1226), True, 'import numpy as np\n'), ((1235, 1284), 'distanceclosure.distance._jaccard_coef_weighted_numpy', '_jaccard_coef_weighted_numpy', (['u', 'v'], {'min_support': '(1)'}), '(u, v, min_support=1)\n', (1263, 1284), False, 'from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((1473, 1512), 'distanceclosure.distance.pairwise_proximity', 'pairwise_proximity', (['N'], {'metric': '"""jaccard"""'}), "(N, metric='jaccard')\n", (1491, 1512), False, 'from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((1524, 1647), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.75, 0.25], [1.0, 1.0, 0.75, 0.25], [0.75, 0.75, 1.0, 0.5], [\n 0.25, 0.25, 0.5, 1.0]]'], {'dtype': 'float'}), '([[1.0, 1.0, 0.75, 0.25], [1.0, 1.0, 0.75, 0.25], [0.75, 0.75, 1.0,\n 0.5], [0.25, 0.25, 0.5, 1.0]], dtype=float)\n', (1532, 1647), True, 'import numpy as np\n'), ((1868, 1943), 'distanceclosure.distance.pairwise_proximity', 'pairwise_proximity', (['B'], {'metric': '"""jaccard_binary"""', 'min_support': '(1)', 'verbose': '(True)'}), "(B, metric='jaccard_binary', min_support=1, verbose=True)\n", (1886, 1943), False, 'from distanceclosure.distance import pairwise_proximity, 
_jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((1955, 2102), 'numpy.array', 'np.array', (['[[1.0, 0.75, 0.5, 0.25], [0.75, 1.0, 0.66666667, 0.33333333], [0.5, \n 0.66666667, 1.0, 0.5], [0.25, 0.33333333, 0.5, 1.0]]'], {'dtype': 'float'}), '([[1.0, 0.75, 0.5, 0.25], [0.75, 1.0, 0.66666667, 0.33333333], [0.5,\n 0.66666667, 1.0, 0.5], [0.25, 0.33333333, 0.5, 1.0]], dtype=float)\n', (1963, 2102), True, 'import numpy as np\n'), ((2318, 2376), 'distanceclosure.distance.pairwise_proximity', 'pairwise_proximity', (['W'], {'metric': '"""jaccard_set"""', 'min_support': '(1)'}), "(W, metric='jaccard_set', min_support=1)\n", (2336, 2376), False, 'from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((2388, 2520), 'numpy.array', 'np.array', (['[[1.0, 0.6, 0.4, 0.2], [0.6, 1.0, 0.75, 0.5], [0.4, 0.75, 1.0, 0.66666667],\n [0.2, 0.5, 0.66666667, 1.0]]'], {'dtype': 'float'}), '([[1.0, 0.6, 0.4, 0.2], [0.6, 1.0, 0.75, 0.5], [0.4, 0.75, 1.0, \n 0.66666667], [0.2, 0.5, 0.66666667, 1.0]], dtype=float)\n', (2396, 2520), True, 'import numpy as np\n'), ((2723, 2787), 'distanceclosure.distance.pairwise_proximity', 'pairwise_proximity', (['W'], {'metric': '"""weighted_jaccard"""', 'min_support': '(10)'}), "(W, metric='weighted_jaccard', min_support=10)\n", (2741, 2787), False, 'from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((2799, 2914), 'numpy.array', 'np.array', (['[[1.0, 0.6, 0.3, 0.1], [0.6, 1.0, 0.0, 0.0], [0.3, 0.0, 1.0, 0.0], [0.1, \n 0.0, 0.0, 1.0]]'], {'dtype': 'float'}), '([[1.0, 0.6, 0.3, 0.1], [0.6, 1.0, 0.0, 0.0], [0.3, 0.0, 1.0, 0.0],\n [0.1, 0.0, 0.0, 1.0]], dtype=float)\n', (2807, 2914), True, 'import numpy as np\n'), ((3138, 3151), 'scipy.sparse.csr_matrix', 'csr_matrix', (['N'], {}), '(N)\n', (3148, 3151), False, 'from 
scipy.sparse import csr_matrix\n'), ((3160, 3221), 'distanceclosure.distance.pairwise_proximity', 'pairwise_proximity', (['N_sparse'], {'metric': '"""jaccard"""', 'min_support': '(1)'}), "(N_sparse, metric='jaccard', min_support=1)\n", (3178, 3221), False, 'from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((3233, 3356), 'numpy.array', 'np.array', (['[[1.0, 1.0, 0.75, 0.25], [1.0, 1.0, 0.75, 0.25], [0.75, 0.75, 1.0, 0.5], [\n 0.25, 0.25, 0.5, 1.0]]'], {'dtype': 'float'}), '([[1.0, 1.0, 0.75, 0.25], [1.0, 1.0, 0.75, 0.25], [0.75, 0.75, 1.0,\n 0.5], [0.25, 0.25, 0.5, 1.0]], dtype=float)\n', (3241, 3356), True, 'import numpy as np\n'), ((3597, 3610), 'scipy.sparse.csr_matrix', 'csr_matrix', (['B'], {}), '(B)\n', (3607, 3610), False, 'from scipy.sparse import csr_matrix\n'), ((3619, 3687), 'distanceclosure.distance.pairwise_proximity', 'pairwise_proximity', (['B_sparse'], {'metric': '"""jaccard_binary"""', 'min_support': '(1)'}), "(B_sparse, metric='jaccard_binary', min_support=1)\n", (3637, 3687), False, 'from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((3699, 3846), 'numpy.array', 'np.array', (['[[1.0, 0.75, 0.5, 0.25], [0.75, 1.0, 0.66666667, 0.33333333], [0.5, \n 0.66666667, 1.0, 0.5], [0.25, 0.33333333, 0.5, 1.0]]'], {'dtype': 'float'}), '([[1.0, 0.75, 0.5, 0.25], [0.75, 1.0, 0.66666667, 0.33333333], [0.5,\n 0.66666667, 1.0, 0.5], [0.25, 0.33333333, 0.5, 1.0]], dtype=float)\n', (3707, 3846), True, 'import numpy as np\n'), ((4081, 4094), 'scipy.sparse.csr_matrix', 'csr_matrix', (['W'], {}), '(W)\n', (4091, 4094), False, 'from scipy.sparse import csr_matrix\n'), ((4103, 4168), 'distanceclosure.distance.pairwise_proximity', 'pairwise_proximity', (['W_sparse'], {'metric': '"""jaccard_set"""', 'min_support': '(1)'}), "(W_sparse, metric='jaccard_set', 
min_support=1)\n", (4121, 4168), False, 'from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((4180, 4327), 'numpy.array', 'np.array', (['[[1.0, 0.75, 0.5, 0.25], [0.75, 1.0, 0.66666667, 0.33333333], [0.5, \n 0.66666667, 1.0, 0.5], [0.25, 0.33333333, 0.5, 1.0]]'], {'dtype': 'float'}), '([[1.0, 0.75, 0.5, 0.25], [0.75, 1.0, 0.66666667, 0.33333333], [0.5,\n 0.66666667, 1.0, 0.5], [0.25, 0.33333333, 0.5, 1.0]], dtype=float)\n', (4188, 4327), True, 'import numpy as np\n'), ((4572, 4585), 'scipy.sparse.csr_matrix', 'csr_matrix', (['W'], {}), '(W)\n', (4582, 4585), False, 'from scipy.sparse import csr_matrix\n'), ((4594, 4664), 'distanceclosure.distance.pairwise_proximity', 'pairwise_proximity', (['W_sparse'], {'metric': '"""jaccard_weighted"""', 'min_support': '(1)'}), "(W_sparse, metric='jaccard_weighted', min_support=1)\n", (4612, 4664), False, 'from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((4676, 4791), 'numpy.array', 'np.array', (['[[1.0, 0.6, 0.3, 0.1], [0.6, 1.0, 0.0, 0.0], [0.3, 0.0, 1.0, 0.0], [0.1, \n 0.0, 0.0, 1.0]]'], {'dtype': 'float'}), '([[1.0, 0.6, 0.3, 0.1], [0.6, 1.0, 0.0, 0.0], [0.3, 0.0, 1.0, 0.0],\n [0.1, 0.0, 0.0, 1.0]], dtype=float)\n', (4684, 4791), True, 'import numpy as np\n'), ((5072, 5123), 'distanceclosure.distance.pairwise_proximity', 'pairwise_proximity', (['W'], {'metric': 'my_coef', 'verbose': '(True)'}), '(W, metric=my_coef, verbose=True)\n', (5090, 5123), False, 'from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((5135, 5263), 'numpy.array', 'np.array', (['[[1.0, 0.25, 0.25, 0.25], [0.25, 1.0, 0.25, 0.25], [0.25, 0.25, 1.0, 0.25],\n [0.25, 0.25, 0.25, 1.0]]'], {'dtype': 'float'}), '([[1.0, 0.25, 0.25, 0.25], [0.25, 1.0, 0.25, 
0.25], [0.25, 0.25, \n 1.0, 0.25], [0.25, 0.25, 0.25, 1.0]], dtype=float)\n', (5143, 5263), True, 'import numpy as np\n'), ((5536, 5549), 'scipy.sparse.csr_matrix', 'csr_matrix', (['W'], {}), '(W)\n', (5546, 5549), False, 'from scipy.sparse import csr_matrix\n'), ((5558, 5629), 'distanceclosure.distance.pairwise_proximity', 'pairwise_proximity', (['W_sparse'], {'metric': "('indices', my_coef)", 'verbose': '(True)'}), "(W_sparse, metric=('indices', my_coef), verbose=True)\n", (5576, 5629), False, 'from distanceclosure.distance import pairwise_proximity, _jaccard_coef_scipy, _jaccard_coef_binary, _jaccard_coef_set, _jaccard_coef_weighted_numpy\n'), ((5641, 5769), 'numpy.array', 'np.array', (['[[1.0, 0.25, 0.25, 0.25], [0.25, 1.0, 0.25, 0.25], [0.25, 0.25, 1.0, 0.25],\n [0.25, 0.25, 0.25, 1.0]]'], {'dtype': 'float'}), '([[1.0, 0.25, 0.25, 0.25], [0.25, 1.0, 0.25, 0.25], [0.25, 0.25, \n 1.0, 0.25], [0.25, 0.25, 0.25, 1.0]], dtype=float)\n', (5649, 5769), True, 'import numpy as np\n'), ((1682, 1701), 'numpy.isclose', 'np.isclose', (['D', 'true'], {}), '(D, true)\n', (1692, 1701), True, 'import numpy as np\n'), ((2139, 2158), 'numpy.isclose', 'np.isclose', (['D', 'true'], {}), '(D, true)\n', (2149, 2158), True, 'import numpy as np\n'), ((2556, 2575), 'numpy.isclose', 'np.isclose', (['D', 'true'], {}), '(D, true)\n', (2566, 2575), True, 'import numpy as np\n'), ((2945, 2964), 'numpy.isclose', 'np.isclose', (['D', 'true'], {}), '(D, true)\n', (2955, 2964), True, 'import numpy as np\n'), ((5287, 5306), 'numpy.isclose', 'np.isclose', (['D', 'true'], {}), '(D, true)\n', (5297, 5306), True, 'import numpy as np\n')] |
from ..util import cached, search, llist, WeakRefProperty, SourceError
from ..containers.basereader import Track
import threading
import numpy
from collections import OrderedDict
from itertools import count
from copy import deepcopy
import weakref
def notifyIterate(iterator, func):
    """Generator wrapper: invoke *func*(item) as a side effect, then yield the item."""
    for element in iterator:
        func(element)
        yield element
class CacheResettingProperty(object):
    """Data descriptor that stores its value on the instance under a
    leading-underscore name and calls ``inst.reset_cache()`` on every
    assignment, so cached values depending on the attribute are dropped."""
    def __init__(self, attrname):
        self.attrname = attrname
        self._attrname = "_" + attrname
    def __get__(self, inst, cls):
        # Class-level access returns the descriptor object itself.
        return self if inst is None else getattr(inst, self._attrname)
    def __set__(self, inst, value):
        # Invalidate the owner's caches before storing the new value.
        inst.reset_cache()
        setattr(inst, self._attrname, value)
class BaseFilter(object):
    """
    Base class for filter objects.

    This class also serves as a filter that does nothing: timestamps,
    durations, index maps, and frames are forwarded unchanged from the
    upstream (`prev`/`source`). Subclasses override `_processFrame`/
    `_processFrames` and the index maps to implement real behavior.
    Filters register themselves as weak "monitors" on their upstream so
    cached values can be invalidated when the upstream changes.
    """
    from copy import deepcopy as copy
    # Media types this filter accepts as a source.
    allowedtypes = ("audio", "video")
    @property
    def __name__(self):
        # Mirror function-style introspection: report the class name.
        return self.__class__.__name__
    def __new__(cls, *args, **kwargs):
        # Pre-create link/monitor attributes so the property setters that
        # run during __init__ can safely read them before first assignment.
        self = super().__new__(cls)
        self._source = None
        self._prev = None
        self.next = None
        self._parent = None
        self._monitors = {}
        return self
    def __init__(self, source=None, prev=None, next=None, parent=None,
                 name=None, notify_input=None, notify_output=None):
        """
        Args:
            source: upstream source (Track or BaseFilter).
            prev, next: neighboring filters in a chain.
            parent: enclosing FilterChain, if any.
            name: optional display name (included in serialized state).
            notify_input, notify_output: optional callables invoked on
                each frame entering/leaving this filter.
        """
        self.parent = parent
        try:
            self.source = source
        except AttributeError:
            pass
        self.next = next
        self.prev = prev
        self.name = name
        self.notify_input = notify_input
        self.notify_output = notify_output
        self.lock = threading.RLock()
    def addMonitor(self, mon):
        """Register *mon* to be notified (via reset_cache) when this filter changes."""
        # Only a weak reference is stored, so monitors do not keep each
        # other alive; dead entries are pruned in reset_cache().
        self._monitors[id(mon)] = weakref.ref(mon)
        if isinstance(mon, BaseFilter):
            mon.reset_cache()
    def removeMonitor(self, mon):
        """Unregister *mon*; an id entry now pointing at a different object is left alone."""
        i = id(mon)
        if i in self._monitors and self._monitors[i]() is mon:
            del self._monitors[i]
    @WeakRefProperty
    def source(self, value):
        # FilterChain members implicitly source from the chain's upstream.
        if isinstance(self.parent, FilterChain):
            return self.parent.prev
        return value
    @source.setter
    def source(self, value):
        if isinstance(self.parent, FilterChain):
            raise ValueError(
                "'source' property is read-only for FilterChain members.")
        oldsource = self.source
        # Move the monitor registration from the old source to the new one.
        if isinstance(value, BaseFilter):
            value.addMonitor(self)
        if isinstance(oldsource, BaseFilter) and oldsource is not self._prev:
            oldsource.removeMonitor(self)
        return value
    @WeakRefProperty
    def prev(self, value):
        """Effective upstream: explicit prev, else source, else the parent chain's prev."""
        if isinstance(self._source, weakref.ref):
            source = self._source()
        else:
            source = self._source
        parent = self.parent
        if isinstance(parent, BaseFilter):
            return value or source or self.parent.prev
        return value or source
    @prev.setter
    def prev(self, value):
        oldprev = self._prev() if isinstance(self._prev, weakref.ref) else None
        if isinstance(value, BaseFilter):
            value.addMonitor(self)
        if isinstance(oldprev, BaseFilter) and oldprev is not self._source:
            oldprev.removeMonitor(self)
        return value
    def reset_cache(self, start=0, end=None):
        """Drop cached values on this filter and propagate the reset to live monitors."""
        try:
            del self.duration
        except AttributeError:
            pass
        for i, ref in list(self._monitors.items()):
            mon = ref()
            if mon is None:
                # Filter has been deallocated, removed from monitors.
                del self._monitors[i]
            elif isinstance(mon, BaseFilter):
                mon.reset_cache(start, end)
    def isValidSource(self, source):
        """Return True if *source* may feed this filter (type match, no self/cycles)."""
        if source.type not in self.allowedtypes:
            return False
        if self is source:
            return False
        # Reject sources that (transitively) depend on this filter.
        if isinstance(source, BaseFilter) and self in source.dependencies:
            return False
        return True
    def __reduce__(self):
        # Reconstruct via no-arg constructor + __setstate__.
        return type(self), (), self.__getstate__()
    def __getstate__(self):
        state = OrderedDict()
        if self.name is not None:
            state["name"] = self.name
        try:
            if isinstance(self._source, weakref.ref):
                source = self._source()
            else:
                source = self._source
            if source is not None:
                # NOTE(review): calls self._source() a second time; if
                # _source is ever a strong (non-weakref) object this raises
                # TypeError, which the AttributeError handler below does
                # not catch. Presumably `source` was intended — verify.
                state["source"] = self._source()
        except AttributeError:
            pass
        return state
    def __setstate__(self, state):
        # Chain members get their source from the chain, never from state.
        if not isinstance(self.parent, FilterChain):
            try:
                self.source = state.get("source", state.get("prev"))
            except AttributeError:
                pass
        self.name = state.get("name")
    def __deepcopy__(self, memo):
        # Rebuild via the __reduce__ protocol; the "source" entry is kept
        # as a shared reference (not deep-copied) so the copy still points
        # at the same upstream.
        reduced = self.__reduce__()
        if len(reduced) == 2:
            cls, args = reduced
            state = items = dictitems = None
        elif len(reduced) == 3:
            cls, args, state = reduced
            items = dictitems = None
        if len(reduced) == 4:
            cls, args, state, items = reduced
            dictitems = None
        if len(reduced) == 5:
            cls, args, state, items, dictitems = reduced
        new = cls(*args)
        if state is not None:
            if "source" in state:
                source = state.pop("source")
                newstate = deepcopy(state, memo)
                newstate["source"] = source
            else:
                newstate = deepcopy(state, memo)
            new.__setstate__(newstate)
        if items is not None:
            new.extend(deepcopy(item, memo) for item in items)
        if dictitems is not None:
            new.update(deepcopy((key, value), memo)
                       for (key, value) in dictitems)
        return new
    @property
    def dependencies(self):
        """Set of upstream filters/tracks this filter transitively depends on."""
        if isinstance(self.prev, BaseFilter):
            return self.prev.dependencies.union({self.prev})
        if isinstance(self.prev, Track) and self.prev.container is not None:
            return {self.prev, self.prev.container}
        return set()
    def __lt__(self, other):
        # Ordering by dependency: self < other when other depends on self.
        if self in other.dependencies:
            return True
        return False
    def __gt__(self, other):
        if other in self.dependencies:
            return True
        return False
    @property
    def type(self):
        # "audio"/"video", inherited from upstream; None with no upstream.
        if self.prev is not None:
            return self.prev.type
    @property
    def time_base(self):
        # NOTE(review): a set-but-falsy _time_base yields None here rather
        # than deferring to prev; the prev fallback only applies when
        # _time_base was never set at all.
        try:
            if self._time_base:
                return self._time_base
        except AttributeError:
            if self.prev:
                return self.prev.time_base
    @time_base.setter
    def time_base(self, value):
        self._time_base = value
    @time_base.deleter
    def time_base(self):
        del self._time_base
    @cached
    def pts_time(self):
        # Per-frame timestamps in seconds, forwarded from upstream.
        if self.prev is not None:
            return self.prev.pts_time
    @pts_time.deleter
    def pts_time(self):
        # Cascade: invalidating pts_time also invalidates the derived pts.
        del self.pts
    @cached
    def pts(self):
        # Integer timestamps in time_base units.
        # NOTE(review): numpy.int0 is deprecated/removed in modern NumPy.
        return numpy.int0(self.pts_time/self.time_base)
    @property
    def defaultDuration(self):
        if self.prev:
            return self.prev.defaultDuration
    @cached
    def duration(self):
        if self.prev is not None:
            return self.prev.duration
    @cached
    def framecount(self):
        """Output frame count: scan indexMap backwards for the last mapped index."""
        if self.prev is not None and self.prev.framecount:
            for k in count(self.prev.framecount - 1, -1):
                n = self.indexMap[k]
                if n not in (None, -1):
                    return n + 1
        return 0
    @framecount.deleter
    def framecount(self):
        del self.duration
    def frameIndexFromPts(self, pts, dir="+"):
        """Find the frame index at/after ("+") or at/before ("-") *pts*."""
        return search(self.pts, pts, dir)
    def frameIndexFromPtsTime(self, pts_time, dir="+"):
        # Half a time_base is added so values round to the nearest frame.
        return search(self.pts_time, pts_time + self.time_base/2, dir)
    @cached
    def cumulativeIndexMap(self):
        """Map original-source frame index -> this filter's output index (-1 = dropped)."""
        if hasattr(self._prev, "cumulativeIndexMap"):
            n = self._prev.cumulativeIndexMap
        else:
            n = numpy.arange(self.prev.framecount)
        nonneg = n >= 0
        results = -numpy.ones(n.shape, dtype=numpy.int0)
        results[nonneg] = self.indexMap[n[nonneg]]
        return results
    @cached
    def cumulativeIndexReverseMap(self):
        """Map this filter's output index -> original-source frame index."""
        n = self.reverseIndexMap
        if hasattr(self._prev, "cumulativeIndexReverseMap"):
            n = self._prev.cumulativeIndexReverseMap[n]
        return n
    @cached
    def indexMap(self):
        # Identity mapping by default (this filter drops/reorders nothing).
        return numpy.arange(self.prev.framecount)
    @cached
    def reverseIndexMap(self):
        return numpy.arange(self.prev.framecount)
    @indexMap.deleter
    def indexMap(self):
        del self.cumulativeIndexMap
    @reverseIndexMap.deleter
    def reverseIndexMap(self):
        del self.cumulativeIndexReverseMap
    def _processFrames(self, iterable):
        # Identity pass-through; subclasses override to transform frames.
        return iterable
    def processFrames(self, iterable):
        """Run frames through _processFrames, with optional entry/exit callbacks."""
        if callable(self.notify_input):
            iterable = notifyIterate(iterable, self.notify_input)
        iterable = self._processFrames(iterable)
        if callable(self.notify_output):
            iterable = notifyIterate(iterable, self.notify_output)
        return iterable
    def iterFrames(self, start=0, end=None, whence="framenumber"):
        """Yield processed frames in [start, end), addressed by frame number or pts."""
        if whence == "pts":
            start = self.frameIndexFromPts(start)
            if end is not None:
                try:
                    end = self.frameIndexFromPts(end)
                except Exception:
                    end = None
        # Translate output indices to upstream indices before delegating.
        prev_start = self.reverseIndexMap[start]
        if end is not None and end < self.framecount:
            prev_end = self.reverseIndexMap[end]
        else:
            prev_end = None
        iterable = self.prev.iterFrames(prev_start, prev_end)
        for frame in self.processFrames(iterable):
            # Upstream may overshoot the requested window; trim by pts.
            k = self.frameIndexFromPts(frame.pts)
            if k < start:
                continue
            if end is not None and k >= end:
                break
            yield frame
    @property
    def keyframes(self):
        # NOTE(review): relies on len(self)/self.end, which plain
        # BaseFilter does not define — presumably only meaningful on
        # chain-like subclasses (e.g. FilterChain). Verify.
        if len(self):
            return self.end.keyframes
        prev = self.prev
        if isinstance(prev, BaseFilter):
            return prev.keyframes
        return set()
    def __next__(self):
        with self.lock:
            frame = next(self.prev)
            newframe = self._processFrame(frame)
            # Track the read position as the index just past this frame.
            self._tell = self.frameIndexFromPts(frame.pts) + 1
            return newframe
    def seek(self, offset):
        # NOTE(review): _backtranslate_index is not defined in this class;
        # subclasses are expected to provide it.
        with self.lock:
            self.prev.seek(self._backtranslate_index(offset))
            self._tell = offset
    def tell(self):
        with self.lock:
            return self._tell
    def _processFrame(self, frame):
        # Identity pass-through; subclasses override to transform a frame.
        return frame
    @classmethod
    def hasQtDlg(cls):
        """True if the class advertises a Qt configuration dialog widget."""
        from PyQt5.QtWidgets import QWidget
        return hasattr(cls, "QtDlgClass") and \
            callable(cls.QtDlgClass) and \
            isinstance(cls.QtDlgClass(), type) and \
            issubclass(cls.QtDlgClass(), QWidget)
    @classmethod
    def QtInitialize(cls, parent=None):
        if cls.hasQtDlg():
            return cls.QtDlgClass()(parent)
    def QtDlg(self, parent=None):
        dlg = self.QtInitialize(parent)
        dlg.setFilter(self)
        return dlg
    def __repr__(self):
        return f"<{self.__class__.__name__} filter at 0x{id(self):012x}>"
    def validate(self):
        """Return a list of configuration errors (empty when valid)."""
        if self.prev is None:
            return [SourceError("No source provided.", self)]
        return []
    @property
    def canIterPackets(self):
        """True when this filter and its upstream can supply packets directly."""
        return (hasattr(self, "iterPackets")
                and callable(self.iterPackets)
                and hasattr(self.prev, "canIterPackets")
                and self.prev.canIterPackets)
class FilterChain(llist, BaseFilter):
    """A linked list of filters that behaves as a single composite filter.

    When the chain is non-empty, properties defer to the last member
    (`self.end`); otherwise they fall through to the chain's own
    upstream (`self.prev`). Every list mutation re-wires the monitor
    registrations between the upstream, the first member, the last
    member, and the chain itself.
    """
    from copy import deepcopy as copy
    def __init__(self, filters=[], **kwargs):
        # NOTE(review): mutable default is benign here — the list is
        # copied immediately and the default itself is never mutated.
        llist.__init__(self, filters.copy())
        BaseFilter.__init__(self, **kwargs)
        if len(self):
            self.end.addMonitor(self)
    def _exchange_new_old(self, oldstart, newstart, oldend, newend):
        """Re-wire monitor registrations after the chain's start/end changed."""
        if oldstart is not newstart and isinstance(self.prev, BaseFilter):
            if oldstart is not None:
                self.prev.removeMonitor(oldstart)
            if newstart is not None:
                self.prev.addMonitor(newstart)
        if oldend is not newend:
            if oldend is not None:
                oldend.removeMonitor(self)
            if newend is not None:
                newend.addMonitor(self)
    def _get_start_end(self):
        # Snapshot (first, last) members; (None, None) when empty.
        start = self.start if len(self) else None
        end = self.end if len(self) else None
        return (start, end)
    # Each list mutator below captures start/end before and after the
    # change and fixes up monitor registrations accordingly.
    def __setitem__(self, index, value):
        oldstart, oldend = self._get_start_end()
        super().__setitem__(index, value)
        newstart, newend = self._get_start_end()
        self._exchange_new_old(oldstart, newstart, oldend, newend)
    def __delitem__(self, index):
        oldstart, oldend = self._get_start_end()
        super().__delitem__(index)
        newstart, newend = self._get_start_end()
        self._exchange_new_old(oldstart, newstart, oldend, newend)
    def append(self, value):
        oldstart, oldend = self._get_start_end()
        super().append(value)
        newstart, newend = self._get_start_end()
        self._exchange_new_old(oldstart, newstart, oldend, newend)
    def insert(self, index, value):
        oldstart, oldend = self._get_start_end()
        super().insert(index, value)
        newstart, newend = self._get_start_end()
        self._exchange_new_old(oldstart, newstart, oldend, newend)
    def extend(self, values):
        oldstart, oldend = self._get_start_end()
        super().extend(values)
        newstart, newend = self._get_start_end()
        self._exchange_new_old(oldstart, newstart, oldend, newend)
    def clear(self):
        oldstart, oldend = self._get_start_end()
        super().clear()
        self._exchange_new_old(oldstart, None, oldend, None)
    @WeakRefProperty
    def source(self, value):
        if isinstance(self.parent, FilterChain):
            return self.parent.prev
        return value
    @source.setter
    def source(self, value):
        if isinstance(self.parent, FilterChain):
            raise ValueError(
                "'source' property is read-only for FilterChain members.")
        oldsource = self.source
        # The new source feeds the first member when the chain is
        # non-empty, otherwise the chain itself.
        if isinstance(value, BaseFilter):
            if len(self):
                value.addMonitor(self.start)
            else:
                value.addMonitor(self)
        if (isinstance(oldsource, BaseFilter)
                and oldsource not in (self._prev, value)):
            if len(self):
                oldsource.removeMonitor(self.start)
            else:
                oldsource.removeMonitor(self)
        return value
    def isValidSource(self, other):
        """Valid only if *other* is acceptable to the chain AND every member."""
        if not super().isValidSource(other):
            return False
        for item in self:
            if not item.isValidSource(other):
                return False
        return True
    def __hash__(self):
        # llist is unhashable; restore identity hashing from BaseFilter.
        return BaseFilter.__hash__(self)
    @property
    def format(self):
        if self.end is not None:
            return self.end.format
        elif self.prev is not None:
            return self.prev.format
    @property
    def sar(self):
        if self.end is not None:
            return self.end.sar
        elif self.prev is not None:
            return self.prev.sar
    @property
    def defaultDuration(self):
        if self.end is not None:
            return self.end.defaultDuration
        elif self.prev is not None:
            return self.prev.defaultDuration
    @property
    def width(self):
        if self.end is not None:
            return self.end.width
        elif self.prev is not None:
            return self.prev.width
    @property
    def height(self):
        if self.end is not None:
            return self.end.height
        elif self.prev is not None:
            return self.prev.height
    @cached
    def pts_time(self):
        if self.end is not None:
            return self.end.pts_time
        elif self.prev is not None:
            return self.prev.pts_time
    @cached
    def pts(self):
        if self.end is not None:
            return self.end.pts
        elif self.prev is not None:
            return self.prev.pts
    @cached
    def duration(self):
        if self.end is not None:
            return self.end.duration
        elif self.prev is not None:
            return self.prev.duration
    @cached
    def durations(self):
        if self.end is not None:
            return self.end.durations
        elif self.prev is not None:
            return self.prev.durations
    @property
    def layout(self):
        if self.end is not None:
            return self.end.layout
        if self.prev is not None:
            return self.prev.layout
    @property
    def channels(self):
        if self.end is not None:
            return self.end.channels
        if self.prev is not None:
            return self.prev.channels
    @property
    def rate(self):
        if self.end is not None:
            return self.end.rate
        if self.prev is not None:
            return self.prev.rate
    @cached
    def framecount(self):
        if self.end is not None:
            return self.end.framecount
        elif self.prev is not None:
            return self.prev.framecount
    @property
    def time_base(self):
        if self.end is not None:
            return self.end.time_base
        elif self.prev is not None:
            return self.prev.time_base
    @property
    def indexMap(self):
        # The chain's map is the cumulative map of its last member.
        if self.end is not None:
            return self.end.cumulativeIndexMap
        elif self.prev is not None:
            return numpy.arange(0, self.prev.framecount, dtype=numpy.int0)
    @property
    def reverseIndexMap(self):
        if self.end is not None:
            return self.end.cumulativeIndexReverseMap
        elif self.prev is not None:
            return numpy.arange(0, self.prev.framecount, dtype=numpy.int0)
        # NOTE(review): unreachable — both branches above return, and when
        # end is None this would raise AttributeError anyway.
        return self.end.reverseIndexMap
    def reset_cache(self, start=0, end=None):
        """Drop every cached property, then notify monitors via the base class."""
        del self.cumulativeIndexMap
        del self.cumulativeIndexReverseMap
        del self.pts
        del self.pts_time
        del self.framecount
        del self.durations
        del self.duration
        super().reset_cache(start, end)
    def _processFrames(self, iterable, through=None):
        """Pipe frames through each member, optionally stopping after *through*
        (a member filter instance, or its integer index in the chain)."""
        if isinstance(through, (int, numpy.int0)):
            through = self[through]
        for filter in self:
            iterable = filter.processFrames(iterable)
            if filter is through:
                break
        return iterable
    def processFrames(self, iterable, through=None):
        if callable(self.notify_input):
            iterable = notifyIterate(iterable, self.notify_input)
        iterable = self._processFrames(iterable, through)
        if callable(self.notify_output):
            iterable = notifyIterate(iterable, self.notify_output)
        return iterable
    def iterFrames(self, start=0, end=None, whence="framenumber"):
        # Delegate to the last member (which pulls through the whole
        # chain), or straight to the upstream when the chain is empty.
        if self.end is not None:
            return self.end.iterFrames(start, end, whence)
        elif self.prev is not None:
            return self.prev.iterFrames(start, end, whence)
    @staticmethod
    def QtDlgClass():
        from transcode.pyqtgui.qfilterchain import QFilterChain
        return QFilterChain
    def __getstate__(self):
        return BaseFilter.__getstate__(self)
    def __setstate__(self, state):
        BaseFilter.__setstate__(self, state)
    def __reduce__(self):
        # The fourth element (iterator of members) lets pickle/deepcopy
        # re-populate the list via extend().
        return type(self), (), self.__getstate__(), iter(self)
    def __repr__(self):
        return (f"<{self.__class__.__name__} ({len(self)} filters) "
                f"at 0x{id(self):012x}>")
| [
"collections.OrderedDict",
"numpy.ones",
"numpy.int0",
"threading.RLock",
"itertools.count",
"copy.deepcopy",
"weakref.ref",
"numpy.arange"
] | [((1703, 1720), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (1718, 1720), False, 'import threading\n'), ((1787, 1803), 'weakref.ref', 'weakref.ref', (['mon'], {}), '(mon)\n', (1798, 1803), False, 'import weakref\n'), ((4184, 4197), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4195, 4197), False, 'from collections import OrderedDict\n'), ((7185, 7227), 'numpy.int0', 'numpy.int0', (['(self.pts_time / self.time_base)'], {}), '(self.pts_time / self.time_base)\n', (7195, 7227), False, 'import numpy\n'), ((8672, 8706), 'numpy.arange', 'numpy.arange', (['self.prev.framecount'], {}), '(self.prev.framecount)\n', (8684, 8706), False, 'import numpy\n'), ((8766, 8800), 'numpy.arange', 'numpy.arange', (['self.prev.framecount'], {}), '(self.prev.framecount)\n', (8778, 8800), False, 'import numpy\n'), ((7567, 7602), 'itertools.count', 'count', (['(self.prev.framecount - 1)', '(-1)'], {}), '(self.prev.framecount - 1, -1)\n', (7572, 7602), False, 'from itertools import count\n'), ((8205, 8239), 'numpy.arange', 'numpy.arange', (['self.prev.framecount'], {}), '(self.prev.framecount)\n', (8217, 8239), False, 'import numpy\n'), ((8284, 8321), 'numpy.ones', 'numpy.ones', (['n.shape'], {'dtype': 'numpy.int0'}), '(n.shape, dtype=numpy.int0)\n', (8294, 8321), False, 'import numpy\n'), ((5508, 5529), 'copy.deepcopy', 'deepcopy', (['state', 'memo'], {}), '(state, memo)\n', (5516, 5529), False, 'from copy import deepcopy\n'), ((5620, 5641), 'copy.deepcopy', 'deepcopy', (['state', 'memo'], {}), '(state, memo)\n', (5628, 5641), False, 'from copy import deepcopy\n'), ((18039, 18094), 'numpy.arange', 'numpy.arange', (['(0)', 'self.prev.framecount'], {'dtype': 'numpy.int0'}), '(0, self.prev.framecount, dtype=numpy.int0)\n', (18051, 18094), False, 'import numpy\n'), ((18284, 18339), 'numpy.arange', 'numpy.arange', (['(0)', 'self.prev.framecount'], {'dtype': 'numpy.int0'}), '(0, self.prev.framecount, dtype=numpy.int0)\n', (18296, 18339), False, 'import numpy\n'), 
((5736, 5756), 'copy.deepcopy', 'deepcopy', (['item', 'memo'], {}), '(item, memo)\n', (5744, 5756), False, 'from copy import deepcopy\n'), ((5834, 5862), 'copy.deepcopy', 'deepcopy', (['(key, value)', 'memo'], {}), '((key, value), memo)\n', (5842, 5862), False, 'from copy import deepcopy\n')] |
import h5py
import numpy as np
from tensorflow.keras.utils import to_categorical
import os
# to test generator, values = next(generator) in code
def ensureDir(filePath):
    '''Create the directory at filePath (including parents) if it does
    not already exist.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the previous
    exists-then-create sequence, which could raise FileExistsError when
    two processes raced to create the same directory.
    '''
    os.makedirs(filePath, exist_ok=True)
def generator(h5file, indexes, batch_size):
    """Infinite batch generator over *indexes* of an open HDF5 file.

    Yields (X, Y) pairs where X stacks channel-expanded "RNASeq"
    samples and Y stacks one-hot "label" vectors (33 classes, uint8).
    Samples left over at the end of a pass over *indexes* carry into
    the next pass, so every yielded batch has exactly batch_size items.
    """
    batch_x = []
    batch_y = []
    while True:
        for index in indexes:
            batch_x.append(np.expand_dims(h5file["RNASeq"][index], axis=-1))
            batch_y.append(to_categorical(h5file["label"][index], num_classes=33, dtype=np.uint8))
            if len(batch_x) >= batch_size:
                yield np.asarray(batch_x), np.asarray(batch_y)
                batch_x = []
                batch_y = []
| [
"os.path.exists",
"tensorflow.keras.utils.to_categorical",
"os.makedirs",
"numpy.asarray",
"numpy.expand_dims"
] | [((272, 296), 'os.path.exists', 'os.path.exists', (['filePath'], {}), '(filePath)\n', (286, 296), False, 'import os\n'), ((300, 321), 'os.makedirs', 'os.makedirs', (['filePath'], {}), '(filePath)\n', (311, 321), False, 'import os\n'), ((465, 513), 'numpy.expand_dims', 'np.expand_dims', (["h5file['RNASeq'][index]"], {'axis': '(-1)'}), "(h5file['RNASeq'][index], axis=-1)\n", (479, 513), True, 'import numpy as np\n'), ((536, 606), 'tensorflow.keras.utils.to_categorical', 'to_categorical', (["h5file['label'][index]"], {'num_classes': '(33)', 'dtype': 'np.uint8'}), "(h5file['label'][index], num_classes=33, dtype=np.uint8)\n", (550, 606), False, 'from tensorflow.keras.utils import to_categorical\n'), ((759, 772), 'numpy.asarray', 'np.asarray', (['X'], {}), '(X)\n', (769, 772), True, 'import numpy as np\n'), ((773, 786), 'numpy.asarray', 'np.asarray', (['Y'], {}), '(Y)\n', (783, 786), True, 'import numpy as np\n')] |
from PIL import Image
import tensorflow as tf
from config import MODEL_META_DATA as model_meta
from maxfw.model import MAXModelWrapper
import io
import numpy as np
import logging
from config import PATH_TO_CKPT, PATH_TO_LABELS, NUM_CLASSES
# TODO maybe a better way to import this?
import sys
# Make the sibling `utils` package importable when run from this directory.
sys.path.insert(0, '../')
from utils import label_map_util
# Module-level logger; note this is the ROOT logger, not getLogger(__name__).
logger = logging.getLogger()
class ModelWrapper(MAXModelWrapper):
    """TF1 object-detection wrapper.

    Loads a frozen inference graph and a label map at construction time,
    and `_predict` runs single-image inference, returning the detections
    whose score exceeds a caller-supplied threshold.
    """
    MODEL_META_DATA = model_meta
    def __init__(self, model_file=PATH_TO_CKPT, label_file=PATH_TO_LABELS):
        """Load the frozen graph and build the category index.

        Args:
            model_file: path to the frozen .pb inference graph.
            label_file: path to the label-map .pbtxt file.
        """
        logger.info('Loading model from: {}...'.format(model_file))
        # NOTE(review): `detection_graph` is created but never populated --
        # the GraphDef below is imported into `graph` instead -- and `sess`
        # (bound to the empty `detection_graph`) is never stored or used;
        # `_predict` opens its own Session. Looks like leftover code.
        detection_graph = tf.Graph()
        graph = tf.Graph()
        sess = tf.Session(graph=detection_graph)
        # load the graph ===
        # loading a (frozen) TensorFlow model into memory
        with graph.as_default():
            od_graph_def = tf.GraphDef()
            with tf.gfile.GFile(model_file, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        # loading a label map
        label_map = label_map_util.load_labelmap(label_file)
        categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
                                                                    use_display_name=True)
        category_index = label_map_util.create_category_index(categories)
        # set up instance variables
        self.graph = graph
        self.category_index = category_index
        self.categories = categories
    def _read_image(self, image_data):
        """Decode raw image bytes into an RGB PIL image."""
        image = Image.open(io.BytesIO(image_data)).convert("RGB")
        return image
    def _pre_process(self, image):
        """Convert a PIL image into an (height, width, 3) uint8 numpy array."""
        (im_width, im_height) = image.size
        return np.array(image.getdata()).reshape(
            (im_height, im_width, 3)).astype(np.uint8)
    def _predict(self, imageRaw, threshold): # was originally run_inference_for_single_image
        """Run detection on one raw image.

        Args:
            imageRaw: PIL image (output of `_read_image`).
            threshold: minimum detection score to include a prediction.
        Returns:
            list of dicts with 'label_id', 'label', 'probability' and
            'detection_box' (normalized [ymin, xmin, ymax, xmax]).
        """
        image = self._pre_process(imageRaw)
        logger.info('image loaded')
        with self.graph.as_default():
            with tf.Session() as sess:
                # Get handles to input and output tensors
                ops = tf.get_default_graph().get_operations()
                all_tensor_names = {output.name for op in ops for output in op.outputs}
                tensor_dict = {}
                for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes',
                            'detection_masks']:
                    tensor_name = key + ':0'
                    if tensor_name in all_tensor_names:
                        tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)
                if 'detection_masks' in tensor_dict:
                    # The following processing is only for single image
                    detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                    detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                    # Re-frame is required to translate mask from box coordinates to image coordinates and fit the image
                    # size.
                    real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                    detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                    detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                    # NOTE(review): `utils_ops` is not imported anywhere in this
                    # file -- this branch raises NameError for models exposing
                    # 'detection_masks'. Needs something like
                    # `from object_detection.utils import ops as utils_ops`.
                    detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(detection_masks,
                                                                                       detection_boxes,
                                                                                       image.shape[0],
                                                                                       image.shape[1])
                    detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                    # Follow the convention by adding back the batch dimension
                    tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
                image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')
                # Run inference
                output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})
                # all outputs are float32 numpy arrays, so convert types as appropriate
                output_dict['num_detections'] = int(output_dict['num_detections'][0])
                output_dict['detection_classes'] = output_dict[
                    'detection_classes'][0].astype(np.uint8)
                output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
                output_dict['detection_scores'] = output_dict['detection_scores'][0]
                if 'detection_masks' in output_dict:
                    output_dict['detection_masks'] = output_dict['detection_masks'][0]
        # TODO: Threshold setting of 0.7 is only an ad hoc setting to limit result size...
        label_preds = []
        for i, label_id in enumerate(output_dict['detection_classes']):
            if output_dict['detection_scores'][i] > threshold: # where to set this?
                label_preds.append(
                    {'label_id': label_id,
                     'label': self.category_index[label_id]['name'],
                     'probability': output_dict['detection_scores'][i],
                     'detection_box': output_dict['detection_boxes'][i].tolist()
                     }
                )
        # sending top 5 entries to output
        # for i in range(min(5,len(label_preds))): print(label_preds[i])
        return label_preds
| [
"logging.getLogger",
"tensorflow.Graph",
"utils.label_map_util.load_labelmap",
"sys.path.insert",
"tensorflow.slice",
"tensorflow.Session",
"io.BytesIO",
"tensorflow.GraphDef",
"utils.label_map_util.convert_label_map_to_categories",
"utils.label_map_util.create_category_index",
"tensorflow.impor... | [((294, 319), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../"""'], {}), "(0, '../')\n", (309, 319), False, 'import sys\n'), ((363, 382), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (380, 382), False, 'import logging\n'), ((627, 637), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (635, 637), True, 'import tensorflow as tf\n'), ((654, 664), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (662, 664), True, 'import tensorflow as tf\n'), ((680, 713), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'detection_graph'}), '(graph=detection_graph)\n', (690, 713), True, 'import tensorflow as tf\n'), ((862, 875), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (873, 875), True, 'import tensorflow as tf\n'), ((1161, 1201), 'utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['label_file'], {}), '(label_file)\n', (1189, 1201), False, 'from utils import label_map_util\n'), ((1227, 1341), 'utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'NUM_CLASSES', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n NUM_CLASSES, use_display_name=True)\n', (1273, 1341), False, 'from utils import label_map_util\n'), ((1438, 1486), 'utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (1474, 1486), False, 'from utils import label_map_util\n'), ((893, 925), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['model_file', '"""rb"""'], {}), "(model_file, 'rb')\n", (907, 925), True, 'import tensorflow as tf\n'), ((1059, 1101), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (1078, 1101), True, 'import tensorflow as tf\n'), ((2174, 2186), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2184, 2186), True, 'import tensorflow as tf\n'), ((1700, 1722), 'io.BytesIO', 
'io.BytesIO', (['image_data'], {}), '(image_data)\n', (1710, 1722), False, 'import io\n'), ((2953, 3000), 'tensorflow.squeeze', 'tf.squeeze', (["tensor_dict['detection_boxes']", '[0]'], {}), "(tensor_dict['detection_boxes'], [0])\n", (2963, 3000), True, 'import tensorflow as tf\n'), ((3039, 3086), 'tensorflow.squeeze', 'tf.squeeze', (["tensor_dict['detection_masks']", '[0]'], {}), "(tensor_dict['detection_masks'], [0])\n", (3049, 3086), True, 'import tensorflow as tf\n'), ((3277, 3328), 'tensorflow.cast', 'tf.cast', (["tensor_dict['num_detections'][0]", 'tf.int32'], {}), "(tensor_dict['num_detections'][0], tf.int32)\n", (3284, 3328), True, 'import tensorflow as tf\n'), ((3367, 3426), 'tensorflow.slice', 'tf.slice', (['detection_boxes', '[0, 0]', '[real_num_detection, -1]'], {}), '(detection_boxes, [0, 0], [real_num_detection, -1])\n', (3375, 3426), True, 'import tensorflow as tf\n'), ((3465, 3531), 'tensorflow.slice', 'tf.slice', (['detection_masks', '[0, 0, 0]', '[real_num_detection, -1, -1]'], {}), '(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])\n', (3473, 3531), True, 'import tensorflow as tf\n'), ((4198, 4241), 'tensorflow.expand_dims', 'tf.expand_dims', (['detection_masks_reframed', '(0)'], {}), '(detection_masks_reframed, 0)\n', (4212, 4241), True, 'import tensorflow as tf\n'), ((2276, 2298), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (2296, 2298), True, 'import tensorflow as tf\n'), ((4013, 4054), 'tensorflow.greater', 'tf.greater', (['detection_masks_reframed', '(0.5)'], {}), '(detection_masks_reframed, 0.5)\n', (4023, 4054), True, 'import tensorflow as tf\n'), ((4273, 4295), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (4293, 4295), True, 'import tensorflow as tf\n'), ((4443, 4467), 'numpy.expand_dims', 'np.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (4457, 4467), True, 'import numpy as np\n'), ((2735, 2757), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), 
'()\n', (2755, 2757), True, 'import tensorflow as tf\n')] |
import sys
import numpy as np
import random
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM, SimpleRNN
from keras.layers.wrappers import TimeDistributed
import os
import re
#generate_length = int(sys.argv[1]) #Number of chars to generate
#starter = sys.argv[2]
def generate_text(model, length,starter):
    """Generate `length` characters with the char-RNN `model`, seeded by
    `starter` (wrapped as "|\\n<starter> ").

    Samples greedily (argmax) one character at a time over a sliding
    window of at most the last 200 positions, printing a one-line live
    preview.  Relies on module globals `chars`, `char_to_ix` and
    `ix_to_char`.  Returns the generated string (prompt included).
    """
    vocab_size = len(chars)
    ix = []
    y_char = []
    starter_list = list("|\n" + starter + " ")
    # One-hot input buffer covering the whole sequence to be generated.
    x = np.zeros((1, length, vocab_size))
    for i in range(len(starter_list)):
        letter = starter_list[i]
        ix.append(char_to_ix[ letter ])
        y_char.append(letter)
        x[0, i, :][ix[-1]] = 1
    print("")
    for i in range(len(starter_list), length):
        #print ("(%i/%i)" % (i,length))
        # Restrict the model's context window to the last 200 characters.
        j = i - 200
        if j <0:
            j=0
        # Feed the previously emitted character at position i.
        x[0, i, :][ix[-1]] = 1
        #print(ix_to_char[ix[-1]], end = "")
        #ix = np.argmax(model.predict(x[:, :i+1, :])[0],1)
        # NOTE: `ix` is rebound here from a Python list to a numpy array
        # of per-timestep argmax indices; ix[-1] is the newest prediction.
        ix = np.argmax(model.predict(x[:, j:i+1, :])[0],1)
        new_char = ix_to_char[ix[-1]]
        y_char.append(new_char)
        #print(new_char, end= "")
        status_message = "(%i/%i)" % (i,length)
        preview = ('').join(y_char)
        preview = preview.replace('\n', ' ').replace('\r', '')
        preview = preview + status_message
        try:
            # Fit the live preview to the terminal width.
            cols, rows = os.get_terminal_size()
            if len(preview) > cols:
                preview = preview[cols * -1 :]
            print("\r" + preview ,end = "\r")
        except:
            # No terminal attached (e.g. piped output): fall back to an
            # occasional full-line progress print.
            if i % 20 == 0:
                print(preview)
    return('').join(y_char)
def loadText():
    """Read and return the entire training corpus as one string.

    Uses a context manager so the file handle is closed deterministically;
    the previous open(...).read() leaked the handle until garbage
    collection.
    """
    with open("buffy-summaries.txt", 'r') as corpus_file:
        return corpus_file.read()
def recreateNetwork():
    """Rebuild the 3-layer, 500-unit LSTM character model and load the
    trained weights from 'checkpoint_500_epoch_62_.hdf5'.

    Uses the module-level global `vocab_size` for the one-hot input and
    output dimensions.
    """
    n_layers = 3
    units = 500
    net = Sequential()
    # First recurrent layer declares the input shape: variable-length
    # sequences of one-hot character vectors.
    net.add(LSTM(units, input_shape=(None, vocab_size), return_sequences=True))
    for _ in range(n_layers - 1):
        net.add(LSTM(units, return_sequences=True))
    # Per-timestep softmax over the character vocabulary.
    net.add(TimeDistributed(Dense(vocab_size)))
    net.add(Activation('softmax'))
    net.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    net.load_weights('checkpoint_500_epoch_62_.hdf5')
    return net
def get_first_words():
    """Return the unique first word of every corpus entry, preserving the
    order of first appearance.  Reads the module-level global `data`,
    whose entries are separated by "\\n|\\n".
    """
    entries = data.split("\n|\n")
    # dict.fromkeys de-duplicates while keeping first-seen order.
    return list(dict.fromkeys(entry.split(" ")[0] for entry in entries))
def trim_generated(text):
    """Trim each '|'-separated attempt in `text` to at most 280 characters.

    Whole sentences (split on '. ') are dropped from the end of an
    attempt until it fits.  Attempts are re-joined with "\\n|\\n", a final
    period is appended, and runs of spaces are collapsed to one.
    """
    trimmed = []
    for attempt in text.split("|"):
        sentences = attempt.strip().split(". ")
        while len(". ".join(sentences)) > 280:
            sentences = sentences[:-1]
        trimmed.append(". ".join(sentences))
    return re.sub(" +", " ", "\n|\n".join(trimmed) + ".")
#generate text
# Script entry: load the corpus, rebuild the vocabulary and trained
# model, then generate one tweet-sized recording per distinct first word.
data = loadText()
chars = list(set(data))
chars.sort()
vocab_size = len(chars)
print("vocab size: ", vocab_size)
# Bidirectional character <-> index lookup tables.
ix_to_char = {ix:char for ix, char in enumerate(chars)}
char_to_ix = {char:ix for ix, char in enumerate(chars)}
model = recreateNetwork()
first_words = get_first_words()
for i in range(len(first_words)):
    starter = first_words[i]
    print("starter: %s" % starter)
    text = generate_text(model, 350, starter)
    text = trim_generated(text)
    #output generated text
    print("")
    print ("run %i/%i: Recording:%s" % (i,len(first_words), text))
    # Append each generated recording to the shared output file.
    with open("output.txt", 'a+') as file:
        file.write(text)
| [
"os.get_terminal_size",
"keras.layers.core.Activation",
"keras.models.Sequential",
"numpy.zeros",
"keras.layers.core.Dense",
"re.sub",
"keras.layers.recurrent.LSTM"
] | [((503, 536), 'numpy.zeros', 'np.zeros', (['(1, length, vocab_size)'], {}), '((1, length, vocab_size))\n', (511, 536), True, 'import numpy as np\n'), ((1615, 1627), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1625, 1627), False, 'from keras.models import Sequential, load_model\n'), ((2532, 2557), 're.sub', 're.sub', (['""" +"""', '""" """', 'output'], {}), "(' +', ' ', output)\n", (2538, 2557), False, 'import re\n'), ((1640, 1711), 'keras.layers.recurrent.LSTM', 'LSTM', (['hidden_dim'], {'input_shape': '(None, vocab_size)', 'return_sequences': '(True)'}), '(hidden_dim, input_shape=(None, vocab_size), return_sequences=True)\n', (1644, 1711), False, 'from keras.layers.recurrent import LSTM, SimpleRNN\n'), ((1858, 1879), 'keras.layers.core.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (1868, 1879), False, 'from keras.layers.core import Dense, Activation, Dropout\n'), ((1270, 1292), 'os.get_terminal_size', 'os.get_terminal_size', ([], {}), '()\n', (1290, 1292), False, 'import os\n'), ((1759, 1798), 'keras.layers.recurrent.LSTM', 'LSTM', (['hidden_dim'], {'return_sequences': '(True)'}), '(hidden_dim, return_sequences=True)\n', (1763, 1798), False, 'from keras.layers.recurrent import LSTM, SimpleRNN\n'), ((1827, 1844), 'keras.layers.core.Dense', 'Dense', (['vocab_size'], {}), '(vocab_size)\n', (1832, 1844), False, 'from keras.layers.core import Dense, Activation, Dropout\n')] |
# coding=utf-8
import sys
import numpy as np
import random
import math
import torch
import tensorflow as tf
#gradient clipping utility
def clip_func(clip_bound, clip_type, input):
    """Clip a gradient tensor.

    norm1: element-wise clamp into [-clip_bound, clip_bound].
    norm2: rescale so the L2 norm is at most clip_bound.
    A non-positive bound, or an unknown clip_type (after printing a
    warning), returns the input unchanged.
    """
    if clip_bound <= 0:
        return input
    if clip_type == "norm1":
        return tf.clip_by_value(input, -1 * clip_bound, clip_bound)
    if clip_type == "norm2":
        # Shrink only when the norm exceeds the bound (factor >= 1).
        shrink = max(float(tf.norm(input)) / clip_bound, 1)
        return input / shrink
    print("no such clip-type")
    return input
#Laplace noise and Gaussian noise helpers
def laplace_function(beta, size):
    """Draw Laplace(0, beta) noise of the given shape from the global
    NumPy RNG."""
    return np.random.laplace(loc=0, scale=beta, size=size)
def gauss_function(sigma):
    """Draw one zero-mean Gaussian sample with standard deviation
    ``sigma`` from the global `random` module state."""
    return random.gauss(0.0, sigma)
def get_tensor_size(param_size):
    """Return the total number of elements for a parameter shape.

    Accepts an iterable of TF1 ``Dimension`` objects (as before) or plain
    ints.  The original implementation unconditionally returned
    ``tmp.value`` and therefore raised AttributeError for an empty shape
    or an all-int shape (``tmp`` stays a plain int in those cases); both
    now work while Dimension shapes keep returning the same int.
    """
    total = 1
    for dim in param_size:
        total *= dim
    # TF1 Dimension exposes its int via .value; plain ints (including the
    # empty-shape case, where total is still the int 1) pass through.
    return total.value if hasattr(total, "value") else total
#sensitivity of the gradients over all parameters
def calculate_l1_sensitivity(clip_bound, param_size):
    """L1 sensitivity of a clipped gradient vector: 2 * C * d, where C is
    the clipping bound and d the number of parameters."""
    return param_size * clip_bound * 2
def calculate_l2_sensitivity(clip_bound):
    """L2 sensitivity of a clipped gradient: twice the clipping bound."""
    return clip_bound * 2
def calculate_l1_sensitivity_sample(grad_data,param_size,sample_num):
    """Estimate the L1 sensitivity of a gradient tensor from a random
    sample of its entries.

    Flattens `grad_data`, samples `sample_num` entries without
    replacement (or uses all of them when sample_num > param_size), and
    scales the sampled standard deviation by (1.13*d + 2.56*sqrt(d)).
    """
    # sample
    grad_data_1D=tf.reshape(grad_data,[-1])
    if(sample_num<=param_size):
        sample_index=random.sample(range(param_size),sample_num)
        sample_grad = tf.gather(grad_data_1D,sample_index)
    else:
        sample_grad=grad_data_1D
    # compute the standard deviation of the sampled entries
    mean, var = tf.nn.moments(sample_grad, axes=0)
    # array=sample_grad.numpy()
    # std=array.std()
    std_deviation=math.sqrt(float(var))
    return (1.13*param_size+2.56*math.sqrt(param_size))*std_deviation
def calculate_l2_sensitivity_sample(grad_data,param_size,sample_num):
    """Estimate the L2 sensitivity of a gradient tensor from a random
    sample of its entries.

    Same sampling scheme as `calculate_l1_sensitivity_sample`, but the
    sampled standard deviation is scaled by 1.45*sqrt(d).
    """
    # sample
    grad_data_1D = tf.reshape(grad_data, [-1])
    if (sample_num <= param_size):
        sample_index = random.sample(range(param_size), sample_num)
        sample_grad = tf.gather(grad_data_1D, sample_index)
    else:
        sample_grad = grad_data_1D
    # compute the standard deviation of the sampled entries
    mean, var = tf.nn.moments(sample_grad, axes=0)
    # array=sample_grad.numpy()
    # std=array.std()
    std_deviation = math.sqrt(float(var))
    return 1.45*math.sqrt(param_size)*std_deviation
def gen_laplace_beta(batchsize, Parallelnum, sensitivity, privacy_budget):
    """Laplace-mechanism scale b = sensitivity / epsilon', where the
    privacy budget epsilon is scaled by batchsize / Parallelnum."""
    scaled_epsilon = privacy_budget * float(batchsize) / Parallelnum
    return sensitivity / scaled_epsilon
def gen_gaussian_sigma(batchsize, Parallelnum, sensitivity, privacy_budget, privacyDelta):
    """Gaussian-mechanism noise scale for (epsilon, delta)-DP.

    sigma = sqrt(2 * ln(1.25 / delta)) * sensitivity / epsilon', with the
    privacy budget scaled by batchsize / Parallelnum exactly as in
    `gen_laplace_beta`.

    Fix: the previous version used (2 * ln(1.25/delta)) without the
    square root required by the classical Gaussian-mechanism bound
    sigma >= sqrt(2 ln(1.25/delta)) * Delta_2 / epsilon
    (Dwork & Roth, Appendix A), over-inflating the noise whenever
    2*ln(1.25/delta) > 1.
    """
    scaled_epsilon = privacy_budget * float(batchsize) / Parallelnum
    return math.sqrt(2.0 * math.log(1.25 / privacyDelta)) * sensitivity / scaled_epsilon
| [
"tensorflow.nn.moments",
"math.sqrt",
"math.log",
"tensorflow.gather",
"tensorflow.clip_by_value",
"numpy.random.laplace",
"tensorflow.reshape",
"random.gauss",
"tensorflow.norm"
] | [((548, 585), 'numpy.random.laplace', 'np.random.laplace', (['(0)', 'beta'], {'size': 'size'}), '(0, beta, size=size)\n', (565, 585), True, 'import numpy as np\n'), ((623, 645), 'random.gauss', 'random.gauss', (['(0)', 'sigma'], {}), '(0, sigma)\n', (635, 645), False, 'import random\n'), ((1021, 1048), 'tensorflow.reshape', 'tf.reshape', (['grad_data', '[-1]'], {}), '(grad_data, [-1])\n', (1031, 1048), True, 'import tensorflow as tf\n'), ((1275, 1309), 'tensorflow.nn.moments', 'tf.nn.moments', (['sample_grad'], {'axes': '(0)'}), '(sample_grad, axes=0)\n', (1288, 1309), True, 'import tensorflow as tf\n'), ((1579, 1606), 'tensorflow.reshape', 'tf.reshape', (['grad_data', '[-1]'], {}), '(grad_data, [-1])\n', (1589, 1606), True, 'import tensorflow as tf\n'), ((1844, 1878), 'tensorflow.nn.moments', 'tf.nn.moments', (['sample_grad'], {'axes': '(0)'}), '(sample_grad, axes=0)\n', (1857, 1878), True, 'import tensorflow as tf\n'), ((248, 300), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['input', '(-1 * clip_bound)', 'clip_bound'], {}), '(input, -1 * clip_bound, clip_bound)\n', (264, 300), True, 'import tensorflow as tf\n'), ((1167, 1204), 'tensorflow.gather', 'tf.gather', (['grad_data_1D', 'sample_index'], {}), '(grad_data_1D, sample_index)\n', (1176, 1204), True, 'import tensorflow as tf\n'), ((1732, 1769), 'tensorflow.gather', 'tf.gather', (['grad_data_1D', 'sample_index'], {}), '(grad_data_1D, sample_index)\n', (1741, 1769), True, 'import tensorflow as tf\n'), ((1992, 2013), 'math.sqrt', 'math.sqrt', (['param_size'], {}), '(param_size)\n', (2001, 2013), False, 'import math\n'), ((347, 361), 'tensorflow.norm', 'tf.norm', (['input'], {}), '(input)\n', (354, 361), True, 'import tensorflow as tf\n'), ((1438, 1459), 'math.sqrt', 'math.sqrt', (['param_size'], {}), '(param_size)\n', (1447, 1459), False, 'import math\n'), ((2389, 2418), 'math.log', 'math.log', (['(1.25 / privacyDelta)'], {}), '(1.25 / privacyDelta)\n', (2397, 2418), False, 'import math\n')] |
import os.path as path
import json
import numpy as np
import tensorflow as tf
from .tool_generate_data import GenerateData
from .hmm import HMM
# Reference-data generation script: run the exact HMM implementation on
# synthetic data and dump both the inputs and the expected posterior
# log-likelihood to JSON for use as a regression fixture.
thisdir = path.dirname(path.realpath(__file__))
generator = GenerateData(num_time=7)
# Make data
states, emissions = generator.data()
# Check reference
hmm = HMM(generator.num_states, generator.num_dims,
          obs=generator.num_obs, time=generator.num_time)
# Seed the reference model with the generator's true parameters,
# written directly into the model's private fields.
hmm._p0[:, :] = generator.pi[np.newaxis, :]
hmm._tp[:, :] = generator.A
hmm._mu[:, :] = generator.mu
hmm._sigma[:, :, :] = generator.Sigma
# NOTE(review): the transpose suggests posterior() expects time-major
# emissions, i.e. (time, batch, dims) -- confirm against HMM.posterior.
log_likelihood = hmm.posterior(np.transpose(emissions, [1, 0, 2]))
# Save input and output
with open(path.join(thisdir, 'log_likelihood.json'), 'w') as fp:
    json.dump({
        'config': {
            **generator.config
        },
        'input': {
            'emissions': emissions.tolist(),
            'pi': generator.pi.tolist(),
            'A': generator.A.tolist(),
            'mu': generator.mu.tolist(),
            'Sigma': generator.Sigma.tolist()
        },
        'output': log_likelihood.tolist()
    }, fp)
| [
"os.path.realpath",
"numpy.transpose",
"os.path.join"
] | [((169, 192), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (182, 192), True, 'import os.path as path\n'), ((581, 615), 'numpy.transpose', 'np.transpose', (['emissions', '[1, 0, 2]'], {}), '(emissions, [1, 0, 2])\n', (593, 615), True, 'import numpy as np\n'), ((652, 693), 'os.path.join', 'path.join', (['thisdir', '"""log_likelihood.json"""'], {}), "(thisdir, 'log_likelihood.json')\n", (661, 693), True, 'import os.path as path\n')] |
import numpy as np
import tensorflow as tf
import copy
# Fix the NumPy and TF graph-level seeds so runs are reproducible.
np.random.seed(1)
tf.set_random_seed(1)
class PolicyGradient:
    """Monte-Carlo policy-gradient (REINFORCE) agent with an LSTM policy
    network over variable-length observation sequences (TF1 graph mode).

    Trajectories are accumulated step by step via `store_transition` and
    the network is updated once per episode in `learn`.
    """
    def __init__(
            self,
            n_actions=2,
            n_features=87,
            learning_rate=0.01,
            reward_decay=0.95,
            prob_clip=0.06,
            output_graph=False,
    ):
        # dimensionality of the action space
        self.n_actions = n_actions
        # dimensionality of the state features
        self.n_features = n_features
        # learning rate
        self.lr = learning_rate
        # reward discount factor
        self.gamma = reward_decay
        # observations, actions and rewards collected over one trajectory
        self.ep_obs, self.ep_as, self.ep_rs = [],[],[]
        self.ep_length = []
        # build the policy network
        self._build_net()
        self.prob_clip = prob_clip
        # start a default session
        self.sess = tf.Session()
        # if output_graph:
        #     tf.summary.FileWriter("logs/", self.sess.graph)
        # initialize the variables in the session
        self.sess.run(tf.global_variables_initializer())
    # implementation of the policy-network construction
    def _build_net(self):
        """Build input placeholders, the LSTM policy head, the
        REINFORCE loss and the RMSProp training op."""
        with tf.name_scope('input'):
            # create placeholders as inputs
            self.tf_obs = tf.placeholder(tf.float32, [None,self.n_features], name="observations")
            self.tf_acts = tf.placeholder(tf.int32, [None,], name="actions_num")
            self.tf_vt = tf.placeholder(tf.float32, [None, ], name="actions_value")
            # a vector holding the number of timesteps of each sample --
            # this is the key to handling variable-length sequences
            self.seq_length = tf.placeholder(tf.int32, [None],name="seq_length")
            # self.today = tf.placeholder(tf.int32, [None], name="today")
        # # first layer
        # layer = tf.layers.dense(
        #     inputs=self.tf_obs,
        #     units=10,
        #     activation=tf.nn.tanh,
        #     kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
        #     bias_initializer=tf.constant_initializer(0.1),
        #     name='fc1',
        # )
        # # second layer
        # all_act = tf.layers.dense(
        #     inputs=layer,
        #     units=self.n_actions,
        #     activation=None,
        #     kernel_initializer=tf.random_normal_initializer(mean=0, stddev=0.3),
        #     bias_initializer=tf.constant_initializer(0.1),
        #     name='fc2'
        #
        # )
        #arg:output_dim
        basic_cell = tf.nn.rnn_cell.BasicLSTMCell(10)
        # Treat each scalar feature as one timestep: (batch, time, 1).
        X = tf.expand_dims(self.tf_obs, axis=2)
        # print(X)
        outputs,states = tf.nn.dynamic_rnn(basic_cell, X, dtype=tf.float32,
                                           sequence_length=self.seq_length, time_major=False)
        # softmax over the logits gives the probability of each action;
        # the LSTM's final hidden state h feeds the action head.
        c,self.h = states
        # s = tf.contrib.layers.fully_connected(
        #     self.today, 10, activation_fn=tf.nn.relu)
        self.all_act = tf.contrib.layers.fully_connected(
            self.h, 2, activation_fn = tf.nn.relu)
        self.all_act_prob = tf.nn.softmax(self.all_act, name='act_prob')
        # define the loss function
        with tf.name_scope('loss'):
            self.neg_log_prob = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=tf.clip_by_value(self.all_act,0.001,1,name=None),
                labels=self.tf_acts)
            # REINFORCE objective: -log pi(a|s) weighted by the return vt.
            self.loss = tf.reduce_sum(self.neg_log_prob*self.tf_vt)
            # self.neg_log_prob = tf.cast(self.tf_acts,tf.float32) * self.all_act_prob[:,1] \
            #                     + (1 - tf.cast(self.tf_acts,tf.float32)) * self.all_act_prob[:,0]
            # self.loss = -tf.reduce_sum(tf.log(self.neg_log_prob) * self.tf_vt)
        # define training: update the parameters
        with tf.name_scope('train'):
            self.train_op = tf.train.RMSPropOptimizer (self.lr).minimize(self.loss)
    # how to choose an action: sample from the current action-probability
    # distribution at state s
    def choose_action(self, observation,seq_length):
        """Sample an action for one observation sequence.

        Probabilities beyond [prob_clip, 1 - prob_clip] are forced to a
        deterministic choice; otherwise the action is drawn from the
        network's softmax.  Returns (action, (p0, p1)).
        """
        # print(observation,seq_length)
        prob_weights = self.sess.run(self.all_act_prob,
                                     feed_dict={self.tf_obs:observation,self.seq_length:np.array([seq_length])})
        # sample according to the given probabilities
        p = prob_weights.ravel()[0],prob_weights.ravel()[1]
        # print("--------------------------------")
        if p[0] > 1 - self.prob_clip:
            action = 0
        elif p[0] < self.prob_clip:
            action = 1
        else:
            action = np.random.choice(range(prob_weights.shape[1]), p=prob_weights.ravel())
        # print("Action Probability:", p)
        return action,p
    def greedy(self, observation,seq_length):
        """Pick the argmax action (no sampling); returns the action only."""
        prob_weights = self.sess.run(self.all_act_prob,
                                     feed_dict={self.tf_obs: observation,self.seq_length:np.array([seq_length])})
        # take the most probable action
        p = prob_weights.ravel()[0], prob_weights.ravel()[1]
        print("--------------------------------")
        print("Action Probability:", p)
        action = np.argmax(prob_weights.ravel())
        return action
    # store one step: append the state, action, reward and sequence
    # length of the current episode
    def store_transition(self, s, a, r, length):
        self.ep_obs.append(s)
        self.ep_as.append(a)
        self.ep_rs.append(r)
        self.ep_length.append(length)
    # learn: update the policy network once per finished episode
    def learn(self):
        """Run one REINFORCE update from the stored episode.

        Returns (loss, copy of sequence lengths, copy of rewards) and
        clears the episode buffers.
        """
        # compute the discounted returns of the episode
        discounted_ep_rs_norm = self._discount_and_norm_rewards()
        # print(np.array(self.ep_length))
        # print(np.vstack(self.ep_obs))
        # run the training op to update the parameters
        _,loss = self.sess.run([self.train_op,self.loss], feed_dict={
             self.tf_obs: np.vstack(self.ep_obs),
             self.tf_acts: np.array(self.ep_as),
             self.tf_vt: discounted_ep_rs_norm,
             self.seq_length: np.array(self.ep_length),
             # self.today: np.array(len(self.seq_length[-1])-1),
        })
        # clear the episode data (after taking copies to return)
        # print("******************************")
        # print("Reward:",self.ep_rs,"Loss:",loss)
        # print(discounted_ep_rs_norm)
        # print(self.ep_rs)
        seq_list = copy.deepcopy(np.array(self.ep_length))
        reward_list = copy.deepcopy(self.ep_rs)
        self.ep_obs, self.ep_as, self.ep_rs,self.ep_length = [], [],[],[]
        return loss,seq_list,reward_list
    #myself reward only end get a value
    def _expand_sparse_rewards(self):
        """Broadcast the final (sparse) episode reward to every timestep."""
        expand_ep_rs = np.zeros_like(self.ep_rs)
        for t in range(0, len(self.ep_rs)):
            expand_ep_rs[t] = self.ep_rs[-1]
        # normalization (disabled)
        # expand_ep_rs -= np.mean(expand_ep_rs)
        # if np.std(expand_ep_rs) > 0.00001:
        #     expand_ep_rs /= np.std(expand_ep_rs)
        return expand_ep_rs
    def _discount_and_norm_rewards(self):
        """Return the per-step discounted cumulative rewards (gamma-sum
        computed backwards over the episode)."""
        # discounted cumulative reward
        discounted_ep_rs =np.zeros_like(self.ep_rs)
        running_add = 0
        for t in reversed(range(0, len(self.ep_rs))):
            running_add = running_add * self.gamma + self.ep_rs[t]
            discounted_ep_rs[t] = running_add
        # normalization (disabled)
        # discounted_ep_rs-= np.mean(discounted_ep_rs)
        # if np.std(discounted_ep_rs) > 0.00001:
        #     discounted_ep_rs /= np.std(discounted_ep_rs)
        # print(discounted_ep_rs)
        return discounted_ep_rs
"tensorflow.train.RMSPropOptimizer",
"tensorflow.nn.rnn_cell.BasicLSTMCell",
"tensorflow.Session",
"tensorflow.contrib.layers.fully_connected",
"tensorflow.placeholder",
"tensorflow.reduce_sum",
"tensorflow.nn.dynamic_rnn",
"tensorflow.global_variables_initializer",
"numpy.array",
"tensorflow.name... | [((55, 72), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (69, 72), True, 'import numpy as np\n'), ((73, 94), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(1)'], {}), '(1)\n', (91, 94), True, 'import tensorflow as tf\n'), ((764, 776), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (774, 776), True, 'import tensorflow as tf\n'), ((2232, 2264), 'tensorflow.nn.rnn_cell.BasicLSTMCell', 'tf.nn.rnn_cell.BasicLSTMCell', (['(10)'], {}), '(10)\n', (2260, 2264), True, 'import tensorflow as tf\n'), ((2277, 2312), 'tensorflow.expand_dims', 'tf.expand_dims', (['self.tf_obs'], {'axis': '(2)'}), '(self.tf_obs, axis=2)\n', (2291, 2312), True, 'import tensorflow as tf\n'), ((2357, 2463), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['basic_cell', 'X'], {'dtype': 'tf.float32', 'sequence_length': 'self.seq_length', 'time_major': '(False)'}), '(basic_cell, X, dtype=tf.float32, sequence_length=self.\n seq_length, time_major=False)\n', (2374, 2463), True, 'import tensorflow as tf\n'), ((2688, 2758), 'tensorflow.contrib.layers.fully_connected', 'tf.contrib.layers.fully_connected', (['self.h', '(2)'], {'activation_fn': 'tf.nn.relu'}), '(self.h, 2, activation_fn=tf.nn.relu)\n', (2721, 2758), True, 'import tensorflow as tf\n'), ((2803, 2847), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['self.all_act'], {'name': '"""act_prob"""'}), "(self.all_act, name='act_prob')\n", (2816, 2847), True, 'import tensorflow as tf\n'), ((5827, 5852), 'copy.deepcopy', 'copy.deepcopy', (['self.ep_rs'], {}), '(self.ep_rs)\n', (5840, 5852), False, 'import copy\n'), ((6070, 6095), 'numpy.zeros_like', 'np.zeros_like', (['self.ep_rs'], {}), '(self.ep_rs)\n', (6083, 6095), True, 'import numpy as np\n'), ((6456, 6481), 'numpy.zeros_like', 'np.zeros_like', (['self.ep_rs'], {}), '(self.ep_rs)\n', (6469, 6481), True, 'import numpy as np\n'), ((908, 941), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (939, 941), 
True, 'import tensorflow as tf\n'), ((997, 1019), 'tensorflow.name_scope', 'tf.name_scope', (['"""input"""'], {}), "('input')\n", (1010, 1019), True, 'import tensorflow as tf\n'), ((1070, 1142), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.n_features]'], {'name': '"""observations"""'}), "(tf.float32, [None, self.n_features], name='observations')\n", (1084, 1142), True, 'import tensorflow as tf\n'), ((1169, 1221), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""actions_num"""'}), "(tf.int32, [None], name='actions_num')\n", (1183, 1221), True, 'import tensorflow as tf\n'), ((1248, 1304), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None]'], {'name': '"""actions_value"""'}), "(tf.float32, [None], name='actions_value')\n", (1262, 1304), True, 'import tensorflow as tf\n'), ((1396, 1447), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None]'], {'name': '"""seq_length"""'}), "(tf.int32, [None], name='seq_length')\n", (1410, 1447), True, 'import tensorflow as tf\n'), ((2877, 2898), 'tensorflow.name_scope', 'tf.name_scope', (['"""loss"""'], {}), "('loss')\n", (2890, 2898), True, 'import tensorflow as tf\n'), ((3114, 3159), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self.neg_log_prob * self.tf_vt)'], {}), '(self.neg_log_prob * self.tf_vt)\n', (3127, 3159), True, 'import tensorflow as tf\n'), ((3466, 3488), 'tensorflow.name_scope', 'tf.name_scope', (['"""train"""'], {}), "('train')\n", (3479, 3488), True, 'import tensorflow as tf\n'), ((5779, 5803), 'numpy.array', 'np.array', (['self.ep_length'], {}), '(self.ep_length)\n', (5787, 5803), True, 'import numpy as np\n'), ((3003, 3054), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.all_act', '(0.001)', '(1)'], {'name': 'None'}), '(self.all_act, 0.001, 1, name=None)\n', (3019, 3054), True, 'import tensorflow as tf\n'), ((3518, 3552), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['self.lr'], {}), 
'(self.lr)\n', (3543, 3552), True, 'import tensorflow as tf\n'), ((3852, 3874), 'numpy.array', 'np.array', (['[seq_length]'], {}), '([seq_length])\n', (3860, 3874), True, 'import numpy as np\n'), ((4491, 4513), 'numpy.array', 'np.array', (['[seq_length]'], {}), '([seq_length])\n', (4499, 4513), True, 'import numpy as np\n'), ((5308, 5330), 'numpy.vstack', 'np.vstack', (['self.ep_obs'], {}), '(self.ep_obs)\n', (5317, 5330), True, 'import numpy as np\n'), ((5358, 5378), 'numpy.array', 'np.array', (['self.ep_as'], {}), '(self.ep_as)\n', (5366, 5378), True, 'import numpy as np\n'), ((5456, 5480), 'numpy.array', 'np.array', (['self.ep_length'], {}), '(self.ep_length)\n', (5464, 5480), True, 'import numpy as np\n')] |
import pytest
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.datasets import load_diabetes
from tests.utils import resample_data
from deeprob.spn.structure.node import Sum, Product
from deeprob.spn.structure.leaf import Bernoulli, Gaussian
from deeprob.spn.learning.wrappers import learn_estimator
from deeprob.spn.algorithms.inference import log_likelihood
from deeprob.spn.learning.em import expectation_maximization
@pytest.fixture
def data():
    """Diabetes features binarized against each feature's median."""
    features, _ = load_diabetes(return_X_y=True)
    medians = np.median(features, axis=0)
    return (features < medians).astype(np.float32)
@pytest.fixture
def evi_data(data):
    """1000 resampled evidence rows, drawn with a fixed RNG seed."""
    rng = np.random.RandomState(42)
    return resample_data(data, 1000, rng)
@pytest.fixture
def blobs_data():
    """Two well-separated 2-D Gaussian blobs, 1000 samples total."""
    samples, _ = make_blobs(
        n_samples=1000, n_features=2, random_state=1337,
        centers=[[-1.0, 1.0], [1.0, -1.0]], cluster_std=0.25,
    )
    return samples
@pytest.fixture
def gaussian_spn():
    """Hand-built SPN over two Gaussian variables with deterministic ids."""
    leaf0_a = Gaussian(0, -0.5, 0.5)
    leaf1_a = Gaussian(1, 0.5, 0.5)
    leaf0_b = Gaussian(0, 0.5, 0.5)
    leaf1_b = Gaussian(1, -0.5, 0.5)
    prod0 = Product(children=[leaf0_a, leaf1_a])
    prod1 = Product(children=[leaf0_b, leaf1_b])
    prod2 = Product(children=[leaf0_a, leaf1_b])
    root = Sum(children=[prod0, prod1, prod2], weights=[0.3, 0.5, 0.2])
    # Assign ids in topological order: root, products, then leaves.
    root.id, prod0.id, prod1.id, prod2.id = 0, 1, 2, 3
    leaf0_a.id, leaf1_a.id, leaf0_b.id, leaf1_b.id = 4, 5, 6, 7
    return root
@pytest.fixture
def spn_mle(evi_data):
    """SPN with MLE Bernoulli leaves learned from the evidence data."""
    return learn_estimator(
        evi_data, [Bernoulli] * 10, [[0, 1]] * 10,
        learn_leaf='mle', split_rows='gmm', split_cols='gvs',
        min_rows_slice=512, random_state=42, verbose=False,
    )
@pytest.fixture
def spn_clt(evi_data):
    """SPN with binary Chow-Liu tree leaves learned from the evidence data."""
    return learn_estimator(
        evi_data, [Bernoulli] * 10, [[0, 1]] * 10,
        learn_leaf='binary-clt', split_rows='kmeans', split_cols='gvs',
        min_rows_slice=512, learn_leaf_kwargs={'to_pc': False},
        random_state=42, verbose=False,
    )
def test_spn_binary(spn_mle, evi_data):
    """EM fine-tuning of the MLE SPN should reach ~-5.3 mean log-likelihood."""
    expectation_maximization(
        spn_mle, evi_data, num_iter=100, batch_perc=0.1, step_size=0.5,
        random_init=False, random_state=42, verbose=False,
    )
    mean_ll = log_likelihood(spn_mle, evi_data).mean()
    assert np.isclose(mean_ll, -5.3, atol=5e-2)
def test_clt_binary(spn_clt, evi_data):
    """EM fine-tuning of the CLT SPN should reach ~-5.1 mean log-likelihood."""
    expectation_maximization(
        spn_clt, evi_data, num_iter=100, batch_perc=0.1, step_size=0.5,
        random_init=True, random_state=42, verbose=False,
    )
    mean_ll = log_likelihood(spn_clt, evi_data).mean()
    assert np.isclose(mean_ll, -5.1, atol=5e-2)
def test_spn_gaussian(gaussian_spn, blobs_data):
    """EM on the hand-built Gaussian SPN should fit the two blobs."""
    expectation_maximization(
        gaussian_spn, blobs_data,
        num_iter=25, batch_perc=0.1, step_size=0.5,
        random_init=True, random_state=42, verbose=False,
    )
    mean_ll = log_likelihood(gaussian_spn, blobs_data).mean()
    assert np.isclose(mean_ll, -0.7, atol=5e-2)
| [
"deeprob.spn.structure.node.Sum",
"numpy.isclose",
"deeprob.spn.structure.leaf.Gaussian",
"numpy.median",
"deeprob.spn.structure.node.Product",
"sklearn.datasets.make_blobs",
"deeprob.spn.learning.em.expectation_maximization",
"deeprob.spn.learning.wrappers.learn_estimator",
"sklearn.datasets.load_d... | [((491, 521), 'sklearn.datasets.load_diabetes', 'load_diabetes', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (504, 521), False, 'from sklearn.datasets import load_diabetes\n'), ((743, 860), 'sklearn.datasets.make_blobs', 'make_blobs', ([], {'n_samples': '(1000)', 'n_features': '(2)', 'random_state': '(1337)', 'centers': '[[-1.0, 1.0], [1.0, -1.0]]', 'cluster_std': '(0.25)'}), '(n_samples=1000, n_features=2, random_state=1337, centers=[[-1.0,\n 1.0], [1.0, -1.0]], cluster_std=0.25)\n', (753, 860), False, 'from sklearn.datasets import make_blobs\n'), ((1070, 1098), 'deeprob.spn.structure.node.Product', 'Product', ([], {'children': '[g0a, g1a]'}), '(children=[g0a, g1a])\n', (1077, 1098), False, 'from deeprob.spn.structure.node import Sum, Product\n'), ((1108, 1136), 'deeprob.spn.structure.node.Product', 'Product', ([], {'children': '[g0b, g1b]'}), '(children=[g0b, g1b])\n', (1115, 1136), False, 'from deeprob.spn.structure.node import Sum, Product\n'), ((1146, 1174), 'deeprob.spn.structure.node.Product', 'Product', ([], {'children': '[g0a, g1b]'}), '(children=[g0a, g1b])\n', (1153, 1174), False, 'from deeprob.spn.structure.node import Sum, Product\n'), ((1184, 1235), 'deeprob.spn.structure.node.Sum', 'Sum', ([], {'children': '[p0, p1, p2]', 'weights': '[0.3, 0.5, 0.2]'}), '(children=[p0, p1, p2], weights=[0.3, 0.5, 0.2])\n', (1187, 1235), False, 'from deeprob.spn.structure.node import Sum, Product\n'), ((1394, 1566), 'deeprob.spn.learning.wrappers.learn_estimator', 'learn_estimator', (['evi_data', '([Bernoulli] * 10)', '([[0, 1]] * 10)'], {'learn_leaf': '"""mle"""', 'split_rows': '"""gmm"""', 'split_cols': '"""gvs"""', 'min_rows_slice': '(512)', 'random_state': '(42)', 'verbose': '(False)'}), "(evi_data, [Bernoulli] * 10, [[0, 1]] * 10, learn_leaf='mle',\n split_rows='gmm', split_cols='gvs', min_rows_slice=512, random_state=42,\n verbose=False)\n", (1409, 1566), False, 'from deeprob.spn.learning.wrappers import 
learn_estimator\n'), ((1641, 1860), 'deeprob.spn.learning.wrappers.learn_estimator', 'learn_estimator', (['evi_data', '([Bernoulli] * 10)', '([[0, 1]] * 10)'], {'learn_leaf': '"""binary-clt"""', 'split_rows': '"""kmeans"""', 'split_cols': '"""gvs"""', 'min_rows_slice': '(512)', 'learn_leaf_kwargs': "{'to_pc': False}", 'random_state': '(42)', 'verbose': '(False)'}), "(evi_data, [Bernoulli] * 10, [[0, 1]] * 10, learn_leaf=\n 'binary-clt', split_rows='kmeans', split_cols='gvs', min_rows_slice=512,\n learn_leaf_kwargs={'to_pc': False}, random_state=42, verbose=False)\n", (1656, 1860), False, 'from deeprob.spn.learning.wrappers import learn_estimator\n'), ((1936, 2079), 'deeprob.spn.learning.em.expectation_maximization', 'expectation_maximization', (['spn_mle', 'evi_data'], {'num_iter': '(100)', 'batch_perc': '(0.1)', 'step_size': '(0.5)', 'random_init': '(False)', 'random_state': '(42)', 'verbose': '(False)'}), '(spn_mle, evi_data, num_iter=100, batch_perc=0.1,\n step_size=0.5, random_init=False, random_state=42, verbose=False)\n', (1960, 2079), False, 'from deeprob.spn.learning.em import expectation_maximization\n'), ((2159, 2190), 'numpy.isclose', 'np.isclose', (['ll', '(-5.3)'], {'atol': '(0.05)'}), '(ll, -5.3, atol=0.05)\n', (2169, 2190), True, 'import numpy as np\n'), ((2237, 2379), 'deeprob.spn.learning.em.expectation_maximization', 'expectation_maximization', (['spn_clt', 'evi_data'], {'num_iter': '(100)', 'batch_perc': '(0.1)', 'step_size': '(0.5)', 'random_init': '(True)', 'random_state': '(42)', 'verbose': '(False)'}), '(spn_clt, evi_data, num_iter=100, batch_perc=0.1,\n step_size=0.5, random_init=True, random_state=42, verbose=False)\n', (2261, 2379), False, 'from deeprob.spn.learning.em import expectation_maximization\n'), ((2459, 2490), 'numpy.isclose', 'np.isclose', (['ll', '(-5.1)'], {'atol': '(0.05)'}), '(ll, -5.1, atol=0.05)\n', (2469, 2490), True, 'import numpy as np\n'), ((2546, 2695), 'deeprob.spn.learning.em.expectation_maximization', 
'expectation_maximization', (['gaussian_spn', 'blobs_data'], {'num_iter': '(25)', 'batch_perc': '(0.1)', 'step_size': '(0.5)', 'random_init': '(True)', 'random_state': '(42)', 'verbose': '(False)'}), '(gaussian_spn, blobs_data, num_iter=25, batch_perc=\n 0.1, step_size=0.5, random_init=True, random_state=42, verbose=False)\n', (2570, 2695), False, 'from deeprob.spn.learning.em import expectation_maximization\n'), ((2781, 2812), 'numpy.isclose', 'np.isclose', (['ll', '(-0.7)'], {'atol': '(0.05)'}), '(ll, -0.7, atol=0.05)\n', (2791, 2812), True, 'import numpy as np\n'), ((660, 685), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (681, 685), True, 'import numpy as np\n'), ((954, 976), 'deeprob.spn.structure.leaf.Gaussian', 'Gaussian', (['(0)', '(-0.5)', '(0.5)'], {}), '(0, -0.5, 0.5)\n', (962, 976), False, 'from deeprob.spn.structure.leaf import Bernoulli, Gaussian\n'), ((978, 999), 'deeprob.spn.structure.leaf.Gaussian', 'Gaussian', (['(1)', '(0.5)', '(0.5)'], {}), '(1, 0.5, 0.5)\n', (986, 999), False, 'from deeprob.spn.structure.leaf import Bernoulli, Gaussian\n'), ((1015, 1036), 'deeprob.spn.structure.leaf.Gaussian', 'Gaussian', (['(0)', '(0.5)', '(0.5)'], {}), '(0, 0.5, 0.5)\n', (1023, 1036), False, 'from deeprob.spn.structure.leaf import Bernoulli, Gaussian\n'), ((1038, 1060), 'deeprob.spn.structure.leaf.Gaussian', 'Gaussian', (['(1)', '(-0.5)', '(0.5)'], {}), '(1, -0.5, 0.5)\n', (1046, 1060), False, 'from deeprob.spn.structure.leaf import Bernoulli, Gaussian\n'), ((2107, 2140), 'deeprob.spn.algorithms.inference.log_likelihood', 'log_likelihood', (['spn_mle', 'evi_data'], {}), '(spn_mle, evi_data)\n', (2121, 2140), False, 'from deeprob.spn.algorithms.inference import log_likelihood\n'), ((2407, 2440), 'deeprob.spn.algorithms.inference.log_likelihood', 'log_likelihood', (['spn_clt', 'evi_data'], {}), '(spn_clt, evi_data)\n', (2421, 2440), False, 'from deeprob.spn.algorithms.inference import log_likelihood\n'), ((2722, 2762), 
'deeprob.spn.algorithms.inference.log_likelihood', 'log_likelihood', (['gaussian_spn', 'blobs_data'], {}), '(gaussian_spn, blobs_data)\n', (2736, 2762), False, 'from deeprob.spn.algorithms.inference import log_likelihood\n'), ((541, 564), 'numpy.median', 'np.median', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (550, 564), True, 'import numpy as np\n')] |
# Visualizations for debugging and Tensorboard
import matplotlib
import socket
# Use the non-interactive Agg backend everywhere except on the host named
# 'arch' (presumably the developer's machine) so plots can render headless.
if socket.gethostname() != 'arch':
    matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import io
import tensorflow as tf
from matplotlib.patches import Circle
from matplotlib.patches import Patch
# Keep colors consistent
# Index 0 of both lists is a placeholder (None): LoRa device ids start at 1,
# so class_colors[lora_id] / class_labels[lora_id] can be indexed directly.
class_colors = [None, '#ff0000', '#00ff00', '#0000ff', '#ffff00', '#ff00ff', '#00ffff', '#000000', '#80ff80', '#b0bc32', '#d65111', '#615562', '#ef8bd4', '#83bc8c', '#726800', '#40d93e', '#54692c', '#6fd4f1', '#e2d978', '#ff8000', '#1dcceb', '#7a58f7', '#1aaa91', '#ba60b0', '#76191f']
# Fixed: entry 17 read 'Lora 17', breaking the 'LoRa N' naming of every other label.
class_labels = [None, 'LoRa 1 ', 'LoRa 2 ', 'LoRa 3 ', 'LoRa 4 ', 'LoRa 5 ', 'LoRa 6 ', 'LoRa 7 ', 'LoRa 8 ', 'LoRa 9 ', 'LoRa 10', 'LoRa 11', 'LoRa 12', 'LoRa 13', 'LoRa 14', 'LoRa 15', 'LoRa 16', 'LoRa 17', 'LoRa 18', 'LoRa 19', 'LoRa 20', 'LoRa 21', 'LoRa 22', 'LoRa 23', 'LoRa 24']
def dbg_plot(y, title=''):
    """Quick line plot of a 1-D signal for interactive debugging."""
    plt.figure()
    axis = plt.gca()
    axis.set_title(title)
    sample_index = np.arange(len(y))
    axis.plot(sample_index, y)
    axis.set_xlim([0, len(y)])
    axis.set_xlabel("samples")
    plt.show()
def dbg_plot_complex(y, title=''):
    """Debug plot of a complex 1-D signal: real part in blue, imaginary in green."""
    plt.figure()
    axis = plt.gca()
    axis.set_title(title)
    sample_index = np.arange(len(y))
    axis.plot(sample_index, np.real(y), "b", sample_index, np.imag(y), "g")
    axis.set_xlim([0, len(y)])
    axis.set_xlabel("samples")
    plt.show()
# Convert matplotlib plot to tensorboard image
def _plt_to_tf(plot, tag):
    """Render the current matplotlib figure to PNG and wrap it in a
    TensorBoard image summary.

    NOTE(review): also writes a debug PDF to /tmp on every call — presumably
    leftover debugging output; confirm it is still wanted.
    """
    # Serialize the figure into an in-memory PNG buffer
    png_buffer = io.BytesIO()
    plot.savefig(png_buffer, format='png')
    plot.savefig("/tmp/tf_" + tag + ".pdf", format='pdf')
    png_buffer.seek(0)
    # Decode the PNG and add a leading batch dimension for the summary op
    decoded = tf.image.decode_png(png_buffer.getvalue(), channels=4)
    batched = tf.expand_dims(decoded, 0)
    return tf.summary.image(tag, batched, 1)
# See https://stackoverflow.com/questions/38543850/tensorflow-how-to-display-custom-images-in-tensorboard-e-g-matplotlib-plots
def plot_values(values, instances_mapping, height=800, width=800, tag="", title="", label=None, backdrop=None):
    """Plot a 1-D series (or a per-class weight backdrop) and return it as a
    TensorBoard image summary.

    :param values: 1-D array-like of values; min-max normalized before plotting.
    :param instances_mapping: object providing map_to_lora_id().
    :param label: optional class label; when given, the plot uses that class's
        color and the title is suffixed with its LoRa id.
    :param backdrop: optional (timesteps, num_classes) array of per-class
        weights; when given it is scatter-plotted instead of `values`.
    :return: TensorBoard image summary produced by _plt_to_tf.
    """
    # Configure figure
    dpi = 96
    plt.figure(figsize=(width / dpi, height / dpi), dpi=dpi)
    # Resolve title/color before drawing (original called plt.title twice).
    plot_color = 'gray'
    if label is not None:  # fixed idiom: was `not label is None`
        lora_id = instances_mapping.map_to_lora_id(label)
        title += " (LoRa " + str(lora_id) + ")"
        plot_color = class_colors[lora_id]
    plt.title(title)
    # Plot main values
    if backdrop is None:
        xvalues = range(0, len(values))
        values_normed = (values - values.min(0)) / values.ptp(0)
        plt.plot(xvalues, values_normed, plot_color, alpha=0.7)
    # Plot weights backdrop?
    else:
        num_classes = backdrop.shape[1]
        props = dict(alpha=0.5, edgecolors='none')
        # Hoisted so xvalues is defined even when num_classes == 0
        # (the original left it unbound in that case).
        xvalues = range(0, backdrop.shape[0])
        for i in range(0, num_classes):
            class_backdrop = backdrop[0:, i]
            class_backdrop_normed = (class_backdrop - class_backdrop.min(0)) / class_backdrop.ptp(0)
            # Get correct color for backdrop
            color = class_colors[instances_mapping.map_to_lora_id(i)]
            plt.scatter(xvalues, class_backdrop_normed, c=color, **props)
    # Organize plot tightly
    plt.tight_layout()
    # Fix axis ranges
    ax = plt.gca()
    ax.set_xlim([0, len(xvalues)])
    return _plt_to_tf(plt, tag)
def plot_kernels(kernels, kernel_size, height, width, tag="", title=""):
    """Show each 1-D kernel as a one-row heat map in a rows x 2 grid and
    return a TensorBoard image summary.

    :param kernels: sequence of 1-D kernel weight arrays; len must be even.
    :param kernel_size: unused; kept for interface compatibility.
    :return: TensorBoard image summary produced by _plt_to_tf.
    """
    dpi = 96
    cols = 2  # TODO: Make user definable
    # Fixed: `/` yields a float on Python 3 and plt.subplots needs an int.
    rows = len(kernels) // cols  # Should be round number
    fig, axes = plt.subplots(rows, cols, sharex='col', sharey='row', figsize=(width/dpi, height/dpi), dpi=dpi)
    # Fixed: plt.title() was called *before* the figure existed, so the title
    # landed on a throwaway implicit figure and was lost.
    fig.suptitle(title)
    kernel_idx = 0
    for axis_rows in axes:
        for axis_col in axis_rows:
            kernel = kernels[kernel_idx]
            # Render the kernel as a 1-row image
            kernel_image = kernel.reshape((1, len(kernel)))
            axis_col.imshow(kernel_image, extent=(0, width, 0, 64), interpolation='nearest', cmap=plt.get_cmap('Blues'))
            kernel_idx += 1
    plt.tight_layout()
    return _plt_to_tf(plt, tag)
def plot_weights(weights, real_labels, predictions, expected_values, thresholds, instances_mapping, height=600, width=800, tag="", title="", xlabel="Class A", ylabel="Class B", metrics=None, equal_aspect=False, tf=True):
    """Scatter-plot 2-D embedding points colored by true class (filled dot)
    and predicted class (colored ring); predictions of -1 are drawn as red
    crosses (adversaries / rejections).

    NOTE: the parameter `tf` shadows the tensorflow module inside this
    function; it only selects the output mode here.

    :param weights: (num_points, 2) array of 2-D points.
    :param real_labels: ground-truth LoRa id per point.
    :param predictions: predicted LoRa id per point (-1 marks rejection).
    :param expected_values: per-class centers (currently unused, see below).
    :param thresholds: per-class radii (currently unused, see below).
    :param metrics: optional dict with 'accuracy'/'precision'/'recall' keys,
        rendered as text on the figure.
    :return: TensorBoard image summary if tf is True, else None (plt.show()).
    """
    # Configure figure TODO duplicate code fix me
    dpi = 96
    plt.figure(figsize=(width/dpi, height/dpi), dpi=dpi)
    plt.title(title)
    # Plot output weights
    num_points = len(weights)
    num_real_labels = len(real_labels)
    num_predictions = len(predictions)
    # Fixed: original chained comparison `num_points != num_real_labels !=
    # predictions` compared a length against the predictions *list*, so a
    # prediction-count mismatch was never detected.
    if not (num_points == num_real_labels == num_predictions):
        print("[-] Number of points != number of real_labels. That's not good. plot_output_weights exiting.")
        exit(1)
    if weights.shape[1] != 2:
        print("[-] Can't plot other-than 2D data, continuing without plot.")
        return
    # Draw weights
    real_props = dict(alpha=0.50, edgecolors='none')
    predicted_props = dict(alpha=1.00, facecolors='none')
    adversary_props = dict(alpha=0.50, facecolors='r', marker="x")
    for i in range(0, num_points):
        point = weights[i]
        real_lora_id = real_labels[i]
        predicted_lora_id = predictions[i]
        real_point_color = class_colors[real_lora_id]
        plt.scatter(point[0], point[1], c=real_point_color, **real_props)
        if predicted_lora_id == -1:
            # Rejected / adversarial sample
            plt.scatter(point[0], point[1], **adversary_props)
        else:
            predicted_point_color = class_colors[predicted_lora_id]
            plt.scatter(point[0], point[1], edgecolors=predicted_point_color, **predicted_props)
    # Draw expected values
    # TODO: temp disabled until I figure out what to do with this
    """
    for i in range(0, len(expected_values)):
        circle_x = expected_values[i][0]
        circle_y = expected_values[i][1]
        circle = Circle((circle_x, circle_y), thresholds[i], edgecolor=class_colors[instances_mapping.map_to_lora_id(i)], facecolor='none', linewidth=2, alpha=0.5)
        plt.gca().add_patch(circle)
    """
    # Draw legend: one entry per distinct class present in real_labels
    patches = []
    seen_labels = []
    for rlabel in sorted(real_labels):
        real_color = class_colors[rlabel]
        real_label = class_labels[rlabel]
        if real_label not in seen_labels:
            patches.append(Patch(color=real_color, label=real_label))
            seen_labels.append(real_label)
    plt.legend(loc='upper right', ncol=4, fancybox=True, shadow=True, fontsize=8, handles=patches)
    # Draw metrics on the pdf
    if metrics is not None:
        ax = plt.gca()
        metrics_text = 'accuracy: %.2f%%\nprecision: %.2f%%\nrecall: %.2f%%' % (metrics['accuracy'], metrics['precision'], metrics['recall'])
        ax.text(0.01, 0.80, metrics_text,
            verticalalignment='bottom', horizontalalignment='left',
            transform=ax.transAxes, fontsize=10)
    # Set labels
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    # Fix axis aspect ratio
    if equal_aspect:
        # Fixed: plt.axes() creates a *new* axes in modern matplotlib;
        # operate on the current axes instead.
        plt.gca().set_aspect('equal', 'datalim')
    if tf:
        return _plt_to_tf(plt, tag)
    else:
        plt.show()
| [
"matplotlib.pyplot.ylabel",
"io.BytesIO",
"numpy.imag",
"tensorflow.summary.image",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"numpy.real",
"matplotlib.pyplot.scatter",
"socket.gethostname",
"matplotlib.use",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.axes",
"matplotlib.patches.... | [((82, 102), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (100, 102), False, 'import socket\n'), ((118, 139), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (132, 139), False, 'import matplotlib\n'), ((936, 948), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (946, 948), True, 'import matplotlib.pyplot as plt\n'), ((958, 967), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (965, 967), True, 'import matplotlib.pyplot as plt\n'), ((1088, 1098), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1096, 1098), True, 'import matplotlib.pyplot as plt\n'), ((1145, 1157), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1155, 1157), True, 'import matplotlib.pyplot as plt\n'), ((1167, 1176), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1174, 1176), True, 'import matplotlib.pyplot as plt\n'), ((1347, 1357), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1355, 1357), True, 'import matplotlib.pyplot as plt\n'), ((1469, 1481), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1479, 1481), False, 'import io\n'), ((1698, 1722), 'tensorflow.expand_dims', 'tf.expand_dims', (['image', '(0)'], {}), '(image, 0)\n', (1712, 1722), True, 'import tensorflow as tf\n'), ((1760, 1791), 'tensorflow.summary.image', 'tf.summary.image', (['tag', 'image', '(1)'], {}), '(tag, image, 1)\n', (1776, 1791), True, 'import tensorflow as tf\n'), ((2078, 2134), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width / dpi, height / dpi)', 'dpi': 'dpi'}), '(figsize=(width / dpi, height / dpi), dpi=dpi)\n', (2088, 2134), True, 'import matplotlib.pyplot as plt\n'), ((2135, 2151), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2144, 2151), True, 'import matplotlib.pyplot as plt\n'), ((3261, 3279), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3277, 3279), True, 'import matplotlib.pyplot as plt\n'), ((3312, 3321), 
'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3319, 3321), True, 'import matplotlib.pyplot as plt\n'), ((3578, 3594), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3587, 3594), True, 'import matplotlib.pyplot as plt\n'), ((3611, 3714), 'matplotlib.pyplot.subplots', 'plt.subplots', (['rows', 'cols'], {'sharex': '"""col"""', 'sharey': '"""row"""', 'figsize': '(width / dpi, height / dpi)', 'dpi': 'dpi'}), "(rows, cols, sharex='col', sharey='row', figsize=(width / dpi, \n height / dpi), dpi=dpi)\n", (3623, 3714), True, 'import matplotlib.pyplot as plt\n'), ((4195, 4213), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4211, 4213), True, 'import matplotlib.pyplot as plt\n'), ((4543, 4599), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(width / dpi, height / dpi)', 'dpi': 'dpi'}), '(figsize=(width / dpi, height / dpi), dpi=dpi)\n', (4553, 4599), True, 'import matplotlib.pyplot as plt\n'), ((4600, 4616), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (4609, 4616), True, 'import matplotlib.pyplot as plt\n'), ((6585, 6684), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""', 'ncol': '(4)', 'fancybox': '(True)', 'shadow': '(True)', 'fontsize': '(8)', 'handles': 'patches'}), "(loc='upper right', ncol=4, fancybox=True, shadow=True, fontsize=\n 8, handles=patches)\n", (6595, 6684), True, 'import matplotlib.pyplot as plt\n'), ((7109, 7127), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (7119, 7127), True, 'import matplotlib.pyplot as plt\n'), ((7132, 7150), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (7142, 7150), True, 'import matplotlib.pyplot as plt\n'), ((1232, 1242), 'numpy.real', 'np.real', (['y'], {}), '(y)\n', (1239, 1242), True, 'import numpy as np\n'), ((1268, 1278), 'numpy.imag', 'np.imag', (['y'], {}), '(y)\n', (1275, 1278), True, 'import numpy as np\n'), ((2351, 2367), 
'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2360, 2367), True, 'import matplotlib.pyplot as plt\n'), ((2605, 2660), 'matplotlib.pyplot.plot', 'plt.plot', (['xvalues', 'values_normed', 'plot_color'], {'alpha': '(0.7)'}), '(xvalues, values_normed, plot_color, alpha=0.7)\n', (2613, 2660), True, 'import matplotlib.pyplot as plt\n'), ((5459, 5524), 'matplotlib.pyplot.scatter', 'plt.scatter', (['point[0]', 'point[1]'], {'c': 'real_point_color'}), '(point[0], point[1], c=real_point_color, **real_props)\n', (5470, 5524), True, 'import matplotlib.pyplot as plt\n'), ((6752, 6761), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (6759, 6761), True, 'import matplotlib.pyplot as plt\n'), ((7317, 7327), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7325, 7327), True, 'import matplotlib.pyplot as plt\n'), ((3166, 3227), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xvalues', 'class_backdrop_normed'], {'c': 'color'}), '(xvalues, class_backdrop_normed, c=color, **props)\n', (3177, 3227), True, 'import matplotlib.pyplot as plt\n'), ((5574, 5624), 'matplotlib.pyplot.scatter', 'plt.scatter', (['point[0]', 'point[1]'], {}), '(point[0], point[1], **adversary_props)\n', (5585, 5624), True, 'import matplotlib.pyplot as plt\n'), ((5719, 5808), 'matplotlib.pyplot.scatter', 'plt.scatter', (['point[0]', 'point[1]'], {'edgecolors': 'predicted_point_color'}), '(point[0], point[1], edgecolors=predicted_point_color, **\n predicted_props)\n', (5730, 5808), True, 'import matplotlib.pyplot as plt\n'), ((6503, 6544), 'matplotlib.patches.Patch', 'Patch', ([], {'color': 'real_color', 'label': 'real_label'}), '(color=real_color, label=real_label)\n', (6508, 6544), False, 'from matplotlib.patches import Patch\n'), ((7209, 7219), 'matplotlib.pyplot.axes', 'plt.axes', ([], {}), '()\n', (7217, 7219), True, 'import matplotlib.pyplot as plt\n'), ((4139, 4160), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Blues"""'], {}), "('Blues')\n", (4151, 4160), True, 
'import matplotlib.pyplot as plt\n')] |
from Beam import Beam
from OpticalElement import Optical_element
from Shape import BoundaryRectangle
import numpy as np
from SurfaceConic import SurfaceConic
import matplotlib.pyplot as plt
from CompoundOpticalElement import CompoundOpticalElement
from Vector import Vector
from numpy.testing import assert_almost_equal
do_plot = True
def shadow_source():
    """Build and run a shadow3 source (10000 rays at 1 keV with flat
    horizontal/vertical divergence), write the rays to 'begin.dat' and
    return the populated Shadow.Beam.

    Generated with ShadowTools.make_python_script_from_list().
    """
    import Shadow
    import numpy

    # write (1) or not (0) SHADOW files start.xx end.xx star.xx
    iwrite = 0

    # initialize shadow3 source (oe0) and beam
    beam = Shadow.Beam()
    oe0 = Shadow.Source()

    # Source settings; variable meanings are documented in:
    # https://raw.githubusercontent.com/srio/shadow3/master/docs/source.nml
    # https://raw.githubusercontent.com/srio/shadow3/master/docs/oe.nml
    source_settings = {
        'FDISTR': 1,
        'FSOUR': 0,
        'F_PHOT': 0,
        'HDIV1': 0.005,
        'HDIV2': 0.005,
        'IDO_VX': 0,
        'IDO_VZ': 0,
        'IDO_X_S': 0,
        'IDO_Y_S': 0,
        'IDO_Z_S': 0,
        'NPOINT': 10000,
        'PH1': 1000.0,
        'VDIV1': 0.05,
        'VDIV2': 0.05,
    }
    for attribute, value in source_settings.items():
        setattr(oe0, attribute, value)

    # Run SHADOW to create the source
    if iwrite:
        oe0.write("start.00")
    beam.genSource(oe0)
    if iwrite:
        oe0.write("end.00")
    beam.write("begin.dat")
    return beam
def test_kirk_patrick_baez():
    """Trace a Kirkpatrick-Baez mirror pair and verify every good ray
    focuses on the axis (x == z == 0 to 4 decimals) at the focal plane."""
    shadow_beam = shadow_source()
    beam = Beam()
    # Columns 1-6 are positions/directions, column 10 is the ray flag.
    columns = (1, 2, 3, 4, 5, 6, 10)
    beam.initialize_from_arrays(
        *(shadow_beam.getshonecol(c) for c in columns), 0
    )

    bound1 = BoundaryRectangle(xmax=2.5, xmin=-2.5, ymax=2.5, ymin=-2.5)
    bound2 = BoundaryRectangle(xmax=1., xmin=-1., ymax=1., ymin=-1.)

    kb_system = CompoundOpticalElement.initialize_as_kirkpatrick_baez(
        p=10., q=5., separation=4., theta=89 * np.pi / 180,
        bound1=bound1, bound2=bound2,
    )
    beam = kb_system.trace_compound(beam)
    beam.plot_good_xz(0)

    good = np.where(beam.flag > 0)
    assert_almost_equal(beam.x[good], 0., 4)
    assert_almost_equal(beam.z[good], 0., 4)

    beam.retrace(50.)
    beam.plot_good_xz()

    print(kb_system.info())
    print("Number of good rays: %f" % (beam.number_of_good_rays()))

    if do_plot:
        plt.show()
| [
"Beam.Beam",
"numpy.where",
"CompoundOpticalElement.CompoundOpticalElement.initialize_as_kirkpatrick_baez",
"Shadow.Beam",
"numpy.testing.assert_almost_equal",
"Shadow.Source",
"Shape.BoundaryRectangle",
"matplotlib.pyplot.show"
] | [((664, 677), 'Shadow.Beam', 'Shadow.Beam', ([], {}), '()\n', (675, 677), False, 'import Shadow\n'), ((688, 703), 'Shadow.Source', 'Shadow.Source', ([], {}), '()\n', (701, 703), False, 'import Shadow\n'), ((1628, 1634), 'Beam.Beam', 'Beam', ([], {}), '()\n', (1632, 1634), False, 'from Beam import Beam\n'), ((1953, 2012), 'Shape.BoundaryRectangle', 'BoundaryRectangle', ([], {'xmax': '(2.5)', 'xmin': '(-2.5)', 'ymax': '(2.5)', 'ymin': '(-2.5)'}), '(xmax=2.5, xmin=-2.5, ymax=2.5, ymin=-2.5)\n', (1970, 2012), False, 'from Shape import BoundaryRectangle\n'), ((2026, 2085), 'Shape.BoundaryRectangle', 'BoundaryRectangle', ([], {'xmax': '(1.0)', 'xmin': '(-1.0)', 'ymax': '(1.0)', 'ymin': '(-1.0)'}), '(xmax=1.0, xmin=-1.0, ymax=1.0, ymin=-1.0)\n', (2043, 2085), False, 'from Shape import BoundaryRectangle\n'), ((2107, 2249), 'CompoundOpticalElement.CompoundOpticalElement.initialize_as_kirkpatrick_baez', 'CompoundOpticalElement.initialize_as_kirkpatrick_baez', ([], {'p': '(10.0)', 'q': '(5.0)', 'separation': '(4.0)', 'theta': '(89 * np.pi / 180)', 'bound1': 'bound1', 'bound2': 'bound2'}), '(p=10.0, q=5.0,\n separation=4.0, theta=89 * np.pi / 180, bound1=bound1, bound2=bound2)\n', (2160, 2249), False, 'from CompoundOpticalElement import CompoundOpticalElement\n'), ((2332, 2355), 'numpy.where', 'np.where', (['(beam.flag > 0)'], {}), '(beam.flag > 0)\n', (2340, 2355), True, 'import numpy as np\n'), ((2358, 2402), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['beam.x[indices]', '(0.0)', '(4)'], {}), '(beam.x[indices], 0.0, 4)\n', (2377, 2402), False, 'from numpy.testing import assert_almost_equal\n'), ((2406, 2450), 'numpy.testing.assert_almost_equal', 'assert_almost_equal', (['beam.z[indices]', '(0.0)', '(4)'], {}), '(beam.z[indices], 0.0, 4)\n', (2425, 2450), False, 'from numpy.testing import assert_almost_equal\n'), ((2651, 2661), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2659, 2661), True, 'import matplotlib.pyplot as plt\n')] |
from scipy.optimize import minimize
import numpy as np
import glog as log
def get_objective_function(ball_center, normal_vector):
    """Build the objective (and its gradient) for the tangent-point search:
    maximize the projection of ``x - ball_center`` onto ``normal_vector``.

    The sign is flipped because ``scipy.optimize.minimize`` minimizes.

    :return: (objective, gradient) callables suitable for minimize(jac=...).
    """
    def objective(x):
        return -np.dot(x - ball_center, normal_vector)

    def gradient(x):
        # The objective is linear in x, so its gradient is constant.
        return -normal_vector

    return objective, gradient
def get_constraints(x_original, ball_center, radius):
    """Equality constraints for the tangent-point problem.

    :param ball_center: the center of a circle's point
    :param x_original: the original benign image
    :param radius: the radius of the circle
    :return: tuple of SLSQP constraint dicts (each with analytic jacobian):
        1. ``(x - ball_center) . (x - x_original) == 0`` (orthogonality)
        2. ``||x - ball_center||^2 == radius^2`` (point lies on the sphere)
    """
    def orthogonality(x):
        return np.dot(x - ball_center, x - x_original)

    def orthogonality_jac(x):
        return 2 * x - x_original - ball_center

    def on_sphere(x):
        return np.sum(np.square(x - ball_center)) - np.square(radius)

    def on_sphere_jac(x):
        return 2 * (x - ball_center)

    return (
        {"type": "eq", "fun": orthogonality, "jac": orthogonality_jac},
        {"type": "eq", "fun": on_sphere, "jac": on_sphere_jac},
    )
def solve_tangent_point(x_original, ball_center, normal_vector, radius, clip_min=0.0, clip_max=1.0):
    """Solve for the point on the sphere around ``ball_center`` that is
    tangent w.r.t. ``x_original`` and farthest along ``normal_vector``,
    using SLSQP with box bounds [clip_min, clip_max] per coordinate.

    The starting point is drawn uniformly at random (unseeded).

    :return: the solution vector found by scipy.optimize.minimize.
    """
    assert isinstance(x_original, np.ndarray)
    dimension = ball_center.size
    starting_point = np.random.rand(dimension)
    box_bounds = [(clip_min, clip_max)] * dimension
    objective, objective_grad = get_objective_function(ball_center, normal_vector)
    constraints = get_constraints(x_original, ball_center, radius)
    result = minimize(
        objective, starting_point,
        jac=objective_grad,
        method='SLSQP',
        bounds=box_bounds,
        constraints=constraints,
        options={"disp": True},
    )
    log.info(result)
    return result.x
| [
"numpy.random.rand",
"scipy.optimize.minimize",
"numpy.square",
"numpy.dot",
"glog.info"
] | [((1043, 1075), 'numpy.random.rand', 'np.random.rand', (['ball_center.size'], {}), '(ball_center.size)\n', (1057, 1075), True, 'import numpy as np\n'), ((1312, 1451), 'scipy.optimize.minimize', 'minimize', (['objective_func', 'initial_x'], {'jac': 'objective_func_deriv', 'method': '"""SLSQP"""', 'bounds': 'bounds', 'constraints': 'cons', 'options': "{'disp': True}"}), "(objective_func, initial_x, jac=objective_func_deriv, method=\n 'SLSQP', bounds=bounds, constraints=cons, options={'disp': True})\n", (1320, 1451), False, 'from scipy.optimize import minimize\n'), ((1471, 1487), 'glog.info', 'log.info', (['result'], {}), '(result)\n', (1479, 1487), True, 'import glog as log\n'), ((554, 593), 'numpy.dot', 'np.dot', (['(x - ball_center)', '(x - x_original)'], {}), '(x - ball_center, x - x_original)\n', (560, 593), True, 'import numpy as np\n'), ((153, 191), 'numpy.dot', 'np.dot', (['(x - ball_center)', 'normal_vector'], {}), '(x - ball_center, normal_vector)\n', (159, 191), True, 'import numpy as np\n'), ((653, 670), 'numpy.square', 'np.square', (['radius'], {}), '(radius)\n', (662, 670), True, 'import numpy as np\n'), ((625, 651), 'numpy.square', 'np.square', (['(x - ball_center)'], {}), '(x - ball_center)\n', (634, 651), True, 'import numpy as np\n')] |
#
# Created by <NAME> on 05/02/2019.
#
from typing import List
import numpy as np
from numpy.random import RandomState
from sklearn.utils import resample
from phenotrex.util.logging import get_logger
from phenotrex.structure.records import TrainingRecord
class TrainingRecordResampler:
    """
    Instantiates an object which can generate versions of a TrainingRecord
    resampled to defined completeness and contamination levels.
    Requires prior fitting with full List[TrainingRecord]
    to get sources of contamination for both classes.

    :param random_state: randomness seed (or a ready-made RandomState) to use
                         while resampling
    :param verb: Toggle verbosity
    """

    def __init__(
        self,
        random_state=None,
        verb: bool = False
    ):
        self.logger = get_logger(initname=self.__class__.__name__, verb=verb)
        # Accept a pre-built RandomState as well as a plain seed.
        # (Fixed: `type(x) is RandomState` rejected RandomState subclasses.)
        if isinstance(random_state, RandomState):
            self.random_state = random_state
        else:
            self.random_state = RandomState(random_state)
        self.conta_source_pos = None  # feature sets of positive records (set in fit)
        self.conta_source_neg = None  # feature sets of negative records (set in fit)
        self.fitted = False

    def fit(self, records: List[TrainingRecord]):
        """
        Fit TrainingRecordResampler on full TrainingRecord list
        to determine set of positive and negative features for contamination resampling.

        :param records: the full List[TrainingRecord] on which ml training will commence.
        :return: True if fitting was performed, else False.
        """
        if self.fitted:
            self.logger.warning("TrainingRecordSampler already fitted on full TrainingRecord data."
                                " Refusing to fit again.")
            return False
        total_neg_featureset = []
        total_pos_featureset = []
        for record in records:
            if record.trait_sign == 1:
                total_pos_featureset.append(record.features)
            elif record.trait_sign == 0:
                total_neg_featureset.append(record.features)
            else:
                raise RuntimeError("Unexpected record sign found. Aborting.")
        self.conta_source_pos = np.array(total_pos_featureset)
        self.conta_source_neg = np.array(total_neg_featureset)
        self.fitted = True
        return True

    def get_resampled(
        self,
        record: TrainingRecord,
        comple: float = 1.,
        conta: float = 0.
    ) -> TrainingRecord:
        """
        Resample a TrainingRecord to defined completeness and contamination levels.
        Comple=1, Conta=1 will double set size.

        :param comple: completeness of returned TrainingRecord features. Range: 0 - 1
        :param conta: contamination of returned TrainingRecord features. Range: 0 - 1
        :param record: the input TrainingRecord
        :return: a resampled TrainingRecord.
        """
        if not self.fitted:
            raise RuntimeError(
                "TrainingRecordResampler is not fitted on full TrainingRecord set. Aborting."
            )
        if not 0 <= comple <= 1 or not 0 <= conta <= 1:
            raise RuntimeError("Invalid comple/conta settings. Must be between 0 and 1.")
        features = record.features
        n_features_comple = int(np.floor(len(features) * comple))
        # make incomplete: subsample features without replacement
        incomplete_features = resample(
            features, replace=False, n_samples=n_features_comple, random_state=self.random_state
        )
        self.logger.info(
            f"Reduced features of TrainingRecord {record.identifier} "
            f"from {len(features)} to {n_features_comple}."
        )
        # make contaminations: draw one feature set from the *opposite* class.
        # Fixed: RandomState.randint's upper bound is exclusive, so the original
        # `randint(0, n - 1)` could never select the last source set (and needed
        # a special case for n == 1); `randint(0, n)` covers all sets.
        record_class = record.trait_sign
        if record.trait_sign == 1:
            source_set_id = self.random_state.randint(0, self.conta_source_neg.shape[0])
            conta_source = list(self.conta_source_neg[source_set_id])
        elif record.trait_sign == 0:
            source_set_id = self.random_state.randint(0, self.conta_source_pos.shape[0])
            conta_source = list(self.conta_source_pos[source_set_id])
        else:
            raise RuntimeError(f"Unexpected record sign found: {record.trait_sign}. Aborting.")
        n_features_conta = min(len(conta_source), int(np.floor(len(conta_source) * conta)))
        conta_features = list(self.random_state.choice(
            a=conta_source, size=n_features_conta, replace=False
        ))
        # TODO: what if not enough conta features?
        self.logger.info(
            f"Enriched features of TrainingRecord {record.identifier} "
            f"with {len(conta_features)} features from "
            f"{'positive' if record_class == 0 else 'negative'} set."
        )
        new_record = TrainingRecord(
            identifier=record.identifier,
            trait_name=record.trait_name,
            trait_sign=record.trait_sign,
            feature_type=record.feature_type,
            features=incomplete_features + conta_features,
            group_name=None,
            group_id=None
        )
        return new_record
| [
"phenotrex.util.logging.get_logger",
"numpy.array",
"sklearn.utils.resample",
"phenotrex.structure.records.TrainingRecord",
"numpy.random.RandomState"
] | [((781, 836), 'phenotrex.util.logging.get_logger', 'get_logger', ([], {'initname': 'self.__class__.__name__', 'verb': 'verb'}), '(initname=self.__class__.__name__, verb=verb)\n', (791, 836), False, 'from phenotrex.util.logging import get_logger\n'), ((2064, 2094), 'numpy.array', 'np.array', (['total_pos_featureset'], {}), '(total_pos_featureset)\n', (2072, 2094), True, 'import numpy as np\n'), ((2127, 2157), 'numpy.array', 'np.array', (['total_neg_featureset'], {}), '(total_neg_featureset)\n', (2135, 2157), True, 'import numpy as np\n'), ((3249, 3348), 'sklearn.utils.resample', 'resample', (['features'], {'replace': '(False)', 'n_samples': 'n_features_comple', 'random_state': 'self.random_state'}), '(features, replace=False, n_samples=n_features_comple, random_state\n =self.random_state)\n', (3257, 3348), False, 'from sklearn.utils import resample\n'), ((4935, 5164), 'phenotrex.structure.records.TrainingRecord', 'TrainingRecord', ([], {'identifier': 'record.identifier', 'trait_name': 'record.trait_name', 'trait_sign': 'record.trait_sign', 'feature_type': 'record.feature_type', 'features': '(incomplete_features + conta_features)', 'group_name': 'None', 'group_id': 'None'}), '(identifier=record.identifier, trait_name=record.trait_name,\n trait_sign=record.trait_sign, feature_type=record.feature_type,\n features=incomplete_features + conta_features, group_name=None,\n group_id=None)\n', (4949, 5164), False, 'from phenotrex.structure.records import TrainingRecord\n'), ((920, 945), 'numpy.random.RandomState', 'RandomState', (['random_state'], {}), '(random_state)\n', (931, 945), False, 'from numpy.random import RandomState\n')] |
# Copyright (c) 2017, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
"""
This module provides tools for recording information about a cellular automata
over time as it evolves. This includes the :py:class:`AutomataReader` class
which performs the actual recording of values, and a collection of callables
that transform states in useful ways.
"""
from functools import wraps
import numpy as np
from traits.api import Callable, HasStrictTraits, Instance, List, on_trait_change
from .cellular_automaton import CellularAutomaton
class AutomataRecorder(HasStrictTraits):
""" An object that records changes to the states of a cellular automata.
An optional :py:attr:`transform` function can be provided that will be used
to compute derived values from the states (such as counts of different
states) or only recording on certain time ticks.
Recording happens on changes to the the :py:attr:`automaton.ticks` value.
If the :py:attr:`transform` trait is :py:obj:`None`, then the current
value of the automaton's states will be added to the record.
If the :py:attr:`transform` trait is not :py:obj:`None` then that will
be called with the automaton passed to it as the only argument, and any
non-:py:obj:`None` value that is returned will be added to the
:py:attr:`record` list.
"""
#: The CellularAutomaton to record.
automaton = Instance(CellularAutomaton)
#: The record of states.
record = List
#: A function to call to compute the value to record. This should accept
#: a single CellularAutomaton as an argument and return an arbitrary value.
transform = Callable
# ------------------------------------------------------------------------
# AutomataRecorder interface
# ------------------------------------------------------------------------
def as_array(self):
""" Return the record as a single stacked array.
This presumes that the recorded values are all arrays with compatible
shapes to be stacked.
"""
return np.stack(self.record)
# ------------------------------------------------------------------------
# object interface
# ------------------------------------------------------------------------
def __init__(self, automaton=None, **traits):
super(AutomataRecorder, self).__init__(**traits)
# ensure that the automaton is set _after_ everything is set up
# this means in particular that we get first state if it is not None.
if automaton is not None:
self.automaton = automaton
# ------------------------------------------------------------------------
# Private interface
# ------------------------------------------------------------------------
def _record(self):
""" Record the (possibly transformed) states.
If the :py:attr:`transform` trait is not :py:obj:`None` then that will
be called with the automaton passed to it as the only argument, and any
non-:py:obj:`None` value that is returned will be added to the
:py:attr:`record` list.
Subclasses that want to do something more sophisticated can override
this method.
Parameters
----------
states : array
The states that will be recorded.
"""
if self.automaton is None or self.automaton.states is None:
return
if self.transform is not None:
value = self.transform(self.automaton)
else:
value = self.automaton.states
if value is not None:
self.record.append(value)
# Trait change handlers --------------------------------------------------
@on_trait_change('automaton:tick')
def _time_updated(self):
if self.automaton.tick == -1:
# automaton was reset, dump
self.record = []
else:
self._record()
@on_trait_change('automaton')
def _automaton_updated(self, automaton):
# reset the record for the new automaton
self.record = []
self._record()
def count_states(automaton):
""" A function that counts the unique states of the automata.
This is suitable for use as the :py:attr:`transform` of an
:py:class:`AutomataRecorder`.
Parameters
----------
automaton : CellularAutomaton
The cellular automaton being analyzed.
Returns
-------
counts : array
A 1D array of size 256 containing the counts of each value.
"""
states = automaton.states
uniques, counts = np.unique(states, return_counts=True)
full_counts = np.zeros(256, dtype=int)
full_counts[uniques] = counts
return full_counts
def call_if(test):
""" Decorator factory that records automaton state only if test is True.
Parameters
----------
test : callable
A callable that takes an automaton as input and returns a bool.
Returns
-------
decorator : function
The decorator that wraps the function with the test.
"""
def decorator(fn):
""" Decorator that records automaton state every only if test is True. """
@wraps(fn)
def f(automaton):
if test(automaton):
return fn(automaton)
return None
return f
return decorator
def every_nth(n):
""" Decorator factory that records automaton state every nth tick. """
def is_nth(automaton):
return automaton.tick % n == 0
return call_if(is_nth)
| [
"traits.api.Instance",
"traits.api.on_trait_change",
"numpy.unique",
"functools.wraps",
"numpy.stack",
"numpy.zeros"
] | [((1710, 1737), 'traits.api.Instance', 'Instance', (['CellularAutomaton'], {}), '(CellularAutomaton)\n', (1718, 1737), False, 'from traits.api import Callable, HasStrictTraits, Instance, List, on_trait_change\n'), ((4046, 4079), 'traits.api.on_trait_change', 'on_trait_change', (['"""automaton:tick"""'], {}), "('automaton:tick')\n", (4061, 4079), False, 'from traits.api import Callable, HasStrictTraits, Instance, List, on_trait_change\n'), ((4263, 4291), 'traits.api.on_trait_change', 'on_trait_change', (['"""automaton"""'], {}), "('automaton')\n", (4278, 4291), False, 'from traits.api import Callable, HasStrictTraits, Instance, List, on_trait_change\n'), ((4913, 4950), 'numpy.unique', 'np.unique', (['states'], {'return_counts': '(True)'}), '(states, return_counts=True)\n', (4922, 4950), True, 'import numpy as np\n'), ((4969, 4993), 'numpy.zeros', 'np.zeros', (['(256)'], {'dtype': 'int'}), '(256, dtype=int)\n', (4977, 4993), True, 'import numpy as np\n'), ((2380, 2401), 'numpy.stack', 'np.stack', (['self.record'], {}), '(self.record)\n', (2388, 2401), True, 'import numpy as np\n'), ((5508, 5517), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (5513, 5517), False, 'from functools import wraps\n')] |
import json
import numpy as np
learning_map = {
0 : 0, # "unlabeled"
1 : 0, # "outlier" mapped to "unlabeled" --------------------------mapped
10: 1, # "car"
11: 2, # "bicycle"
13: 5, # "bus" mapped to "other-vehicle" --------------------------mapped
15: 3, # "motorcycle"
16: 5, # "on-rails" mapped to "other-vehicle" ---------------------mapped
18: 4, # "truck"
20: 5, # "other-vehicle"
30: 6, # "person"
31: 7, # "bicyclist"
32: 8, # "motorcyclist"
40: 9, # "road"
44: 10, # "parking"
48: 11, # "sidewalk"
49: 12, # "other-ground"
50: 13, # "building"
51: 14, # "fence"
52: 0, # "other-structure" mapped to "unlabeled" ------------------mapped
60: 9, # "lane-marking" to "road" ---------------------------------mapped
70: 15, # "vegetation"
71: 16, # "trunk"
72: 17, # "terrain"
80: 18, # "pole"
81: 19, # "traffic-sign"
99: 0, # "other-object" to "unlabeled" ----------------------------mapped
252: 1, # "moving-car"
253: 7, # "moving-bicyclist"
254: 6, # "moving-person"
255: 8, # "moving-motorcyclist"
256: 5, # "moving-on-rails" mapped to "moving-other-vehicle" ------mapped
257: 5, # "moving-bus" mapped to "moving-other-vehicle" -----------mapped
258: 4, # "moving-truck"
259: 5, # "moving-other-vehicle"
}
def class_contents(labels_files, lbl_count):
for file_ in labels_files['labels']:
labels = np.fromfile('../' + file_, dtype=np.uint32)
labels = labels.reshape((-1))
labels = labels & 0xFFFF
#remap labels to learning values
labels = np.vectorize(learning_map.get)(labels)
classes, counts = np.unique(labels, return_counts=True)
for class_, count in zip(classes, counts):
lbl_count[class_] += count
return lbl_count
splits = None
with open('percentiles_split.json', 'r') as f:
splits = json.load(f)
for percentile in splits:
print(f'PERCENT: {percentile}')
lbl_count = [ 0 for _ in range(20) ]
for seq in splits[percentile]:
lbl_count = class_contents(splits[percentile][seq], lbl_count)
lbl_count = np.array(lbl_count)
class_dist = lbl_count / np.sum(lbl_count)
for class_ in range(20):
print(f'{class_}: {round(class_dist[class_],5)}')
print(f'\t- CLASS DIST: {class_dist}')
| [
"numpy.fromfile",
"numpy.unique",
"numpy.array",
"numpy.sum",
"json.load",
"numpy.vectorize"
] | [((1973, 1985), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1982, 1985), False, 'import json\n'), ((2214, 2233), 'numpy.array', 'np.array', (['lbl_count'], {}), '(lbl_count)\n', (2222, 2233), True, 'import numpy as np\n'), ((1507, 1550), 'numpy.fromfile', 'np.fromfile', (["('../' + file_)"], {'dtype': 'np.uint32'}), "('../' + file_, dtype=np.uint32)\n", (1518, 1550), True, 'import numpy as np\n'), ((1746, 1783), 'numpy.unique', 'np.unique', (['labels'], {'return_counts': '(True)'}), '(labels, return_counts=True)\n', (1755, 1783), True, 'import numpy as np\n'), ((2263, 2280), 'numpy.sum', 'np.sum', (['lbl_count'], {}), '(lbl_count)\n', (2269, 2280), True, 'import numpy as np\n'), ((1681, 1711), 'numpy.vectorize', 'np.vectorize', (['learning_map.get'], {}), '(learning_map.get)\n', (1693, 1711), True, 'import numpy as np\n')] |
import numpy as np
from . import stereonet_math
def fit_girdle(*args, **kwargs):
"""
Fits a plane to a scatter of points on a stereonet (a.k.a. a "girdle").
Input arguments will be interpreted as poles, lines, rakes, or "raw"
longitudes and latitudes based on the ``measurement`` keyword argument.
(Defaults to ``"poles"``.)
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strikes`` & ``dips``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
measurement : {'poles', 'lines', 'rakes', 'radians'}, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : Arguments are assumed to be sequences of strikes and
dips of planes. Poles to these planes are used for density
contouring.
``"lines"`` : Arguments are assumed to be sequences of plunges and
bearings of linear features.
``"rakes"`` : Arguments are assumed to be sequences of strikes,
dips, and rakes along the plane.
``"radians"`` : Arguments are assumed to be "raw" longitudes and
latitudes in the underlying projection's coordinate system.
bidirectional : boolean, optional
Whether or not the antipode of each measurement will be used in the
calculation. For almost all use cases, it should. Defaults to True.
Returns
-------
strike, dip: floats
The strike and dip of the plane.
Notes
-----
The pole to the best-fit plane is extracted by calculating the smallest
eigenvector of the covariance matrix of the input measurements in cartesian
3D space.
Examples
--------
Calculate the plunge of a cylindrical fold axis from a series of strike/dip
measurements of bedding from the limbs:
>>> strike = [270, 334, 270, 270]
>>> dip = [20, 15, 80, 78]
>>> s, d = mplstereonet.fit_girdle(strike, dip)
>>> plunge, bearing = mplstereonet.pole2plunge_bearing(s, d)
"""
vec = 0 # Smallest eigenvector will be the pole
return _sd_of_eigenvector(args, vec=vec, **kwargs)
def fit_pole(*args, **kwargs):
"""
Fits the pole to a plane to a "bullseye" of points on a stereonet.
Input arguments will be interpreted as poles, lines, rakes, or "raw"
longitudes and latitudes based on the ``measurement`` keyword argument.
(Defaults to ``"poles"``.)
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
measurement : {'poles', 'lines', 'rakes', 'radians'}, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : Arguments are assumed to be sequences of strikes and
dips of planes. Poles to these planes are used for density
contouring.
``"lines"`` : Arguments are assumed to be sequences of plunges and
bearings of linear features.
``"rakes"`` : Arguments are assumed to be sequences of strikes,
dips, and rakes along the plane.
``"radians"`` : Arguments are assumed to be "raw" longitudes and
latitudes in the underlying projection's coordinate system.
bidirectional : boolean, optional
Whether or not the antipode of each measurement will be used in the
calculation. For almost all use cases, it should. Defaults to True.
Returns
-------
strike, dip: floats
The strike and dip of the plane.
Notes
-----
The pole to the best-fit plane is extracted by calculating the largest
eigenvector of the covariance matrix of the input measurements in cartesian
3D space.
Examples
--------
Find the average strike/dip of a series of bedding measurements
>>> strike = [270, 65, 280, 300]
>>> dip = [20, 15, 10, 5]
>>> strike0, dip0 = mplstereonet.fit_pole(strike, dip)
"""
vec = -1 # Largest eigenvector will be the pole
return _sd_of_eigenvector(args, vec=vec, **kwargs)
def _sd_of_eigenvector(data, vec, measurement='poles', bidirectional=True):
"""Unifies ``fit_pole`` and ``fit_girdle``."""
lon, lat = _convert_measurements(data, measurement)
vals, vecs = cov_eig(lon, lat, bidirectional)
x, y, z = vecs[:, vec]
s, d = stereonet_math.geographic2pole(*stereonet_math.cart2sph(x, y, z))
return s[0], d[0]
def eigenvectors(*args, **kwargs):
"""
Finds the 3 eigenvectors and eigenvalues of the 3D covariance matrix of a
series of geometries. This can be used to fit a plane/pole to a dataset or
for shape fabric analysis (e.g. Flinn/Hsu plots).
Input arguments will be interpreted as poles, lines, rakes, or "raw"
longitudes and latitudes based on the *measurement* keyword argument.
(Defaults to ``"poles"``.)
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
measurement : {'poles', 'lines', 'rakes', 'radians'}, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : Arguments are assumed to be sequences of strikes and
dips of planes. Poles to these planes are used for density
contouring.
``"lines"`` : Arguments are assumed to be sequences of plunges and
bearings of linear features.
``"rakes"`` : Arguments are assumed to be sequences of strikes,
dips, and rakes along the plane.
``"radians"`` : Arguments are assumed to be "raw" longitudes and
latitudes in the underlying projection's coordinate system.
bidirectional : boolean, optional
Whether or not the antipode of each measurement will be used in the
calculation. For almost all use cases, it should. Defaults to True.
Returns
-------
plunges, bearings, values : sequences of 3 floats each
The plunges, bearings, and eigenvalues of the three eigenvectors of the
covariance matrix of the input data. The measurements are returned
sorted in descending order relative to the eigenvalues. (i.e. The
largest eigenvector/eigenvalue is first.)
Examples
--------
Find the eigenvectors as plunge/bearing and eigenvalues of the 3D
covariance matrix of a series of planar measurements:
>>> strikes = [270, 65, 280, 300]
>>> dips = [20, 15, 10, 5]
>>> plu, azi, vals = mplstereonet.eigenvectors(strikes, dips)
"""
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'poles'))
vals, vecs = cov_eig(lon, lat, kwargs.get('bidirectional', True))
lon, lat = stereonet_math.cart2sph(*vecs)
plunges, bearings = stereonet_math.geographic2plunge_bearing(lon, lat)
# Largest eigenvalue first...
return plunges[::-1], bearings[::-1], vals[::-1]
def cov_eig(lon, lat, bidirectional=True):
lon = np.atleast_1d(np.squeeze(lon))
lat = np.atleast_1d(np.squeeze(lat))
if bidirectional:
# Include antipodes in calculation...
lon2, lat2 = stereonet_math.antipode(lon, lat)
lon, lat = np.hstack([lon, lon2]), np.hstack([lat, lat2])
xyz = np.column_stack(stereonet_math.sph2cart(lon, lat))
cov = np.cov(xyz.T)
eigvals, eigvecs = np.linalg.eigh(cov)
order = eigvals.argsort()
return eigvals[order], eigvecs[:, order]
def _convert_measurements(data, measurement):
def do_nothing(x, y):
return x, y
func = {'poles':stereonet_math.pole,
'lines':stereonet_math.line,
'rakes':stereonet_math.rake,
'radians':do_nothing}[measurement]
return func(*data)
def find_mean_vector(*args, **kwargs):
"""
Returns the mean vector for a set of measurments. By default, this expects
the input to be plunges and bearings, but the type of input can be
controlled through the ``measurement`` kwarg.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``plunge`` & ``bearing``, both
array-like sequences representing linear features. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"lines"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
mean_vector : tuple of two floats
The plunge and bearing of the mean vector (in degrees).
r_value : float
The length of the mean vector (a value between 0 and 1).
"""
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'lines'))
vector, r_value = stereonet_math.mean_vector(lon, lat)
plunge, bearing = stereonet_math.geographic2plunge_bearing(*vector)
return (plunge[0], bearing[0]), r_value
def find_fisher_stats(*args, **kwargs):
"""
Returns the mean vector and summary statistics for a set of measurements.
By default, this expects the input to be plunges and bearings, but the type
of input can be controlled through the ``measurement`` kwarg.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``plunge`` & ``bearing``, both
array-like sequences representing linear features. (Rake measurements
require three parameters, thus the variable number of arguments.) The
*measurement* kwarg controls how these arguments are interpreted.
conf : number
The confidence level (0-100). Defaults to 95%, similar to 2 sigma.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"lines"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
mean_vector: tuple of two floats
A set consisting of the plunge and bearing of the mean vector (in
degrees).
stats : tuple of three floats
``(r_value, confidence, kappa)``
The ``r_value`` is the magnitude of the mean vector as a number between
0 and 1.
The ``confidence`` radius is the opening angle of a small circle that
corresponds to the confidence in the calculated direction, and is
dependent on the input ``conf``.
The ``kappa`` value is the dispersion factor that quantifies the amount
of dispersion of the given vectors, analgous to a variance/stddev.
"""
# How the heck did this wind up as a separate function?
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'lines'))
conf = kwargs.get('conf', 95)
center, stats = stereonet_math.fisher_stats(lon, lat, conf)
plunge, bearing = stereonet_math.geographic2plunge_bearing(*center)
mean_vector = (plunge[0], bearing[0])
return mean_vector, stats
def kmeans(*args, **kwargs):
"""
Find centers of multi-modal clusters of data using a kmeans approach
modified for spherical measurements.
Parameters
----------
*args : 2 or 3 sequences of measurements
By default, this will be expected to be ``strike`` & ``dip``, both
array-like sequences representing poles to planes. (Rake measurements
require three parameters, thus the variable number of arguments.) The
``measurement`` kwarg controls how these arguments are interpreted.
num : int
The number of clusters to find. Defaults to 2.
bidirectional : bool
Whether or not the measurements are bi-directional linear/planar
features or directed vectors. Defaults to True.
tolerance : float
Iteration will continue until the centers have not changed by more
than this amount. Defaults to 1e-5.
measurement : string, optional
Controls how the input arguments are interpreted. Defaults to
``"poles"``. May be one of the following:
``"poles"`` : strikes, dips
Arguments are assumed to be sequences of strikes and dips of
planes. Poles to these planes are used for analysis.
``"lines"`` : plunges, bearings
Arguments are assumed to be sequences of plunges and bearings
of linear features.
``"rakes"`` : strikes, dips, rakes
Arguments are assumed to be sequences of strikes, dips, and
rakes along the plane.
``"radians"`` : lon, lat
Arguments are assumed to be "raw" longitudes and latitudes in
the stereonet's underlying coordinate system.
Returns
-------
centers : An Nx2 array-like
Longitude and latitude in radians of the centers of each cluster.
"""
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'poles'))
num = kwargs.get('num', 2)
bidirectional = kwargs.get('bidirectional', True)
tolerance = kwargs.get('tolerance', 1e-5)
points = lon, lat
dist = lambda x: stereonet_math.angular_distance(x, points, bidirectional)
center_lon = np.random.choice(lon, num)
center_lat = np.random.choice(lat, num)
centers = zip(center_lon, center_lat)
while True:
dists = np.array([dist(item) for item in centers]).T
closest = dists.argmin(axis=1)
new_centers = []
for i in range(num):
mask = mask = closest == i
_, vecs = cov_eig(lon[mask], lat[mask], bidirectional)
new_centers.append(stereonet_math.cart2sph(*vecs[:,-1]))
if np.allclose(centers, new_centers, atol=tolerance):
break
else:
centers = new_centers
return centers | [
"numpy.allclose",
"numpy.hstack",
"numpy.random.choice",
"numpy.squeeze",
"numpy.linalg.eigh",
"numpy.cov"
] | [((8249, 8262), 'numpy.cov', 'np.cov', (['xyz.T'], {}), '(xyz.T)\n', (8255, 8262), True, 'import numpy as np\n'), ((8286, 8305), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), '(cov)\n', (8300, 8305), True, 'import numpy as np\n'), ((15470, 15496), 'numpy.random.choice', 'np.random.choice', (['lon', 'num'], {}), '(lon, num)\n', (15486, 15496), True, 'import numpy as np\n'), ((15514, 15540), 'numpy.random.choice', 'np.random.choice', (['lat', 'num'], {}), '(lat, num)\n', (15530, 15540), True, 'import numpy as np\n'), ((7931, 7946), 'numpy.squeeze', 'np.squeeze', (['lon'], {}), '(lon)\n', (7941, 7946), True, 'import numpy as np\n'), ((7972, 7987), 'numpy.squeeze', 'np.squeeze', (['lat'], {}), '(lat)\n', (7982, 7987), True, 'import numpy as np\n'), ((15942, 15991), 'numpy.allclose', 'np.allclose', (['centers', 'new_centers'], {'atol': 'tolerance'}), '(centers, new_centers, atol=tolerance)\n', (15953, 15991), True, 'import numpy as np\n'), ((8131, 8153), 'numpy.hstack', 'np.hstack', (['[lon, lon2]'], {}), '([lon, lon2])\n', (8140, 8153), True, 'import numpy as np\n'), ((8155, 8177), 'numpy.hstack', 'np.hstack', (['[lat, lat2]'], {}), '([lat, lat2])\n', (8164, 8177), True, 'import numpy as np\n')] |
# For the class Data Science: Practical Deep Learning Concepts in Theano and TensorFlow
# https://deeplearningcourses.com/c/data-science-deep-learning-in-theano-tensorflow
# https://www.udemy.com/data-science-deep-learning-in-theano-tensorflow
from __future__ import print_function, division
from builtins import range
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import theano
import theano.tensor as T
from sklearn.utils import shuffle
def init_weight(M1, M2):
return np.random.randn(M1, M2) * np.sqrt(2.0 / M1)
class HiddenLayer(object):
def __init__(self, M1, M2, f):
self.M1 = M1
self.M2 = M2
self.f = f
W = init_weight(M1, M2)
b = np.zeros(M2)
self.W = theano.shared(W)
self.b = theano.shared(b)
self.params = [self.W, self.b]
def forward(self, X):
if self.f == T.nnet.relu:
return self.f(X.dot(self.W) + self.b, alpha=0.1)
return self.f(X.dot(self.W) + self.b)
class ANN(object):
def __init__(self, hidden_layer_sizes):
self.hidden_layer_sizes = hidden_layer_sizes
def fit(self, X, Y, activation=T.nnet.relu, learning_rate=1e-3, mu=0.0, reg=0, epochs=100, batch_sz=None, print_period=100, show_fig=True):
X = X.astype(np.float32)
Y = Y.astype(np.int32)
# initialize hidden layers
N, D = X.shape
self.layers = []
M1 = D
for M2 in self.hidden_layer_sizes:
h = HiddenLayer(M1, M2, activation)
self.layers.append(h)
M1 = M2
# final layer
K = len(set(Y))
# print("K:", K)
h = HiddenLayer(M1, K, T.nnet.softmax)
self.layers.append(h)
if batch_sz is None:
batch_sz = N
# collect params for later use
self.params = []
for h in self.layers:
self.params += h.params
# for momentum
dparams = [theano.shared(np.zeros_like(p.get_value())) for p in self.params]
# set up theano functions and variables
thX = T.matrix('X')
thY = T.ivector('Y')
p_y_given_x = self.forward(thX)
rcost = reg*T.mean([(p*p).sum() for p in self.params])
cost = -T.mean(T.log(p_y_given_x[T.arange(thY.shape[0]), thY])) #+ rcost
prediction = T.argmax(p_y_given_x, axis=1)
grads = T.grad(cost, self.params)
# momentum only
updates = [
(p, p + mu*dp - learning_rate*g) for p, dp, g in zip(self.params, dparams, grads)
] + [
(dp, mu*dp - learning_rate*g) for dp, g in zip(dparams, grads)
]
train_op = theano.function(
inputs=[thX, thY],
outputs=[cost, prediction],
updates=updates,
)
self.predict_op = theano.function(
inputs=[thX],
outputs=prediction,
)
n_batches = N // batch_sz
costs = []
for i in range(epochs):
if n_batches > 1:
X, Y = shuffle(X, Y)
for j in range(n_batches):
Xbatch = X[j*batch_sz:(j*batch_sz+batch_sz)]
Ybatch = Y[j*batch_sz:(j*batch_sz+batch_sz)]
c, p = train_op(Xbatch, Ybatch)
costs.append(c)
if (j+1) % print_period == 0:
print("i:", i, "j:", j, "nb:", n_batches, "cost:", c)
if show_fig:
plt.plot(costs)
plt.show()
def forward(self, X):
out = X
for h in self.layers:
out = h.forward(out)
return out
def score(self, X, Y):
P = self.predict_op(X)
return np.mean(Y == P)
def predict(self, X):
return self.predict_op(X)
| [
"numpy.mean",
"theano.shared",
"numpy.sqrt",
"theano.function",
"matplotlib.pyplot.show",
"sklearn.utils.shuffle",
"theano.tensor.matrix",
"matplotlib.pyplot.plot",
"theano.tensor.ivector",
"theano.tensor.arange",
"numpy.zeros",
"theano.tensor.argmax",
"builtins.range",
"numpy.random.randn... | [((584, 607), 'numpy.random.randn', 'np.random.randn', (['M1', 'M2'], {}), '(M1, M2)\n', (599, 607), True, 'import numpy as np\n'), ((610, 627), 'numpy.sqrt', 'np.sqrt', (['(2.0 / M1)'], {}), '(2.0 / M1)\n', (617, 627), True, 'import numpy as np\n'), ((797, 809), 'numpy.zeros', 'np.zeros', (['M2'], {}), '(M2)\n', (805, 809), True, 'import numpy as np\n'), ((827, 843), 'theano.shared', 'theano.shared', (['W'], {}), '(W)\n', (840, 843), False, 'import theano\n'), ((861, 877), 'theano.shared', 'theano.shared', (['b'], {}), '(b)\n', (874, 877), False, 'import theano\n'), ((2171, 2184), 'theano.tensor.matrix', 'T.matrix', (['"""X"""'], {}), "('X')\n", (2179, 2184), True, 'import theano.tensor as T\n'), ((2199, 2213), 'theano.tensor.ivector', 'T.ivector', (['"""Y"""'], {}), "('Y')\n", (2208, 2213), True, 'import theano.tensor as T\n'), ((2420, 2449), 'theano.tensor.argmax', 'T.argmax', (['p_y_given_x'], {'axis': '(1)'}), '(p_y_given_x, axis=1)\n', (2428, 2449), True, 'import theano.tensor as T\n'), ((2466, 2491), 'theano.tensor.grad', 'T.grad', (['cost', 'self.params'], {}), '(cost, self.params)\n', (2472, 2491), True, 'import theano.tensor as T\n'), ((2750, 2829), 'theano.function', 'theano.function', ([], {'inputs': '[thX, thY]', 'outputs': '[cost, prediction]', 'updates': 'updates'}), '(inputs=[thX, thY], outputs=[cost, prediction], updates=updates)\n', (2765, 2829), False, 'import theano\n'), ((2904, 2953), 'theano.function', 'theano.function', ([], {'inputs': '[thX]', 'outputs': 'prediction'}), '(inputs=[thX], outputs=prediction)\n', (2919, 2953), False, 'import theano\n'), ((3060, 3073), 'builtins.range', 'range', (['epochs'], {}), '(epochs)\n', (3065, 3073), False, 'from builtins import range\n'), ((3782, 3797), 'numpy.mean', 'np.mean', (['(Y == P)'], {}), '(Y == P)\n', (3789, 3797), True, 'import numpy as np\n'), ((3161, 3177), 'builtins.range', 'range', (['n_batches'], {}), '(n_batches)\n', (3166, 3177), False, 'from builtins import 
range\n'), ((3544, 3559), 'matplotlib.pyplot.plot', 'plt.plot', (['costs'], {}), '(costs)\n', (3552, 3559), True, 'import matplotlib.pyplot as plt\n'), ((3572, 3582), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3580, 3582), True, 'import matplotlib.pyplot as plt\n'), ((3126, 3139), 'sklearn.utils.shuffle', 'shuffle', (['X', 'Y'], {}), '(X, Y)\n', (3133, 3139), False, 'from sklearn.utils import shuffle\n'), ((2359, 2381), 'theano.tensor.arange', 'T.arange', (['thY.shape[0]'], {}), '(thY.shape[0])\n', (2367, 2381), True, 'import theano.tensor as T\n')] |
from objects.CSCG._2d.mesh.domain.regions.region.interpolations.allocator import InterpolationSearcher
from objects.CSCG._2d.mesh.domain.regions.region.edge_geometries.allocator import EdgeGeometryDispatcher
from objects.CSCG._2d.mesh.domain.regions.region.types_wrt_metric.allocator import TypeWr2MetricGiver
from objects.CSCG._2d.mesh.domain.regions.region.IS import _2dCSCG_Region_IS
from screws.freeze.main import FrozenOnly
from objects.CSCG._2d.mesh.domain.regions.region.edges.main import Edges
from screws.decorators.accepts import accepts
from objects.CSCG._2d.mesh.domain.regions.region.inheriting.topology import RegionTopology
import numpy as np
class Region(RegionTopology, FrozenOnly):
    """A single region of a 2d CSCG mesh domain.

    A region is defined by its four corner coordinates plus (optional)
    non-straight edge types; from these it builds its edge geometries,
    its metric type and its interpolation (the region mapping).
    """
    @accepts('self', int, str)
    def __init__(self, ndim, name, cc, et, interpolator, domain_input):
        """
        Parameters
        ---------
        ndim : int
            Must be 2.
        name :
        cc : corner coordinates
        et :
        interpolator :
        domain_input :
            The DomainInput, but its classname is not 'DomainInput'. It inherits the
            class `DomainInput` but has personal name.
        """
        assert ndim == 2, " < Region> "
        self._ndim_ = ndim
        self._name_ = name
        self._domain_input_ = domain_input # can not remove this line
        # Parsing order matters: corner coordinates -> edge types ->
        # edge geometries -> metric type -> interpolation.
        self.___PRIVATE_set_corner_coordinates___(cc)
        self.___PRIVATE_set_edge_types___(et)
        self.___PRIVATE_parse_edge_types___()
        self._type_wrt_metric_ = self.___PRIVATE_PARSE_type_wrt_metric___()
        self._interpolation_ = InterpolationSearcher(interpolator)(self)
        self._edges_ = Edges(self)
        # Lazily populated: _MAP_ is assigned elsewhere, _IS_ on first access.
        self._MAP_ = None
        self._IS_ = None
        self._freeze_self_()
    @property
    def map(self):
        # Region map; stays None until assigned externally.
        return self._MAP_
    @property
    def IS(self):
        # Lazily constructed boolean-property helper for this region.
        if self._IS_ is None:
            self._IS_ = _2dCSCG_Region_IS(self)
        return self._IS_
    @property
    def edges(self):
        # The edges of this region (see ___PRIVATE_parse_edge_types___).
        return self._edges_
    @property
    def ndim(self):
        # Spatial dimension; always 2 for this class (asserted in __init__).
        return self._ndim_
    @property
    def name(self):
        # The region's name; used as the key into the domain-input tables.
        return self._name_
    def ___PRIVATE_PARSE_type_wrt_metric___(self):
        # Decide this region's metric type: use the type named for this
        # region in the domain input, else fall back to 'chaotic'.
        if self._domain_input_.region_type_wr2_metric is None:
            return TypeWr2MetricGiver('chaotic')(self)
        elif self.name in self._domain_input_.region_type_wr2_metric:
            return TypeWr2MetricGiver(self._domain_input_.region_type_wr2_metric[self.name])(self)
        else:
            return TypeWr2MetricGiver('chaotic')(self)
    @property
    def type_wrt_metric(self):
        return self._type_wrt_metric_
    def ___PRIVATE_set_corner_coordinates___(self, cc):
        """
        Parameters
        ----------
        cc : tuple
            For 2D:
                np.shape(cc) must be (4, 2), and cc[i] corresponds to UL, DL, UR,
                DR corners, and each i is a tuple itself represents (x, y)
                coordinates.
        """
        assert np.shape(cc) == (4, 2), \
            " <Region> : coordinates shape={} wrong.".format(np.shape(cc))
        self._corner_coordinates_ = cc
    @property
    def corner_coordinates(self):
        return self._corner_coordinates_
    @accepts('self', dict)
    def ___PRIVATE_set_edge_types___(self, et):
        """
        Parameters
        ----------
        et : dict
            contain the info of non-straight edges.
        """
        # Keys of `et` look like '<region name>-<edge name>'; keep only the
        # entries that belong to this region, indexed by local edge number.
        _edge_types_ = [None for _ in range(self.num_edges())]
        for key in et:
            if key.split('-')[0] == self.name:
                _edge_types_[self._edge_name_to_index_(key.split('-')[1])] = et[key]
        self._edge_types_ = tuple(_edge_types_)
    def ___PRIVATE_parse_edge_types___(self):
        """
        Here we get the 4 edge geometries for further getting the region interpolation.
        Attributes
        ----------
        self._edge_geometries_ :
            For 2D: U -> D -> L -> R
        """
        _eg_ = {}
        for i in range(self.num_edges()):
            _et_ = self._edge_types_[i]
            if _et_ is None: # when we do not mention, it is straight, not free.
                _et_ = ('straight',)
            # Corner coordinates of this edge, picked out by the edge's
            # local corner numbering.
            _cc_ = np.array(self.corner_coordinates)[list(self._edge_corner_local_numbering_(i))]
            _eg_[self._edge_index_to_name_(i)] = EdgeGeometryDispatcher(_et_)(_cc_)
        self._edge_geometries_ = _eg_
@property
def interpolation(self):
return self._interpolation_ | [
"screws.decorators.accepts.accepts",
"objects.CSCG._2d.mesh.domain.regions.region.edges.main.Edges",
"objects.CSCG._2d.mesh.domain.regions.region.types_wrt_metric.allocator.TypeWr2MetricGiver",
"objects.CSCG._2d.mesh.domain.regions.region.edge_geometries.allocator.EdgeGeometryDispatcher",
"numpy.array",
"... | [((716, 741), 'screws.decorators.accepts.accepts', 'accepts', (['"""self"""', 'int', 'str'], {}), "('self', int, str)\n", (723, 741), False, 'from screws.decorators.accepts import accepts\n'), ((3200, 3221), 'screws.decorators.accepts.accepts', 'accepts', (['"""self"""', 'dict'], {}), "('self', dict)\n", (3207, 3221), False, 'from screws.decorators.accepts import accepts\n'), ((1647, 1658), 'objects.CSCG._2d.mesh.domain.regions.region.edges.main.Edges', 'Edges', (['self'], {}), '(self)\n', (1652, 1658), False, 'from objects.CSCG._2d.mesh.domain.regions.region.edges.main import Edges\n'), ((1582, 1617), 'objects.CSCG._2d.mesh.domain.regions.region.interpolations.allocator.InterpolationSearcher', 'InterpolationSearcher', (['interpolator'], {}), '(interpolator)\n', (1603, 1617), False, 'from objects.CSCG._2d.mesh.domain.regions.region.interpolations.allocator import InterpolationSearcher\n'), ((1886, 1909), 'objects.CSCG._2d.mesh.domain.regions.region.IS._2dCSCG_Region_IS', '_2dCSCG_Region_IS', (['self'], {}), '(self)\n', (1903, 1909), False, 'from objects.CSCG._2d.mesh.domain.regions.region.IS import _2dCSCG_Region_IS\n'), ((2964, 2976), 'numpy.shape', 'np.shape', (['cc'], {}), '(cc)\n', (2972, 2976), True, 'import numpy as np\n'), ((3051, 3063), 'numpy.shape', 'np.shape', (['cc'], {}), '(cc)\n', (3059, 3063), True, 'import numpy as np\n'), ((2257, 2286), 'objects.CSCG._2d.mesh.domain.regions.region.types_wrt_metric.allocator.TypeWr2MetricGiver', 'TypeWr2MetricGiver', (['"""chaotic"""'], {}), "('chaotic')\n", (2275, 2286), False, 'from objects.CSCG._2d.mesh.domain.regions.region.types_wrt_metric.allocator import TypeWr2MetricGiver\n'), ((4175, 4208), 'numpy.array', 'np.array', (['self.corner_coordinates'], {}), '(self.corner_coordinates)\n', (4183, 4208), True, 'import numpy as np\n'), ((4303, 4331), 'objects.CSCG._2d.mesh.domain.regions.region.edge_geometries.allocator.EdgeGeometryDispatcher', 'EdgeGeometryDispatcher', (['_et_'], {}), '(_et_)\n', (4325, 
4331), False, 'from objects.CSCG._2d.mesh.domain.regions.region.edge_geometries.allocator import EdgeGeometryDispatcher\n'), ((2382, 2455), 'objects.CSCG._2d.mesh.domain.regions.region.types_wrt_metric.allocator.TypeWr2MetricGiver', 'TypeWr2MetricGiver', (['self._domain_input_.region_type_wr2_metric[self.name]'], {}), '(self._domain_input_.region_type_wr2_metric[self.name])\n', (2400, 2455), False, 'from objects.CSCG._2d.mesh.domain.regions.region.types_wrt_metric.allocator import TypeWr2MetricGiver\n'), ((2495, 2524), 'objects.CSCG._2d.mesh.domain.regions.region.types_wrt_metric.allocator.TypeWr2MetricGiver', 'TypeWr2MetricGiver', (['"""chaotic"""'], {}), "('chaotic')\n", (2513, 2524), False, 'from objects.CSCG._2d.mesh.domain.regions.region.types_wrt_metric.allocator import TypeWr2MetricGiver\n')] |
# Copyright 2015-2016 <NAME> and The University of British Columbia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Routines for loading SOG model output files into Pandas data structures
"""
from datetime import datetime, timedelta
from dateutil.parser import parse
import numpy as np
import pandas as pd
import xarray as xr
import carbonate as carb
def load_TS(filename):
    """Load a SOG timeseries file from path FILENAME into an xarray dataset.

    The file header supplies the field names and units; the first time unit
    string encodes the run start datetime ('hr since <datetime> LST').

    Parameters
    ----------
    filename : str
        Path of the timeseries output file.

    Returns
    -------
    xarray.Dataset indexed by 'date'.
    """
    # Parse the header up to '*EndOfHeader'. A context manager guarantees
    # the handle is closed (the original left the file object open).
    field_names = []
    field_units = []
    with open(filename, 'rt') as file_obj:
        for index, line in enumerate(file_obj):
            line = line.strip()
            if line.startswith('*FieldNames:'):
                field_names = line.split(': ', 1)[1].split(', ')
            elif line.startswith('*FieldUnits:'):
                field_units = line.split(': ', 1)[1].split(', ')
            elif line.startswith('*EndOfHeader'):
                break
    # Read timeseries data into a dataframe, skipping the header lines
    data = pd.read_csv(
        filename, delim_whitespace=True, header=0, names=field_names,
        skiprows=index + 1,
    )
    # Extract the start datetime from the time-units string,
    # e.g. 'hr since 2002-10-19 00:00:00 LST'
    datetime_start = parse(
        field_units[0].split('hr since ', 1)[1].split(' LST', 1)[0],
    )
    # Build an absolute-date column from the 'time' column (hours since start)
    date = pd.DataFrame({
        'date': [
            datetime_start + timedelta(hours=hour) for hour in data['time']
        ],
    })
    TS_out = pd.concat([date, data], axis=1).set_index('date').to_xarray()
    return TS_out
def load_hoff(filename):
    """Load a SOG hoffmueller file from path FILENAME into an xarray dataset.

    The header supplies the field names/units, the start year/year-day/second
    and the sampling interval in days; the data section holds one 82-row
    profile per sample, concatenated here along a new 'time' dimension.

    Parameters
    ----------
    filename : str
        Path of the hoffmueller output file.

    Returns
    -------
    xarray dataset with one entry per profile along 'time'.
    """
    # Parse the header inside a context manager so the handle is always
    # closed (the original leaked the open file object). Pre-initialize the
    # header fields so a malformed header fails loudly at use, not with
    # an UnboundLocalError.
    field_names = []
    field_units = []
    year_start = day_start = sec_start = interval = None
    with open(filename, 'rt') as file_obj:
        for index, line in enumerate(file_obj):
            line = line.strip()
            if line.startswith('*FieldNames:'):
                field_names = line.split(': ', 1)[1].split(', ')
            elif line.startswith('*FieldUnits:'):
                field_units = line.split(': ', 1)[1].split(', ')
            elif line.startswith('*HoffmuellerStartYr:'):
                year_start = line.split(': ', 1)[1]
            elif line.startswith('*HoffmuellerStartDay:'):
                day_start = line.split(': ', 1)[1]
            elif line.startswith('*HoffmuellerStartSec:'):
                sec_start = line.split(': ', 1)[1]
            elif line.startswith('*HoffmuellerInterval:'):
                interval = line.split(': ', 1)[1]
            elif line.startswith('*EndOfHeader'):
                break
    # Read the profile data in 82-row chunks (one chunk per profile)
    data = pd.read_csv(filename, delim_whitespace=True, header=0,
                       names=field_names, skiprows=index, chunksize=82,
                       index_col=0)
    # Start timestamp from year + year-day, plus the seconds offset
    datetime_start = datetime.strptime(
        year_start + day_start, '%Y%j',
    ) + timedelta(seconds=int(sec_start))
    # One timestamp per profile chunk, `interval` days apart
    datetime_index = []
    data_list = []
    for index, chunk in enumerate(data):
        datetime_index.append(
            datetime_start + timedelta(days=index * float(interval)),
        )
        data_list.append(chunk.to_xarray())
    # Concatenate the per-profile datasets along the new time axis
    hoff_out = xr.concat(
        data_list, dim=xr.DataArray(datetime_index, name='time', dims='time'),
    )
    return hoff_out
def loadSOG(filepath):
    '''Loads SOG timeseries and hoffmueller files from FILEPATH and returns
    Pandas dataframes PHYS_TS, BIO_TS, CHEM_TS, and panel HOFF
    '''
    # Load timeseries and hoff files from FILEPATH
    phys_TS = load_TS(filepath + 'timeseries/std_phys_SOG.out')
    bio_TS = load_TS(filepath + 'timeseries/std_bio_SOG.out')
    chem_TS = load_TS(filepath + 'timeseries/std_chem_SOG.out')
    hoff = load_hoff(filepath + 'profiles/hoff-SOG.dat')
    # Construct depth array for calcs
    # NOTE(review): `minor_axis`/`major_axis`/`ix` below are pandas-Panel
    # accessors, while load_hoff returns an xarray concatenation — confirm
    # which hoff container this code actually runs against.
    depth_array = hoff.minor_axis.values
    date_array = hoff.major_axis.values
    # depth has one row of grid-point depths per output date (for pressure)
    depth, dummy = np.meshgrid(depth_array, np.ones(date_array.size))
    # Calculate surface pH and Omega_A
    pH_sur, Omega_A_sur = carb.calc_carbonate(
        chem_TS['surface alkalinity'],                 # TAlk [uM]
        chem_TS['surface DIC concentration'],          # DIC [uM]
        calc_sigma(phys_TS['surface temperature'],
                   phys_TS['surface salinity']), # sigma_t [kg m3]
        phys_TS['surface salinity'],                   # salinity [PSS 78]
        phys_TS['surface temperature'],                # temperature [deg C]
        0.0,                                           # pressure [dbar]
        bio_TS['surface nitrate concentration'] / 16,  # phosphate [uM]
        bio_TS['surface silicon concentration'])       # silicate [uM]
    # Calculate 3 m avg pH and Omega_A
    pH_3m, Omega_A_3m = carb.calc_carbonate(
        chem_TS['3 m avg alkalinity'],                 # TAlk [uM]
        chem_TS['3 m avg DIC concentration'],          # DIC [uM]
        calc_sigma(phys_TS['3 m avg temperature'],
                   phys_TS['3 m avg salinity']), # sigma_t [kg m3]
        phys_TS['3 m avg salinity'],                   # salinity [PSS 78]
        phys_TS['3 m avg temperature'],                # temperature [deg C]
        0.0,                                           # pressure [dbar]
        bio_TS['3 m avg nitrate concentration'] / 16,  # phosphate [uM]
        bio_TS['3 m avg silicon concentration'])       # silicate [uM]
    # Calculate hoffmueller pH and Omega_A (full depth/time fields)
    hoff['pH'], hoff['Omega_A'] = carb.calc_carbonate(
        hoff.ix['alkalinity', :, :],                   # TAlk [uM]
        hoff.ix['dissolved inorganic carbon', :, :],   # DIC [uM]
        hoff.ix['sigma-t', :, :],                      # sigma_t [kg m3]
        hoff.ix['salinity', :, :],                     # salinity [PSS 78]
        hoff.ix['temperature', :, :],                  # temperature [deg C]
        depth,                                         # pressure [dbar]
        hoff.ix['nitrate', :, :] / 16,                 # phosphate [uM]
        hoff.ix['silicon', :, :])                      # silicate [uM]
    # Append pH and Omega timeseries to CHEM_TS
    chem_TS = pd.concat([chem_TS, pd.DataFrame({
        'surface pH': pH_sur,
        '3 m avg pH': pH_3m,
        'surface Omega_A': Omega_A_sur,
        '3 m avg Omega_A': Omega_A_3m})], axis=1)
    return phys_TS, bio_TS, chem_TS, hoff
def loadSOG_batch(filesystem, bloomyear, filestr):
'''Loads SOG timeseries and hoffmueller files given parameters FILESYSTEM,
BLOOMYEAR, and FILESTR and returns PHYS_TS, BIO_TS, CHEM_TS, and HOFF
'''
# Specify standard timeseries output paths
filepath = '/ocean/bmoorema/research/SOG/{0}/{1}/{2}/{3}/{3}_{4}/'.format(
filesystem['category'], filesystem['test'], filesystem['type'],
bloomyear, filestr)
# Load timeseries and hoffmueller files from FILEPATH
phys_TS, bio_TS, chem_TS, hoff = loadSOG(filepath)
return phys_TS, bio_TS, chem_TS, hoff
def calc_sigma(T, S):
    """Density anomaly sigma-t [kg/m^3] from temperature T [deg C] and
    salinity S [PSS-78], via the one-atmosphere equation of state of
    seawater (polynomial in T with salinity-dependent coefficients).
    """
    root_S = np.sqrt(S)
    # Pure-water density polynomial in T
    # (Kell (1967) Br. J. Applied Physics 8 pp 521-537)
    rho_w = ((((6.536332e-9 * T - 1.120083e-6) * T + 1.001685e-4) * T
              - 9.095290e-3) * T + 6.793952e-2) * T - 28.263737
    # Salinity coefficients of the seawater polynomial
    coef_S = (((5.3875e-9 * T - 8.2467e-7) * T + 7.6438e-5) * T
              - 4.0899e-3) * T + 8.24493e-1
    coef_S32 = (-1.6546e-6 * T + 1.0227e-4) * T - 5.72466e-3
    # International one-atmosphere equation of state of seawater
    dens = (4.8314e-4 * S + coef_S32 * root_S + coef_S) * S + rho_w
    # Specific volume at atmospheric pressure (35-PSU reference)
    v_ref = 1.0 / 1028.1063
    sv_anom = -dens * v_ref / (1028.1063 + dens)
    # Density anomaly at atmospheric pressure
    return 28.106331 - sv_anom / (v_ref * (v_ref + sv_anom))
| [
"numpy.sqrt",
"numpy.ones",
"pandas.read_csv",
"datetime.datetime.strptime",
"carbonate.calc_carbonate",
"xarray.DataArray",
"pandas.DataFrame",
"datetime.timedelta",
"pandas.concat"
] | [((1462, 1559), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'delim_whitespace': '(True)', 'header': '(0)', 'names': 'field_names', 'skiprows': '(index + 1)'}), '(filename, delim_whitespace=True, header=0, names=field_names,\n skiprows=index + 1)\n', (1473, 1559), True, 'import pandas as pd\n'), ((3029, 3149), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'delim_whitespace': '(True)', 'header': '(0)', 'names': 'field_names', 'skiprows': 'index', 'chunksize': '(82)', 'index_col': '(0)'}), '(filename, delim_whitespace=True, header=0, names=field_names,\n skiprows=index, chunksize=82, index_col=0)\n', (3040, 3149), True, 'import pandas as pd\n'), ((5956, 6211), 'carbonate.calc_carbonate', 'carb.calc_carbonate', (["hoff.ix['alkalinity', :, :]", "hoff.ix['dissolved inorganic carbon', :, :]", "hoff.ix['sigma-t', :, :]", "hoff.ix['salinity', :, :]", "hoff.ix['temperature', :, :]", 'depth', "(hoff.ix['nitrate', :, :] / 16)", "hoff.ix['silicon', :, :]"], {}), "(hoff.ix['alkalinity', :, :], hoff.ix[\n 'dissolved inorganic carbon', :, :], hoff.ix['sigma-t', :, :], hoff.ix[\n 'salinity', :, :], hoff.ix['temperature', :, :], depth, hoff.ix[\n 'nitrate', :, :] / 16, hoff.ix['silicon', :, :])\n", (5975, 6211), True, 'import carbonate as carb\n'), ((7673, 7683), 'numpy.sqrt', 'np.sqrt', (['S'], {}), '(S)\n', (7680, 7683), True, 'import numpy as np\n'), ((3249, 3298), 'datetime.datetime.strptime', 'datetime.strptime', (['(year_start + day_start)', '"""%Y%j"""'], {}), "(year_start + day_start, '%Y%j')\n", (3266, 3298), False, 'from datetime import datetime, timedelta\n'), ((4450, 4474), 'numpy.ones', 'np.ones', (['date_array.size'], {}), '(date_array.size)\n', (4457, 4474), True, 'import numpy as np\n'), ((3723, 3777), 'xarray.DataArray', 'xr.DataArray', (['datetime_index'], {'name': '"""time"""', 'dims': '"""time"""'}), "(datetime_index, name='time', dims='time')\n", (3735, 3777), True, 'import xarray as xr\n'), ((6622, 6746), 'pandas.DataFrame', 'pd.DataFrame', 
(["{'surface pH': pH_sur, '3 m avg pH': pH_3m, 'surface Omega_A': Omega_A_sur,\n '3 m avg Omega_A': Omega_A_3m}"], {}), "({'surface pH': pH_sur, '3 m avg pH': pH_3m, 'surface Omega_A':\n Omega_A_sur, '3 m avg Omega_A': Omega_A_3m})\n", (6634, 6746), True, 'import pandas as pd\n'), ((1850, 1871), 'datetime.timedelta', 'timedelta', ([], {'hours': 'hour'}), '(hours=hour)\n', (1859, 1871), False, 'from datetime import datetime, timedelta\n'), ((1928, 1959), 'pandas.concat', 'pd.concat', (['[date, data]'], {'axis': '(1)'}), '([date, data], axis=1)\n', (1937, 1959), True, 'import pandas as pd\n')] |
import sys
try:
    # TODO this is a hack, at minimum should be done s.t. it'll work for aaaany ros distribution
    # remove-then-append demotes the ROS kinetic python2.7 packages to the
    # END of sys.path, so they no longer shadow same-named python3 packages.
    sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
    sys.path.append('/opt/ros/kinetic/lib/python2.7/dist-packages')
except Exception as e:
    # Path entry absent: not a ROS-kinetic environment — continue without it.
    print(e)
    print("no ros kinetic found in path")
import numpy as np
import rospy
import cv2
class KinovaHomography:
    """
    class that handles transformations between kinova's coordinate system and image coordinate system

    On construction it either loads a previously saved calibration
    (points_robot/points_image pairs) or runs a physical calibration sweep:
    the arm is driven to `num_pts` joint configurations and matching
    robot/camera coordinates are recorded. A homography is then fit between
    the two point sets with OpenCV.
    """
    def __init__(self, kinova_sender, kinova_listener,
                 control_joints, num_joints,
                 robot_preprocess=None,
                 num_pts=4, error_thresh=0.002,
                 save_homography='./homography.npy'):
        """
        kinova_sender / kinova_listener: ROS command/feedback interfaces.
        control_joints: the two joint indices swept during calibration.
        num_joints: total number of arm joints.
        robot_preprocess: maps a raw cartesian robot coordinate to the 2-vector
            used for the homography (default: first two components).
        num_pts: number of calibration poses.
        error_thresh: per-joint angle tolerance [rad] for "pose reached".
        save_homography: path where calibration point pairs are cached.
        """
        self.num_pts = num_pts
        self.kinova_sender = kinova_sender
        self.kinova_listener = kinova_listener
        self.control_joints = control_joints
        self.num_joints = num_joints
        self.error_thresh = error_thresh
        self.robot_preprocess = self.default_robot_preprocess if robot_preprocess is None else robot_preprocess
        self.save_homography = save_homography
        # NOTE: may physically move the robot if no cached calibration exists.
        self.find_transform()
    def default_robot_preprocess(self, coord):
        #default behavior is to just take 1st 2 coordinates
        return np.array([coord[0], coord[1]])
    def find_transform(self):
        """Load or collect calibration point pairs, then fit both homographies."""
        load_success = False
        points_robot = []
        points_image = []
        try:
            # Cached calibration is a dict {'robot': Nx2, 'image': Nx2}.
            load_file = np.load(self.save_homography, allow_pickle=True).item()
            print(load_file)
            load_success = True
            points_robot = load_file['robot']
            points_image = load_file['image']
        except Exception as e:
            # Missing/corrupt cache: fall through to a live calibration sweep.
            print("Could not find homography, recalibrating")
            print("error:", e)
        if not load_success:
            N = self.num_pts
            # collect four measurements
            self.kinova_sender.stopMovement()
            rospy.sleep(1.0)
            #TODO: make this more general....
            # NOTE(review): np.linspace(0, N, N) yields N samples spaced
            # N/(N-1) apart (endpoints 0 and N) — confirm the intended sweep.
            for i in np.linspace(0,N,N):
                joint_angles = np.zeros(self.num_joints)
                # Sweep the two control joints over fractions of a revolution.
                joint_angles[self.control_joints[0]] = (i - N/2)/ N* 2 * np.pi /8
                joint_angles[self.control_joints[1]] = (i - N/2)/ N* 2 * np.pi /2
                self.kinova_sender.send_joint_angles(joint_angles)
                thetas = self.kinova_listener.get_thetas()
                # Poll until every joint is within tolerance (or ROS shuts down).
                while np.any(np.abs(thetas - joint_angles) > self.error_thresh) and not rospy.is_shutdown():
                    thetas = self.kinova_listener.get_thetas()
                    rospy.sleep(0.1)
                # Extra settle time before sampling the pose.
                rospy.sleep(2.0)
                robot = self.robot_preprocess(self.kinova_listener.get_cartesian_robot())
                camera = self.kinova_listener.get_position()
                points_robot.append(robot)
                points_image.append(camera)
            points_robot = np.array(points_robot,dtype=np.float32)
            points_image = np.array(points_image,dtype=np.float32)
            # Cache the raw point pairs for future runs.
            save_file = {'robot': points_robot, 'image': points_image}
            np.save(self.save_homography, save_file)
        # Fit image->robot homography; the inverse maps robot->image.
        self.imge2robot_mat = cv2.findHomography(points_image, points_robot)[0]
        self.robot2image_mat = np.linalg.inv(self.imge2robot_mat)
        print(self.transform(points_robot, self.robot2image_mat))
    def transform(self,a,H):
        # Apply homography H to row-vector points `a` (Nx2): lift to
        # homogeneous coordinates, multiply, then de-homogenize.
        b = H.dot( np.concatenate((a.T,np.ones((1,a.shape[0]))),axis=0)).T
        b /= b[:,-1:]
        return b[:,:-1]
    def robot_to_image(self, coord):
        # Map Nx2 robot coordinates into image coordinates.
        return self.transform(coord, self.robot2image_mat)
    def image_to_robot(self, coord):
        # Map Nx2 image coordinates into robot coordinates.
        return self.transform(coord, self.imge2robot_mat)
| [
"numpy.abs",
"numpy.ones",
"rospy.is_shutdown",
"cv2.findHomography",
"sys.path.remove",
"numpy.array",
"numpy.linalg.inv",
"numpy.linspace",
"numpy.zeros",
"rospy.sleep",
"numpy.load",
"sys.path.append",
"numpy.save"
] | [((116, 179), 'sys.path.remove', 'sys.path.remove', (['"""/opt/ros/kinetic/lib/python2.7/dist-packages"""'], {}), "('/opt/ros/kinetic/lib/python2.7/dist-packages')\n", (131, 179), False, 'import sys\n'), ((184, 247), 'sys.path.append', 'sys.path.append', (['"""/opt/ros/kinetic/lib/python2.7/dist-packages"""'], {}), "('/opt/ros/kinetic/lib/python2.7/dist-packages')\n", (199, 247), False, 'import sys\n'), ((1345, 1375), 'numpy.array', 'np.array', (['[coord[0], coord[1]]'], {}), '([coord[0], coord[1]])\n', (1353, 1375), True, 'import numpy as np\n'), ((3352, 3386), 'numpy.linalg.inv', 'np.linalg.inv', (['self.imge2robot_mat'], {}), '(self.imge2robot_mat)\n', (3365, 3386), True, 'import numpy as np\n'), ((2016, 2032), 'rospy.sleep', 'rospy.sleep', (['(1.0)'], {}), '(1.0)\n', (2027, 2032), False, 'import rospy\n'), ((2101, 2121), 'numpy.linspace', 'np.linspace', (['(0)', 'N', 'N'], {}), '(0, N, N)\n', (2112, 2121), True, 'import numpy as np\n'), ((2996, 3036), 'numpy.array', 'np.array', (['points_robot'], {'dtype': 'np.float32'}), '(points_robot, dtype=np.float32)\n', (3004, 3036), True, 'import numpy as np\n'), ((3063, 3103), 'numpy.array', 'np.array', (['points_image'], {'dtype': 'np.float32'}), '(points_image, dtype=np.float32)\n', (3071, 3103), True, 'import numpy as np\n'), ((3199, 3239), 'numpy.save', 'np.save', (['self.save_homography', 'save_file'], {}), '(self.save_homography, save_file)\n', (3206, 3239), True, 'import numpy as np\n'), ((3271, 3317), 'cv2.findHomography', 'cv2.findHomography', (['points_image', 'points_robot'], {}), '(points_image, points_robot)\n', (3289, 3317), False, 'import cv2\n'), ((2152, 2177), 'numpy.zeros', 'np.zeros', (['self.num_joints'], {}), '(self.num_joints)\n', (2160, 2177), True, 'import numpy as np\n'), ((2695, 2711), 'rospy.sleep', 'rospy.sleep', (['(2.0)'], {}), '(2.0)\n', (2706, 2711), False, 'import rospy\n'), ((1526, 1574), 'numpy.load', 'np.load', (['self.save_homography'], {'allow_pickle': '(True)'}), 
'(self.save_homography, allow_pickle=True)\n', (1533, 1574), True, 'import numpy as np\n'), ((2662, 2678), 'rospy.sleep', 'rospy.sleep', (['(0.1)'], {}), '(0.1)\n', (2673, 2678), False, 'import rospy\n'), ((2558, 2577), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (2575, 2577), False, 'import rospy\n'), ((3531, 3555), 'numpy.ones', 'np.ones', (['(1, a.shape[0])'], {}), '((1, a.shape[0]))\n', (3538, 3555), True, 'import numpy as np\n'), ((2499, 2528), 'numpy.abs', 'np.abs', (['(thetas - joint_angles)'], {}), '(thetas - joint_angles)\n', (2505, 2528), True, 'import numpy as np\n')] |
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from extensions.middle.lstm_sequence_normalize import LSTMSequenceNormalize
from mo.utils.unittest.graph import compare_graphs, build_graph_with_attrs
from mo.graph.graph import Node
class LSTMSequenceNormalizeTest(unittest.TestCase):
    def test_squeeze_num_directions(self):
        """squeeze_num_directions must drop the num_directions axis from the
        LSTM output and restore the original 4d shape via a Reshape node."""
        normalizer = LSTMSequenceNormalize()
        pattern = normalizer.pattern()
        # seq_length, num_directions, batch_size, data_size
        shape_before = np.array([10, 1, 20, 128], dtype=np.int64)
        shape_after = np.array([10, 20, 128], dtype=np.int64)
        graph = build_graph_with_attrs(
            nodes_with_attrs=pattern['nodes'],
            edges_with_attrs=pattern['edges'],
            update_edge_attrs={
                ('W', 'lstm', 0): {'in': 1},
                ('R', 'lstm', 0): {'in': 2},
            },
            new_nodes_with_attrs=[('output', {'shape': shape_before})],
            new_edges_with_attrs=[('lstm', 'output', {'out': 0})],
        )
        lstm_node = Node(graph, 'lstm')
        normalizer.squeeze_num_directions(graph, {'lstm': lstm_node})
        # The LSTM now emits the squeezed 3d shape ...
        self.assertTrue(np.array_equal(lstm_node.out_node(0).shape, shape_after))
        # ... and a trailing Reshape restores the original 4d shape.
        restore_node = lstm_node.out_node(0).out_node(0)
        self.assertTrue(restore_node.op == 'Reshape')
        self.assertTrue(np.array_equal(restore_node.dim, shape_before))
        self.assertTrue(restore_node.out_node(0).id == 'output')
| [
"mo.utils.unittest.graph.build_graph_with_attrs",
"numpy.array",
"mo.graph.graph.Node",
"numpy.array_equal",
"extensions.middle.lstm_sequence_normalize.LSTMSequenceNormalize"
] | [((921, 944), 'extensions.middle.lstm_sequence_normalize.LSTMSequenceNormalize', 'LSTMSequenceNormalize', ([], {}), '()\n', (942, 944), False, 'from extensions.middle.lstm_sequence_normalize import LSTMSequenceNormalize\n'), ((1005, 1047), 'numpy.array', 'np.array', (['[10, 1, 20, 128]'], {'dtype': 'np.int64'}), '([10, 1, 20, 128], dtype=np.int64)\n', (1013, 1047), True, 'import numpy as np\n'), ((1115, 1154), 'numpy.array', 'np.array', (['[10, 20, 128]'], {'dtype': 'np.int64'}), '([10, 20, 128], dtype=np.int64)\n', (1123, 1154), True, 'import numpy as np\n'), ((1171, 1467), 'mo.utils.unittest.graph.build_graph_with_attrs', 'build_graph_with_attrs', ([], {'nodes_with_attrs': "pattern['nodes']", 'edges_with_attrs': "pattern['edges']", 'update_edge_attrs': "{('W', 'lstm', 0): {'in': 1}, ('R', 'lstm', 0): {'in': 2}}", 'new_nodes_with_attrs': "[('output', {'shape': orig_shape})]", 'new_edges_with_attrs': "[('lstm', 'output', {'out': 0})]"}), "(nodes_with_attrs=pattern['nodes'], edges_with_attrs=\n pattern['edges'], update_edge_attrs={('W', 'lstm', 0): {'in': 1}, ('R',\n 'lstm', 0): {'in': 2}}, new_nodes_with_attrs=[('output', {'shape':\n orig_shape})], new_edges_with_attrs=[('lstm', 'output', {'out': 0})])\n", (1193, 1467), False, 'from mo.utils.unittest.graph import compare_graphs, build_graph_with_attrs\n'), ((1651, 1670), 'mo.graph.graph.Node', 'Node', (['graph', '"""lstm"""'], {}), "(graph, 'lstm')\n", (1655, 1670), False, 'from mo.graph.graph import Node\n'), ((1963, 2007), 'numpy.array_equal', 'np.array_equal', (['reshape_node.dim', 'orig_shape'], {}), '(reshape_node.dim, orig_shape)\n', (1977, 2007), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import numpy as np
def ltr_parts(parts_dict):
    """Return the (left, right) part-index lists for horizontal flips.

    When an image is mirrored, every left body part becomes the matching
    right part and vice versa; these two index lists describe that swap.
    """
    left_names = ("Lsho", "Lelb", "Lwri", "Lhip", "Lkne", "Lank", "Leye", "Lear")
    right_names = ("Rsho", "Relb", "Rwri", "Rhip", "Rkne", "Rank", "Reye", "Rear")
    left_ids = [parts_dict[name] for name in left_names]
    right_ids = [parts_dict[name] for name in right_names]
    return left_ids, right_ids
class RmpeGlobalConfig:
    """Global label/network layout for the multi-person pose model."""

    width = 368
    height = 368
    stride = 8

    parts = ["nose", "neck", "Rsho", "Relb", "Rwri", "Lsho", "Lelb", "Lwri",
             "Rhip", "Rkne", "Rank", "Lhip", "Lkne", "Lank", "Reye", "Leye",
             "Rear", "Lear"]
    num_parts = len(parts)
    parts_dict = {part: index for index, part in enumerate(parts)}
    # background channel is appended AFTER the index dict is frozen
    parts = parts + ["background"]
    num_parts_with_background = len(parts)

    leftParts, rightParts = ltr_parts(parts_dict)

    # limb endpoints: 1-based indices (probably copied from matlab),
    # converted to 0-based pairs below
    limb_from = [2, 9, 10, 2, 12, 13, 2, 3, 4, 3, 2, 6, 7, 6, 2, 1, 1, 15, 16]
    limb_to = [9, 10, 11, 12, 13, 14, 3, 4, 5, 17, 6, 7, 8, 18, 1, 15, 16, 17, 18]
    limbs_conn = [(src - 1, dst - 1) for src, dst in zip(limb_from, limb_to)]

    # output layout: PAF x/y pairs, then heatmaps, then one background layer
    paf_layers = 2 * len(limbs_conn)
    heat_layers = num_parts
    num_layers = paf_layers + heat_layers + 1

    paf_start = 0
    heat_start = paf_layers
    bkg_start = paf_layers + heat_layers

    data_shape = (3, height, width)                            # 3, 368, 368
    mask_shape = (height//stride, width//stride)               # 46, 46
    parts_shape = (num_layers, height//stride, width//stride)  # 57, 46, 46

    class TransformationParams:
        """Augmentation parameters used during training-label generation."""
        target_dist = 0.6
        scale_prob = 0  # TODO: actually scale UNprobability: 1 = off, 0 = always
        scale_min = 0.5
        scale_max = 0.9
        max_rotate_degree = 40.
        center_perterb_max = 40.
        flip_prob = 0.5
        sigma = 7.
        paf_thre = 8.  # it is original 1.0 * stride in this program
class RmpeCocoConfig:
    """COCO keypoint layout and conversion into the global part layout."""

    parts = ['nose', 'Leye', 'Reye', 'Lear', 'Rear', 'Lsho', 'Rsho', 'Lelb',
             'Relb', 'Lwri', 'Rwri', 'Lhip', 'Rhip', 'Lkne', 'Rkne', 'Lank',
             'Rank']
    num_parts = len(parts)
    # for COCO neck is calculated like mean of 2 shoulders.
    parts_dict = dict(zip(parts, range(num_parts)))

    @staticmethod
    def convert(joints):
        """Re-order COCO joints (persons, 17, 3) into the global layout
        (persons, num_parts, 3) and synthesize the neck keypoint as the
        midpoint of the two shoulders when both are known.

        Visibility codes (already processed by generate_hdf5):
        0 - hidden, 1 - visible, 2 - absent.
        """
        # Bug fix: np.float was a deprecated alias of the builtin float and
        # was removed in NumPy 1.24 — use float directly (same dtype).
        result = np.zeros((joints.shape[0], RmpeGlobalConfig.num_parts, 3), dtype=float)
        result[:, :, 2] = 2.  # default visibility: absent
        for p in RmpeCocoConfig.parts:
            coco_id = RmpeCocoConfig.parts_dict[p]
            global_id = RmpeGlobalConfig.parts_dict[p]
            assert global_id != 1, "neck shouldn't be known yet"
            result[:, global_id, :] = joints[:, coco_id, :]
        neckG = RmpeGlobalConfig.parts_dict['neck']
        RshoC = RmpeCocoConfig.parts_dict['Rsho']
        LshoC = RmpeCocoConfig.parts_dict['Lsho']
        # no neck in the COCO database: average the shoulders when both are known
        both_shoulders_known = (joints[:, LshoC, 2] < 2) & (joints[:, RshoC, 2] < 2)
        result[both_shoulders_known, neckG, 0:2] = (joints[both_shoulders_known, RshoC, 0:2] +
                                                    joints[both_shoulders_known, LshoC, 0:2]) / 2
        # neck visibility is the more-restrictive of the two shoulders
        result[both_shoulders_known, neckG, 2] = np.minimum(joints[both_shoulders_known, RshoC, 2],
                                                             joints[both_shoulders_known, LshoC, 2])
        return result
class RpmeMPIIConfig:
    """MPII keypoint layout (conversion not implemented yet)."""

    parts = ["HeadTop", "Neck", "RShoulder", "RElbow", "RWrist", "LShoulder",
             "LElbow", "LWrist", "RHip", "RKnee", "RAnkle", "LHip", "LKnee",
             "LAnkle"]
    numparts = len(parts)
    # 14 - Chest is calculated like "human center location provided by the annotated data"

    @staticmethod
    def convert(joints):
        """Convert MPII joints into the global layout — not implemented.

        Bug fix: the original did `raise "Not implemented"`; raising a
        string is a TypeError in Python 3, so raise the proper exception.
        """
        raise NotImplementedError("MPII joint conversion is not implemented")
# more information on keypoints mapping is here
# https://github.com/ZheC/Realtime_Multi-Person_Pose_Estimation/issues/7
def check_layer_dictionary():
    """Print the name of every output layer (PAF x/y pairs first, then the
    heatmap/background part names) and assert that no PAF slot is filled
    twice."""
    layer_names = RmpeGlobalConfig.parts[:]
    padding = RmpeGlobalConfig.num_layers - len(layer_names)
    layer_names = [None] * padding + layer_names
    for limb_id, (src, dst) in enumerate(RmpeGlobalConfig.limbs_conn):
        label = "%s->%s" % (RmpeGlobalConfig.parts[src], RmpeGlobalConfig.parts[dst])
        print(limb_id, label)
        for offset, axis in ((0, "x"), (1, "y")):
            slot = limb_id * 2 + offset
            assert layer_names[slot] is None
            layer_names[slot] = label + ":" + axis
    print(layer_names)
# Manual smoke test: dump the layer-name table when run as a script.
if __name__ == "__main__":
    check_layer_dictionary()
| [
"numpy.zeros",
"numpy.minimum"
] | [((2382, 2456), 'numpy.zeros', 'np.zeros', (['(joints.shape[0], RmpeGlobalConfig.num_parts, 3)'], {'dtype': 'np.float'}), '((joints.shape[0], RmpeGlobalConfig.num_parts, 3), dtype=np.float)\n', (2390, 2456), True, 'import numpy as np\n'), ((3453, 3548), 'numpy.minimum', 'np.minimum', (['joints[both_shoulders_known, RshoC, 2]', 'joints[both_shoulders_known, LshoC, 2]'], {}), '(joints[both_shoulders_known, RshoC, 2], joints[\n both_shoulders_known, LshoC, 2])\n', (3463, 3548), True, 'import numpy as np\n')] |
import numpy as np
import random
from scipy.misc import imresize
from PIL import Image
import math
# from torchvision import transforms
import torch
class MultiRescale(object):
    """Rescale every image entry of a sample dict to a fixed output size.

    Args:
        output_size (tuple or int): Desired output size; an int is expanded
            to a square (size, size).
    """

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size

    def __call__(self, sample):
        """Resize all entries except the 'name'/'size' metadata, in place."""
        rescale = lambda x: Image.fromarray(imresize(x, self.output_size))
        for k, v in sample.items():
            # Bug fix: the original compared strings with `is not`, which
            # relies on CPython string interning (SyntaxWarning since 3.8);
            # use value comparison instead.
            if k not in ('name', 'size'):
                sample[k] = rescale(v)
        return sample
class RandomCrop(object):
    """Crop all images of a sample around a random unknown trimap region.

    A crop size is drawn from `random_crop_list`; a window containing an
    unknown (value 128) trimap pixel is chosen, and every image entry is
    cropped to that window and resized to `output_size`.

    Args:
        output_size (tuple or int): Desired output size. If int, square crop is made.
    """
    random_crop_list = [320, 480, 640]

    def __init__(self, output_size):
        assert isinstance(output_size, (int, tuple))
        if isinstance(output_size, int):
            self.output_size = (output_size, output_size)
        else:
            assert len(output_size) == 2
            self.output_size = output_size

    def __call__(self, sample):
        random_crop_size = random.choice(self.random_crop_list)
        trimap = sample['trimap']
        trimap_ = np.asarray(trimap)
        if min(trimap_.shape) < random_crop_size:
            # Image smaller than the crop: fall back to the largest square
            # anchored at the origin that fits.
            h_start = w_start = 0
            h_end = w_end = min(trimap_.shape)
        else:
            h_start, h_end, w_start, w_end = validUnknownRegion(trimap_, random_crop_size)
        crop = lambda x: Image.fromarray(
            imresize(np.asarray(x)[h_start:h_end, w_start:w_end], self.output_size))
        cropped_sample = {'name': sample['name']}
        for k, v in sample.items():
            # Bug fix: `k is not 'name'` compared string identity (interning-
            # dependent, SyntaxWarning since 3.8); compare values instead.
            if k != 'name':
                cropped_sample[k] = crop(v)
        return cropped_sample
class MultiToTensor(object):
    """Convert the image entries of a sample into float tensors.

    RGB entries are mean-subtracted and scaled by 1/255 in CHW order;
    single-channel entries get a leading channel axis and 1/255 scaling.
    The 'name' and 'size' metadata entries are passed through unchanged.
    """

    def __call__(self, sample):
        mean_ = (114., 121., 134.,)

        def trans(x):
            if x.mode == 'RGB':
                # HWC -> CHW, subtract per-channel mean, scale to [0, 1]-ish
                return torch.from_numpy(
                    np.transpose(np.asarray(x) - mean_, (2, 0, 1)) / 255.
                ).type(torch.FloatTensor)
            # grayscale/mask: add channel axis, scale by 1/255
            return torch.from_numpy(
                np.expand_dims(np.asarray(x), axis=0) / 255.
            ).type(torch.FloatTensor)

        sample_ = {'name': sample['name'], 'size': sample['size']}
        for k, v in sample.items():
            # Bug fix: the original used `k is not 'name'` string-identity
            # comparisons (interning-dependent, SyntaxWarning since 3.8).
            if k != 'name' and k != 'size':
                sample_[k] = trans(v)
        return sample_
def generate_gradient_map(grad, area=3):
    """Average-pool a gradient image over a square neighborhood.

    Each output pixel is the mean of the `area` x `area` window centered on
    the corresponding input pixel, with zero padding at the borders.

    Bug fix: the original summed a hard-coded 3x3 neighborhood regardless of
    `area` while still dividing by area**2, so any area != 3 gave wrong
    values; the window now honours `area` (results identical for area=3).

    Parameters
    ----------
    grad : 2d array-like gradient matrix
    area : int, side length of the averaging window (odd; default 3)

    Returns
    -------
    array of the same shape (and dtype) as `grad`
    """
    pad = int(area / 2)
    grad = np.asarray(grad)
    # Zero-pad `pad` pixels on every side so border windows are defined.
    padded = np.zeros((grad.shape[0] + 2 * pad, grad.shape[1] + 2 * pad),
                      dtype=np.float32)
    padded[pad:pad + grad.shape[0], pad:pad + grad.shape[1]] = grad
    result = np.zeros_like(grad)
    denom = float(area ** 2)
    for i in range(grad.shape[0]):
        for j in range(grad.shape[1]):
            result[i][j] = padded[i:i + area, j:j + area].sum() / denom
    return result
def getFileList(base, sub):
    """
    Get the relative file list of a directory.

    Param:
        base: base directory
        sub: sub-directory name
    Return:
        a list of './<sub>/<file>' paths, one per regular file in base/sub
    """
    # Bug fix: `os` was used here but never imported in this module, so the
    # function raised NameError at call time; import it locally.
    import os

    path = os.path.join(base, sub)
    file_list = []
    for entry in os.listdir(path):
        # keep regular files only, skip nested directories
        if os.path.isfile(path + '/' + entry):
            file_list.append(os.path.join('./' + sub, entry))
    return file_list
def candidateUnknownRegion(img):
'''
Propose a condidate of unknown region center randomly within the unknown area of img.
param:
img: trimap image
return:
an index for unknown region
'''
index = np.where(img == 128)
idx = random.choice([j for j in range(len(index[0]))])
return np.array(index)[:, idx]
def validUnknownRegion(img, output_size):
"""
Check wether the candidate unknown region is valid and return the index.
param:
img: trimap image
output_size: the desired output image size
return:
output the crop start and end index along h and w respectively.
"""
h_start = h_end = w_start = w_end = 0
cand = candidateUnknownRegion(img)
shape_ = img.shape
if (output_size == 320):
h_start = max(cand[0]-159, 0)
w_start = max(cand[1]-159, 0)
if (h_start+320 > shape_[0]):
h_start = shape_[0] - 320
if (w_start+320 > shape_[1]):
w_start = shape_[1] - 320
h_end = h_start + 320
w_end = w_start + 320
return h_start, h_end, w_start, w_end
elif (output_size == 480):
h_start = max(cand[0]-239, 0)
w_start = max(cand[1]-239, 0)
if (h_start+480 > shape_[0]):
h_start = shape_[0] - 480
if (w_start+480 > shape_[1]):
w_start = shape_[1] - 480
h_end = h_start + 480
w_end = w_start + 480
elif (output_size == 640):
h_start = max(cand[0]-319, 0)
w_start = max(cand[1]-319, 0)
if (h_start+640 > shape_[0]):
h_start = shape_[0] - 640
if (w_start+640 > shape_[1]):
w_start = shape_[1] - 640
h_end = h_start + 640
w_end = w_start + 640
return h_start, h_end, w_start, w_end
| [
"random.choice",
"numpy.where",
"numpy.asarray",
"numpy.array",
"numpy.zeros",
"scipy.misc.imresize",
"numpy.zeros_like"
] | [((3411, 3456), 'numpy.zeros', 'np.zeros', (['[num_pixel, col_]'], {'dtype': 'np.float32'}), '([num_pixel, col_], dtype=np.float32)\n', (3419, 3456), True, 'import numpy as np\n'), ((3471, 3516), 'numpy.zeros', 'np.zeros', (['[row_, num_pixel]'], {'dtype': 'np.float32'}), '([row_, num_pixel], dtype=np.float32)\n', (3479, 3516), True, 'import numpy as np\n'), ((3530, 3549), 'numpy.zeros_like', 'np.zeros_like', (['grad'], {}), '(grad)\n', (3543, 3549), True, 'import numpy as np\n'), ((4740, 4760), 'numpy.where', 'np.where', (['(img == 128)'], {}), '(img == 128)\n', (4748, 4760), True, 'import numpy as np\n'), ((1554, 1590), 'random.choice', 'random.choice', (['self.random_crop_list'], {}), '(self.random_crop_list)\n', (1567, 1590), False, 'import random\n'), ((1643, 1661), 'numpy.asarray', 'np.asarray', (['trimap'], {}), '(trimap)\n', (1653, 1661), True, 'import numpy as np\n'), ((4831, 4846), 'numpy.array', 'np.array', (['index'], {}), '(index)\n', (4839, 4846), True, 'import numpy as np\n'), ((752, 781), 'scipy.misc.imresize', 'imresize', (['x', 'self.output_size'], {}), '(x, self.output_size)\n', (760, 781), False, 'from scipy.misc import imresize\n'), ((1951, 1964), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1961, 1964), True, 'import numpy as np\n'), ((2647, 2660), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2657, 2660), True, 'import numpy as np\n'), ((2477, 2490), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (2487, 2490), True, 'import numpy as np\n')] |
import torch
from tqdm import tqdm
import os
import numpy as np
train_features=torch.load('train_features.pt')
n = len(list(np.load('/facebook/data/images/train_imlist.npy')))
print(n)
os.makedirs('/siim/sim_pt_256', exist_ok=True)
for i in tqdm(range(n)):
a=torch.mm(train_features[i:i+1],train_features.t())
torch.save(torch.tensor(np.argpartition(np.array(a),-256)[0][-256:]),os.path.join('/siim/sim_pt_256',f'{i}_sim256.pt'))
for i in tqdm(range(n)):
a=torch.mm(train_features[i:i+1],train_features.t())
torch.save(torch.tensor(np.argpartition(np.array(a),-512)[0][-512:]),os.path.join('/siim/sim_pt',f'{i}_sim512.pt'))
os.makedirs('/storage1/sim_pt',exist_ok=True)
if n < 65746:
for i in tqdm(range(n)):
a=torch.mm(train_features[i:i+1],train_features.t())
torch.save(torch.argsort(a,descending=True)[0][:300],os.path.join('/storage1/sim_pt', f'{i}_sim2000.pt'))
else:
for i in tqdm(range(65746)):
a=torch.mm(train_features[i:i+1],train_features.t())
torch.save(torch.argsort(a,descending=True)[0][:300],os.path.join('/storage1/sim_pt', f'{i}_sim2000.pt'))
for i in tqdm(range(65746,1000000)):
a=torch.mm(train_features[i:i+1],train_features.t())
torch.save(torch.tensor(np.argpartition(np.array(a),-24)[0][-24:]), os.path.join('/storage1/sim_pt',f'{i}_sim2000.pt'))
| [
"os.makedirs",
"torch.load",
"os.path.join",
"numpy.array",
"torch.argsort",
"numpy.load"
] | [((79, 110), 'torch.load', 'torch.load', (['"""train_features.pt"""'], {}), "('train_features.pt')\n", (89, 110), False, 'import torch\n'), ((185, 231), 'os.makedirs', 'os.makedirs', (['"""/siim/sim_pt_256"""'], {'exist_ok': '(True)'}), "('/siim/sim_pt_256', exist_ok=True)\n", (196, 231), False, 'import os\n'), ((641, 687), 'os.makedirs', 'os.makedirs', (['"""/storage1/sim_pt"""'], {'exist_ok': '(True)'}), "('/storage1/sim_pt', exist_ok=True)\n", (652, 687), False, 'import os\n'), ((124, 173), 'numpy.load', 'np.load', (['"""/facebook/data/images/train_imlist.npy"""'], {}), "('/facebook/data/images/train_imlist.npy')\n", (131, 173), True, 'import numpy as np\n'), ((387, 437), 'os.path.join', 'os.path.join', (['"""/siim/sim_pt_256"""', 'f"""{i}_sim256.pt"""'], {}), "('/siim/sim_pt_256', f'{i}_sim256.pt')\n", (399, 437), False, 'import os\n'), ((593, 639), 'os.path.join', 'os.path.join', (['"""/siim/sim_pt"""', 'f"""{i}_sim512.pt"""'], {}), "('/siim/sim_pt', f'{i}_sim512.pt')\n", (605, 639), False, 'import os\n'), ((852, 903), 'os.path.join', 'os.path.join', (['"""/storage1/sim_pt"""', 'f"""{i}_sim2000.pt"""'], {}), "('/storage1/sim_pt', f'{i}_sim2000.pt')\n", (864, 903), False, 'import os\n'), ((1066, 1117), 'os.path.join', 'os.path.join', (['"""/storage1/sim_pt"""', 'f"""{i}_sim2000.pt"""'], {}), "('/storage1/sim_pt', f'{i}_sim2000.pt')\n", (1078, 1117), False, 'import os\n'), ((1297, 1348), 'os.path.join', 'os.path.join', (['"""/storage1/sim_pt"""', 'f"""{i}_sim2000.pt"""'], {}), "('/storage1/sim_pt', f'{i}_sim2000.pt')\n", (1309, 1348), False, 'import os\n'), ((810, 843), 'torch.argsort', 'torch.argsort', (['a'], {'descending': '(True)'}), '(a, descending=True)\n', (823, 843), False, 'import torch\n'), ((1024, 1057), 'torch.argsort', 'torch.argsort', (['a'], {'descending': '(True)'}), '(a, descending=True)\n', (1037, 1057), False, 'import torch\n'), ((358, 369), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (366, 369), True, 'import numpy as np\n'), ((564, 
575), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (572, 575), True, 'import numpy as np\n'), ((1269, 1280), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (1277, 1280), True, 'import numpy as np\n')] |
"""Functions for plotting data files."""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
def rivet_paths(file_name):
"""Return all :py:mod:`rivet` paths found at file_name."""
from . import yodaplot
return yodaplot.data_object_names(file_name)
def gridplot(file_name, uses_rivet_plot_info=True):
"""Convenience function to plot all :py:mod:`yoda` data objects
from a :py:mod:`yoda` file into a subplots grid.
:param str file_name: The path to the :py:mod:`yoda` file.
:return: fig, axes_list
"""
all_rivet_paths = rivet_paths(file_name)
# setup axes
if len(all_rivet_paths) == 1:
fig, axes_list = plt.subplots()
else:
ncols = 2
nrows = (len(all_rivet_paths) - 1) / ncols + 1
fig, axes_list = plt.subplots(nrows, ncols, squeeze=False)
# plot into axes
for rivet_path, axes in zip(all_rivet_paths, np.ravel(axes_list)):
plt.sca(axes)
plot(file_name, rivet_path, uses_rivet_plot_info=uses_rivet_plot_info)
return fig, axes_list
def plot(filename_or_data_object, rivet_path,
uses_rivet_plot_info=True, errors_enabled=None,
**kwargs):
"""Plot a :py:mod:`yoda` data object, potentially from a :py:mod:`yoda` file."""
from . import yodaplot
print("Plotting", rivet_path, end="")
if isinstance(filename_or_data_object, str):
print(" from", filename_or_data_object, "...")
else:
print()
if uses_rivet_plot_info and errors_enabled is None:
from . import rivetplot
errors_enabled = rivetplot.errors_enabled(rivet_path)
else:
errors_enabled = True if errors_enabled is None else errors_enabled
if "rebin_count" in kwargs:
rebin_count = kwargs["rebin_count"]
del kwargs["rebin_count"]
else:
if uses_rivet_plot_info:
from . import rivetplot
rebin_count = rivetplot.rebin_count(rivet_path)
else:
rebin_count = 1
result = yodaplot.plot(filename_or_data_object, rivet_path,
errors_enabled=errors_enabled,
rebin_count=rebin_count,
**kwargs)
if uses_rivet_plot_info:
from . import rivetplot
rivetplot.apply_plot_info(rivet_path)
return result
| [
"numpy.ravel",
"matplotlib.pyplot.sca",
"matplotlib.pyplot.subplots"
] | [((698, 712), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (710, 712), True, 'import matplotlib.pyplot as plt\n'), ((821, 862), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', 'ncols'], {'squeeze': '(False)'}), '(nrows, ncols, squeeze=False)\n', (833, 862), True, 'import matplotlib.pyplot as plt\n'), ((934, 953), 'numpy.ravel', 'np.ravel', (['axes_list'], {}), '(axes_list)\n', (942, 953), True, 'import numpy as np\n'), ((964, 977), 'matplotlib.pyplot.sca', 'plt.sca', (['axes'], {}), '(axes)\n', (971, 977), True, 'import matplotlib.pyplot as plt\n')] |
from enum import Enum
import numpy as np
import scipy.stats as scistats
import matplotlib.pyplot as plt
from tqdm import tqdm
class Noise(Enum):
NORMAL = 1
CAUCHY = 2
def generate(
n_steps=101,
plate_width=10,
plate_height=10,
dx=0.1,
dy=0.1,
thermal_diffusivity=4,
temperature_cool=0,
temperature_hot=700,
ring_params=(2, 5, 5),
trans_stoch=None,
trans_pdf=Noise.NORMAL,
emis_stoch=None,
emis_pdf=Noise.NORMAL
):
"""
Generate diffusion heatmap with specified initialization parameters.
Args:
n_steps (int): Total timesteps.
plate_width (int): Plate width.
plate_height (int): Plate height.
dx (float): x difference.
dy (float): y difference.
thermal_diffusivity (float): Thermal diffusivity constant.
temperature_cool (float): Base temperature.
temperature_hot (float): Hot temperature.
ring_params (float, float, float): Positional and size parameters of heat ring.
r (float): Radius of the ring.
cx (float): x position of center of ring.
cy (float): y position of center of ring.
trans_stoch (loc (float), scale (float)): Add gaussian noise on ODE transition function.
trans_pdf (Noise): Type of transition noise pdf
emis_stoch (loc (float), scale (float)): Add gaussian noise on emission function.
emis_pdf (Noise): Type of emission noise pdf
Return:
latents (np.array): Generated latent data.
obs (np.array): Generated observation data.
dt (float): Time difference between propagation.
"""
# Plate width and height in pixel space
nx, ny = int(plate_width / dx), int(plate_height / dy)
nvar = nx * ny
squared_dx = dx * dx
squared_dy = dy * dy
dt = squared_dx * squared_dy / \
(2 * thermal_diffusivity * (squared_dx + squared_dy)) # Maximum time difference
u_0 = temperature_cool * np.ones((nx, ny))
x_0 = u_0.copy()
# Initial conditions of ring
# Inner radius r, width dr centered at (cx, cy) (mm)
u_0 = add_ring(u_0, dx, dy, temperature=temperature_hot,
ring_params=ring_params)
if emis_stoch is not None:
x_0 = u_0 + generate_noise(emis_stoch, nx, ny)
# Solve equation with time-difference
latents = [u_0]
obs = [x_0]
for i in tqdm(range(n_steps)):
u_0 = do_timestep(u_0, dt, squared_dx,
squared_dy, thermal_diffusivity)
if trans_stoch is not None:
u_0 = u_0 + generate_noise(trans_stoch, nx, ny, type=trans_pdf)
if emis_stoch is not None:
x_0 = u_0 + generate_noise(emis_stoch, nx, ny, type=emis_pdf)
# Clip value below 0
u_0 = np.clip(u_0, temperature_cool, temperature_hot + 300)
x_0 = np.clip(x_0, temperature_cool, temperature_hot + 300)
latents.append(u_0)
obs.append(x_0)
data = np.array(latents)
obs = np.array(obs)
return data, obs, dt
def add_ring(heatmap, dx, dy, ring_params, temperature=700):
"""
Add ring of heat inside a specified heatmap.
Args:
heatmap (np.array): Heatmap that encompasses the plate.
dx (int): x difference.
dy (int): y difference.
ring_params (float, float, float): Positional and size parameter of heat ring.
r (float): Radius of the ring.
cx (float): x position of center of ring.
cy (float): y position of center of ring.
temperature (float): Ring temperature.
Return:
heatmap (np.array): Plate with added ring of heat.
"""
r, cx, cy = ring_params
squared_r = r ** 2
for i in range(heatmap.shape[0]):
for j in range(heatmap.shape[1]):
squared_p = (i * dx - cx) ** 2 + (j * dy - cy) ** 2
if squared_p < squared_r:
heatmap[i, j] = temperature
heatmap = zero_border(heatmap)
return heatmap
def do_timestep(u_prev, dt, squared_dx, squared_dy, thermal_diffusivity):
"""
Propagate equation with forward-difference method in time and central-difference method in space.
Args:
u_prev (np.array): Previous heatmap state.
Return:
u (np.array): Propagated heatmap state.
"""
nx = u_prev.shape[0]
ny = u_prev.shape[1]
u = np.empty((nx, ny))
u[1:-1, 1:-1] = u_prev[1:-1, 1:-1] + thermal_diffusivity * dt * (
(
u_prev[2:, 1:-1] - 2 * u_prev[1:-1, 1:-1] + u_prev[:-2, 1:-1]
) / squared_dx +
(
u_prev[1:-1, 2:] - 2 * u_prev[1:-1, 1:-1] + u_prev[1:-1, :-2]
) / squared_dy
)
return u
def generate_noise(params, nx, ny, type=Noise.NORMAL, is_zero_border=True):
"""
Generate noise according to specified pdf.
Args:
params (float, float): Mean and variance.
nx (int): Width of the plate.
ny (int): Height of the plate.
type (Noise): Type of pdf of the noise
zero_border (bool): Zeroes all the border pixels
Return:
noise (np.array): Generated noise.
"""
nvar = nx * ny
if type == Noise.NORMAL:
noise = np.random.normal(
params[0], params[1], nvar).reshape(nx, ny)
elif type == Noise.CAUCHY:
noise = scistats.cauchy.rvs(params[0], params[1], nvar).reshape(nx, ny)
if is_zero_border:
noise = zero_border(noise)
return noise
def zero_border(plate):
"""
Set the border of the plate into zero
Args:
plate (np.array): Plate to be zero-bordered
Return:
plate (np.array): Zero-bordered plate
"""
ny, nx = plate.shape
plate[0, :] = np.zeros(ny)
plate[-1, :] = np.zeros(ny)
plate[:, 0] = np.zeros(nx)
plate[:, -1] = np.zeros(nx)
return plate
def main():
latent, obs = generate()
if __name__ == '__main__':
main()
| [
"numpy.clip",
"numpy.random.normal",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"scipy.stats.cauchy.rvs"
] | [((2971, 2988), 'numpy.array', 'np.array', (['latents'], {}), '(latents)\n', (2979, 2988), True, 'import numpy as np\n'), ((2999, 3012), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (3007, 3012), True, 'import numpy as np\n'), ((4373, 4391), 'numpy.empty', 'np.empty', (['(nx, ny)'], {}), '((nx, ny))\n', (4381, 4391), True, 'import numpy as np\n'), ((5715, 5727), 'numpy.zeros', 'np.zeros', (['ny'], {}), '(ny)\n', (5723, 5727), True, 'import numpy as np\n'), ((5747, 5759), 'numpy.zeros', 'np.zeros', (['ny'], {}), '(ny)\n', (5755, 5759), True, 'import numpy as np\n'), ((5778, 5790), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (5786, 5790), True, 'import numpy as np\n'), ((5810, 5822), 'numpy.zeros', 'np.zeros', (['nx'], {}), '(nx)\n', (5818, 5822), True, 'import numpy as np\n'), ((1975, 1992), 'numpy.ones', 'np.ones', (['(nx, ny)'], {}), '((nx, ny))\n', (1982, 1992), True, 'import numpy as np\n'), ((2784, 2837), 'numpy.clip', 'np.clip', (['u_0', 'temperature_cool', '(temperature_hot + 300)'], {}), '(u_0, temperature_cool, temperature_hot + 300)\n', (2791, 2837), True, 'import numpy as np\n'), ((2852, 2905), 'numpy.clip', 'np.clip', (['x_0', 'temperature_cool', '(temperature_hot + 300)'], {}), '(x_0, temperature_cool, temperature_hot + 300)\n', (2859, 2905), True, 'import numpy as np\n'), ((5202, 5246), 'numpy.random.normal', 'np.random.normal', (['params[0]', 'params[1]', 'nvar'], {}), '(params[0], params[1], nvar)\n', (5218, 5246), True, 'import numpy as np\n'), ((5323, 5370), 'scipy.stats.cauchy.rvs', 'scistats.cauchy.rvs', (['params[0]', 'params[1]', 'nvar'], {}), '(params[0], params[1], nvar)\n', (5342, 5370), True, 'import scipy.stats as scistats\n')] |
import unittest
import numpy as np
from iScore.graphrank.graph import iscore_graph
from iScore.graphrank.kernel import Kernel
class TestKernel(unittest.TestCase):
"""Test the kernels."""
def test_kernel(self):
# init and load the data
ker = Kernel(
testIDs="./kernel/testID.lst",
trainIDs="./kernel/trainID.lst",
test_graph="./kernel/graph/",
train_graph="./kernel/graph/",
)
ker.import_from_mat()
# get the path of the check file
checkfile = ker.get_check_file(fname="./kernel/check/K.mat")
# run the calculations
check_values = ker.run(lamb=1.0, walk=4, check=checkfile)
if not np.all(check_values):
raise AssertionError()
def setUp(self):
# create all the graphs of the pdb in ./pdb/
iscore_graph(
pdb_path="./kernel/pdb/",
pssm_path="./kernel/pssm/",
outdir="./kernel/graph/",
)
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"numpy.all",
"iScore.graphrank.kernel.Kernel",
"iScore.graphrank.graph.iscore_graph"
] | [((1030, 1045), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1043, 1045), False, 'import unittest\n'), ((271, 406), 'iScore.graphrank.kernel.Kernel', 'Kernel', ([], {'testIDs': '"""./kernel/testID.lst"""', 'trainIDs': '"""./kernel/trainID.lst"""', 'test_graph': '"""./kernel/graph/"""', 'train_graph': '"""./kernel/graph/"""'}), "(testIDs='./kernel/testID.lst', trainIDs='./kernel/trainID.lst',\n test_graph='./kernel/graph/', train_graph='./kernel/graph/')\n", (277, 406), False, 'from iScore.graphrank.kernel import Kernel\n'), ((857, 954), 'iScore.graphrank.graph.iscore_graph', 'iscore_graph', ([], {'pdb_path': '"""./kernel/pdb/"""', 'pssm_path': '"""./kernel/pssm/"""', 'outdir': '"""./kernel/graph/"""'}), "(pdb_path='./kernel/pdb/', pssm_path='./kernel/pssm/', outdir=\n './kernel/graph/')\n", (869, 954), False, 'from iScore.graphrank.graph import iscore_graph\n'), ((717, 737), 'numpy.all', 'np.all', (['check_values'], {}), '(check_values)\n', (723, 737), True, 'import numpy as np\n')] |
from flask import Flask, render_template, request
import jsonify
import requests
import pickle
import numpy as np
import sklearn
from sklearn.preprocessing import StandardScaler
app = Flask(__name__)
model = pickle.load(open('random_forest_regression_model_car_prices.pkl','rb'))
@app.route('/',methods=['GET'])
def HOME():
return render_template('index.html')
standard_to = StandardScaler()
@app.route("/predict", methods = ['POST'])
def predict():
Fuel_Type_Diesel = 0
if request.method=='POST':
Year = int(request.form['Year'])
Present_Price=float(request.form['Present_Price'])
Kms_Driven=int(request.form['Kms_Driven'])
KMS_Driven2=np.log(Kms_Driven)
Owner = int(request.form['Owner'])
Fuel_Type_Petrol=request.form['Fuel_Type_Petrol']
if(Fuel_Type_Petrol=='Petrol'):
Fuel_Type_Petrol=1
Fuel_Type_Diesel=0
elif(Fuel_Type_Petrol=='Diesel'):
Fuel_Type_Petrol=0
Fuel_Type_Diesel=1
else:
Fuel_Type_Petrol=0
Fuel_Type_Diesel=0
Year = 2020-Year
Seller_Type_Individual = request.form['Seller_Type_Individual']
if(Seller_Type_Individual=='Individual'):
Seller_Type_Individual=1
else:
Seller_Type_Individual=0
Transmission_Manual = request.form['Transmission_Manual']
if(Transmission_Manual == 'Manual'):
Transmission_Manual=1
else:
Transmission_Manual=0
prediction=model.predict([[Present_Price,KMS_Driven2,Owner,Year,Fuel_Type_Diesel,Fuel_Type_Petrol,Seller_Type_Individual,Transmission_Manual]])
output=round(prediction[0],2)
if output<0:
return render_template('index.html',prediction_texts="Sorry you can not sell the car")
else:
return render_template('index.html',prediction_text="You can sell the car at {}".format(output))
else:
return render_template('index.html')
if __name__=="__main__":
app.run(debug=True) | [
"sklearn.preprocessing.StandardScaler",
"flask.render_template",
"numpy.log",
"flask.Flask"
] | [((191, 206), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (196, 206), False, 'from flask import Flask, render_template, request\n'), ((399, 415), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (413, 415), False, 'from sklearn.preprocessing import StandardScaler\n'), ((352, 381), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (367, 381), False, 'from flask import Flask, render_template, request\n'), ((711, 729), 'numpy.log', 'np.log', (['Kms_Driven'], {}), '(Kms_Driven)\n', (717, 729), True, 'import numpy as np\n'), ((2077, 2106), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (2092, 2106), False, 'from flask import Flask, render_template, request\n'), ((1835, 1920), 'flask.render_template', 'render_template', (['"""index.html"""'], {'prediction_texts': '"""Sorry you can not sell the car"""'}), "('index.html', prediction_texts='Sorry you can not sell the car'\n )\n", (1850, 1920), False, 'from flask import Flask, render_template, request\n')] |
import os
import pickle
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
import colorsys
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
number2name_ml10 = {
0: 'reach-v1',
1: 'push-v1',
2: 'pick-place-v1',
3: 'door-open-v1',
4: 'drawer-close-v1',
5: 'button-press-topdown-v1',
6: 'peg-insert-side-v1',
7: 'window-open-v1',
8: 'sweep-v1',
9: 'basketball-v1',
10: 'drawer-open-v1',
11: 'door-close-v1',
12: 'shelf-place-v1',
13: 'sweep-into-v1',
14: 'lever-pull-v1'}
number2name_cheetah_multi_task = {
1: 'velocity',
2: 'goal direction',
3: 'goal',
4: 'rollover',
5: 'stand-up'}
number2name = number2name_cheetah_multi_task
cycle = plt.rcParams['axes.prop_cycle'].by_key()['color']
def plot_encodings_split_with_rewards(epoch, exp_directory, save=False, normalize=False, legend=False):
encoding_storage = pickle.load(open(os.path.join(exp_directory, "encoding_" + str(epoch) + ".p"), "rb"))
base_tasks = list(encoding_storage.keys())
#rewards_per_base_task = [sum([encoding_storage[base][key]['reward_mean'] / len(list(encoding_storage[base].keys())) for key in encoding_storage[base].keys()]) for base in base_tasks]
if len(base_tasks) == 15:
figsize = (20, 5)
elif len(base_tasks) == 10:
figsize = (15, 5)
elif len(base_tasks) == 1:
figsize = (7, 5)
elif len(base_tasks) == 3:
figsize = (7, 5)
else:
figsize = None
fig, axes_tuple = plt.subplots(nrows=3, ncols=len(base_tasks), sharex='col', sharey='row', gridspec_kw={'height_ratios': [3, 1, 1]}, figsize=figsize)
if len(axes_tuple.shape) == 1:
axes_tuple = np.expand_dims(axes_tuple, 1)
latent_dim = encoding_storage[base_tasks[0]][next(iter(encoding_storage[base_tasks[0]]))]['mean'].shape[0]
# Normalization over base tasks of dim
if normalize:
normalizer = []
mean_std = ['mean', 'std']
for dim in range(latent_dim):
temp_dict = {}
for element in mean_std:
values = np.array([a[element][dim] for base in base_tasks for a in list(encoding_storage[base].values())])
temp_dict[element] = dict(mean=values.mean(), std=values.std())
normalizer.append(temp_dict)
for i, base in enumerate(base_tasks):
# encodings
#target_values = np.array([encoding_storage[base][key]['target'][2] for key in encoding_storage[base].keys()])
#sort_indices = np.argsort(target_values)
for dim in range(latent_dim):
x_values = np.array([a['mean'][dim] for a in list(encoding_storage[base].values())])#[sort_indices]
y_values = np.array([a['std'][dim] for a in list(encoding_storage[base].values())])#[sort_indices]
#Normalize
if normalize:
x_values = (x_values - normalizer[dim]['mean']['mean']) / (normalizer[dim]['mean']['std'] + 1e-9)
y_values = (y_values - normalizer[dim]['std']['mean']) / (normalizer[dim]['std']['std'] + 1e-9)
label_string = "Encoding $z_" + str(dim) + "$"
#axes_tuple[0][i].errorbar(target_values[sort_indices], x_values, yerr=y_values, fmt=".", label=label_string)
axes_tuple[0][i].errorbar(np.array(list(encoding_storage[base].keys())), x_values, yerr=y_values, fmt=".", label=label_string)#, capsize=2
if axes_tuple.shape[1] > 1:
#axes_tuple[0][i].set_title("Base Task " + str(i))
nameWithoutVersion = '-'.join(number2name[base].split('-')[:-1])
if len(nameWithoutVersion.split('-')) > 2:
split_name = '-'.join(nameWithoutVersion.split('-')[:2]) + " \n " + '-'.join(nameWithoutVersion.split('-')[2:])
else:
split_name = nameWithoutVersion
axes_tuple[0][i].set_title(split_name)
else:
axes_tuple[0][i].set_title("Epoch " + str(epoch), fontsize=14)
# rewards
#axes_tuple[2][i].plot(np.array(list(encoding_storage[base].keys())), [encoding_storage[base][i]['reward_mean'] for i in encoding_storage[base].keys()], 'x')
axes_tuple[2][i].bar(np.array(list(encoding_storage[base].keys())), [encoding_storage[base][i]['reward_mean'] for i in encoding_storage[base].keys()], width=0.01, align='center')
# base task encodings
#axes_tuple[1][i].plot(target_values[sort_indices], [np.argmax(a['base']) for a in list(encoding_storage[base].values())], 'x', label="Base encoding $\mathbf{y}$")
axes_tuple[1][i].plot(list(encoding_storage[base].keys()), [np.argmax(a['base']) for a in list(encoding_storage[base].values())], 'x', label="Base encoding $\mathbf{y}$")
axes_tuple[1][i].set_xlabel("Specification", fontsize=12)
axes_tuple[1][i].set_yticks(np.arange(-1, len(base_tasks), 1), minor=True)
axes_tuple[1][0].set_ylim(-1, 10) #len(base_tasks)
axes_tuple[0][i].grid()
axes_tuple[1][i].grid(which='minor')
axes_tuple[1][i].grid(which='major')
axes_tuple[2][i].grid()
axes_tuple[0][0].set_ylabel('Encoding $\mathbf{z}$', fontsize=12)
axes_tuple[1][0].set_ylabel('Base task \n encoding $\mathbf{y}$', fontsize=12)
axes_tuple[2][0].set_ylabel('Average \n reward $R$', fontsize=12)
if legend:
axes_tuple[0][-1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True)
axes_tuple[1][-1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True)
if save:
plt.tight_layout()
fig.savefig(exp_directory + "/encoding_epoch_" + str(epoch) + ("_normalized" if normalize else "") + "_with_rewards" + ".pdf", format="pdf")
fig.show()
# print("Here to create plot 1")
print("Created plot")
def plot_encodings_split_with_rewards_cheetah(epoch, exp_directory, save=False, normalize=False, legend=False):
encoding_storage = pickle.load(open(os.path.join(exp_directory, "encoding_" + str(epoch) + ".p"), "rb"))
base_tasks = list(encoding_storage.keys())
#rewards_per_base_task = [sum([encoding_storage[base][key]['reward_mean'] / len(list(encoding_storage[base].keys())) for key in encoding_storage[base].keys()]) for base in base_tasks]
if len(base_tasks) == 15:
figsize = (20, 5)
elif len(base_tasks) == 10:
figsize = (15, 5)
elif len(base_tasks) == 1:
figsize = (7, 5)
elif len(base_tasks) == 3:
figsize = (7, 5)
else:
figsize = None
fig, axes_tuple = plt.subplots(nrows=3, ncols=len(base_tasks), sharex='col', sharey='row', gridspec_kw={'height_ratios': [3, 1, 1]}, figsize=figsize)
if len(axes_tuple.shape) == 1:
axes_tuple = np.expand_dims(axes_tuple, 1)
latent_dim = encoding_storage[base_tasks[0]][next(iter(encoding_storage[base_tasks[0]]))]['mean'].shape[0]
# Normalization over base tasks of dim
if normalize:
normalizer = []
mean_std = ['mean', 'std']
for dim in range(latent_dim):
temp_dict = {}
for element in mean_std:
values = np.array([a[element][dim] for base in base_tasks for a in list(encoding_storage[base].values())])
temp_dict[element] = dict(mean=values.mean(), std=values.std())
normalizer.append(temp_dict)
for i, base in enumerate(base_tasks):
# encodings
#target_values = np.array([encoding_storage[base][key]['target'][2] for key in encoding_storage[base].keys()])
#sort_indices = np.argsort(target_values)
for dim in range(latent_dim):
x_values = np.array([a['mean'][dim] for a in list(encoding_storage[base].values())])#[sort_indices]
y_values = np.array([a['std'][dim] for a in list(encoding_storage[base].values())])#[sort_indices]
#Normalize
if normalize:
x_values = (x_values - normalizer[dim]['mean']['mean']) / (normalizer[dim]['mean']['std'] + 1e-9)
y_values = (y_values - normalizer[dim]['std']['mean']) / (normalizer[dim]['std']['std'] + 1e-9)
label_string = "Encoding $z_" + str(dim) + "$"
#axes_tuple[0][i].errorbar(target_values[sort_indices], x_values, yerr=y_values, fmt=".", label=label_string)
axes_tuple[0][i].errorbar(np.array(list(encoding_storage[base].keys())), x_values, yerr=y_values, fmt=".", label=label_string)#, capsize=2
if axes_tuple.shape[1] > 1:
#axes_tuple[0][i].set_title("Base Task " + str(i))
nameWithoutVersion = '-'.join(number2name[base].split('-')[:-1])
if len(nameWithoutVersion.split('-')) > 2:
split_name = '-'.join(nameWithoutVersion.split('-')[:2]) + " \n " + '-'.join(nameWithoutVersion.split('-')[2:])
else:
split_name = nameWithoutVersion
split_name = number2name[base]
axes_tuple[0][i].set_title(split_name)
else:
axes_tuple[0][i].set_title("Epoch " + str(epoch), fontsize=14)
# rewards
#axes_tuple[2][i].plot(np.array(list(encoding_storage[base].keys())), [encoding_storage[base][i]['reward_mean'] for i in encoding_storage[base].keys()], 'x')
axes_tuple[2][i].bar(np.array(list(encoding_storage[base].keys())), [encoding_storage[base][i]['reward_mean'] for i in encoding_storage[base].keys()], width=0.01, align='center')
# base task encodings
#axes_tuple[1][i].plot(target_values[sort_indices], [np.argmax(a['base']) for a in list(encoding_storage[base].values())], 'x', label="Base encoding $\mathbf{y}$")
axes_tuple[1][i].plot(list(encoding_storage[base].keys()), [np.argmax(a['base']) for a in list(encoding_storage[base].values())], 'x', label="Base encoding $\mathbf{y}$")
axes_tuple[1][i].set_xlabel("Specification", fontsize=12)
axes_tuple[1][i].set_yticks(np.arange(-1, len(base_tasks), 1), minor=True)
axes_tuple[1][0].set_ylim(-1, 10) #len(base_tasks)
axes_tuple[0][i].grid()
axes_tuple[1][i].grid(which='minor')
axes_tuple[1][i].grid(which='major')
axes_tuple[2][i].grid()
axes_tuple[0][0].set_ylabel('Encoding $\mathbf{z}$', fontsize=12)
axes_tuple[1][0].set_ylabel('Base task \n encoding $\mathbf{y}$', fontsize=12)
axes_tuple[2][0].set_ylabel('Average \n reward $R$', fontsize=12)
if legend:
axes_tuple[0][-1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True)
axes_tuple[1][-1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True)
if save:
plt.tight_layout()
fig.savefig(exp_directory + "/encoding_epoch_" + str(epoch) + ("_normalized" if normalize else "") + "_with_rewards" + ".pdf", format="pdf")
fig.show()
print("Created plot")
def plot_encodings_split(epoch, exp_directory, save=False, normalize=False, legend=False):
    """Plot latent means/stds (top row) and base-task class encodings (bottom row), one column per base task.

    :param epoch: Epoch whose ``encoding_<epoch>.p`` pickle is loaded from `exp_directory`
    :param exp_directory: Directory containing the encoding pickle files
    :param save: If True, save the figure as a PDF in `exp_directory`
    :param normalize: If True, z-normalize means/stds per latent dim across all base tasks
    :param legend: If True, attach legends to the right-most column
    """
    encoding_storage = pickle.load(open(os.path.join(exp_directory, "encoding_" + str(epoch) + ".p"), "rb"))
    base_tasks = list(encoding_storage.keys())
    # Figure sizes tuned empirically for the common base-task counts
    if len(base_tasks) == 10:
        figsize = (15, 5)
    elif len(base_tasks) == 1:
        figsize = (7, 5)
    elif len(base_tasks) == 3:
        figsize = (7, 5)
    else:
        figsize = None
    fig, axes_tuple = plt.subplots(nrows=2, ncols=len(base_tasks), sharex='col', sharey='row', gridspec_kw={'height_ratios': [3, 1]}, figsize=figsize)
    if len(axes_tuple.shape) == 1:  # single column: keep 2D indexing uniform
        axes_tuple = np.expand_dims(axes_tuple, 1)
    latent_dim = encoding_storage[base_tasks[0]][next(iter(encoding_storage[base_tasks[0]]))]['mean'].shape[0]
    base_task_encodings = [np.argmax(a['base']) for base in base_tasks for a in list(encoding_storage[base].values())]
    # Pre-compute per-dimension normalization statistics over all base tasks
    if normalize:
        normalizer = []
        mean_std = ['mean', 'std']
        for dim in range(latent_dim):
            temp_dict = {}
            for element in mean_std:
                values = np.array([a[element][dim] for base in base_tasks for a in list(encoding_storage[base].values())])
                temp_dict[element] = dict(mean=values.mean(), std=values.std())
            normalizer.append(temp_dict)
    for i, base in enumerate(base_tasks):
        fontsize = 26
        # Latent encodings: mean as marker, std as error bar, per latent dimension
        for dim in range(latent_dim):
            x_values = np.array([a['mean'][dim] for a in list(encoding_storage[base].values())])
            y_values = np.array([a['std'][dim] for a in list(encoding_storage[base].values())])
            if normalize:
                x_values = (x_values - normalizer[dim]['mean']['mean']) / (normalizer[dim]['mean']['std'] + 1e-9)
                y_values = (y_values - normalizer[dim]['std']['mean']) / (normalizer[dim]['std']['std'] + 1e-9)
            label_string = "Encoding $z_" + str(dim) + "$"
            # 2 classes: capsize=3, elinewidth=3, capthick=3, markersize=9
            # more classes: capsize=2, elinewidth=2, capthick=2, markersize=7
            axes_tuple[0][i].errorbar(np.array(list(encoding_storage[base].keys())), x_values, yerr=y_values,
                                      fmt="d", color='tab:green', label=label_string, capsize=2, elinewidth=2, capthick=2, markersize=7,
                                      markerfacecolor='yellow', markeredgecolor='black')
        if axes_tuple.shape[1] > 1:
            # Fix: the original computed a split name from number2name[base] and then
            # unconditionally overwrote it with number2name[base]; dead code removed.
            # NOTE(review): assumes a module-level `number2name` mapping exists — confirm at file top
            split_name = number2name[base]
            axes_tuple[0][i].set_title(split_name, fontsize=fontsize)
        else:
            axes_tuple[0][i].set_title("Epoch " + str(epoch), fontsize=fontsize)
        # Base task (class) encodings as discrete markers
        axes_tuple[1][i].plot(list(encoding_storage[base].keys()), [np.argmax(task['base']) for task in list(encoding_storage[base].values())], 'd', color='yellow', markersize=7, markerfacecolor='yellow', markeredgecolor='black')  # markersize=7 for multiple tasks, 9 for two
        axes_tuple[1][i].set_xlabel("Specification", fontsize=fontsize)
        axes_tuple[1][i].set_ylim(-1, np.max(base_task_encodings) + 1)
        axes_tuple[0][i].tick_params(axis="x", labelsize=fontsize)
        axes_tuple[0][i].tick_params(axis="y", labelsize=fontsize)
        axes_tuple[1][i].tick_params(axis="x", labelsize=fontsize)
        axes_tuple[1][i].tick_params(axis="y", labelsize=fontsize)
        axes_tuple[1][i].set_yticks(np.arange(-1, np.max(base_task_encodings) + 2, 1))
        axes_tuple[0][i].grid(b=True, which='major', alpha=1)  # NOTE(review): `b=` is deprecated in newer matplotlib — confirm pinned version
        axes_tuple[1][i].grid(which='minor')
        axes_tuple[1][i].grid(which='major')
    axes_tuple[0][0].set_ylabel('Encoding $\mathbf{z}$', fontsize=fontsize)
    axes_tuple[1][0].set_ylabel('Encoding $\mathbf{y}$', fontsize=fontsize)
    plt.tight_layout()
    if legend:
        axes_tuple[0][-1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True, fontsize=fontsize)
        axes_tuple[1][-1].legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True, fontsize=fontsize)
    plt.subplots_adjust(wspace=0.15, hspace=0.15)
    plt.grid(b=True, which='major', alpha=1)
    if save:
        fig.savefig(exp_directory + "/encoding_epoch_" + str(epoch) + ("_normalized" if normalize else "") + ".pdf", format="pdf", bbox_inches = "tight")
    plt.show()
    print(exp_directory)
    print("Created plot")
def plot_encodings(epoch, exp_directory, save=False, normalize=False):
    """Plot latent mean/std error bars and class encodings, one subplot per base task.

    :param epoch: Epoch whose ``encoding_<epoch>.p`` pickle is loaded from `exp_directory`
    :param exp_directory: Directory containing the encoding pickle files
    :param save: If True, save the figure as a PDF in `exp_directory`
    :param normalize: If True, z-normalize means/stds per latent dimension and base task
    """
    encoding_storage = pickle.load(open(os.path.join(exp_directory, "encoding_" + str(epoch) + ".p"), "rb"))
    base_tasks = list(encoding_storage.keys())
    # Fix: the figure was previously created twice (plus a commented third attempt),
    # leaking an unused figure; only this call with figsize ever took effect
    fig, axes_tuple = plt.subplots(ncols=len(base_tasks), sharey='row', figsize=(15, 3))
    if len(base_tasks) == 1:
        axes_tuple = [axes_tuple]  # keep indexing uniform for a single subplot
    latent_dim = encoding_storage[base_tasks[0]][next(iter(encoding_storage[base_tasks[0]]))]['mean'].shape[0]
    for i, base in enumerate(base_tasks):
        for dim in range(latent_dim):
            x_values = np.array([a['mean'][dim] for a in list(encoding_storage[base].values())])
            y_values = np.array([a['std'][dim] for a in list(encoding_storage[base].values())])
            if normalize:  # z-normalize each dimension independently
                mean = x_values.mean()
                std = x_values.std()
                x_values = (x_values - mean) / (std + 1e-9)
                mean = y_values.mean()
                std = y_values.std()
                y_values = (y_values - mean) / (std + 1e-9)
            axes_tuple[i].errorbar(list(encoding_storage[base].keys()), x_values, yerr=y_values, fmt=".", label="Encoding $\mathbf{z}$")
        axes_tuple[i].plot(list(encoding_storage[base].keys()), [np.argmax(a['base']) for a in list(encoding_storage[base].values())], 'x', label="Class encoding $\mathbf{y}$")
        axes_tuple[i].set_title("Base Task " + str(i))
        axes_tuple[i].grid()
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, shadow=True)
    if save:
        fig.savefig(exp_directory + "/encoding_epoch" + str(epoch) + ("_normalized" if normalize else "") +".pdf", dpi=300, format="pdf")
    plt.show()
    print("Created plot")
def plot_encodings_2D(epoch, exp_directory):
    """Scatter the first two latent dims per task, colored by specification, with std ellipses.

    :param epoch: Epoch whose ``encoding_<epoch>.p`` pickle is loaded from `exp_directory`
    :param exp_directory: Directory containing the encoding pickle files
    """
    encoding_storage = pickle.load(open(os.path.join(exp_directory, "encoding_" + str(epoch) + ".p"), "rb"))
    base_tasks = list(encoding_storage.keys())
    fig, ax = plt.subplots()
    for i, base in enumerate(base_tasks):
        specification = np.array(list(encoding_storage[base].keys()))
        means1 = [a['mean'][0] for a in list(encoding_storage[base].values())]
        means2 = [a['mean'][1] for a in list(encoding_storage[base].values())]
        # Fix: the spreads previously read a['mean'] (copy-paste), which duplicated the
        # means as error bars / ellipse radii; they now read the stored 'std' values
        vars1 = [a['std'][0] for a in list(encoding_storage[base].values())]
        vars2 = [a['std'][1] for a in list(encoding_storage[base].values())]
        points = ax.scatter(means1, means2, c=specification, cmap='autumn', zorder=0)
        ax.errorbar(means1, means2, xerr=np.array(vars1) / 2, yerr=np.array(vars2) / 2, alpha=0.2, fmt="o", color="black", zorder=-2)
        for j in range(len(encoding_storage[base])):
            # One translucent ellipse per sample, sized by the per-dim spread
            e = Ellipse((means1[j], means2[j]), vars1[j], vars2[j], fill=False, zorder=-1)
            ax.add_artist(e)
            e.set_clip_box(ax.bbox)
            e.set_alpha(0.2)
    fig.colorbar(points)
    plt.show()
if __name__ == "__main__":
    # Example invocation kept for reference; nothing runs by default
    #plot_encodings_split(0, "/path/to/exp", save=False, normalize=False)
    pass
"matplotlib.pyplot.grid",
"matplotlib.pyplot.legend",
"numpy.argmax",
"numpy.max",
"numpy.array",
"matplotlib.pyplot.rc",
"numpy.expand_dims",
"matplotlib.pyplot.tight_layout",
"matplotlib.patches.Ellipse",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.s... | [((131, 158), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (137, 158), True, 'import matplotlib.pyplot as plt\n'), ((159, 189), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""'}), "('font', family='serif')\n", (165, 189), True, 'import matplotlib.pyplot as plt\n'), ((15802, 15847), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.15)', 'hspace': '(0.15)'}), '(wspace=0.15, hspace=0.15)\n', (15821, 15847), True, 'import matplotlib.pyplot as plt\n'), ((15852, 15892), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'b': '(True)', 'which': '"""major"""', 'alpha': '(1)'}), "(b=True, which='major', alpha=1)\n", (15860, 15892), True, 'import matplotlib.pyplot as plt\n'), ((16064, 16074), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16072, 16074), True, 'import matplotlib.pyplot as plt\n'), ((18091, 18177), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""center left"""', 'bbox_to_anchor': '(1, 0.5)', 'fancybox': '(True)', 'shadow': '(True)'}), "(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True,\n shadow=True)\n", (18101, 18177), True, 'import matplotlib.pyplot as plt\n'), ((18457, 18467), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (18465, 18467), True, 'import matplotlib.pyplot as plt\n'), ((18711, 18725), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (18723, 18725), True, 'import matplotlib.pyplot as plt\n'), ((1736, 1765), 'numpy.expand_dims', 'np.expand_dims', (['axes_tuple', '(1)'], {}), '(axes_tuple, 1)\n', (1750, 1765), True, 'import numpy as np\n'), ((5649, 5667), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5665, 5667), True, 'import matplotlib.pyplot as plt\n'), ((6822, 6851), 'numpy.expand_dims', 'np.expand_dims', (['axes_tuple', '(1)'], {}), '(axes_tuple, 1)\n', (6836, 6851), True, 'import numpy as np\n'), ((10782, 10800), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10798, 10800), True, 'import matplotlib.pyplot as plt\n'), ((11648, 11677), 'numpy.expand_dims', 'np.expand_dims', (['axes_tuple', '(1)'], {}), '(axes_tuple, 1)\n', (11662, 11677), True, 'import numpy as np\n'), ((11816, 11836), 'numpy.argmax', 'np.argmax', (["a['base']"], {}), "(a['base'])\n", (11825, 11836), True, 'import numpy as np\n'), ((15516, 15534), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15532, 15534), True, 'import matplotlib.pyplot as plt\n'), ((19806, 19816), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19814, 19816), True, 'import matplotlib.pyplot as plt\n'), ((19564, 19638), 'matplotlib.patches.Ellipse', 'Ellipse', (['(means1[j], means2[j])', 'vars1[j]', 'vars2[j]'], {'fill': '(False)', 'zorder': '(-1)'}), '((means1[j], means2[j]), vars1[j], vars2[j], fill=False, zorder=-1)\n', (19571, 19638), False, 'from matplotlib.patches import Ellipse\n'), ((4692, 4712), 'numpy.argmax', 'np.argmax', (["a['base']"], {}), "(a['base'])\n", (4701, 4712), True, 'import numpy as np\n'), ((9825, 9845), 'numpy.argmax', 'np.argmax', (["a['base']"], {}), "(a['base'])\n", (9834, 9845), True, 'import numpy as np\n'), ((14490, 14513), 'numpy.argmax', 'np.argmax', (["task['base']"], {}), "(task['base'])\n", (14499, 14513), True, 'import numpy as np\n'), ((14808, 14835), 'numpy.max', 'np.max', (['base_task_encodings'], {}), '(base_task_encodings)\n', (14814, 14835), True, 'import numpy as np\n'), ((17612, 17632), 'numpy.argmax', 'np.argmax', (["a['base']"], {}), "(a['base'])\n", (17621, 17632), True, 'import numpy as np\n'), ((15159, 15186), 'numpy.max', 'np.max', (['base_task_encodings'], {}), '(base_task_encodings)\n', (15165, 15186), True, 'import numpy as np\n'), ((19318, 19333), 'numpy.array', 'np.array', (['vars1'], {}), '(vars1)\n', (19326, 19333), True, 'import numpy as np\n'), ((19344, 19359), 'numpy.array', 'np.array', (['vars2'], {}), '(vars2)\n', 
(19352, 19359), True, 'import numpy as np\n')] |
__all__ = ["BoundaryConditions", "BoundaryConditionsConf"]
from collections import namedtuple
import numpy as np
from ef.config.section import register, ConfigSection
from ef.config.component import ConfigComponent
class BoundaryConditions(ConfigComponent):
    """Uniform electric potential applied to all six boundaries of the domain."""

    def __init__(self, potential=0):
        # Stored as float so integer config values behave consistently
        self.potential = float(potential)

    def to_conf(self):
        """Return the config-section form: the same potential for all six faces."""
        return BoundaryConditionsConf(*[self.potential] * 6)

    def visualize(self, visualizer, volume_size=(1, 1, 1)):
        """Draw the domain as a wireframe box colored by the boundary potential."""
        # Fix: `np.float` was removed in NumPy 1.24; the builtin float is the replacement
        visualizer.draw_box(np.array(volume_size, float), wireframe=True,
                            colors=visualizer.potential_mapper.to_rgba(self.potential))
@register
class BoundaryConditionsConf(ConfigSection):
    """Config-file section holding one potential value per domain face."""
    section = "Boundary conditions"
    ContentTuple = namedtuple("BoundaryConditionsTuple",
                              ('boundary_phi_right', 'boundary_phi_left', 'boundary_phi_bottom',
                               'boundary_phi_top', 'boundary_phi_near', 'boundary_phi_far'))
    convert = ContentTuple(*[float] * 6)

    def make(self):
        """Build a BoundaryConditions object; all six potentials must agree."""
        reference = self.content[0]
        if not all(value == reference for value in self.content):
            raise ValueError("Expecting all boundary_phi to be the same.")
        return BoundaryConditions(self.content.boundary_phi_right)
| [
"numpy.array",
"collections.namedtuple"
] | [((765, 939), 'collections.namedtuple', 'namedtuple', (['"""BoundaryConditionsTuple"""', "('boundary_phi_right', 'boundary_phi_left', 'boundary_phi_bottom',\n 'boundary_phi_top', 'boundary_phi_near', 'boundary_phi_far')"], {}), "('BoundaryConditionsTuple', ('boundary_phi_right',\n 'boundary_phi_left', 'boundary_phi_bottom', 'boundary_phi_top',\n 'boundary_phi_near', 'boundary_phi_far'))\n", (775, 939), False, 'from collections import namedtuple\n'), ((516, 547), 'numpy.array', 'np.array', (['volume_size', 'np.float'], {}), '(volume_size, np.float)\n', (524, 547), True, 'import numpy as np\n')] |
import glob
import json
import os.path
import math
from functools import reduce
from typing import Union
from collections.abc import Iterable
from datetime import datetime
import numpy as np
import scipy.interpolate
from matplotlib import transforms, rcParams
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle, Ellipse
from matplotlib.widgets import Button
import seaborn as sns
import pint
from icecream import ic
from scripts.data_path import *
# Global plotting defaults applied to every figure produced by this module
rcParams['figure.constrained_layout.use'] = True
rcParams['figure.dpi'] = 100
rcParams['font.size'] = 12
sns.set_style('darkgrid')
inf = float('inf')  # module-wide shorthand for infinity
reg = pint.UnitRegistry()  # shared unit registry; default units of `unit_converter`
def unit_converter(m, src=reg.inch, dst=reg.meter):
    """Convert magnitude `m` from unit `src` to unit `dst` (defaults: inch -> meter, via the module's pint registry)."""
    quantity = m * src
    return quantity.to(dst).magnitude
def json_load(fnm):
    """Load and return the JSON content of file `fnm`.

    Fix: uses a context manager so the handle is closed even if parsing raises
    (the original closed it only on the success path).

    :param fnm: Path of the JSON file to read
    """
    with open(fnm, 'r') as f:
        return json.load(f)
def now(as_str=True):
    """Current local time: a 'YYYY-mm-dd HH:MM:SS' string when `as_str`, else the datetime object."""
    current = datetime.now()
    if as_str:
        return current.strftime('%Y-%m-%d %H:%M:%S')
    return current
def get(dic, ks):
    """Retrieve a nested value from `dic` via a dot-separated key path.

    Fix: the reduce-based original returned None when the *last* key was missing
    but raised TypeError (`elm in None`) once any *intermediate* key was missing;
    now any missing segment yields None.

    :param dic: Nested dictionary
    :param ks: Dot-separated key path, e.g. 'a.b.c'
    :return: The value at the path, or None if any segment is absent
    """
    acc = dic
    for k in ks.split('.'):
        if acc is None or k not in acc:
            return None
        acc = acc[k]
    return acc
def config(attr):
    """
    Retrieves the queried attribute value from the config file.
    Loads and caches the config on first call (stored on the function object itself).

    :param attr: Dot-separated attribute path, e.g. 'dimensions.KUKA'
    :return: The value at the path, or None if absent (see `get`)
    """
    if not hasattr(config, 'config'):
        # PATH_BASE / DIR_PROJ come from `scripts.data_path` (star import at file top)
        with open(f'{PATH_BASE}/{DIR_PROJ}/config.json') as f:
            config.config = json.load(f)
    return get(config.config, attr)
def eg_hsr_scan(k1=0, k2=77):
    """
    :param k1: Index of the scan file to load (after sorting)
    :param k2: Index of the measurement within that file
    :return: Example HSR laser scan as 2D coordinates, given file name and measurement number
    """
    path = os.path.join(PATH_BASE, DIR_DATA)
    pattern = f'{path}/{config(f"{DIR_DATA}.eg.HSR.fmt")}'
    file_names = sorted(glob.iglob(pattern, recursive=True))
    scan = json_load(file_names[k1])[k2]
    to_planar = laser_polar2planar(scan['angle_max'], scan['angle_min'])
    return to_planar(np.array(scan['ranges']))
def pts2max_dist(pts):
    """
    :param pts: Array of 2d points, shape (n, 2), n >= 2
    :return: The maximum Euclidean distance between any pair of points
    """
    from itertools import combinations  # local import: keeps the module's import block unchanged
    assert pts.shape[1] == 2
    # math.dist replaces the hand-rolled squared-sum + sqrt, and combinations
    # skips the redundant i == j pairs the original generator included
    return max(math.dist(a, b) for a, b in combinations(pts, 2))
def clipper(low, high):
    """
    :return: A function clamping its argument into the range [low, high]
    """
    def _clip(value):
        capped = min(value, high)
        return max(capped, low)
    return _clip
def get_3rd_side(a, b):
    """
    Returns the hypotenuse of a right-angled triangle, given its two other sides.

    Uses math.hypot: the idiomatic form, robust against intermediate
    overflow/underflow of a**2 + b**2.
    """
    return math.hypot(a, b)
class JsonWriter:
    """
    Each time the object is called, the data is appended to the end of a list which is serialized to a JSON file
    """
    def __init__(self, fnm):
        """
        :param fnm: Base file name; output goes to data/<fnm>.json (the 'data' dir must exist)
        """
        self.fnm = fnm
        self.data = []
        self.fnm_ext = f'data/{self.fnm}.json'
        open(self.fnm_ext, 'a').close()  # Create file in OS

    def __call__(self, data):
        """Append `data` to the in-memory list and rewrite the whole JSON file."""
        self.data.append(data)
        # Fix: the original opened the file without ever closing it (leaked handle)
        with open(self.fnm_ext, 'w') as f:
            json.dump(self.data, f, indent=4)
def laser_scan2dict(data):
    """
    :param data: Of type [sensor_msgs/LaserScan](https://docs.ros.org/en/noetic/api/sensor_msgs/html/msg/LaserScan.html)
    :return: Plain-dict representation of the scan message
        NOTE(review): `range_max` is not serialized although `range_min` is — confirm intentional
    """
    header = data.header
    stamp = header.stamp
    header_dict = dict(
        seq=header.seq,
        stamp=dict(
            secs=stamp.secs,
            nsecs=stamp.nsecs
        ),
        frame_id=header.frame_id
    )
    return dict(
        header=header_dict,
        angle_min=data.angle_min,
        angle_max=data.angle_max,
        angle_increment=data.angle_increment,
        time_increment=data.time_increment,
        scan_time=data.scan_time,
        range_min=data.range_min,
        ranges=data.ranges,
        intensities=data.intensities
    )
def extend_1s(arr):
    """
    Return the array with a column of 1's appended (homogeneous coordinates).
    :param arr: 2D array
    """
    ones_column = np.ones([arr.shape[0], 1])
    return np.hstack([arr, ones_column])
def cartesian(arrs: list, out=None):
    """
    :param arrs: list of 1D arrays
    :param out: Array to place the cartesian product in.
    :return: Cartesian product of `arrs` of shape
    Modified from https://stackoverflow.com/a/1235363/10732321
    """
    arrs = [np.asarray(x) for x in arrs]
    n = np.prod([x.size for x in arrs])  # total number of combinations
    if out is None:
        # NOTE: output dtype follows the first array's dtype
        out = np.zeros([n, len(arrs)], dtype=arrs[0].dtype)
    m = int(n / arrs[0].size)  # rows per value of the first array
    out[:, 0] = np.repeat(arrs[0], m)
    if arrs[1:]:
        # Fill the first stripe recursively, then copy it for every remaining value
        cartesian(arrs[1:], out=out[0:m, 1:])
        for j in range(1, arrs[0].size):
            out[j*m:(j+1)*m, 1:] = out[0:m, 1:]
    return out
def polar2planar(dist, angle):
    """Convert polar coordinates (distance, angle in radians) to planar (x, y); accepts arrays."""
    x = dist * np.cos(angle)
    y = dist * np.sin(angle)
    return x, y
def laser_polar2planar(a_max, a_min, split=False):
    """
    :param a_max: Maximum angle
    :param a_min: Minimum angle
    :param split: If True, the returned function yields a 2-tuple of x and y arrays
    :return: A function mapping an array of beam ranges to 2D points
        Assumes the beam angles cover [a_min, a_max)
    """
    def _convert(ranges):
        """
        :param ranges: Array of laser scan ranges; beam count inferred from its size
        """
        angles = np.linspace(a_min, a_max, num=ranges.size + 1)[:-1]
        xs = ranges * np.cos(angles)
        ys = ranges * np.sin(angles)
        return (xs, ys) if split else np.vstack([xs, ys]).T
    return _convert
def rot_mat(theta):
    """Return the 2x2 counter-clockwise rotation matrix for angle `theta` (radians)."""
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    return np.array([
        [cos_t, -sin_t],
        [sin_t, cos_t]
    ])
def tsl_n_angle2tsf(tsl: Iterable = (0, 0), theta: Union[int, float] = 0):
    """
    Converts a 2D translation & an angle into a 3x3 homogeneous transformation matrix.

    Fix: the default was a shared mutable ``np.array([0, 0])`` (the classic
    mutable-default pitfall); an immutable tuple behaves identically here
    since the argument is passed through ``np.asarray`` anyway.

    :param tsl: 3-array of (translation_x, translation_y, theta),
        or 2-array of (translation_x, translation_y)
    :param theta: Angle in radians; ignored when `tsl` already carries the angle
    """
    tsl = np.asarray(tsl)
    tsf = np.identity(3)
    tsf[:2, 2] = tsl[:2]
    # A 3-element tsl carries the angle as its last entry, overriding `theta`
    tsf[:2, :2] = rot_mat(tsl[-1] if tsl.size == 3 else theta)
    return tsf
def tsf2tsl_n_angle(tsf):
    """
    :param tsf: 3x3 homogeneous transformation matrix
    :return: 2-tuple of 2D translation and rotation angle in radians

    Fix: the original used ``math.acos(tsf[0][0])``, which only yields values in
    [0, pi] and therefore collapsed negative rotations to their absolute value;
    ``atan2(sin, cos)`` recovers the signed angle.
    """
    return tsf[:2, 2], math.atan2(tsf[1][0], tsf[0][0])
def apply_tsf_2d(arr, tsf):
    """
    Syntactic sugar
    :param arr: Array of 2D points
    :param tsf: Transformation matrix in R^{3 x 3}
    :return: Array of 2D points with the transformation applied
    """
    pts = arr[:, :2]
    homogeneous = np.hstack([pts, np.ones([pts.shape[0], 1])])  # inline of `extend_1s`
    return (homogeneous @ tsf.T)[:, :2]
def get_kuka_pointcloud():
    """:return: 2D point-cloud contour of the KUKA robot, sized from config key 'dimensions.KUKA'."""
    # d_dim = config('dimensions.KUKA')
    # return get_rect_pointcloud(d_dim['length'], d_dim['width'])
    return get_rect_pointcloud(config('dimensions.KUKA'))
def get_rect_pointcloud(dim, n=240, visualize=False):
    """
    :param dim: 2-tuple of (length, width), or dict with keys `length` and `width`
    :param n: Number of points/beams
    :param visualize: If True, shows an illustration of the process
    :return: Array of 2D points of a rectangular contour, as if by a 360 degree of beams
    """
    # Fix: the original `ln, wd = dim['length'], dim['width'] if isinstance(dim, dict) else dim`
    # bound the conditional to the second element only, so the documented tuple input crashed
    if isinstance(dim, dict):
        ln, wd = dim['length'], dim['width']
    else:
        ln, wd = dim
    r = max(ln, wd)
    r = np.full(n, r)
    theta = np.linspace(0, 2 * math.pi, num=n+1)[:-1]
    # Beams cast outward from the origin (polar -> planar conversion, inlined)
    x, y = r * np.cos(theta), r * np.sin(theta)
    boundaries = (-ln/2, -wd/2, ln/2, wd/2)

    def intersec_rect(left, bot, right, top):
        """ :return: function that returns the intersection of a point's beam with the rectangle """
        def _get(x_, y_):
            """
            x_, y_ should be outside of the rectangle
            """
            ct_x = (left + right) / 2
            ct_y = (bot + top) / 2
            slope = (ct_y - y_) / (ct_x - x_)
            # Try each of the four edges; the slope decides which one is hit
            if x_ <= ct_x:
                y__ = slope * (left - x_) + y_
                if bot <= y__ <= top:
                    return left, y__
            if x_ >= ct_x:
                y__ = slope * (right - x_) + y_
                if bot <= y__ <= top:
                    return right, y__
            if y_ <= ct_y:
                x__ = (bot - y_) / slope + x_
                if left <= x__ <= right:
                    return x__, bot
            if y_ >= ct_y:
                x__ = (top - y_) / slope + x_
                if left <= x__ <= right:
                    return x__, top
            if x_ == ct_x and y_ == ct_y:
                return x_, y_
        return _get

    if visualize:
        fig, ax = plt.subplots(figsize=(16, 9), constrained_layout=True)
        for x_i, y_i in zip(x, y):
            x_int, y_int = intersec_rect(*boundaries)(x_i, y_i)
            ax.add_patch(Rectangle((-ln/2, -wd/2), ln, wd, edgecolor='b', fill=False))
            ax.plot((0, x_int), (0, y_int), marker='o', c='c', ms=2, lw=0.5, ls='dotted')
            ax.plot((x_i, x_int), (y_i, y_int), marker='o', c='orange', ms=2, ls='dotted')
        plt.gca().set_aspect('equal')
        plt.show()
    intersec = intersec_rect(*boundaries)
    return np.apply_along_axis(lambda i: intersec(*i), 1, np.vstack([x, y]).T)
def save_fig(save, title):
    """If `save`, write the current figure as `<title>.png` (300 dpi) under the project's plot directory."""
    if save:
        fnm = f'{title}.png'
        plt.savefig(os.path.join(PATH_BASE, DIR_PROJ, 'plot', fnm), dpi=300)
def plot_points(arr, **kwargs):
    """
    :param arr: Array of 2d points to plot
    :param kwargs: Arguments are forwarded to `matplotlib.axes.Axes.plot`,
        overriding the small-orange-dot defaults
    """
    points = np.asarray(arr)
    options = dict(marker='.', lw=0.5, ms=1, c='orange')
    options.update(kwargs)
    plt.plot(points[:, 0], points[:, 1], **options)
def plot_2d(arr, label=None, title=None, save=False, show=True, **kwargs):
    """ Plot a single array, or a list, of 2D points in a fresh equal-aspect figure """
    plt.figure(figsize=(16, 9), constrained_layout=True)
    arrays = arr if isinstance(arr, list) else [arr]
    labels = label if label is not None else [None for _ in arrays]
    for points, lb in zip(arrays, labels):
        plt.plot(points[:, 0], points[:, 1], marker='o', ms=0.3, lw=0.25, label=lb, **kwargs)
    if label:
        plt.legend()
    if title:
        plt.title(title)
    plt.gca().set_aspect('equal')
    save_fig(save, title)
    if show:
        plt.show()
def plot_points3d(arr, **kwargs):
    """
    :param arr: Array of 3d points to plot; plotted on the current axes
    :param kwargs: Arguments are forwarded to `matplotlib.axes.Axes.plot`,
        overriding the small-orange-dot defaults
    """
    points = np.asarray(arr)
    options = dict(marker='.', lw=0.5, ms=1, c='orange')
    options.update(kwargs)
    plt.plot(points[:, 0], points[:, 1], points[:, 2], **options)
def plot_line_seg(c1, c2, with_arrow=True, **kwargs):
    """Draw a dotted line segment between 2D points `c1` and `c2`, optionally with a direction arrow."""
    options = dict(ls='dotted', marker='o', lw=1, ms=2, c='orange')
    options.update(kwargs)
    plt.plot((c1[0], c2[0]), (c1[1], c2[1]), **options)
    if with_arrow:
        plot_line_seg_arrow(c1, c2, color=get(options, 'c'), alpha=get(options, 'alpha'))
def plot_line_seg_arrow(c1, c2, r=0.01, **kwargs):
    """Draw an arrow head centered on the segment from `c1` to `c2`.

    :param r: Fraction of the segment length used for the arrow body
    :param kwargs: Forwarded to `matplotlib.pyplot.arrow`
    """
    coords = np.array([c1, c2])
    mean = coords.mean(axis=0)
    mags = (coords[1] - coords[0]) * r
    # Head width scales with segment length, clamped to a readable range.
    # Fix: the original lazily cached a `clipper(0.01, 0.05)` as an attribute on
    # the unrelated `plot_icp_result` function; a direct clamp is equivalent.
    width = 5 * get_3rd_side(*mags)
    width = max(min(width, 0.05), 0.01)
    kwargs_ = dict(
        alpha=0.5,
        head_width=width,
        length_includes_head=True,
        lw=0,
        overhang=0.2,
    )
    plt.arrow(
        *(mean-mags/2), *mags,
        **(kwargs_ | kwargs)
    )
def plot_icp_result(
        src, tgt, tsf,
        title=None, save=False, states=None, xlim=None, ylim=None, with_arrow=True, show=True,
        init_tsf=np.identity(3), mode='static', scl=1
):
    """
    :param src: Source coordinates
    :param tgt: Target coordinates
    :param tsf: ICP result transformation
    :param title: Plot title
    :param save: If true, plot saved as image
    :param states: A list of source-target matched points & transformation for each iteration
    :param xlim: X limit for plot, inferred if not given
    :param ylim: Y limit for plot, inferred if not given
    :param with_arrow: If true, matched points are shown with arrows
    :param show: If true, show the plot
    :param init_tsf: Initial transformation guess for ICP
    :param mode: Plotting mode, one of [`static`, `animate`, `control`]
    :param scl: Plot window scale
    .. note:: Assumes 2d data
    """
    def _plot_matched_points(stt, **kwargs):
        # A state carries (matched source points, matched target points, transformation)
        src__, tgt__ = stt[0], stt[1]
        for s_, t_ in zip(src__, tgt__):
            plot_line_seg(s_, t_, with_arrow=with_arrow, **kwargs)
    N_STT = (states and len(states)) or 0
    # Aspect ratio from the explicit limits, or from the target point extent
    x_rang, y_rang = (
        abs(xlim[0] - xlim[1]), abs(ylim[0] - ylim[1])
    ) if xlim and ylim else (
        np.ptp(tgt[:, 0]), np.ptp(tgt[:, 1])
    )
    ratio = 1 / x_rang * y_rang
    d = 8 * scl
    plt.figure(figsize=(d, d * ratio), constrained_layout=True)
    plt.xlabel('Target space dim 1 (m)')
    plt.ylabel('Target space dim 2 (m)')
    t = 'ICP results'
    if title:
        t = f'{t}, {title}'
    plt.suptitle(t)
    plt.gca().set_aspect('equal')
    def _step(idx_, t_):
        # Redraw the whole figure for iteration `idx_` (or the final result if no states)
        tsf_ = states[idx_][2] if states else tsf
        plt.cla()
        if xlim:
            plt.xlim(xlim)
        if ylim:
            plt.ylim(ylim)
        if mode == 'control':
            t_ = f'{t_}, iteration {idx_}'
            plt.suptitle(t_)
        tsl, theta = tsf2tsl_n_angle(tsf_)
        ic(tsf_, tsl, math.degrees(theta))
        # A unit square and its transformed image visualize the transformation itself
        unit_sqr = np.array([
            [0, 0],
            [0, 1],
            [1, 1],
            [1, 0],
            [0, 0]
        ])
        unit_sqr_tsf = (extend_1s(unit_sqr) @ tsf_.T)[:, :2]
        cs = iter(sns.color_palette(palette='husl', n_colors=5))
        c = next(cs)
        plot_points(src, c=c, alpha=0.5, label='Source points')
        if not np.array_equal(init_tsf, np.identity(3)):
            plot_points(apply_tsf_2d(src, init_tsf), c=c, alpha=0.5, label='Source points, initial guess')
        plot_points(apply_tsf_2d(src, tsf_), c=c, label='Source points, transformed')
        plot_points(tgt, c=next(cs), label='Target points')
        c = next(cs)
        if states:
            _plot_matched_points(states[0], c=c, ms=1, ls='solid', alpha=0.5, label='Matched points, initial')
            _plot_matched_points(states[idx_], c=c, ms=1, ls='solid', label='Matched points, final')
        c = next(cs)
        plt.plot(0, 0, marker='o', c=c, ms=4)
        plot_points(unit_sqr, ms=0, marker=None, c=c, alpha=0.6, label='Unit square')
        plot_points(unit_sqr_tsf, ms=0.5, marker=None, c=c, alpha=0.9, label='Unit square, transformed')
        for i in zip(unit_sqr, unit_sqr_tsf):
            plot_line_seg(*i, with_arrow=with_arrow, marker=None, c=c, alpha=0.5)
        handles, labels_ = plt.gca().get_legend_handles_labels()  # Distinct labels
        by_label = dict(zip(labels_, handles))
        plt.legend(by_label.values(), by_label.keys())
        save_fig(save, t_)
        if mode != 'static':
            plt.pause(1 if mode == 'animate' else 0.1)  # 'control'
    if mode == 'animate':
        plt.ion()
        for idx in range(N_STT):
            _step(idx, t)
    elif mode == 'control':
        # Interactive mode: step through ICP iterations with Next/Previous buttons
        class PlotFrame:
            def __init__(self, i=0):
                self.idx = i
                self.clp = clipper(0, N_STT-1)  # keep the index inside valid iterations
            def next(self, event):
                prev_idx = self.idx
                self.idx = self.clp(self.idx+1)
                if prev_idx != self.idx:
                    _step(self.idx, t)
            def prev(self, event):
                prev_idx = self.idx
                self.idx = self.clp(self.idx-1)
                if prev_idx != self.idx:
                    _step(self.idx, t)
        init = 0
        pf = PlotFrame(i=init)
        ax = plt.gca()
        btn_next = Button(plt.axes([0.81, 0.05, 0.1, 0.075]), 'Next')
        btn_next.on_clicked(pf.next)
        btn_prev = Button(plt.axes([0.7, 0.05, 0.1, 0.075]), 'Previous')
        btn_prev.on_clicked(pf.prev)
        plt.sca(ax)
        _step(init, t)
    else:
        _step(N_STT-1, t)
    plt.ioff()  # So that window doesn't close
    if show:
        plt.show()
def plot_cluster(
        data, data_labels,
        title=None, save=False, new_fig=True, show_eclipse=True, line_kwargs=None,
        cls_kwargs=None
):
    """Plot 2D points per cluster label, optionally with a covariance ellipse per cluster.

    :param data: Array of 2D points
    :param data_labels: Per-point cluster label; -1 is treated as noise (as in DBSCAN)
    :param title: Plot title, only used when `new_fig`
    :param save: If true, plot saved as image (only on the `new_fig` path)
    :param new_fig: If true, create/show a dedicated figure; else draw onto the current axes
    :param show_eclipse: If true, draw a 1.25-std confidence ellipse per non-noise cluster
    :param line_kwargs: Extra kwargs merged into every plot call
    :param cls_kwargs: Per-cluster kwargs (list), or a single dict broadcast to all clusters
    """
    d_clusters = {lb: data[np.where(data_labels == lb)] for lb in np.unique(data_labels)}
    cs = iter(sns.color_palette(palette='husl', n_colors=len(d_clusters) + 1))
    x, y = data[:, 0], data[:, 1]
    if new_fig:
        fig, ax = plt.subplots(figsize=(12, 12 / np.ptp(x) * np.ptp(y)), constrained_layout=True)
    else:
        ax = plt.gca()
    if line_kwargs is None:
        line_kwargs = dict()
    # All points first, faintly, as background
    plt.plot(x, y, **(dict(marker='o', ms=0.3, lw=0.25, c=next(cs), alpha=0.5, label='Whole') | line_kwargs))
    line_kwargs = dict(
        marker='o', ms=0.4, lw=0.25
    ) | line_kwargs
    if cls_kwargs is not None and not isinstance(cls_kwargs, list):
        cls_kwargs = [cls_kwargs] * len(data_labels)
    for idx, (lb, d) in enumerate(d_clusters.items()):
        x_, y_ = d[:, 0], d[:, 1]
        c = next(cs)
        def confidence_ellipse(n_std=1., **kwargs):
            """
            Modified from https://matplotlib.org/stable/gallery/statistics/confidence_ellipse.html
            Create a plot of the covariance confidence ellipse of x and y
            :param n_std: number of standard deviations to determine the ellipse's radius'
            :return matplotlib.patches.Ellipse
            """
            cov = np.cov(x_, y_)
            pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1])
            ell_radius_x = np.sqrt(1 + pearson)
            ell_radius_y = np.sqrt(1 - pearson)
            ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2,
                              **(dict(fc='none') | kwargs))
            # Rotate 45 deg, scale by per-axis std, then move to the cluster's center
            tsf = transforms.Affine2D().rotate_deg(45)
            tsf = tsf.scale(
                np.sqrt(cov[0, 0]) * n_std,
                np.sqrt(cov[1, 1]) * n_std
            )
            tsf = tsf.translate(np.mean(x_), np.mean(y_))
            ellipse.set_transform(tsf + ax.transData)
            return ax.add_patch(ellipse)
        if show_eclipse and lb != -1:  # Noise as in DBSCAN
            confidence_ellipse(n_std=1.25, fc=c, alpha=0.25)
        lb = f'Cluster {lb + 1}'
        cls_kwarg = (cls_kwargs is not None and cls_kwargs[idx]) or dict()
        ax.plot(x_, y_, **(dict(c=c, label=lb) | line_kwargs | cls_kwarg))
    if new_fig:
        t = 'Clustering results'
        if title:
            t = f'{t}, {title}'
        plt.title(t)
        plt.legend()
        if not hasattr(ax, 'get_zlim'):  # Not supported in 3D projection
            ax.set_aspect('equal')
        save_fig(save, t)
        plt.show()
def scale(arr, factor=4, as_sz=False):
    """
    :param arr: 1D array of uniform values
    :param factor: Sampling refinement factor
    :param as_sz: If true, return only the size of the refined array
    :return: `arr` resampled so adjacent points are `factor` times closer,
        or just the resulting size when `as_sz`
    """
    num = (arr.size - 1) * factor + 1
    if as_sz:
        return num
    return np.linspace(arr[0], arr[-1], num=num)
def interpolate(X, Y, Z, x_coords, y_coords, factor=4, method='cubic'):
    """
    :return: Interpolated X, Y, Z coordinate tuples, refined by `factor`,
        computed with `scipy.interpolate.griddata` using the given `method`
    """
    def _refine(coords):  # inline of `scale`: `factor`-times finer uniform sampling
        num = (coords.size - 1) * factor + 1
        return np.linspace(coords[0], coords[-1], num=num)
    x_inter = _refine(x_coords).reshape(1, -1)
    y_inter = _refine(y_coords).reshape(-1, 1)
    X_, Y_ = np.meshgrid(x_inter, y_inter)
    sample_points = (X.flatten(), Y.flatten())
    Z_ = scipy.interpolate.griddata(sample_points, Z.flatten(), (x_inter, y_inter), method=method)
    return X_, Y_, Z_
def get_offset(arr, frac=2**4):
    """
    :param arr: Array-like
    :param frac: Ratio between the range of `arr` and the added margin
    :return: 2-tuple (down, up): min/max of `arr` pushed outward by range/frac
    """
    hi, lo = arr.max(), arr.min()
    margin = (hi - lo) / frac
    return lo - margin, hi + margin
def pts2bins(pts, prec=0.25):
    """
    :param pts: Array of 2d points
    :param prec: Width of each (square) bin
    :return: Dict mapping bin-center coordinates to the list of point tuples in that bin

    Note: points are first de-duplicated by value (dict keyed on the point tuple),
    so exact-duplicate points collapse to one entry.
    """
    # Assign each distinct point to its bin index via floor division
    locs_pt = {tuple(pt.tolist()): (math.floor(pt[0] / prec), math.floor(pt[1] / prec)) for pt in pts}
    d_bin = dict()
    for pt, loc in locs_pt.items():
        d_bin.setdefault(loc, []).append(pt)  # replaces the manual if/else append
    def unit2val(u):
        """Map a bin index to the coordinate of that bin's center"""
        return (u+0.5)*prec
    return {(unit2val(k1), unit2val(k2)): v for (k1, k2), v in d_bin.items()}
def plot_grid_search(
        pcr, pts, opns_x, opns_y, opns_ang, errs, labels=None,
        interp=True, inverse_loss=False, inverse_pts=False,
        title=None, save=False, zlabel='Loss', offset_frac=2**3,
        tsf_ideal=None,
        interp_kwargs=None,
        plot3d_kwargs=None
):
    """
    Plot grid search result per `PoseEstimator.FusePose`, i.e. for the get-pose first approach
    The 3 search spaces, `opns_x`, `opns_y`, `opns_ang`, should be uniformly sampled and increasing numbers
    Plots loss against translation (x, y pair), for each setup, the angle/rotation with the lowest loss is picked

    :param pcr: Source point-cloud representation (2D points)
    :param pts: Target laser-scan points (2D points)
    :param opns_x: Grid-search options for x translation
    :param opns_y: Grid-search options for y translation
    :param opns_ang: Grid-search options for rotation angle
    :param errs: Loss per grid point; when `labels` is given, a 2-tuple of (label indices, losses)
    :param labels: Optional cluster labels for the target points
    :param interp: If true, the loss surface is interpolated onto a finer grid
    :param inverse_loss: If true, the loss axis is negated (so higher = better)
    :param inverse_pts: If true, source and target roles are swapped for display
    :param title: Plot title suffix
    :param save: If true, plot saved as image
    :param zlabel: Label for the loss axis
    :param offset_frac: Controls the gap between the surface and the 2D overlays
    :param tsf_ideal: Ground-truth transformation to indicate
        NOTE(review): lines below the `if tsf_ideal is not None` block still dereference
        `tsf_ideal` unconditionally — passing None crashes; confirm intended
    :param interp_kwargs: Forwarded to `interpolate`
    :param plot3d_kwargs: Merged into the surface/contour plotting kwargs
    """
    label_idxs = None
    if labels is not None:
        assert len(errs) == 2
        label_idxs, errs = errs
    if inverse_pts:  # Illustrate as if ICP ran in the reversed order
        opns_x = -opns_x[::-1]
        opns_y = -opns_y[::-1]
        errs = errs[::-1]
        pcr, pts = pts, pcr
    # For each translation, keep the best (lowest) loss across all angles (and labels)
    errs_by_tsl = np.min(errs.reshape(-1, opns_ang.size * len(label_idxs) if labels is not None else 1), axis=-1)
    errs_by_tsl = errs_by_tsl.reshape(-1, opns_x.size)  # Shape flipped for `np.meshgrid`
    d = 12
    fig, ax = plt.subplots(figsize=(d, d), subplot_kw=dict(projection='3d'))
    [X, Y], Z = np.meshgrid(opns_x, opns_y), errs_by_tsl
    if inverse_loss:  # negate so that lower error reads as higher = better
        Z = -Z
    interp_kwargs = dict(
        factor=2**3
    ) | (dict() if interp_kwargs is None else interp_kwargs)
    if interp:
        X, Y, Z = interpolate(X, Y, Z, opns_x, opns_y, **interp_kwargs)
    bot, top = get_offset(Z, frac=offset_frac)  # Level/Height of contour plot & 2d point plots, respectively
    if plot3d_kwargs is None:
        plot3d_kwargs = dict()
    ord_3d, ord_2d = 1, 20  # z-order: surface behind, 2D overlays in front
    kwargs_surf = dict(
        zorder=ord_3d, antialiased=True,
        alpha=0.9, cmap='Spectral_r', edgecolor='black', lw=0.3
    ) | plot3d_kwargs
    kwargs_cont = dict(
        zorder=ord_3d, antialiased=True,
        linewidths=1, levels=np.linspace(Z.min(), Z.max(), 2 ** 4), offset=bot, zdir='z',
        cmap='Spectral_r'
    ) | plot3d_kwargs
    surf = ax.plot_surface(X, Y, Z, **kwargs_surf)
    ax.contour(X, Y, Z, **kwargs_cont)
    cp = sns.color_palette(palette='husl', n_colors=7)
    cp = list(reversed(cp)) if inverse_loss else cp
    cs = iter(cp)
    lb_tgt = 'Laser scan, target'
    plot_points([[0, 0]], zs=top, zorder=ord_2d, ms=10, alpha=0.5)
    c = next(cs)
    plot_points(pcr, zorder=ord_2d, zs=top, c=c, alpha=0.5, label='Point cloud representation, source')
    if labels is not None:
        plot_cluster(pts, labels, new_fig=False, show_eclipse=False, line_kwargs=dict(
            zorder=ord_2d, zs=top, label=lb_tgt
        ))
    else:
        plot_points(pts, zorder=ord_2d, zs=top, c=next(cs), label=lb_tgt)
    if tsf_ideal is not None:  # Illustrate the ideal translation
        plot_points(
            apply_tsf_2d(pcr, tsf_ideal),
            zorder=ord_2d, zs=top, c=c, alpha=0.7,
            label='Point cloud representation at actual pose'
        )
    # NOTE(review): from here on `tsf_ideal` is used unconditionally (see docstring)
    bot, top = get_offset(Z, frac=offset_frac/1.25)
    ran = max(opns_x[-1] - opns_x[0], opns_y[-1] - opns_y[0]) / 2**4
    wd = pts2max_dist(pcr)/2
    wd = min(ran, wd)
    # Vertical bracket marking the actual pose between the two overlay planes
    segs = np.array([
        [wd/2, 0, bot],
        [0, 0, bot],
        [0, 0, top],
        [wd/2, 0, top],
    ])
    api = 'Actual pose indicator'
    kwargs_origin = dict(zorder=ord_2d, ms=10)
    plot_points([tsf2tsl_n_angle(tsf_ideal)[0]], zs=top, **kwargs_origin)
    plot_points([tsf2tsl_n_angle(tsf_ideal)[0]], zs=bot, **kwargs_origin)
    segs[:, :2] = apply_tsf_2d(segs, tsf_ideal)
    plot_points3d(segs, zorder=ord_2d, c='black', ls='dashed', lw=1, label=api)
    fig.colorbar(surf, shrink=0.5, aspect=2**5, pad=2**-4)
    plt.xlabel('Translation in X (m)')
    plt.ylabel('Translation in y (m)')
    # NOTE(review): ternary precedence makes the z-label empty when not `inverse_loss`;
    # likely intended `zlabel + (', inverted' if inverse_loss else '')` — confirm
    ax.set_zlabel(zlabel + ', inverted' if inverse_loss else '')
    plt.legend()
    handles, labels_ = plt.gca().get_legend_handles_labels()  # Distinct labels
    by_label = dict(zip(labels_, handles))
    plt.legend(by_label.values(), by_label.keys())
    def prec_pair(nm, mag):
        # LaTeX fragment describing the grid resolution of one search dimension
        return r'$\Delta ' + nm + r' = ' + str(mag) + r'$'
    prec = f'{prec_pair("x", opns_x[1]-opns_x[0])}, ' \
           f'{prec_pair("y", opns_y[1]-opns_y[0])}'
    prec += ', ' + prec_pair(r'\theta', round(opns_ang[1]-opns_ang[0], 2))
    t = r'Loss against translation grid search, by best $\theta$'
    t = f'{t} for {prec}'
    if title:
        t = f'{t}, {title}'
    plt.title(t)
    save_fig(save, t)
    plt.show()
if __name__ == '__main__':
    def _create():
        # Push a few heterogeneous payloads through the JSON writer.
        writer = JsonWriter('jw')
        for payload in [1, 'as', {1: 2, "a": "b"}, [12, 4]]:
            writer(payload)

    def _check():
        # Read back what _create wrote and echo it for inspection.
        with open('jw.json') as fp:
            ic(json.load(fp))
    # _create()
    # _check()

    def _kuka_pointcloud():
        # pc = get_rect_pointcloud(2, 0.8, visualize=False)
        # Scatter-plot the KUKA model's 2D point cloud.
        cloud = get_kuka_pointcloud()
        plt.figure(figsize=(16, 9), constrained_layout=True)
        plt.plot(cloud[:, 0], cloud[:, 1], marker='o', ms=1, lw=0.5)
        plt.show()
    # ic(config('dimensions.KUKA.length'))
    # ic(cartesian([[1, 2, 3], [4, 5], [6, 7]]))
| [
"numpy.prod",
"numpy.ptp",
"icecream.ic",
"numpy.sqrt",
"matplotlib.pyplot.ylabel",
"math.acos",
"math.floor",
"math.sqrt",
"seaborn.set_style",
"numpy.array",
"numpy.sin",
"numpy.cov",
"matplotlib.pyplot.arrow",
"pint.UnitRegistry",
"numpy.mean",
"numpy.repeat",
"seaborn.color_palet... | [((578, 603), 'seaborn.set_style', 'sns.set_style', (['"""darkgrid"""'], {}), "('darkgrid')\n", (591, 603), True, 'import seaborn as sns\n'), ((632, 651), 'pint.UnitRegistry', 'pint.UnitRegistry', ([], {}), '()\n', (649, 651), False, 'import pint\n'), ((802, 814), 'json.load', 'json.load', (['f'], {}), '(f)\n', (811, 814), False, 'import json\n'), ((878, 892), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (890, 892), False, 'from datetime import datetime\n'), ((2780, 2806), 'math.sqrt', 'math.sqrt', (['(a ** 2 + b ** 2)'], {}), '(a ** 2 + b ** 2)\n', (2789, 2806), False, 'import math\n'), ((4457, 4488), 'numpy.prod', 'np.prod', (['[x.size for x in arrs]'], {}), '([x.size for x in arrs])\n', (4464, 4488), True, 'import numpy as np\n'), ((4616, 4637), 'numpy.repeat', 'np.repeat', (['arrs[0]', 'm'], {}), '(arrs[0], m)\n', (4625, 4637), True, 'import numpy as np\n'), ((5637, 5664), 'numpy.array', 'np.array', (['[[c, -s], [s, c]]'], {}), '([[c, -s], [s, c]])\n', (5645, 5664), True, 'import numpy as np\n'), ((5725, 5741), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (5733, 5741), True, 'import numpy as np\n'), ((6024, 6039), 'numpy.asarray', 'np.asarray', (['tsl'], {}), '(tsl)\n', (6034, 6039), True, 'import numpy as np\n'), ((6050, 6064), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (6061, 6064), True, 'import numpy as np\n'), ((7303, 7316), 'numpy.full', 'np.full', (['n', 'r'], {}), '(n, r)\n', (7310, 7316), True, 'import numpy as np\n'), ((9477, 9492), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (9487, 9492), True, 'import numpy as np\n'), ((9577, 9627), 'matplotlib.pyplot.plot', 'plt.plot', (['arr[:, 0]', 'arr[:, 1]'], {}), '(arr[:, 0], arr[:, 1], **kwargs_ | kwargs)\n', (9585, 9627), True, 'import matplotlib.pyplot as plt\n'), ((9864, 9916), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)', 'constrained_layout': '(True)'}), '(figsize=(16, 9), 
constrained_layout=True)\n', (9874, 9916), True, 'import matplotlib.pyplot as plt\n'), ((10435, 10450), 'numpy.asarray', 'np.asarray', (['arr'], {}), '(arr)\n', (10445, 10450), True, 'import numpy as np\n'), ((10535, 10596), 'matplotlib.pyplot.plot', 'plt.plot', (['arr[:, 0]', 'arr[:, 1]', 'arr[:, 2]'], {}), '(arr[:, 0], arr[:, 1], arr[:, 2], **kwargs_ | kwargs)\n', (10543, 10596), True, 'import matplotlib.pyplot as plt\n'), ((10780, 10830), 'matplotlib.pyplot.plot', 'plt.plot', (['(c1[0], c2[0])', '(c1[1], c2[1])'], {}), '((c1[0], c2[0]), (c1[1], c2[1]), **kwargs)\n', (10788, 10830), True, 'import matplotlib.pyplot as plt\n'), ((11004, 11022), 'numpy.array', 'np.array', (['[c1, c2]'], {}), '([c1, c2])\n', (11012, 11022), True, 'import numpy as np\n'), ((11437, 11493), 'matplotlib.pyplot.arrow', 'plt.arrow', (['*(mean - mags / 2)', '*mags'], {}), '(*(mean - mags / 2), *mags, **kwargs_ | kwargs)\n', (11446, 11493), True, 'import matplotlib.pyplot as plt\n'), ((11672, 11686), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (11683, 11686), True, 'import numpy as np\n'), ((12874, 12933), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(d, d * ratio)', 'constrained_layout': '(True)'}), '(figsize=(d, d * ratio), constrained_layout=True)\n', (12884, 12933), True, 'import matplotlib.pyplot as plt\n'), ((12963, 12999), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Target space dim 1 (m)"""'], {}), "('Target space dim 1 (m)')\n", (12973, 12999), True, 'import matplotlib.pyplot as plt\n'), ((13004, 13040), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Target space dim 2 (m)"""'], {}), "('Target space dim 2 (m)')\n", (13014, 13040), True, 'import matplotlib.pyplot as plt\n'), ((13109, 13124), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['t'], {}), '(t)\n', (13121, 13124), True, 'import matplotlib.pyplot as plt\n'), ((16189, 16199), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (16197, 16199), True, 'import matplotlib.pyplot as plt\n'), 
((20106, 20135), 'numpy.meshgrid', 'np.meshgrid', (['x_inter', 'y_inter'], {}), '(x_inter, y_inter)\n', (20117, 20135), True, 'import numpy as np\n'), ((23622, 23667), 'seaborn.color_palette', 'sns.color_palette', ([], {'palette': '"""husl"""', 'n_colors': '(7)'}), "(palette='husl', n_colors=7)\n", (23639, 23667), True, 'import seaborn as sns\n'), ((25250, 25284), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Translation in X (m)"""'], {}), "('Translation in X (m)')\n", (25260, 25284), True, 'import matplotlib.pyplot as plt\n'), ((25289, 25323), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Translation in y (m)"""'], {}), "('Translation in y (m)')\n", (25299, 25323), True, 'import matplotlib.pyplot as plt\n'), ((25393, 25405), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (25403, 25405), True, 'import matplotlib.pyplot as plt\n'), ((26083, 26095), 'matplotlib.pyplot.title', 'plt.title', (['t'], {}), '(t)\n', (26092, 26095), True, 'import matplotlib.pyplot as plt\n'), ((26122, 26132), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26130, 26132), True, 'import matplotlib.pyplot as plt\n'), ((2034, 2055), 'numpy.array', 'np.array', (["s['ranges']"], {}), "(s['ranges'])\n", (2042, 2055), True, 'import numpy as np\n'), ((3242, 3275), 'json.dump', 'json.dump', (['self.data', 'f'], {'indent': '(4)'}), '(self.data, f, indent=4)\n', (3251, 3275), False, 'import json\n'), ((4420, 4433), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (4430, 4433), True, 'import numpy as np\n'), ((5597, 5610), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5603, 5610), True, 'import numpy as np\n'), ((5612, 5625), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (5618, 5625), True, 'import numpy as np\n'), ((6368, 6388), 'math.acos', 'math.acos', (['tsf[0][0]'], {}), '(tsf[0][0])\n', (6377, 6388), False, 'import math\n'), ((7329, 7367), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * math.pi)'], {'num': '(n + 1)'}), '(0, 2 * math.pi, num=n + 1)\n', 
(7340, 7367), True, 'import numpy as np\n'), ((8552, 8606), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 9)', 'constrained_layout': '(True)'}), '(figsize=(16, 9), constrained_layout=True)\n', (8564, 8606), True, 'import matplotlib.pyplot as plt\n'), ((9020, 9030), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9028, 9030), True, 'import matplotlib.pyplot as plt\n'), ((9784, 9859), 'matplotlib.pyplot.plot', 'plt.plot', (['a[:, 0]', 'a[:, 1]'], {'marker': '"""o"""', 'ms': '(0.3)', 'lw': '(0.25)', 'label': 'lb'}), "(a[:, 0], a[:, 1], marker='o', ms=0.3, lw=0.25, label=lb, **kwargs)\n", (9792, 9859), True, 'import matplotlib.pyplot as plt\n'), ((10112, 10124), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (10122, 10124), True, 'import matplotlib.pyplot as plt\n'), ((10147, 10163), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (10156, 10163), True, 'import matplotlib.pyplot as plt\n'), ((10245, 10255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10253, 10255), True, 'import matplotlib.pyplot as plt\n'), ((13243, 13252), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (13250, 13252), True, 'import matplotlib.pyplot as plt\n'), ((13550, 13600), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]'], {}), '([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]])\n', (13558, 13600), True, 'import numpy as np\n'), ((14476, 14513), 'matplotlib.pyplot.plot', 'plt.plot', (['(0)', '(0)'], {'marker': '"""o"""', 'c': 'c', 'ms': '(4)'}), "(0, 0, marker='o', c=c, ms=4)\n", (14484, 14513), True, 'import matplotlib.pyplot as plt\n'), ((15181, 15190), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (15188, 15190), True, 'import matplotlib.pyplot as plt\n'), ((16253, 16263), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16261, 16263), True, 'import matplotlib.pyplot as plt\n'), ((16762, 16771), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (16769, 16771), True, 'import 
matplotlib.pyplot as plt\n'), ((19071, 19083), 'matplotlib.pyplot.title', 'plt.title', (['t'], {}), '(t)\n', (19080, 19083), True, 'import matplotlib.pyplot as plt\n'), ((19092, 19104), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (19102, 19104), True, 'import matplotlib.pyplot as plt\n'), ((19248, 19258), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19256, 19258), True, 'import matplotlib.pyplot as plt\n'), ((19662, 19699), 'numpy.linspace', 'np.linspace', (['arr[0]', 'arr[-1]'], {'num': 'num'}), '(arr[0], arr[-1], num=num)\n', (19673, 19699), True, 'import numpy as np\n'), ((22660, 22687), 'numpy.meshgrid', 'np.meshgrid', (['opns_x', 'opns_y'], {}), '(opns_x, opns_y)\n', (22671, 22687), True, 'import numpy as np\n'), ((24677, 24749), 'numpy.array', 'np.array', (['[[wd / 2, 0, bot], [0, 0, bot], [0, 0, top], [wd / 2, 0, top]]'], {}), '([[wd / 2, 0, bot], [0, 0, bot], [0, 0, top], [wd / 2, 0, top]])\n', (24685, 24749), True, 'import numpy as np\n'), ((26551, 26603), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)', 'constrained_layout': '(True)'}), '(figsize=(16, 9), constrained_layout=True)\n', (26561, 26603), True, 'import matplotlib.pyplot as plt\n'), ((26612, 26668), 'matplotlib.pyplot.plot', 'plt.plot', (['ptc[:, 0]', 'ptc[:, 1]'], {'marker': '"""o"""', 'ms': '(1)', 'lw': '(0.5)'}), "(ptc[:, 0], ptc[:, 1], marker='o', ms=1, lw=0.5)\n", (26620, 26668), True, 'import matplotlib.pyplot as plt\n'), ((26677, 26687), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (26685, 26687), True, 'import matplotlib.pyplot as plt\n'), ((1391, 1403), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1400, 1403), False, 'import json\n'), ((4118, 4144), 'numpy.ones', 'np.ones', (['[arr.shape[0], 1]'], {}), '([arr.shape[0], 1])\n', (4125, 4144), True, 'import numpy as np\n'), ((4866, 4879), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (4872, 4879), True, 'import numpy as np\n'), ((4896, 4909), 'numpy.sin', 'np.sin', 
(['angle'], {}), '(angle)\n', (4902, 4909), True, 'import numpy as np\n'), ((5397, 5443), 'numpy.linspace', 'np.linspace', (['a_min', 'a_max'], {'num': '(ranges.size + 1)'}), '(a_min, a_max, num=ranges.size + 1)\n', (5408, 5443), True, 'import numpy as np\n'), ((9131, 9148), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (9140, 9148), True, 'import numpy as np\n'), ((10168, 10177), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10175, 10177), True, 'import matplotlib.pyplot as plt\n'), ((12779, 12796), 'numpy.ptp', 'np.ptp', (['tgt[:, 0]'], {}), '(tgt[:, 0])\n', (12785, 12796), True, 'import numpy as np\n'), ((12798, 12815), 'numpy.ptp', 'np.ptp', (['tgt[:, 1]'], {}), '(tgt[:, 1])\n', (12804, 12815), True, 'import numpy as np\n'), ((13129, 13138), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13136, 13138), True, 'import matplotlib.pyplot as plt\n'), ((13282, 13296), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (13290, 13296), True, 'import matplotlib.pyplot as plt\n'), ((13326, 13340), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (13334, 13340), True, 'import matplotlib.pyplot as plt\n'), ((13426, 13442), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['t_'], {}), '(t_)\n', (13438, 13442), True, 'import matplotlib.pyplot as plt\n'), ((13509, 13528), 'math.degrees', 'math.degrees', (['theta'], {}), '(theta)\n', (13521, 13528), False, 'import math\n'), ((13751, 13796), 'seaborn.color_palette', 'sns.color_palette', ([], {'palette': '"""husl"""', 'n_colors': '(5)'}), "(palette='husl', n_colors=5)\n", (13768, 13796), True, 'import seaborn as sns\n'), ((15090, 15132), 'matplotlib.pyplot.pause', 'plt.pause', (["(1 if mode == 'animate' else 0.1)"], {}), "(1 if mode == 'animate' else 0.1)\n", (15099, 15132), True, 'import matplotlib.pyplot as plt\n'), ((15877, 15886), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (15884, 15886), True, 'import matplotlib.pyplot as plt\n'), ((16112, 16123), 
'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (16119, 16123), True, 'import matplotlib.pyplot as plt\n'), ((16448, 16475), 'numpy.where', 'np.where', (['(data_labels == lb)'], {}), '(data_labels == lb)\n', (16456, 16475), True, 'import numpy as np\n'), ((16487, 16509), 'numpy.unique', 'np.unique', (['data_labels'], {}), '(data_labels)\n', (16496, 16509), True, 'import numpy as np\n'), ((17920, 17934), 'numpy.cov', 'np.cov', (['x_', 'y_'], {}), '(x_, y_)\n', (17926, 17934), True, 'import numpy as np\n'), ((18027, 18047), 'numpy.sqrt', 'np.sqrt', (['(1 + pearson)'], {}), '(1 + pearson)\n', (18034, 18047), True, 'import numpy as np\n'), ((18075, 18095), 'numpy.sqrt', 'np.sqrt', (['(1 - pearson)'], {}), '(1 - pearson)\n', (18082, 18095), True, 'import numpy as np\n'), ((21050, 21074), 'math.floor', 'math.floor', (['(pt[0] / prec)'], {}), '(pt[0] / prec)\n', (21060, 21074), False, 'import math\n'), ((21076, 21100), 'math.floor', 'math.floor', (['(pt[1] / prec)'], {}), '(pt[1] / prec)\n', (21086, 21100), False, 'import math\n'), ((25429, 25438), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (25436, 25438), True, 'import matplotlib.pyplot as plt\n'), ((26355, 26367), 'json.load', 'json.load', (['f'], {}), '(f)\n', (26364, 26367), False, 'import json\n'), ((26380, 26385), 'icecream.ic', 'ic', (['l'], {}), '(l)\n', (26382, 26385), False, 'from icecream import ic\n'), ((5528, 5545), 'numpy.vstack', 'np.vstack', (['[x, y]'], {}), '([x, y])\n', (5537, 5545), True, 'import numpy as np\n'), ((8731, 8795), 'matplotlib.patches.Rectangle', 'Rectangle', (['(-ln / 2, -wd / 2)', 'ln', 'wd'], {'edgecolor': '"""b"""', 'fill': '(False)'}), "((-ln / 2, -wd / 2), ln, wd, edgecolor='b', fill=False)\n", (8740, 8795), False, 'from matplotlib.patches import Rectangle, Ellipse\n'), ((8982, 8991), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (8989, 8991), True, 'import matplotlib.pyplot as plt\n'), ((13923, 13937), 'numpy.identity', 'np.identity', (['(3)'], {}), 
'(3)\n', (13934, 13937), True, 'import numpy as np\n'), ((14861, 14870), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (14868, 14870), True, 'import matplotlib.pyplot as plt\n'), ((15913, 15947), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.81, 0.05, 0.1, 0.075]'], {}), '([0.81, 0.05, 0.1, 0.075])\n', (15921, 15947), True, 'import matplotlib.pyplot as plt\n'), ((16020, 16053), 'matplotlib.pyplot.axes', 'plt.axes', (['[0.7, 0.05, 0.1, 0.075]'], {}), '([0.7, 0.05, 0.1, 0.075])\n', (16028, 16053), True, 'import matplotlib.pyplot as plt\n'), ((17969, 17999), 'numpy.sqrt', 'np.sqrt', (['(cov[0, 0] * cov[1, 1])'], {}), '(cov[0, 0] * cov[1, 1])\n', (17976, 17999), True, 'import numpy as np\n'), ((18461, 18472), 'numpy.mean', 'np.mean', (['x_'], {}), '(x_)\n', (18468, 18472), True, 'import numpy as np\n'), ((18474, 18485), 'numpy.mean', 'np.mean', (['y_'], {}), '(y_)\n', (18481, 18485), True, 'import numpy as np\n'), ((18262, 18283), 'matplotlib.transforms.Affine2D', 'transforms.Affine2D', ([], {}), '()\n', (18281, 18283), False, 'from matplotlib import transforms, rcParams\n'), ((18344, 18362), 'numpy.sqrt', 'np.sqrt', (['cov[0, 0]'], {}), '(cov[0, 0])\n', (18351, 18362), True, 'import numpy as np\n'), ((18388, 18406), 'numpy.sqrt', 'np.sqrt', (['cov[1, 1]'], {}), '(cov[1, 1])\n', (18395, 18406), True, 'import numpy as np\n'), ((16702, 16711), 'numpy.ptp', 'np.ptp', (['y'], {}), '(y)\n', (16708, 16711), True, 'import numpy as np\n'), ((16690, 16699), 'numpy.ptp', 'np.ptp', (['x'], {}), '(x)\n', (16696, 16699), True, 'import numpy as np\n')] |
import numpy as np
from PIL import Image
import json
import open3d as o3d
import cv2
def ply_vtx(path):
    """Load vertex positions from an ASCII PLY file.

    Assumes the fixed header layout the dataset tooling writes:
    line 1 is "ply", two format/comment lines follow, then the
    "element vertex N" line, then property lines up to "end_header".

    Parameters
    ----------
    path : str
        Path to the .ply file.

    Returns
    -------
    np.ndarray
        (N, 3) float32 array of x, y, z vertex coordinates.
    """
    # Fixed: context manager closes the handle even on a parse error
    # (the original opened the file and never closed it).
    with open(path) as f:
        assert f.readline().strip() == "ply"
        f.readline()  # format line
        f.readline()  # comment line (assumed present -- see docstring)
        n_vertices = int(f.readline().split()[-1])
        while f.readline().strip() != "end_header":
            continue
        # Each vertex line starts with x y z; any extra columns are ignored.
        pts = [np.float32(f.readline().split()[:3]) for _ in range(n_vertices)]
    return np.array(pts)
def xyz_vtx(path):
    """Load points from a whitespace-separated .xyz file.

    Parameters
    ----------
    path : str
        Path to the file; each line holds at least "x y z".

    Returns
    -------
    np.ndarray
        (N, 3) float32 array of the first three columns of each line.
    """
    # Fixed: context manager closes the file (the original leaked the
    # handle); iterating the handle replaces the manual readline/break loop.
    with open(path) as f:
        pts = [np.float32(line.split()[:3]) for line in f]
    return np.array(pts)
def changeDet(arr):
    """Negate the first three entries of row 1 of *arr*, in place.

    Negating one row flips the sign of the matrix determinant, which is
    what the original "det to +_1" note referred to. The (mutated) input
    is also returned for convenience.
    """
    for col in range(3):
        arr[1][col] *= -1
    return arr
def quaternion_to_rotation_matrix(quat):
    """Convert a quaternion to a 3x3 rotation matrix.

    NOTE(review): the indexing treats *quat* as [w, x, y, z] (scalar
    first), but the JSON field used elsewhere in this script is
    'quaternion_xyzw' -- verify the ordering before relying on this.

    Parameters
    ----------
    quat : np.ndarray
        Quaternion of 4 floats; need not be normalized.

    Returns
    -------
    np.ndarray
        (3, 3) rotation matrix; identity if the quaternion is
        numerically zero.
    """
    q = quat.copy()
    n = np.dot(q, q)
    if n < np.finfo(q.dtype).eps:
        # Degenerate (zero) quaternion: no rotation.
        # Fixed: was np.identity(4), which is inconsistent with the
        # (3, 3) shape returned on the normal path below.
        return np.identity(3)
    q = q * np.sqrt(2.0 / n)
    q = np.outer(q, q)
    rot_matrix = np.array(
        [[1.0 - q[2, 2] - q[3, 3], q[1, 2] + q[3, 0], q[1, 3] - q[2, 0]],
         [q[1, 2] - q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] + q[1, 0]],
         [q[1, 3] + q[2, 0], q[2, 3] - q[1, 0], 1.0 - q[1, 1] - q[2, 2]],
         ],
        dtype=q.dtype)
    return rot_matrix
# --- sanity checks on two hand-written matrices --------------------------
# t is unused afterwards; p is the axis permutation applied to the pose
# rotation further down. Their determinants are printed for inspection.
t = [[0, -1, 0],
     [1, 0, 0],
     [0, 0, 1]]
print(np.linalg.det(t))
p = np.array([[ 0, 0, 1],
              [ 1, 0, 0],
              [ 0, -1, 0]])
print(np.linalg.det(p))
# Which dataset sample to process.
index = '000005.right'
floder = 'power_drill_with_model'  # NOTE(review): "floder" = folder typo, kept because it is used everywhere below
#fixed config
# Per-object fixed model transform exported alongside the dataset.
fixed_config = open('./{0}/_object_settings.json'.format(floder), )
fixed_data = json.load(fixed_config)
fixed_transform = np.array(fixed_data['exported_objects'][0]['fixed_model_transform'])
# The JSON stores the 4x4 with translation in the last ROW, and the
# rotation block is read transposed from the upper-left 3x3.
fixed_rotation = fixed_transform[:3, :3].T
fixed_translation = np.zeros(3)
for i in range(3):
    fixed_translation[i] = fixed_transform[3][i]
#fixed_rotation = changeDet(fixed_rotation)
#img and related config
# Load and display the RGB frame for this sample.
img = Image.open('./{1}/{0}.jpg'.format(index, floder))
Image._show(img)
# Per-frame annotation: 2D bounding box and object pose.
f = open('./{1}/{0}.json'.format(index, floder),)
data = json.load(f)
bb = data['objects'][0]['bounding_box']
# NOTE(review): top_left/bottom_right appear to be stored as (row, col):
# "_x" below is the row and "_y" the column -- confirm against the
# dataset spec (see the crop() call further down).
top_left_x = int(bb['top_left'][0])
top_left_y = int(bb['top_left'][1])
bottom_right_x = int(bb['bottom_right'][0])
bottom_right_y = int(bb['bottom_right'][1])
# Camera intrinsics (principal point cx, cy; focal lengths fx, fy) for
# camera index 1 -- presumably the right camera, matching ".right" above.
cam_f = open(floder + '/' +'_camera_settings.json','r')
cam_data = json.load(cam_f)
cx = float(cam_data["camera_settings"][1]["intrinsic_settings"]["cx"])
cy = float(cam_data["camera_settings"][1]["intrinsic_settings"]["cy"])
fx = float(cam_data["camera_settings"][1]["intrinsic_settings"]["fx"])
fy = float(cam_data["camera_settings"][1]["intrinsic_settings"]["fy"])
# cuboid = data['objects'][0]['cuboid']
# projected_cuboid = data['objects'][0]['projected_cuboid']
# print("projected cuboid")
# K = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
# projected = []
# for pt in cuboid:
# pixel = K @ pt
# pixel /= pixel[2]
# projected.append(pixel[:2])
# Crop to the bounding box; the (y, x) swap maps the (row, col) values
# above onto PIL's (left, upper, right, lower) box ordering.
cropped = img.crop((top_left_y, top_left_x, bottom_right_y, bottom_right_x)) # (left, upper, right, lower)
Image._show(cropped)
# Object pose: stored as a permuted 4x4, translation again in the last row.
target_transform = np.array(data['objects'][0]['pose_transform_permuted'])
target_rotation = target_transform[:3, :3]
# Undo the storage permutation: transpose, then re-order the axes with p.
real_target_rotation = np.dot(target_rotation.T, p)
target_translation = np.zeros(3)
for i in range(3):
    target_translation[i] = target_transform[3][i]
target_translation = target_translation
qua = np.array(data['objects'][0]['quaternion_xyzw'])  # same pose as an (x, y, z, w) quaternion; only used in commented-out code
print("real_target_rotation")
print(real_target_rotation)
print(np.linalg.det(real_target_rotation))  # should be +1 for a proper rotation
'''
S_rot = quaternion_to_rotation_matrix(qua)
print(np.linalg.det(S_rot))
print(np.linalg.det(target_rotation))
print(np.linalg.det(fixed_rotation))
print(S_rot)
print(target_rotation.T)
'''
#target_rotation = changeDet(target_rotation)
# Load the object model. The .xyz load is immediately overwritten by the
# textured-mesh vertices, so only the mesh is actually used.
points = xyz_vtx('./models/points_1.xyz')
points = o3d.io.read_triangle_mesh('./power_drill_with_model/035_power_drill/textured.obj')
points = np.array(points.vertices)
pcd = o3d.geometry.PointCloud()    # model transformed into the camera frame (filled later)
pcd_o = o3d.geometry.PointCloud()  # untransformed model (filled later)
#target = np.dot(np.dot(points, fixed_rotation.T), (target_rotation).T)
def get_xprime(cx,cy,fx,fy,pt,depth):
    """Back-project one pixel to a 3D camera-space point at the given depth.

    Parameters
    ----------
    cx, cy, fx, fy : float
        Pinhole intrinsics: principal point and focal lengths.
    pt : array-like of 2 floats
        Pixel as (row, col): pt[1] maps to x via fx, pt[0] to y via fy.
    depth : float
        Depth value used to scale the homogeneous ray.

    Returns
    -------
    np.ndarray
        Shape (3,): (x, y, 1) * depth.
    """
    # Normalized image-plane ray; note the (row, col) -> (y, x) swap.
    ray = np.array([(pt[1] - cx) / fx, (pt[0] - cy) / fy, 1.0])
    # Fixed: removed the redundant `point = point` no-op and the
    # zeros-then-append construction of the original.
    return ray * depth
# Segmentation class id of the object, used to mask its pixels below.
object_f = open(floder + '/' +'_object_settings.json','r')
object_data = json.load(object_f)
seg_id = object_data['exported_objects'][0]['segmentation_class_id']
# Depth map and segmentation image for the frame, read unchanged so the
# raw stored values are preserved.
image = cv2.imread(floder + '/' + index + '.depth.png', cv2.IMREAD_UNCHANGED)
print(np.array(image).shape)
seg = cv2.imread(floder + '/' + index + '.seg.png', cv2.IMREAD_UNCHANGED)
seg_arr = np.array(seg)
# Collect the (row, col) coordinates of every pixel labeled as the object.
pts = []
for i in range(len(seg_arr)):
    for j in range(len(seg_arr[0])):
        if seg_arr[i][j] == seg_id:
            pts.append([i, j])
#print(seg_arr)
#print(seg_arr.shape)
# cv2.imshow('dep',image)
# cv2.waitKey(0) # waits until a key is pressed
# cv2.destroyAllWindows()
# print(image.shape)
# Back-project to camera space: pc_whole holds only the object pixels,
# pc_whole_2 the entire depth frame.
pc_whole = []
pc_whole_2 = []
for i in range(len(pts)):
    u = pts[i][0]
    v = pts[i][1]
    depth = image[u][v]
    twoD_point = np.array([u,v],dtype='float64')
    threeD_point = get_xprime(cx,cy,fx,fy,twoD_point,depth)
    pc_whole.append(threeD_point)
for u in range(image.shape[0]):
    for v in range(image.shape[1]):
        depth = image[u][v]
        twoD_point = np.array([u, v], dtype='float64')
        threeD_point = get_xprime(cx, cy, fx, fy, twoD_point, depth)
        pc_whole_2.append(threeD_point)
scale = 100  # NOTE(review): unused -- the unit conversions below hard-code 100 and 10000
FOR = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1, origin=[0,0,0])
mycloud = o3d.geometry.PointCloud()
mycloud.points = o3d.utility.Vector3dVector(pc_whole_2)
object_cloud = o3d.geometry.PointCloud()
# NOTE(review): the /10000 presumably converts the stored depth units to
# meters -- confirm the depth encoding against the dataset docs.
object_cloud.points = o3d.utility.Vector3dVector(np.array(pc_whole) / 10000)
# Unit conversion (cm -> m?). NOTE(review): dividing the rotation matrix
# by 100 turns it into a scaled rotation (det != 1); presumably intended
# to rescale the model points, but worth confirming.
fixed_rotation/=100
fixed_translation/=100
target_translation/=100
print('fixed_rotation')
print(fixed_rotation)
print(fixed_translation)
print('real_target_rotation')
print(real_target_rotation)
print(target_translation)
print(np.linalg.det(fixed_rotation))
print(np.linalg.det(real_target_rotation))
# Apply the fixed model transform, then the per-frame pose on top of it.
model_fixed = points @ fixed_rotation.T + fixed_translation
print("model not fixed origin", np.mean(points, axis=0))
print("model fixed origin", np.mean(model_fixed, axis=0))
target = np.dot(model_fixed, real_target_rotation.T) + target_translation
# Export three clouds for side-by-side comparison: the model at the
# annotated pose, the depth back-projection, and the raw model.
pcd.points = o3d.utility.Vector3dVector(target)
pcd_o.points = o3d.utility.Vector3dVector(points)
o3d.io.write_point_cloud("target.ply", pcd)
o3d.io.write_point_cloud("projected.ply", object_cloud)
o3d.io.write_point_cloud("identity.ply", pcd_o)
#o3d.visualization.draw_geometries([FOR] + [pcd_o] + [pcd] + [object_cloud] )
'''
cam_scale = 1.0
pt2 = depth_masked / cam_scale
cam_cx =
cam_cy =
cam_fx =
cam_fy =
pt0 = []
pt1 = []
for x in range(rmin, rmax + 1):
    for y in range(cmin, cmax + 1):
        pt0.append(y - )
pt0 = (ymap_masked - cam_cx) * pt2 / cam_fx
pt1 = (xmap_masked - cam_cy) * pt2 / cam_fy
'''
'''
target = np.dot(points, S_rot.T)
pcd.points = o3d.utility.Vector3dVector(target)
o3d.visualization.draw_geometries([pcd])
#o3d.io.write_point_cloud('target.ply', pcd)
target = np.dot(np.dot(points, S_rot.T), fixed_rotation.T)
pcd.points = o3d.utility.Vector3dVector(target)
o3d.visualization.draw_geometries([pcd])
target = np.dot(np.dot(points, fixed_rotation.T), S_rot.T)
pcd.points = o3d.utility.Vector3dVector(target)
o3d.visualization.draw_geometries([pcd])
target = np.dot(np.dot(points, fixed_rotation), target_rotation)
pcd.points = o3d.utility.Vector3dVector(target)
o3d.visualization.draw_geometries([pcd])
'''
| [
"numpy.identity",
"numpy.mean",
"numpy.sqrt",
"numpy.finfo",
"PIL.Image._show",
"numpy.linalg.det",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"open3d.io.read_triangle_mesh",
"open3d.utility.Vector3dVector",
"open3d.geometry.PointCloud",
"open3d.io.write_point_cloud",
"js... | [((1285, 1329), 'numpy.array', 'np.array', (['[[0, 0, 1], [1, 0, 0], [0, -1, 0]]'], {}), '([[0, 0, 1], [1, 0, 0], [0, -1, 0]])\n', (1293, 1329), True, 'import numpy as np\n'), ((1536, 1559), 'json.load', 'json.load', (['fixed_config'], {}), '(fixed_config)\n', (1545, 1559), False, 'import json\n'), ((1578, 1646), 'numpy.array', 'np.array', (["fixed_data['exported_objects'][0]['fixed_model_transform']"], {}), "(fixed_data['exported_objects'][0]['fixed_model_transform'])\n", (1586, 1646), True, 'import numpy as np\n'), ((1710, 1721), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1718, 1721), True, 'import numpy as np\n'), ((1915, 1931), 'PIL.Image._show', 'Image._show', (['img'], {}), '(img)\n', (1926, 1931), False, 'from PIL import Image\n'), ((1989, 2001), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1998, 2001), False, 'import json\n'), ((2271, 2287), 'json.load', 'json.load', (['cam_f'], {}), '(cam_f)\n', (2280, 2287), False, 'import json\n'), ((2986, 3006), 'PIL.Image._show', 'Image._show', (['cropped'], {}), '(cropped)\n', (2997, 3006), False, 'from PIL import Image\n'), ((3026, 3081), 'numpy.array', 'np.array', (["data['objects'][0]['pose_transform_permuted']"], {}), "(data['objects'][0]['pose_transform_permuted'])\n", (3034, 3081), True, 'import numpy as np\n'), ((3148, 3176), 'numpy.dot', 'np.dot', (['target_rotation.T', 'p'], {}), '(target_rotation.T, p)\n', (3154, 3176), True, 'import numpy as np\n'), ((3198, 3209), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (3206, 3209), True, 'import numpy as np\n'), ((3328, 3375), 'numpy.array', 'np.array', (["data['objects'][0]['quaternion_xyzw']"], {}), "(data['objects'][0]['quaternion_xyzw'])\n", (3336, 3375), True, 'import numpy as np\n'), ((3769, 3856), 'open3d.io.read_triangle_mesh', 'o3d.io.read_triangle_mesh', (['"""./power_drill_with_model/035_power_drill/textured.obj"""'], {}), "(\n './power_drill_with_model/035_power_drill/textured.obj')\n", (3794, 3856), True, 'import open3d as 
o3d\n'), ((3861, 3886), 'numpy.array', 'np.array', (['points.vertices'], {}), '(points.vertices)\n', (3869, 3886), True, 'import numpy as np\n'), ((3894, 3919), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (3917, 3919), True, 'import open3d as o3d\n'), ((3928, 3953), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (3951, 3953), True, 'import open3d as o3d\n'), ((4339, 4358), 'json.load', 'json.load', (['object_f'], {}), '(object_f)\n', (4348, 4358), False, 'import json\n'), ((4437, 4506), 'cv2.imread', 'cv2.imread', (["(floder + '/' + index + '.depth.png')", 'cv2.IMREAD_UNCHANGED'], {}), "(floder + '/' + index + '.depth.png', cv2.IMREAD_UNCHANGED)\n", (4447, 4506), False, 'import cv2\n'), ((4542, 4609), 'cv2.imread', 'cv2.imread', (["(floder + '/' + index + '.seg.png')", 'cv2.IMREAD_UNCHANGED'], {}), "(floder + '/' + index + '.seg.png', cv2.IMREAD_UNCHANGED)\n", (4552, 4609), False, 'import cv2\n'), ((4620, 4633), 'numpy.array', 'np.array', (['seg'], {}), '(seg)\n', (4628, 4633), True, 'import numpy as np\n'), ((5485, 5562), 'open3d.geometry.TriangleMesh.create_coordinate_frame', 'o3d.geometry.TriangleMesh.create_coordinate_frame', ([], {'size': '(0.1)', 'origin': '[0, 0, 0]'}), '(size=0.1, origin=[0, 0, 0])\n', (5534, 5562), True, 'import open3d as o3d\n'), ((5571, 5596), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (5594, 5596), True, 'import open3d as o3d\n'), ((5614, 5652), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['pc_whole_2'], {}), '(pc_whole_2)\n', (5640, 5652), True, 'import open3d as o3d\n'), ((5668, 5693), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (5691, 5693), True, 'import open3d as o3d\n'), ((6341, 6375), 'open3d.utility.Vector3dVector', 'o3d.utility.Vector3dVector', (['target'], {}), '(target)\n', (6367, 6375), True, 'import open3d as o3d\n'), ((6391, 6425), 'open3d.utility.Vector3dVector', 
'o3d.utility.Vector3dVector', (['points'], {}), '(points)\n', (6417, 6425), True, 'import open3d as o3d\n'), ((6427, 6470), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['"""target.ply"""', 'pcd'], {}), "('target.ply', pcd)\n", (6451, 6470), True, 'import open3d as o3d\n'), ((6471, 6526), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['"""projected.ply"""', 'object_cloud'], {}), "('projected.ply', object_cloud)\n", (6495, 6526), True, 'import open3d as o3d\n'), ((6527, 6574), 'open3d.io.write_point_cloud', 'o3d.io.write_point_cloud', (['"""identity.ply"""', 'pcd_o'], {}), "('identity.ply', pcd_o)\n", (6551, 6574), True, 'import open3d as o3d\n'), ((406, 419), 'numpy.array', 'np.array', (['pts'], {}), '(pts)\n', (414, 419), True, 'import numpy as np\n'), ((615, 628), 'numpy.array', 'np.array', (['pts'], {}), '(pts)\n', (623, 628), True, 'import numpy as np\n'), ((802, 814), 'numpy.dot', 'np.dot', (['q', 'q'], {}), '(q, q)\n', (808, 814), True, 'import numpy as np\n'), ((906, 920), 'numpy.outer', 'np.outer', (['q', 'q'], {}), '(q, q)\n', (914, 920), True, 'import numpy as np\n'), ((936, 1165), 'numpy.array', 'np.array', (['[[1.0 - q[2, 2] - q[3, 3], q[1, 2] + q[3, 0], q[1, 3] - q[2, 0]], [q[1, 2] -\n q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] + q[1, 0]], [q[1, 3] + q[2, 0\n ], q[2, 3] - q[1, 0], 1.0 - q[1, 1] - q[2, 2]]]'], {'dtype': 'q.dtype'}), '([[1.0 - q[2, 2] - q[3, 3], q[1, 2] + q[3, 0], q[1, 3] - q[2, 0]],\n [q[1, 2] - q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] + q[1, 0]], [q[1, \n 3] + q[2, 0], q[2, 3] - q[1, 0], 1.0 - q[1, 1] - q[2, 2]]], dtype=q.dtype)\n', (944, 1165), True, 'import numpy as np\n'), ((1262, 1278), 'numpy.linalg.det', 'np.linalg.det', (['t'], {}), '(t)\n', (1275, 1278), True, 'import numpy as np\n'), ((1364, 1380), 'numpy.linalg.det', 'np.linalg.det', (['p'], {}), '(p)\n', (1377, 1380), True, 'import numpy as np\n'), ((3440, 3475), 'numpy.linalg.det', 'np.linalg.det', (['real_target_rotation'], {}), 
'(real_target_rotation)\n', (3453, 3475), True, 'import numpy as np\n'), ((4081, 4092), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (4089, 4092), True, 'import numpy as np\n'), ((4163, 4182), 'numpy.append', 'np.append', (['point', '(1)'], {}), '(point, 1)\n', (4172, 4182), True, 'import numpy as np\n'), ((5076, 5109), 'numpy.array', 'np.array', (['[u, v]'], {'dtype': '"""float64"""'}), "([u, v], dtype='float64')\n", (5084, 5109), True, 'import numpy as np\n'), ((6001, 6030), 'numpy.linalg.det', 'np.linalg.det', (['fixed_rotation'], {}), '(fixed_rotation)\n', (6014, 6030), True, 'import numpy as np\n'), ((6038, 6073), 'numpy.linalg.det', 'np.linalg.det', (['real_target_rotation'], {}), '(real_target_rotation)\n', (6051, 6073), True, 'import numpy as np\n'), ((6169, 6192), 'numpy.mean', 'np.mean', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (6176, 6192), True, 'import numpy as np\n'), ((6222, 6250), 'numpy.mean', 'np.mean', (['model_fixed'], {'axis': '(0)'}), '(model_fixed, axis=0)\n', (6229, 6250), True, 'import numpy as np\n'), ((6263, 6306), 'numpy.dot', 'np.dot', (['model_fixed', 'real_target_rotation.T'], {}), '(model_fixed, real_target_rotation.T)\n', (6269, 6306), True, 'import numpy as np\n'), ((858, 872), 'numpy.identity', 'np.identity', (['(4)'], {}), '(4)\n', (869, 872), True, 'import numpy as np\n'), ((883, 899), 'numpy.sqrt', 'np.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (890, 899), True, 'import numpy as np\n'), ((4513, 4528), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (4521, 4528), True, 'import numpy as np\n'), ((5322, 5355), 'numpy.array', 'np.array', (['[u, v]'], {'dtype': '"""float64"""'}), "([u, v], dtype='float64')\n", (5330, 5355), True, 'import numpy as np\n'), ((5743, 5761), 'numpy.array', 'np.array', (['pc_whole'], {}), '(pc_whole)\n', (5751, 5761), True, 'import numpy as np\n'), ((824, 841), 'numpy.finfo', 'np.finfo', (['q.dtype'], {}), '(q.dtype)\n', (832, 841), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.