input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
def copy(self, site_properties=None, sanitize=False):
    """
    Convenience method to get a copy of the structure, with options to add
    site properties.

    Args:
        site_properties (dict): Properties to add or override. The
            properties are specified in the same way as the constructor,
            i.e., as a dict of the form {property: [values]}. The
            properties should be in the order of the *original* structure
            if you are performing sanitization.
        sanitize (bool): If True, this method will return a sanitized
            structure. Sanitization performs a few things: (i) The sites are
            sorted by electronegativity, (ii) a LLL lattice reduction is
            carried out to obtain a relatively orthogonalized cell,
            (iii) all fractional coords for sites are mapped into the
            unit cell.

    Returns:
        A copy of the Structure, with optionally new site_properties and
        optionally sanitized.
    """
    # NOTE(review): assumes self.site_properties returns a fresh dict on
    # every access; otherwise update() below would mutate shared state --
    # confirm against the property's implementation.
    props = self.site_properties
    if site_properties:
        props.update(site_properties)
    if not sanitize:
        return self.__class__(self._lattice,
                              self.species_and_occu,
                              self.frac_coords,
                              site_properties=props)
    # Sanitized copy: LLL-reduce the lattice, re-express every site in the
    # reduced cell, fold into the unit cell, and sort the sites.
    reduced_latt = self._lattice.get_lll_reduced_lattice()
    new_sites = []
    for i, site in enumerate(self):
        frac_coords = reduced_latt.get_fractional_coords(site.coords)
        site_props = {p: v[i] for p, v in props.items()}
        new_sites.append(PeriodicSite(site.species_and_occu,
                                      frac_coords, reduced_latt,
                                      to_unit_cell=True,
                                      properties=site_props))
    return self.__class__.from_sites(sorted(new_sites))
def interpolate(self, end_structure, nimages=10,
                interpolate_lattices=False, pbc=True):
    """
    Interpolate between this structure and end_structure. Useful for
    construction of NEB inputs.

    Args:
        end_structure (Structure): structure to interpolate between this
            structure and end.
        nimages (int): No. of interpolation images. Defaults to 10 images.
        interpolate_lattices (bool): Whether to interpolate the lattices.
            Interpolates the lengths and angles (rather than the matrix)
            so orientation may be affected.
        pbc (bool): Whether to use periodic boundary conditions to find
            the shortest path between endpoints.

    Returns:
        List of interpolated structures. The starting and ending
        structures included as the first and last structures respectively.
        A total of (nimages + 1) structures are returned.

    Raises:
        ValueError: if the two structures differ in length, species
            ordering, or (when not interpolating lattices) lattice.
    """
    #Check length of structures
    if len(self) != len(end_structure):
        raise ValueError("Structures have different lengths!")
    if interpolate_lattices:
        #interpolate lattices
        lstart = np.array(self.lattice.lengths_and_angles)
        lend = np.array(end_structure.lattice.lengths_and_angles)
        lvec = lend - lstart
    #Check that both structures have the same lattice
    elif not self.lattice == end_structure.lattice:
        raise ValueError("Structures with different lattices!")
    #Check that both structures have the same species
    for i in range(0, len(self)):
        if self[i].species_and_occu != end_structure[i].species_and_occu:
            raise ValueError("Different species!\nStructure 1:\n" +
                             str(self) + "\nStructure 2\n" +
                             str(end_structure))
    start_coords = np.array(self.frac_coords)
    end_coords = np.array(end_structure.frac_coords)
    vec = end_coords - start_coords
    if pbc:
        # fold each displacement into [-0.5, 0.5) so the interpolation
        # follows the shortest periodic-image path
        vec -= np.round(vec)
    sp = self.species_and_occu
    structs = []
    # NOTE(review): "x / nimages" relies on true division; confirm the
    # module runs on Python 3 or imports division from __future__.
    for x in range(nimages+1):
        if interpolate_lattices:
            l_a = lstart + x / nimages * lvec
            l = Lattice.from_lengths_and_angles(*l_a)
        else:
            l = self.lattice
        fcoords = start_coords + x / nimages * vec
        structs.append(self.__class__(l, sp, fcoords,
                                      site_properties=self.site_properties))
    return structs
def get_primitive_structure(self, tolerance=0.25):
    """
    This finds a smaller unit cell than the input. Sometimes it doesn't
    find the smallest possible one, so this method is recursively called
    until it is unable to find a smaller cell.

    The method works by finding possible smaller translations
    and then using that translational symmetry instead of one of the
    lattice basis vectors if more than one vector is found (usually the
    case for large cells) the one with the smallest norm is used.

    Things are done in fractional coordinates because its easier to
    translate back to the unit cell.

    NOTE: if the tolerance is greater than 1/2 the minimum inter-site
    distance, the algorithm may find 2 non-equivalent sites that are
    within tolerance of each other. The algorithm will reject this
    lattice.

    Args:
        tolerance (float): Tolerance for each coordinate of a particular
            site. For example, [0.5, 0, 0.5] in cartesian coordinates
            will be considered to be on the same coordinates as
            [0, 0, 0] for a tolerance of 0.5. Defaults to 0.25.

    Returns:
        The most primitive structure found. The returned structure is
        guaranteed to have len(new structure) <= len(structure).
    """
    original_volume = self.volume
    #get the possible symmetry vectors
    sites = sorted(self._sites, key=lambda site: site.species_string)
    grouped_sites = [list(a[1]) for a
                     in itertools.groupby(sites,
                                          key=lambda s: s.species_string)]
    # number of formula units = gcd of the per-species site counts
    num_fu = reduce(gcd, map(len, grouped_sites))
    min_vol = original_volume * 0.5 / num_fu
    # candidate translations come from the least-populated species group
    min_site_list = min(grouped_sites, key=lambda group: len(group))
    min_site_list = [site.to_unit_cell for site in min_site_list]
    org = min_site_list[0].coords
    possible_vectors = [min_site_list[i].coords - org
                        for i in range(1, len(min_site_list))]
    #Let's try to use the shortest vector possible first. Allows for faster
    #convergence to primitive cell.
    possible_vectors = sorted(possible_vectors,
                              key=lambda x: np.linalg.norm(x))
    # Pre-create a few varibles for faster lookup.
    all_coords = [site.coords for site in sites]
    all_sp = [site.species_and_occu for site in sites]
    new_structure = None
    #all lattice points need to be projected to 0 under new basis
    l_points = np.array([[0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0],
                         [1, 0, 1], [1, 1, 0], [1, 1, 1]])
    l_points = self._lattice.get_cartesian_coords(l_points)
    for v, repl_pos in itertools.product(possible_vectors, range(3)):
        #Try combinations of new lattice vectors with existing lattice
        #vectors.
        # NOTE(review): if self._lattice.matrix returns a reference to the
        # lattice's internal array (not a copy), the in-place assignment
        # below mutates self._lattice across iterations -- confirm that
        # .matrix returns a copy.
        latt = self._lattice.matrix
        latt[repl_pos] = v
        #Exclude coplanar lattices from consideration.
        if abs(np.dot(np.cross(latt[0], latt[1]), latt[2])) < min_vol:
            continue
        latt = Lattice(latt)
        #Convert to fractional tol
        tol = tolerance / np.array(latt.abc)
        #check validity of new basis
        new_l_points = latt.get_fractional_coords(l_points)
        f_l_dist = np.abs(new_l_points - np.round(new_l_points))
        if np.any(f_l_dist > tol[None, None, :]):
            continue
        all_frac = latt.get_fractional_coords(np.array(all_coords))
        #calculate grouping of equivalent sites, represented by
        #adjacency matrix
        fdist = all_frac[None, :, :] - all_frac[:, None, :]
        fdist = np.abs(fdist - np.round(fdist))
        groups = np.all(fdist < tol[None, None, :], axis=2)
        #check that all group sizes are the same
        sizes = np.unique(np.sum(groups, axis=0))
        if len(sizes) > 1:
            continue
        #check that reduction in number of sites was by the same
        #amount as the volume reduction
        if round(self._lattice.volume / latt.volume) != sizes[0]:
            continue
        new_sp = []
        new_frac = []
        #this flag is set to ensure that all sites in a group are
        #the same species, it is set to false if a group is found
        #where this is not the case
        correct = True
        added = np.zeros(len(groups), dtype='bool')
        for i, g in enumerate(groups):
            if added[i]:
                continue
            indices = np.where(g)[0]
            i0 = indices[0]
            sp = all_sp[i0]
            added[indices] = 1
            if not all([all_sp[i] == sp for i in indices]):
                correct = False
                break
            # keep one representative site per equivalence group
            new_sp.append(all_sp[i0])
            new_frac.append(all_frac[i0])
        if correct:
            new_structure = self.__class__(
                latt, new_sp, new_frac, to_unit_cell=True)
            break
    if new_structure and len(new_structure) != len(self):
        # If a more primitive structure has been found, try to find an
        # even more primitive structure again.
        return new_structure.get_primitive_structure(tolerance=tolerance)
    else:
        return self
def __repr__(self):
    """Verbose representation: lattice repr followed by one repr per site."""
    parts = ["Structure Summary", repr(self.lattice)]
    parts.extend(repr(site) for site in self)
    return "\n".join(parts)
def __str__(self):
    """Human-readable summary: composition, lattice parameters, site table."""
    fmt = lambda x: "%0.6f" % x
    lines = ["Structure Summary ({s})".format(s=str(self.composition)),
             "Reduced Formula: {}".format(self.composition.reduced_formula)]
    lines.append("abc : " + " ".join(fmt(v).rjust(10)
                                     for v in self.lattice.abc))
    lines.append("angles: " + " ".join(fmt(v).rjust(10)
                                       for v in self.lattice.angles))
    lines.append("Sites ({i})".format(i=len(self)))
    for idx, site in enumerate(self):
        coord_cols = " ".join(fmt(c).rjust(12) for c in site.frac_coords)
        lines.append(" ".join([str(idx + 1), site.species_string, coord_cols]))
    return "\n".join(lines)
@property
def to_dict(self):
    """
    Json-serializable dict representation of Structure
    """
    lattice_d = self._lattice.to_dict
    del lattice_d["@module"]
    del lattice_d["@class"]
    # serialize each site, stripping redundant lattice/module metadata
    site_dicts = []
    for site in self:
        sd = site.to_dict
        del sd["lattice"]
        del sd["@module"]
        del sd["@class"]
        site_dicts.append(sd)
    return {"@module": self.__class__.__module__,
            "@class": self.__class__.__name__,
            "lattice": lattice_d,
            "sites": site_dicts}
@classmethod
def from_dict(cls, d):
    """
    Reconstitute a Structure object from a dict representation of Structure
    created using to_dict.

    Args:
        d (dict): Dict representation of structure.

    Returns:
        Structure object
    """
    latt = Lattice.from_dict(d["lattice"])
    return cls.from_sites([PeriodicSite.from_dict(sd, latt)
                           for sd in d["sites"]])
def dot(self, coords_a, coords_b, space="r", frac_coords=False):
    """
    Compute the scalar product of vector(s) either in real space or
    reciprocal space.

    Args:
        coords_a, coords_b (3x1 array): Array-like objects with the
            coordinates of the two vectors.
        space (str): "r" for real space, "g" for reciprocal space.
            Any other value raises KeyError.
        frac_coords (bool): Whether the vectors correspond to fractional or
            cartesian coordinates.

    Returns:
        one-dimensional `numpy` array.
    """
    # NOTE(review): assumes self.reciprocal_lattice is defined on this
    # class -- confirm; it is not visible in this file chunk.
    lattice = {"r": self.lattice,
               "g": self.reciprocal_lattice}[space.lower()]
    return lattice.dot(coords_a, coords_b, frac_coords=frac_coords)
def norm(self, coords, space="r", frac_coords=True):
"""
Compute the norm of vector(s) either in real space or reciprocal space.
Args:
coords (3x1 array): Array-like object with the coordinates.
space (str): "r" for real space, "g" for reciprocal space.
frac_coords (bool): Whether the vector corresponds to fractional or
cartesian coordinates.
Returns:
one-dimensional `numpy` array.
"""
return np.sqrt(self.dot(coords, | |
# pylint: disable=line-too-long,too-many-lines,missing-docstring
"""Something-something-v2 action classification dataset."""
import os
import numpy as np
from mxnet import nd
from mxnet.gluon.data import dataset
__all__ = ['SomethingSomethingV2']
class SomethingSomethingV2(dataset.Dataset):
"""Load the something-something-v2 action recognition dataset.
Refer to :doc:`../build/examples_datasets/something-something-v2` for the description of
this dataset and how to prepare it.
Parameters
----------
root : str, default '~/.mxnet/datasets/somethingsomethingv2/20bn-something-something-v2-frames'
Path to the folder stored the dataset.
setting : str, required
Config file of the prepared dataset.
train : bool, default True
Whether to load the training or validation set.
test_mode : bool, default False
Whether to perform evaluation on the test set
name_pattern : str, default None
The naming pattern of the decoded video frames.
For example, 000012.jpg
video_ext : str, default 'mp4'
If video_loader is set to True, please specify the video format accordingly.
is_color : bool, default True
Whether the loaded image is color or grayscale
modality : str, default 'rgb'
Input modalities, we support only rgb video frames for now.
Will add support for rgb difference image and optical flow image later.
num_segments : int, default 1
Number of segments to evenly divide the video into clips.
A useful technique to obtain global video-level information.
<NAME>, et al., Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016
new_length : int, default 1
The length of input video clip. Default is a single image, but it can be multiple video frames.
For example, new_length=16 means we will extract a video clip of consecutive 16 frames.
new_step : int, default 1
Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames.
new_step=2 means we will extract a video clip of every other frame.
new_width : int, default 340
Scale the width of loaded image to 'new_width' for later multiscale cropping and resizing.
new_height : int, default 256
Scale the height of loaded image to 'new_height' for later multiscale cropping and resizing.
target_width : int, default 224
Scale the width of transformed image to the same 'target_width' for batch forwarding.
target_height : int, default 224
Scale the height of transformed image to the same 'target_height' for batch forwarding.
temporal_jitter : bool, default False
Whether to temporally jitter if new_step > 1.
video_loader : bool, default False
Whether to use video loader to load data.
use_decord : bool, default True
Whether to use Decord video loader to load data. Otherwise use mmcv video loader.
transform : function, default None
A function that takes data and label and transforms them.
"""
def __init__(self,
             root=os.path.expanduser('~/.mxnet/datasets/somethingsomethingv2/20bn-something-something-v2-frames'),
             setting=os.path.expanduser('~/.mxnet/datasets/somethingsomethingv2/train_videofolder.txt'),
             train=True,
             test_mode=False,
             name_pattern='%06d.jpg',
             video_ext='mp4',
             is_color=True,
             modality='rgb',
             num_segments=1,
             new_length=1,
             new_step=1,
             new_width=340,
             new_height=256,
             target_width=224,
             target_height=224,
             temporal_jitter=False,
             video_loader=False,
             use_decord=False,
             transform=None):
    """Build the clip index and lazily import the requested decode backend."""
    super(SomethingSomethingV2, self).__init__()
    # imported lazily so the module can be loaded without cv2 installed
    from ...utils.filesystem import try_import_cv2, try_import_decord, try_import_mmcv
    self.cv2 = try_import_cv2()
    self.root = root
    self.setting = setting
    self.train = train
    self.test_mode = test_mode
    self.is_color = is_color
    self.modality = modality
    self.num_segments = num_segments
    self.new_height = new_height
    self.new_width = new_width
    self.new_length = new_length
    self.new_step = new_step
    # number of source frames spanned by one sampled clip
    self.skip_length = self.new_length * self.new_step
    self.target_height = target_height
    self.target_width = target_width
    self.transform = transform
    self.temporal_jitter = temporal_jitter
    self.name_pattern = name_pattern
    self.video_loader = video_loader
    self.video_ext = video_ext
    self.use_decord = use_decord
    if self.video_loader:
        # only import the video backend that will actually be used
        if self.use_decord:
            self.decord = try_import_decord()
        else:
            self.mmcv = try_import_mmcv()
    # list of (clip_path, duration, label) tuples from the setting file
    self.clips = self._make_dataset(root, setting)
    if len(self.clips) == 0:
        raise(RuntimeError("Found 0 video clips in subfolders of: " + root + "\n"
                           "Check your data directory (opt.data-dir)."))
def __getitem__(self, index):
    """Return (clip_tensor, label) for the clip at *index*.

    Frames are sampled with the TSN segment scheme, decoded via the
    configured backend (decord / mmcv / per-frame cv2), optionally
    transformed, and regrouped to (clips, C, new_length, H, W), with the
    temporal axis squeezed out when new_length == 1.
    """
    directory, duration, target = self.clips[index]
    if self.video_loader:
        if self.use_decord:
            # decord decodes and resizes in a single step
            decord_vr = self.decord.VideoReader('{}.{}'.format(directory, self.video_ext), width=self.new_width, height=self.new_height)
            duration = len(decord_vr)
        else:
            mmcv_vr = self.mmcv.VideoReader('{}.{}'.format(directory, self.video_ext))
            duration = len(mmcv_vr)
    # pick the sampling strategy matching the train/val/test split
    if self.train and not self.test_mode:
        segment_indices, skip_offsets = self._sample_train_indices(duration)
    elif not self.train and not self.test_mode:
        segment_indices, skip_offsets = self._sample_val_indices(duration)
    else:
        segment_indices, skip_offsets = self._sample_test_indices(duration)
    # N frames of shape H x W x C, where N = num_oversample * num_segments * new_length
    if self.video_loader:
        if self.use_decord:
            clip_input = self._video_TSN_decord_batch_loader(directory, decord_vr, duration, segment_indices, skip_offsets)
        else:
            clip_input = self._video_TSN_mmcv_loader(directory, mmcv_vr, duration, segment_indices, skip_offsets)
    else:
        clip_input = self._image_TSN_cv2_loader(directory, duration, segment_indices, skip_offsets)
    if self.transform is not None:
        clip_input = self.transform(clip_input)
    # NOTE(review): the reshape below assumes the (possibly transformed)
    # frames total new_length*3*target_height*target_width elements per
    # group -- i.e. transform emits CHW crops at the target size; confirm.
    clip_input = np.stack(clip_input, axis=0)
    clip_input = clip_input.reshape((-1,) + (self.new_length, 3, self.target_height, self.target_width))
    clip_input = np.transpose(clip_input, (0, 2, 1, 3, 4))
    if self.new_length == 1:
        clip_input = np.squeeze(clip_input, axis=2)   # this is for 2D input case
    return nd.array(clip_input), target
def __len__(self):
    """Return the number of indexed video clips."""
    return len(self.clips)
def _make_dataset(self, directory, setting):
    """Parse the split file into a list of (clip_path, duration, label) tuples.

    Each non-empty line of *setting* must hold three whitespace-separated
    fields: relative video path, integer frame count, integer class label.
    """
    if not os.path.exists(setting):
        raise(RuntimeError("Setting file %s doesn't exist. Check opt.train-list and opt.val-list. " % (setting)))
    clips = []
    with open(setting) as split_f:
        for line in split_f:
            fields = line.split()
            # line format: video_path, video_duration, video_label
            if len(fields) < 3:
                raise(RuntimeError('Video input format is not correct, missing one or more element. %s' % line))
            clips.append((os.path.join(directory, fields[0]),
                          int(fields[1]),
                          int(fields[2])))
    return clips
def _sample_train_indices(self, num_frames):
    """Randomized TSN segment sampling for training.

    Returns:
        (offsets + 1, skip_offsets): 1-based starting frame index per
        segment, and the per-step temporal-jitter offsets.
    """
    average_duration = (num_frames - self.skip_length + 1) // self.num_segments
    if average_duration > 0:
        # one uniformly random start inside each equal-length segment
        offsets = np.multiply(list(range(self.num_segments)),
                              average_duration)
        offsets = offsets + np.random.randint(average_duration,
                                              size=self.num_segments)
    elif num_frames > max(self.num_segments, self.skip_length):
        # too short for equal segments: sorted random starts instead
        offsets = np.sort(np.random.randint(
            num_frames - self.skip_length + 1,
            size=self.num_segments))
    else:
        # degenerate: every segment starts at the first frame
        offsets = np.zeros((self.num_segments,))
    if self.temporal_jitter:
        skip_offsets = np.random.randint(
            self.new_step, size=self.skip_length // self.new_step)
    else:
        skip_offsets = np.zeros(
            self.skip_length // self.new_step, dtype=int)
    return offsets + 1, skip_offsets
def _sample_val_indices(self, num_frames):
    """Deterministic, evenly spaced TSN segment sampling for validation.

    Returns (offsets + 1, skip_offsets): 1-based start index per segment
    (segment centers of equal ticks), plus per-step jitter offsets
    (random only when temporal_jitter is enabled).
    """
    if num_frames > self.num_segments + self.skip_length - 1:
        tick = (num_frames - self.skip_length + 1) / \
            float(self.num_segments)
        # center of each tick-sized window
        offsets = np.array([int(tick / 2.0 + tick * seg)
                            for seg in range(self.num_segments)])
    else:
        offsets = np.zeros((self.num_segments,))
    if self.temporal_jitter:
        skip_offsets = np.random.randint(self.new_step,
                                         size=self.skip_length // self.new_step)
    else:
        skip_offsets = np.zeros(self.skip_length // self.new_step, dtype=int)
    return offsets + 1, skip_offsets
def _sample_test_indices(self, num_frames):
    """Deterministic TSN segment sampling for testing.

    Identical to _sample_val_indices except for the guard condition:
    here only num_frames > skip_length - 1 is required (num_segments is
    not included), so very short videos may still get spaced offsets.

    Returns:
        (offsets + 1, skip_offsets): 1-based start index per segment and
        per-step jitter offsets.
    """
    if num_frames > self.skip_length - 1:
        tick = (num_frames - self.skip_length + 1) / \
            float(self.num_segments)
        # center of each tick-sized window
        offsets = np.array([int(tick / 2.0 + tick * x)
                            for x in range(self.num_segments)])
    else:
        offsets = np.zeros((self.num_segments,))
    if self.temporal_jitter:
        skip_offsets = np.random.randint(
            self.new_step, size=self.skip_length // self.new_step)
    else:
        skip_offsets = np.zeros(
            self.skip_length // self.new_step, dtype=int)
    return offsets + 1, skip_offsets
def _image_TSN_cv2_loader(self, directory, duration, indices, skip_offsets):
    """Load the sampled frames from pre-decoded image files on disk.

    Args:
        directory: folder holding the frames, named per self.name_pattern.
        duration: number of frames available for this clip.
        indices: 1-based starting frame index of each segment.
        skip_offsets: per-step temporal-jitter offsets.

    Returns:
        List of H x W x C RGB frames (cv2 reads BGR; channels are flipped),
        resized to (new_width, new_height) when they differ.
    """
    sampled_list = []
    for seg_ind in indices:
        offset = int(seg_ind)
        for i, _ in enumerate(range(0, self.skip_length, self.new_step)):
            # fall back to the un-jittered frame if jitter runs past the end
            if offset + skip_offsets[i] <= duration:
                frame_path = os.path.join(directory, self.name_pattern % (offset + skip_offsets[i]))
            else:
                frame_path = os.path.join(directory, self.name_pattern % (offset))
            cv_img = self.cv2.imread(frame_path)
            if cv_img is None:
                raise(RuntimeError("Could not load file %s starting at frame %d. Check data path." % (frame_path, offset)))
            if self.new_width > 0 and self.new_height > 0:
                h, w, _ = cv_img.shape
                if h != self.new_height or w != self.new_width:
                    cv_img = self.cv2.resize(cv_img, (self.new_width, self.new_height))
            cv_img = cv_img[:, :, ::-1]   # BGR -> RGB
            sampled_list.append(cv_img)
            # advance within the clip, clamping at the last frame
            if offset + self.new_step < duration:
                offset += self.new_step
    return sampled_list
def _video_TSN_mmcv_loader(self, directory, video_reader, duration, indices, skip_offsets):
    """Decode the sampled frames of one video via an mmcv VideoReader.

    Args:
        directory: video path (without extension); used in error messages.
        video_reader: mmcv VideoReader (sampling is 1-based, storage 0-based).
        duration: number of frames in the video.
        indices: 1-based starting frame index of each segment.
        skip_offsets: per-step temporal-jitter offsets.

    Returns:
        List of H x W x C RGB frames, resized to (new_width, new_height)
        when they differ from the decoded size.

    Raises:
        RuntimeError: if a frame cannot be read from the video.
    """
    sampled_list = []
    for seg_ind in indices:
        offset = int(seg_ind)
        for i, _ in enumerate(range(0, self.skip_length, self.new_step)):
            # Catch only real decode errors; the original bare "except:"
            # also swallowed KeyboardInterrupt/SystemExit.
            try:
                if offset + skip_offsets[i] <= duration:
                    vid_frame = video_reader[offset + skip_offsets[i] - 1]
                else:
                    vid_frame = video_reader[offset - 1]
            except Exception:
                raise RuntimeError('Error occured in reading frames from video {} of duration {}.'.format(directory, duration))
            if self.new_width > 0 and self.new_height > 0:
                h, w, _ = vid_frame.shape
                if h != self.new_height or w != self.new_width:
                    vid_frame = self.cv2.resize(vid_frame, (self.new_width, self.new_height))
            vid_frame = vid_frame[:, :, ::-1]   # BGR -> RGB
            sampled_list.append(vid_frame)
            # advance within the clip, clamping at the last frame
            if offset + self.new_step < duration:
                offset += self.new_step
    return sampled_list
def _video_TSN_decord_loader(self, directory, video_reader, duration, indices, skip_offsets):
    """Decode the sampled frames one at a time via a decord VideoReader.

    Args:
        directory: video path (without extension); used in error messages.
        video_reader: decord VideoReader (sampling is 1-based, storage 0-based).
        duration: number of frames in the video.
        indices: 1-based starting frame index of each segment.
        skip_offsets: per-step temporal-jitter offsets.

    Returns:
        List of H x W x C frames as numpy arrays (decord already resizes
        at construction time, so no resizing happens here).

    Raises:
        RuntimeError: if a frame cannot be read from the video.
    """
    sampled_list = []
    for seg_ind in indices:
        offset = int(seg_ind)
        for i, _ in enumerate(range(0, self.skip_length, self.new_step)):
            # Catch only real decode errors; the original bare "except:"
            # also swallowed KeyboardInterrupt/SystemExit.
            try:
                if offset + skip_offsets[i] <= duration:
                    vid_frame = video_reader[offset + skip_offsets[i] - 1].asnumpy()
                else:
                    vid_frame = video_reader[offset - 1].asnumpy()
            except Exception:
                raise RuntimeError('Error occured in reading frames from video {} of duration {}.'.format(directory, duration))
            sampled_list.append(vid_frame)
            # advance within the clip, clamping at the last frame
            if offset + self.new_step < duration:
                offset += self.new_step
    return sampled_list
def _video_TSN_decord_batch_loader(self, directory, video_reader, duration, indices, skip_offsets):
sampled_list = []
frame_id_list = []
for seg_ind in indices:
offset = int(seg_ind)
for i, _ in enumerate(range(0, self.skip_length, self.new_step)):
if offset + skip_offsets[i] <= duration:
frame_id = offset + skip_offsets[i]
else:
frame_id = offset
frame_id_list.append(frame_id)
if offset + self.new_step < duration:
offset += self.new_step
try:
video_data = video_reader.get_batch(frame_id_list).asnumpy()
sampled_list | |
import os
from numpy import array,zeros
from generic import obj
from simulation import Simulation,SimulationAnalyzer
from vasp_input import VaspInput
from developer import DevBase
from debug import *
# vasp xml reader classes/functions
class VXML(DevBase):
    """
    One node of a parsed vasprun.xml tree.

    Children are stored as attributes of the underlying obj container;
    four hidden bookkeeping members (_tag, _lines, _attr, _value) live
    alongside them until _remove_hidden() strips them at the end of
    parsing.
    """
    # tags whose contents are treated as raw data lines, not nested nodes
    basic_types = set('i v dimension field set time'.split())
    # vasprun "type" attribute -> python type for array fields
    data_types = obj(int=int,string=str,float=float)

    def __init__(self,tag,attr=None):
        # attr is the raw attribute string of the xml element, e.g.
        # 'name="kpoints" type="int"'; values are normalized to lowercase
        # identifiers
        self._tag = tag
        self._lines = []
        self._attr = None
        self._value = None
        if attr!=None and len(attr)>0:
            self._attr = obj()
            tokens = attr.split('"')
            next=0
            for token in tokens:
                next+=1
                if len(token)>0 and token[-1]=='=':
                    name = token.strip()[:-1]
                    self._attr[name]=tokens[next].replace(' ','_').replace('-','_').lower()
                #end if
            #end for
        #end if
    #end def __init__

    def _is_empty(self):
        # len(self)-4 discounts the four hidden members (_tag/_lines/
        # _attr/_value); NOTE(review): assumes obj counts them in len() --
        # confirm against generic.obj.
        return len(self)-4==0
    #end def _is_empty

    def _add(self,new):
        # insert child; on a duplicate tag, promote to a VXMLcoll holding
        # all same-tag siblings
        tag = new._tag
        if not tag in self:
            self[tag] = new
        else:
            cur = self[tag]
            if 0 in cur:
                cur._append(new)
            else:
                coll = VXMLcoll(tag)
                coll._append(cur)
                coll._append(new)
                self[tag] = coll
            #end if
        #end if
    #end def _add

    def _parse(self):
        """Recursively convert raw xml lines into named values/arrays."""
        # rename sub objects if name is present
        for name in list(self.keys()):
            value = self[name]
            if isinstance(value,VXML):
                if value._attr!=None and 'name' in value._attr:
                    del self[name]
                    self[value._attr.name] = value
                    del value._attr.name
                elif isinstance(value,VXMLcoll):
                    # hoist named members of a collection up to this node
                    for n in list(value.keys()):
                        v = value[n]
                        if isinstance(v,VXML):
                            if v._attr!=None and 'name' in v._attr:
                                del value[n]
                                self[v._attr.name] = v
                                del v._attr.name
                            #end if
                        #end if
                    #end for
                    if len(value)==0:
                        del self[name]
                    else:
                        value._reorder()
                    #end if
                #end if
            #end if
        #end for
        # have all sub objects parse (read fields, set _value)
        for v in self:
            if isinstance(v,VXML):
                v._parse()
            #end if
        #end for
        lines = self._lines
        if len(lines)>0:
            #fail = False
            #try:
            if self._tag=='array':
                self._parse_array(lines)
            else:
                self._parse_values(lines)
            #end if
            #except Exception,e:
            #    print '_parse failed!'
            #    print e
            #    print self
            #    print tag
            #    print attr
            #    print lines[0:min(10,len(lines))]
            #    print
            #    print
            #    fail = True
            ##end try
            #if fail:
            #    self.error('parse failed please see information above','read_vxml')
            ##end if
        #end if
        # if sub-objects resolve to a value, replace with that value
        for name in list(self.keys()):
            value = self[name]
            if isinstance(value,VXML) and value._value!=None:
                self[name] = value._value
            #end if
        #end for
        # assign attributes
        if self._attr!=None:
            if 'type' in self._attr:
                del self._attr.type
            #end if
            self.transfer_from(self._attr)
        #end if
        return
    #end def _parse

    def _parse_values(self,lines):
        """Turn scalar/vector data lines into self._value or named members."""
        if len(lines)==1 and not '<' in lines[0]:
            self._value = readval(lines[0])
        else:
            arr = None
            for line in lines:
                # accumulate continuation lines until the closing '>'
                start = line.startswith('<') and not line.startswith('</')
                end = line.endswith('>')
                if start:
                    fline = line
                else:
                    fline += line
                #end if
                if end:
                    tokens = fline.replace('<','|').replace('>','|').split('|')
                    tn = tokens[1]
                    val = readval(tokens[2].strip())
                    if 'name=' in tn:
                        name = tn.split('name="',1)[1].split('"')[0].lower()
                        self[name] = val
                    elif arr is None:
                        arr = [val]
                    else:
                        arr.append(val)
                    #end if
                #end if
            #end for
            if arr!=None:
                self._value = array(arr)
            #end if
        #end if
    #end def parse_values

    def _parse_array(self,lines):
        """Parse an <array> element: <dimension>/<field> headers plus nested
        <set> blocks of whitespace-separated data rows."""
        #print 'parsing array'
        dims = obj()
        fields = obj()
        dim_counts = None
        field_list = []
        level = -1
        set_dims = False
        for line in lines:
            if line.startswith('<'):
                if line.startswith('<dimension'):
                    tokens = line.replace('<','|').replace('>','|').split('|')
                    tn = tokens[1]
                    dname = tokens[2].lower().replace(' ','_').replace('-','_')
                    if 'dim=' in tn:
                        d = int(tn.split('dim="',1)[1].split('"')[0])
                        dims[d] = dname
                    else:
                        dims.append(dname)
                    #end if
                elif line.startswith('<field'):
                    tokens = line.replace('<','|').replace('>','|').split('|')
                    tn = tokens[1]
                    fname = tokens[2].lower().replace(' ','_').replace('-','_')
                    if 'type=' in tn:
                        t = tn.split('type="',1)[1].split('"')[0]
                        if t in VXML.data_types:
                            dtype = VXML.data_types[t]
                        else:
                            self.error('field type {0} is unrecognized: {1}'.format(t,line))
                        #end if
                    else:
                        dtype = float
                    #end if
                    fields.append(obj(name=fname,dtype=dtype))
                elif line.startswith('<set'):
                    if not set_dims:
                        # dimensions are listed innermost-first; reverse to
                        # get the numpy shape order
                        dims = dims.list()
                        dims.reverse()
                        dims = tuple(dims)
                        dim_counts = zeros((len(dims),),dtype=int)
                        set_dims = True
                    #end if
                    level += 1
                    dim_counts[level]=0
                elif line.startswith('</set'):
                    level -= 1
                    if level!=-1:
                        dim_counts[level]+=1
                    #end if
                else:
                    self.error('array parsing failed\n unrecognized xml encountered: {0}'.format(line),'read_vxml')
                #end if
            else:
                dim_counts[level]+=1
                field_list.append(line.split())
            #end if
        #end for
        self.dims = dims
        # NOTE(review): iteritems is Python 2 dict API; presumably the
        # project obj class provides it -- confirm for Python 3 use.
        for findex,field in fields.iteritems():
            lst = []
            for field_vals in field_list:
                lst.append(field_vals[findex])
            #end for
            arr = array(lst,dtype=field.dtype).ravel()
            arr.shape = tuple(dim_counts)
            self[field.name] = arr
        #end for
        #print ' done'
    #end def _parse_array

    def _remove_empty(self):
        # recursively drop collections that ended up with no children
        for n in list(self.keys()):
            v = self[n]
            if isinstance(v,VXML):
                v._remove_empty()
                if isinstance(v,VXMLcoll) and v._is_empty():
                    del self[n]
                #end if
            #end if
        #end for
    #end def _remove_empty

    def _remove_hidden(self):
        # strip the parser bookkeeping members, leaving only parsed data
        del self._tag
        del self._attr
        del self._lines
        del self._value
        for v in self:
            if isinstance(v,VXML):
                v._remove_hidden()
            #end if
        #end for
    #end def _remove_hidden()
#end class VXML
class VXMLcoll(VXML):
    """Collection of same-tag VXML siblings, stored under integer keys."""

    def _append(self,new):
        # len(self)-4 discounts the four hidden members inherited from
        # VXML (_tag/_lines/_attr/_value), so the first child gets key 0;
        # NOTE(review): assumes obj counts hidden members in len() -- confirm.
        index = len(self)-4
        self[index]=new
    #end def _append

    def _reorder(self):
        # compact keys to 0..n-1, preserving the sort order of current keys
        n=0
        for key in sorted(self.keys()):
            value = self[key]
            if isinstance(value,VXML):
                del self[key]
                self[n]=value
                n+=1
            #end if
        #end for
    #end def _reorder
#end class VXMLcoll
# VASP prints booleans as bare T / F tokens
booldict = dict(T=True,F=False)

def readval(val):
    """Convert a raw vasprun.xml token (or list of tokens) to a Python value.

    A whitespace-containing string or a list becomes a numpy array, trying
    int, then float, then str dtypes. A bare 'T'/'F' becomes a bool. A
    single token becomes int, then float, then is returned unchanged.
    """
    needs_split = isinstance(val,str) and ' ' in val
    if isinstance(val,list) or needs_split:
        items = val.split() if needs_split else val
        for dtype in (int, float, str):
            try:
                return array(items,dtype=dtype)
            except:
                pass
        # unreachable in practice (str arrays essentially always succeed)
        VXML.class_error('failed to read value: "{0}"'.format(val),'read_vxml')
    elif val in booldict:
        return booldict[val]
    else:
        for dtype in (int, float):
            try:
                return dtype(val)
            except:
                pass
        return val
#end def readval
def read_vxml(filepath):
    """Parse a vasprun.xml file into a tree of VXML nodes and return the root.

    A simple line-oriented stack parser: opening tags push a node, closing
    tags pop, data lines accumulate on the current node. After the scan,
    the tree parses its raw lines into values and strips bookkeeping.
    """
    if not os.path.exists(filepath):
        VXML.class_error('file {0} does not exist'.format(filepath),'read_vxml')
    #end if
    #print 'read'
    contents = open(filepath,'r').read()
    #print 'replace'
    # strip row/column wrapper tags so numeric blocks become plain
    # whitespace-separated text
    contents = contents.replace('<rc>',' ').replace('</rc>',' ')
    contents = contents.replace('<r>' ,' ').replace('</r>' ,' ')
    contents = contents.replace('<c>' ,' ').replace('</c>' ,' ')
    #print 'split lines'
    lines = contents.splitlines()
    #print 'process lines'
    root = VXML('vasprun')
    stack = [root]
    cur = stack[0]
    for line in lines:
        ls = line.strip()
        if ls.startswith('</'):
            # closing tag: pop if it matches the open node, else it is a
            # data-level closing tag belonging to the current node's lines
            tag = ls[2:-1]
            if tag==cur._tag:
                stack.pop()
                cur = stack[-1]
                #print len(stack)*' '+'end '+tag
            else:
                cur._lines.append(ls)
            #end if
        elif ls.startswith('<?'):
            # xml declaration: ignored
            None
        elif ls.startswith('<'):
            ta,rest = ls[1:].split('>',1)
            tokens = ta.split(' ',1)
            tag = tokens[0]
            if not tag in VXML.basic_types:
                if len(tokens)==1:
                    attr = None
                else:
                    attr = tokens[1].strip()
                #end if
                if ls.endswith('</{0}>'.format(tag)):
                    # element opens and closes on one line
                    new = VXML(tag,attr)
                    new._lines.append(ls.replace('<','|').replace('>','|').split('|')[2])
                    cur._add(new)
                    #print len(stack)*' '+'end '+tag
                else:
                    #print len(stack)*' '+tag
                    new = VXML(tag,attr)
                    cur._add(new)
                    cur = new
                    stack.append(cur)
                #end if
            else:
                # basic data tag: keep raw for later _parse
                cur._lines.append(ls)
            #end if
        else:
            cur._lines.append(ls)
        #end if
    #end for
    if len(stack)!=1:
        VXML.class_error('read failed\nxml tree did not seem to close')
    #end if
    #print 'parse'
    root._parse()
    root._remove_empty()
    root._remove_hidden()
    #print 'done'
    return root
#end def read_vxml
# vasp outcar functions
class VaspLines(DevBase):
    """Cursor over the lines of an OUTCAR file.

    Keeps a current position (pointer) into a list of lines and offers
    relative movement, token search, and lookahead used by the
    read_outcar_* helpers.
    """

    def __init__(self,lines):
        self.pointer = 0
        self.lines = lines
    #end def __init__

    def advance_line(self,amount):
        """Move by *amount* lines and return the line now under the cursor."""
        self.pointer += amount
        return self.lines[self.pointer]
    #end def advance_line

    def advance_token(self,token):
        """Move the cursor to the next line containing *token* and return it.

        Leaves the cursor unchanged and returns None when not found.
        """
        saved = self.pointer
        for line in self.lines[self.pointer:]:
            if token in line:
                return line
            #end if
            self.pointer += 1
        #end for
        self.pointer = saved
        return None
    #end def advance_token

    def advance(self,amount):
        """Move the cursor by *amount* lines without returning anything."""
        self.pointer += amount
    #end def advance

    def remainder(self):
        """Return all lines from the cursor to the end of the file."""
        return self.lines[self.pointer:]
    #end def remainder

    def rewind(self,point=0):
        """Reset the cursor to *point* (the start by default)."""
        self.pointer = point
    #end def rewind

    def get_line(self,point=None):
        """Return the line at *point*, or the line under the cursor."""
        return self.lines[self.pointer if point is None else point]
    #end def get_line

    def get_line_ahead(self,nahead):
        """Return the line *nahead* lines past the cursor without moving."""
        return self.lines[self.pointer+nahead]
    #end def get_line_ahead
#end class VaspLines
def read_outcar_header_values(vlines,odata):
    """Read the total energy (TOTEN, 5th token of the line) from OUTCAR.

    Also moves the cursor to the 'energy without entropy' line so later
    readers start past the TOTEN block.
    """
    line = vlines.advance_token('TOTEN')
    odata.total_energy = float(line.split()[4])
    vlines.advance_token('energy without entropy')
#end def read_outcar_header_values
def read_outcar_core_potentials(vlines,odata):
    """Parse test-charge radii and per-ion core potentials from OUTCAR.

    Stores odata.core_potential_radii and odata.core_potentials as float
    arrays and advances the cursor past the parsed block.
    """
    line = vlines.advance_token('the test charge radii are')
    odata.core_potential_radii = array(line.split()[5:],dtype=float)
    vlines.advance(2)
    n = 0
    cpots = []
    for line in vlines.remainder():
        ls = line.strip()
        n+=1
        # the table ends at the first blank line
        if len(ls)==0:
            break
        #end if
        # values can run together (e.g. '1.23-45.6'); pad '-' so split works,
        # then take every second token (values, skipping the ion indices)
        tokens = line.replace('-',' -').split()
        cpots.extend(tokens[1::2])
    #end for
    odata.core_potentials = array(cpots,dtype=float)
    vlines.advance(n)
#end def read_outcar_core_potentials
def read_outcar_fermi_energy(vlines,odata):
    """Record the Fermi energy (3rd token of the 'E-fermi' line) on odata."""
    tokens = vlines.advance_token('E-fermi').split()
    odata.Efermi = float(tokens[2])
#end def read_outcar_fermi_energy
def read_outcar_bands(vlines,odata):
    """Parse the eigenvalue listing from OUTCAR into odata.bands.

    Result layout: bands[spin][kpoint] = obj(kpoint=frac coords array,
    energies=array, occupations=array). Lines are classified by their
    second/third character: 's' = spin header, 'k' = k-point header,
    'b' = band table header, otherwise a band data row. Two consecutive
    (near-)empty lines terminate the block.
    """
    bands = obj()
    line = vlines.advance_token('spin component')
    if line!=None:
        last_empty = True
        n = 0
        for line in vlines.remainder():
            if len(line)>2:
                if line[1]=='s':
                    # "spin component N" header
                    ns = int(line.split()[2])
                    spin = obj()
                    bands[ns] = spin
                elif line[1]=='k':
                    # "k-point N : kx ky kz" header
                    tokens = line.split()
                    nk = int(tokens[1])
                    kp = array(tokens[3:],dtype=float)
                    kpoint = obj(kpoint=kp,energies=[],occupations=[])
                    spin[nk]=kpoint
                elif line[2]=='b':
                    # "band No." column header: skip
                    None
                else:
                    bnum,energy,occ = line.split()
                    kpoint.energies.append(float(energy))
                    kpoint.occupations.append(float(occ))
                #end if
                last_empty = False
            else:
                # second consecutive empty line ends the eigenvalue block
                if last_empty:
                    break
                #end if
                last_empty = True
            #end if
            n+=1
        #end for
        vlines.advance(n)
    #end if
    # NOTE(review): iteritems is Python 2 dict API; presumably the project
    # obj class provides it -- confirm for Python 3 use.
    for ns,spin in bands.iteritems():
        for nk,kpoint in spin.iteritems():
            kpoint.energies = array(kpoint.energies,dtype=float)
            kpoint.occupations = array(kpoint.occupations,dtype=float)
        #end for
    #end for
    odata.bands = bands
#end def read_outcar_bands
def read_outcar_charge_mag(vlines,odata,token):
    """
    Read a per-ion charge/magnetization table (s, p, d, tot columns)
    starting at the OUTCAR section identified by token.
    """
    # per-ion values resolved by angular momentum channel
    ion = obj(s=[],p=[],d=[],tot=[])
    total = obj()
    vlines.advance_token(token)
    # skip the column-header lines of the table
    vlines.advance(4)
    prev_end = False
    n=0
    for line in vlines.remainder():
        n+=1
        if prev_end:
            # the line after the closing dashes: table fully consumed
            break
        #end if
        if line[0]=='-':
            # a row of dashes separates the per-ion rows from the totals
            prev_end = True
        else:
            # row format: "ion_index  s  p  d  tot"
            vals = array(line.split()[1:],dtype=float)
            ion.s.append(vals[0])
            ion.p.append(vals[1])
            ion.d.append(vals[2])
            ion.tot.append(vals[3])
        #end if
    #end for
    # convert accumulated per-channel lists to numpy arrays
    # NOTE(review): iteritems() is Python 2 style -- confirm interpreter.
    for channel,vals in ion.iteritems():
        ion[channel] = array(vals,dtype=float)
    #end for
| |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# author: <NAME>
# contact: <EMAIL>
import torch
import SimpleITK as sitk
import numpy as np
import nibabel as nib
from torch.autograd import Variable
from skimage.transform import resize
from torchvision import transforms
from time import gmtime, strftime
from tqdm import tqdm
import pdb
import os
from ..helpers.helper import *
from os.path import expanduser
home = expanduser("~")  # trained model checkpoints live under ~/.DeepBrainSeg
#========================================================================================
# prediction functions.....................
bin_path = os.path.join('/opt/ANTs/bin/')  # default location of the ANTs executables
class tumorSeg():
"""
class performs segmentation for a given sequence of patient data.
two main platforms for segmentation mask estimation:
one for the patient data in brats format
other with any random format
step followed for in estimation of segmentation mask
1. ABLnet for reducing false positives outside the brain
Air Brain Lesion model (2D model, 103 layered)
2. BNet3Dnet 3D network for inner class classification
Dual Path way network
3. MNet2D 57 layered convolutional network for inner class
classification
4. Tir3Dnet 57 layered 3D convolutional network for inner class
classification
more on training details and network information:
(https://link.springer.com/chapter/10.1007/978-3-030-11726-9_43<Paste>)
=========================
quick: True (just evaluates on the Dual path network (BNet3D))
else computes an ensemble over all four networks
"""
def __init__(self,
             quick = False,
             ants_path = bin_path):
    """
    Load the segmentation networks onto the best available device.

    quick: if True, load only the 2D models (ABLnet, MNET2D) and skip the
           3D models (BNET3D, Tir3Dnet) used by the full ensemble.
    ants_path: directory containing the ANTs command-line binaries.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # device = "cpu"
    map_location = device
    #========================================================================================
    # trained checkpoint locations (downloaded under the user's home directory)
    ckpt_tir2D = os.path.join(home, '.DeepBrainSeg/BestModels/Tramisu_2D_FC57_best_loss.pth.tar')
    ckpt_tir3D = os.path.join(home, '.DeepBrainSeg/BestModels/Tramisu_3D_FC57_best_acc.pth.tar')
    ckpt_BNET3D = os.path.join(home, '.DeepBrainSeg/BestModels/BrainNet_3D_best_acc.pth.tar')
    ckpt_ABL = os.path.join(home, '.DeepBrainSeg/BestModels/ABL_CE_best_model_loss_based.pth.tar')
    #========================================================================================
    # air brain lesion segmentation..............
    from .models.modelABL import FCDenseNet103
    self.ABLnclasses = 3
    self.ABLnet = FCDenseNet103(n_classes = self.ABLnclasses) ## initialize the graph
    saved_parms=torch.load(ckpt_ABL, map_location=map_location)
    self.ABLnet.load_state_dict(saved_parms['state_dict']) ## fill the model with trained params
    print ("=================================== ABLNET2D Loaded =================================")
    self.ABLnet.eval()
    self.ABLnet = self.ABLnet.to(device)
    #========================================================================================
    # Tir2D net.......................
    from .models.modelTir2D import FCDenseNet57
    self.Mnclasses = 4
    self.MNET2D = FCDenseNet57(self.Mnclasses)
    ckpt = torch.load(ckpt_tir2D, map_location=map_location)
    self.MNET2D.load_state_dict(ckpt['state_dict'])
    print ("=================================== MNET2D Loaded ===================================")
    self.MNET2D.eval()
    self.MNET2D = self.MNET2D.to(device)
    #========================================================================================
    if not quick:
        # BrainNet3D model......................
        from .models.model3DBNET import BrainNet_3D_Inception
        self.B3Dnclasses = 5
        self.BNET3Dnet = BrainNet_3D_Inception()
        ckpt = torch.load(ckpt_BNET3D, map_location=map_location)
        self.BNET3Dnet.load_state_dict(ckpt['state_dict'])
        print ("=================================== KAMNET3D Loaded =================================")
        self.BNET3Dnet.eval()
        self.BNET3Dnet = self.BNET3Dnet.to(device)
        #========================================================================================
        # Tir3D model...................
        from .models.modelTir3D import FCDenseNet57
        self.T3Dnclasses = 5
        self.Tir3Dnet = FCDenseNet57(self.T3Dnclasses)
        ckpt = torch.load(ckpt_tir3D, map_location=map_location)
        self.Tir3Dnet.load_state_dict(ckpt['state_dict'])
        print ("================================== TIRNET2D Loaded =================================")
        self.Tir3Dnet.eval()
        self.Tir3Dnet = self.Tir3Dnet.to(device)
    #========================================================================================
    self.device = device
    self.quick = quick
    self.ants_path = ants_path
def get_ants_mask(self, t1_path):
    """
    Generate a brain mask for a T1 volume using the ANTs command-line tools.

    The pipeline normalizes the volume, thresholds it to a binary mask,
    applies one round of morphological dilation (MD) then erosion (ME),
    and copies the T1 header onto the mask so the geometries match.

    t1_path: t1 volume path (str); the mask is saved next to it as
             ``mask.nii.gz``
    returns: maskvolume (numpy uint8 type)
    """
    import subprocess  # local import; only needed when ANTs is invoked
    mask_path = os.path.join(os.path.dirname(t1_path), 'mask.nii.gz')
    # Run each ANTs tool with an explicit argument list (shell=False), so
    # paths containing spaces or shell metacharacters cannot be interpreted
    # by a shell (the original os.system string concatenation was
    # injection-prone).  check=False preserves the original best-effort
    # behaviour of ignoring tool exit codes.
    ants_commands = [
        ['ImageMath', '3', mask_path, 'Normalize', t1_path],
        ['ThresholdImage', '3', mask_path, mask_path, '0.01', '1'],
        ['ImageMath', '3', mask_path, 'MD', mask_path, '1'],
        ['ImageMath', '3', mask_path, 'ME', mask_path, '1'],
        ['CopyImageHeaderInformation', t1_path, mask_path, mask_path, '1', '1', '1'],
    ]
    for cmd in ants_commands:
        subprocess.run([self.ants_path + cmd[0]] + cmd[1:], check=False)
    # NOTE(review): nibabel's get_data() is deprecated in newer releases;
    # get_fdata() is the replacement but changes the returned dtype --
    # confirm before switching.
    mask = np.uint8(nib.load(mask_path).get_data())
    return mask
def get_localization(self, t1_v, t1c_v, t2_v, flair_v, brain_mask):
    """
    Localize the brain / whole-tumour region with the 2D ABL network.

    Runs ABLnet slice by slice along the third axis, assembles a
    full-volume logit map, then post-processes it into a class mask.

    t1_v, t1c_v, t2_v, flair_v: modality volumes (numpy arrays)
    brain_mask: brain, whole tumor mask (numpy array, output of ANTs pipeline)
    returns: class mask volume (numpy uint8 array)
    """
    t1_v = normalize(t1_v, brain_mask)
    t1c_v = normalize(t1c_v, brain_mask)
    t2_v = normalize(t2_v, brain_mask)
    flair_v = normalize(flair_v, brain_mask)
    vol_shape = flair_v.shape
    logit_volume = np.empty((self.ABLnclasses, vol_shape[0], vol_shape[1], vol_shape[2]))
    # channel order expected by ABLnet: flair, t2, t1ce, t1
    modalities = (flair_v, t2_v, t1c_v, t1_v)
    for idx in tqdm(range(vol_shape[2])):
        reference = np.transpose(flair_v[:, :, idx])
        stacked = np.zeros((reference.shape[0], reference.shape[1], 4))
        for channel, volume in enumerate(modalities):
            stacked[:, :, channel] = np.transpose(volume[:, :, idx])
        batch = torch.from_numpy(convert_image(stacked)).float()
        batch = batch.unsqueeze(0)  # fake batch dimension (batch size 1)
        batch = batch.to(self.device)
        slice_logits = self.ABLnet(batch).detach().cpu().numpy()  # 3 x 240 x 240
        logit_volume[:, :, :, idx] = slice_logits.transpose(0, 1, 3, 2)
    prediction = apply_argmax_to_logits(logit_volume)
    prediction = perform_postprocessing(prediction)
    prediction = adjust_classes_air_brain_tumour(np.uint8(prediction))
    return np.uint8(prediction)
def inner_class_classification_with_logits_NCube(self, t1,
                                                 t1ce, t2, flair,
                                                 brain_mask, mask, N = 64):
    """
    output of 3D tiramisu model (tir3Dnet): sliding-window inference with
    N^3 patches over the bounding box of the ROI mask.

    t1, t1ce, t2, flair: modality volumes (numpy arrays)
    brain_mask: brain mask used for intensity normalization
    mask: numpy array output of ABLnet (region of interest)
    N: patch size during inference
    returns: 4-class logit volume (numpy array)
    """
    t1 = normalize(t1, brain_mask)
    t1ce = normalize(t1ce, brain_mask)
    t2 = normalize(t2, brain_mask)
    flair = normalize(flair, brain_mask)
    shape = t1.shape # to exclude batch_size
    final_prediction = np.zeros((self.T3Dnclasses, shape[0], shape[1], shape[2]))
    # bound the sliding window by the ROI, clamping the upper bounds so a
    # full N-cube always fits inside the volume
    x_min, x_max, y_min, y_max, z_min, z_max = bbox(mask, pad = N)
    x_min, x_max, y_min, y_max, z_min, z_max = x_min, min(shape[0] - N, x_max), y_min, min(shape[1] - N, y_max), z_min, min(shape[2] - N, z_max)
    with torch.no_grad():
        # stride N//2: patches overlap by half; later windows overwrite
        # earlier predictions in the overlapping region
        for x in tqdm(range(x_min, x_max, N//2)):
            for y in range(y_min, y_max, N//2):
                for z in range(z_min, z_max, N//2):
                    # batch of one 4-channel cube: flair, t2, t1, t1ce
                    high = np.zeros((1, 4, N, N, N))
                    high[0, 0, :, :, :] = flair[x:x+N, y:y+N, z:z+N]
                    high[0, 1, :, :, :] = t2[x:x+N, y:y+N, z:z+N]
                    high[0, 2, :, :, :] = t1[x:x+N, y:y+N, z:z+N]
                    high[0, 3, :, :, :] = t1ce[x:x+N, y:y+N, z:z+N]
                    high = Variable(torch.from_numpy(high)).to(self.device).float()
                    # NOTE(review): softmax without an explicit dim relies on
                    # deprecated implicit-dim behaviour -- confirm the axis.
                    pred = torch.nn.functional.softmax(self.Tir3Dnet(high).detach().cpu())
                    pred = pred.data.numpy()
                    final_prediction[:, x:x+N, y:y+N, z:z+N] = pred[0]
    # collapse the 5-class network output to the 4 reporting classes
    final_prediction = convert5class_logitsto_4class(final_prediction)
    return final_prediction
def inner_class_classification_with_logits_DualPath(self, t1,
                                                    t1ce, t2, flair,
                                                    brain_mask, mask=None,
                                                    prediction_size = 9):
    """
    output of BNet3D (dual-pathway network).

    Slides a window over the ROI bounding box; at each position the network
    sees a high-resolution patch and a downsampled wide-context patch
    centred on the same location, and predicts the central
    prediction_size^3 region.

    t1, t1ce, t2, flair: modality volumes (numpy arrays)
    brain_mask: brain mask used for intensity normalization
    mask: ROI mask bounding the sliding window
    prediction_size: mid inference patch size
    returns: 4-class logit volume (numpy array)
    """
    t1 = normalize(t1, brain_mask)
    t1ce = normalize(t1ce, brain_mask)
    t2 = normalize(t2, brain_mask)
    flair = normalize(flair, brain_mask)
    shape = t1.shape # to exclude batch_size
    final_prediction = np.zeros((self.B3Dnclasses, shape[0], shape[1], shape[2]))
    x_min, x_max, y_min, y_max, z_min, z_max = bbox(mask, pad = prediction_size)
    # patch geometry, obtained by aspect ratio calculation
    high_res_size = prediction_size + 16
    resize_to = int(prediction_size ** 0.5) + 16
    low_res_size = int(51*resize_to/19)
    hl_pad = (high_res_size - prediction_size)//2
    hr_pad = hl_pad + prediction_size
    ll_pad = (low_res_size - prediction_size)//2
    lr_pad = ll_pad + prediction_size
    for x in tqdm(range(x_min, x_max - prediction_size, prediction_size)):
        for y in (range(y_min, y_max - prediction_size, prediction_size)):
            for z in (range(z_min, z_max - prediction_size, prediction_size)):
                high = np.zeros((1, 4, high_res_size, high_res_size, high_res_size))
                low = np.zeros((1, 4, low_res_size, low_res_size, low_res_size))
                low1 = np.zeros((1, 4, resize_to, resize_to, resize_to))
                # Pre-fill each channel with its modality's corner voxel so
                # any region falling outside the volume keeps a plausible
                # background value.  (The original filled channel 3 from
                # `high[0, 2] + t1ce[0,0,0]` -- a copy-paste typo that was
                # harmless only because the buffer was still zero there.)
                for c, modality in enumerate((flair, t2, t1, t1ce)):
                    fill_value = modality[0, 0, 0]
                    high[0, c] = fill_value
                    low[0, c] = fill_value
                    low1[0, c] = fill_value
                # =========================================================================
                # copy the valid (in-volume) part of the high-resolution patch
                vxf, vxt = max(0, x-hl_pad), min(shape[0], x+hr_pad)
                vyf, vyt = max(0, y-hl_pad), min(shape[1], y+hr_pad)
                vzf, vzt = max(0, z-hl_pad), min(shape[2], z+hr_pad)
                txf, txt = max(0, hl_pad-x), max(0, hl_pad-x) + vxt - vxf
                tyf, tyt = max(0, hl_pad-y), max(0, hl_pad-y) + vyt - vyf
                tzf, tzt = max(0, hl_pad-z), max(0, hl_pad-z) + vzt - vzf
                high[0, 0, txf:txt, tyf:tyt, tzf:tzt] = flair[vxf:vxt, vyf:vyt, vzf:vzt]
                high[0, 1, txf:txt, tyf:tyt, tzf:tzt] = t2[vxf:vxt, vyf:vyt, vzf:vzt]
                high[0, 2, txf:txt, tyf:tyt, tzf:tzt] = t1[vxf:vxt, vyf:vyt, vzf:vzt]
                high[0, 3, txf:txt, tyf:tyt, tzf:tzt] = t1ce[vxf:vxt, vyf:vyt, vzf:vzt]
                # =========================================================================
                # copy the valid part of the wide low-resolution context patch
                vxf, vxt = max(0, x-ll_pad), min(shape[0], x+lr_pad)
                vyf, vyt = max(0, y-ll_pad), min(shape[1], y+lr_pad)
                vzf, vzt = max(0, z-ll_pad), min(shape[2], z+lr_pad)
                txf, txt = max(0, ll_pad-x), max(0, ll_pad-x) + vxt - vxf
                tyf, tyt = max(0, ll_pad-y), max(0, ll_pad-y) + vyt - vyf
                tzf, tzt = max(0, ll_pad-z), max(0, ll_pad-z) + vzt - vzf
                low[0, 0, txf:txt, tyf:tyt, tzf:tzt] = flair[vxf:vxt, vyf:vyt, vzf:vzt]
                low[0, 1, txf:txt, tyf:tyt, tzf:tzt] = t2[vxf:vxt, vyf:vyt, vzf:vzt]
                low[0, 2, txf:txt, tyf:tyt, tzf:tzt] = t1[vxf:vxt, vyf:vyt, vzf:vzt]
                low[0, 3, txf:txt, tyf:tyt, tzf:tzt] = t1ce[vxf:vxt, vyf:vyt, vzf:vzt]
                # =========================================================================
                # downsample the wide patch to the network's low-res input size
                low1[0] = [resize(low[0, i, :, :, :], (resize_to, resize_to, resize_to)) for i in range(4)]
                high = Variable(torch.from_numpy(high)).to(self.device).float()
                low1 = Variable(torch.from_numpy(low1)).to(self.device).float()
                # NOTE(review): softmax without an explicit dim relies on
                # deprecated implicit-dim behaviour -- confirm the axis.
                pred = torch.nn.functional.softmax(self.BNET3Dnet(high, low1, pred_size=prediction_size).detach().cpu())
                pred = pred.numpy()
                final_prediction[:, x:x+prediction_size, y:y+prediction_size, z:z+prediction_size] = pred[0]
    # collapse the 5-class network output to the 4 reporting classes
    final_prediction = convert5class_logitsto_4class(final_prediction)
    return final_prediction
def inner_class_classification_with_logits_2D(self,
t1ce_volume,
t2_volume,
flair_volume):
"""
output of 2D tiramisu model (MNet)
"""
normalize = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
transformList = []
transformList.append(transforms.ToTensor())
transformList.append(normalize)
transformSequence=transforms.Compose(transformList)
generated_output = np.empty((self.Mnclasses,flair_volume.shape[0],flair_volume.shape[1],flair_volume.shape[2]))
for slices in tqdm(range(flair_volume.shape[2])):
flair_slice = scale_every_slice_between_0_to_255(np.transpose(flair_volume[:,:,slices]))
t2_slice = scale_every_slice_between_0_to_255(np.transpose(t2_volume[:,:,slices]))
t1ce_slice = scale_every_slice_between_0_to_255(np.transpose(t1ce_volume[:,:,slices]))
array = np.zeros((flair_slice.shape[0],flair_slice.shape[1],3))
array[:,:,0] = flair_slice
array[:,:,1] = t2_slice
array[:,:,2] = t1ce_slice
array = np.uint8(array)
transformed_array | |
<gh_stars>0
# coding: utf-8
"""
Metal API
This is the API for Equinix Metal. The API allows you to programmatically interact with all of your Equinix Metal resources, including devices, networks, addresses, organizations, projects, and your user account. The official API docs are hosted at <https://metal.equinix.com/developers/api>. # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from metal.api_client import ApiClient
from metal.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class BGPApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_bgp_session(self, id, bgp_session, **kwargs): # noqa: E501
"""Create a BGP session # noqa: E501
Creates a BGP session. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_bgp_session(id, bgp_session, async_req=True)
>>> result = thread.get()
:param id: Device UUID (required)
:type id: str
:param bgp_session: BGP session to create (required)
:type bgp_session: BGPSessionInput
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: BgpSession
"""
kwargs['_return_http_data_only'] = True
return self.create_bgp_session_with_http_info(id, bgp_session, **kwargs) # noqa: E501
def create_bgp_session_with_http_info(self, id, bgp_session, **kwargs):  # noqa: E501
    """Create a BGP session  # noqa: E501
    Creates a BGP session. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_bgp_session_with_http_info(id, bgp_session, async_req=True)
    >>> result = thread.get()
    :param id: Device UUID (required)
    :type id: str
    :param bgp_session: BGP session to create (required)
    :type bgp_session: BGPSessionInput
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: tuple(BgpSession, status_code(int), headers(HTTPHeaderDict))
    """
    # snapshot of the explicit parameters plus the kwargs dict; the
    # generated code below validates and unpacks it
    local_var_params = locals()
    all_params = [
        'id',
        'bgp_session'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )
    # reject unknown keyword arguments, then fold the recognized ones
    # into local_var_params
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_bgp_session" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `create_bgp_session`")  # noqa: E501
    # verify the required parameter 'bgp_session' is set
    if self.api_client.client_side_validation and ('bgp_session' not in local_var_params or  # noqa: E501
                                                   local_var_params['bgp_session'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `bgp_session` when calling `create_bgp_session`")  # noqa: E501
    collection_formats = {}
    # path parameters are substituted into the URL template below
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # the BGP session payload is sent as the request body
    body_params = None
    if 'bgp_session' in local_var_params:
        body_params = local_var_params['bgp_session']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['x_auth_token']  # noqa: E501
    # map HTTP status codes to the model used to deserialize the response
    response_types_map = {
        201: "BgpSession",
        401: "Error",
        403: "Error",
        422: "Error",
    }
    return self.api_client.call_api(
        '/devices/{id}/bgp/sessions', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def delete_bgp_session(self, id, **kwargs): # noqa: E501
"""Delete the BGP session # noqa: E501
Deletes the BGP session. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_bgp_session(id, async_req=True)
>>> result = thread.get()
:param id: BGP session UUID (required)
:type id: str
:param async_req: Whether to execute the request asynchronously.
:type async_req: bool, optional
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:type _preload_content: bool, optional
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: Returns the result object.
If the method is called asynchronously,
returns the request thread.
:rtype: None
"""
kwargs['_return_http_data_only'] = True
return self.delete_bgp_session_with_http_info(id, **kwargs) # noqa: E501
def delete_bgp_session_with_http_info(self, id, **kwargs):  # noqa: E501
    """Delete the BGP session  # noqa: E501
    Deletes the BGP session. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_bgp_session_with_http_info(id, async_req=True)
    >>> result = thread.get()
    :param id: BGP session UUID (required)
    :type id: str
    :param async_req: Whether to execute the request asynchronously.
    :type async_req: bool, optional
    :param _return_http_data_only: response data without head status code
                                   and headers
    :type _return_http_data_only: bool, optional
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :type _preload_content: bool, optional
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :param _request_auth: set to override the auth_settings for an a single
                          request; this effectively ignores the authentication
                          in the spec for a single request.
    :type _request_auth: dict, optional
    :return: Returns the result object.
             If the method is called asynchronously,
             returns the request thread.
    :rtype: None
    """
    # snapshot of the explicit parameters plus the kwargs dict; the
    # generated code below validates and unpacks it
    local_var_params = locals()
    all_params = [
        'id'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout',
            '_request_auth'
        ]
    )
    # reject unknown keyword arguments, then fold the recognized ones
    # into local_var_params
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_bgp_session" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'id' is set
    if self.api_client.client_side_validation and ('id' not in local_var_params or  # noqa: E501
                                                   local_var_params['id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `delete_bgp_session`")  # noqa: E501
    collection_formats = {}
    # path parameters are substituted into the URL template below
    path_params = {}
    if 'id' in local_var_params:
        path_params['id'] = local_var_params['id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    # DELETE carries no request body
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['x_auth_token']  # noqa: E501
    # no response body is expected on success
    response_types_map = {}
    return self.api_client.call_api(
        '/bgp/sessions/{id}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_types_map=response_types_map,
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats,
        _request_auth=local_var_params.get('_request_auth'))
def find_bgp_config_by_project(self, id, **kwargs): # noqa: E501
"""Retrieve a bgp config # noqa: E501
Returns a bgp config # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.find_bgp_config_by_project(id, async_req=True)
>>> result = thread.get()
:param id: Project UUID (required)
:type id: str
:param include: Nested attributes to include. Included objects will return their full attributes. Attribute names can be dotted (up to 3 levels) to included deeply nested objects.
:type include: list[str]
:param exclude: Nested attributes to exclude. Excluded objects will return only the href attribute. Attribute names can be | |
we'll create a leave event locally, but that's only really
# for the benefit of the invited user - we don't have enough
# information to send it out over federation).
#
# (2a) rescinded knocks. These are identical to rejected invites.
#
# (3) knock events which we have sent over federation. As with
# invite rejections, the remote server should send them out to
# the federation.
#
# So, in all the above cases, we want to ignore such events.
#
# OOB memberships are always(?) outliers anyway, so if we *don't*
# ignore them, we'll get an exception further down when we try to
# fetch the membership list for the room.
#
# Arguably, we could equivalently ignore all outliers here, since
# in theory the only way for an outlier with a local `sender` to
# exist is by being an OOB membership (via one of (2), (2a) or (3)
# above).
#
if event.internal_metadata.is_out_of_band_membership():
logger.debug("Not sending OOB membership event %s", event)
return
# Finally, there are some other events that we should not send out
# until someone asks for them. They are explicitly flagged as such
# with `proactively_send: False`.
if not event.internal_metadata.should_proactively_send():
logger.debug(
"Not sending event with proactively_send=false: %s", event
)
return
destinations: Optional[Collection[str]] = None
if not event.prev_event_ids():
# If there are no prev event IDs then the state is empty
# and so no remote servers in the room
destinations = set()
else:
# We check the external cache for the destinations, which is
# stored per state group.
sg = await self._external_cache.get(
"event_to_prev_state_group", event.event_id
)
if sg:
destinations = await self._external_cache.get(
"get_joined_hosts", str(sg)
)
if destinations is None:
try:
# Get the state from before the event.
# We need to make sure that this is the state from before
# the event and not from after it.
# Otherwise if the last member on a server in a room is
# banned then it won't receive the event because it won't
# be in the room after the ban.
destinations = await self.state.get_hosts_in_room_at_events(
event.room_id, event_ids=event.prev_event_ids()
)
except Exception:
logger.exception(
"Failed to calculate hosts in room for event: %s",
event.event_id,
)
return
sharded_destinations = {
d
for d in destinations
if self._federation_shard_config.should_handle(
self._instance_name, d
)
}
if send_on_behalf_of is not None:
# If we are sending the event on behalf of another server
# then it already has the event and there is no reason to
# send the event to it.
sharded_destinations.discard(send_on_behalf_of)
logger.debug("Sending %s to %r", event, sharded_destinations)
if sharded_destinations:
await self._send_pdu(event, sharded_destinations)
now = self.clock.time_msec()
ts = await self.store.get_received_ts(event.event_id)
assert ts is not None
synapse.metrics.event_processing_lag_by_event.labels(
"federation_sender"
).observe((now - ts) / 1000)
async def handle_room_events(events: List[EventBase]) -> None:
logger.debug(
"Handling %i events in room %s", len(events), events[0].room_id
)
with Measure(self.clock, "handle_room_events"):
for event in events:
await handle_event(event)
events_by_room: Dict[str, List[EventBase]] = {}
for event in events:
events_by_room.setdefault(event.room_id, []).append(event)
await make_deferred_yieldable(
defer.gatherResults(
[
run_in_background(handle_room_events, evs)
for evs in events_by_room.values()
],
consumeErrors=True,
)
)
logger.debug("Successfully handled up to %i", next_token)
await self.store.update_federation_out_pos("events", next_token)
if events:
now = self.clock.time_msec()
ts = await self.store.get_received_ts(events[-1].event_id)
assert ts is not None
synapse.metrics.event_processing_lag.labels(
"federation_sender"
).set(now - ts)
synapse.metrics.event_processing_last_ts.labels(
"federation_sender"
).set(ts)
events_processed_counter.inc(len(events))
event_processing_loop_room_count.labels("federation_sender").inc(
len(events_by_room)
)
event_processing_loop_counter.labels("federation_sender").inc()
synapse.metrics.event_processing_positions.labels(
"federation_sender"
).set(next_token)
finally:
self._is_processing = False
async def _send_pdu(self, pdu: EventBase, destinations: Iterable[str]) -> None:
    """Queue a PDU for delivery to the given remote servers.

    Records the PDU's stream ordering against each destination (so that
    catch-up can resend it later if the remote is unreachable for a
    while), then hands the PDU to every per-destination queue. The local
    server is always excluded.
    """
    targets = set(destinations)
    targets.discard(self.server_name)
    logger.debug("Sending to: %s", str(targets))
    if not targets:
        return
    sent_pdus_destination_dist_total.inc(len(targets))
    sent_pdus_destination_dist_count.inc()
    assert pdu.internal_metadata.stream_ordering
    # track the fact that we have a PDU for these destinations, to allow
    # us to perform catch-up later on if the remote is unreachable for a
    # while.
    await self.store.store_destination_rooms_entries(
        targets,
        pdu.room_id,
        pdu.internal_metadata.stream_ordering,
    )
    for target in targets:
        self._get_per_destination_queue(target).send_pdu(pdu)
async def send_read_receipt(self, receipt: ReadReceipt) -> None:
    """Send a RR to any other servers in the room

    Receipts are rate-limited per room; see the discussion below.

    Args:
        receipt: receipt to be sent
    """
    # Some background on the rate-limiting going on here.
    #
    # It turns out that if we attempt to send out RRs as soon as we get them from
    # a client, then we end up trying to do several hundred Hz of federation
    # transactions. (The number of transactions scales as O(N^2) on the size of a
    # room, since in a large room we have both more RRs coming in, and more servers
    # to send them to.)
    #
    # This leads to a lot of CPU load, and we end up getting behind. The solution
    # currently adopted is as follows:
    #
    # The first receipt in a given room is sent out immediately, at time T0. Any
    # further receipts are, in theory, batched up for N seconds, where N is calculated
    # based on the number of servers in the room to achieve a transaction frequency
    # of around 50Hz. So, for example, if there were 100 servers in the room, then
    # N would be 100 / 50Hz = 2 seconds.
    #
    # Then, after T+N, we flush out any receipts that have accumulated, and restart
    # the timer to flush out more receipts at T+2N, etc. If no receipts accumulate,
    # we stop the cycle and go back to the start.
    #
    # However, in practice, it is often possible to flush out receipts earlier: in
    # particular, if we are sending a transaction to a given server anyway (for
    # example, because we have a PDU or a RR in another room to send), then we may
    # as well send out all of the pending RRs for that server. So it may be that
    # by the time we get to T+N, we don't actually have any RRs left to send out.
    # Nevertheless we continue to buffer up RRs for the room in question until we
    # reach the point that no RRs arrive between timer ticks.
    #
    # For even more background, see https://github.com/matrix-org/synapse/issues/4730.
    room_id = receipt.room_id
    # Work out which remote servers should be poked and poke them.
    domains_set = await self.state.get_current_hosts_in_room(room_id)
    # keep only remote servers that this federation-sender instance is
    # responsible for under the sharding config
    domains = [
        d
        for d in domains_set
        if d != self.server_name
        and self._federation_shard_config.should_handle(self._instance_name, d)
    ]
    if not domains:
        return
    queues_pending_flush = self._queues_awaiting_rr_flush_by_room.get(room_id)
    # if there is no flush yet scheduled, we will send out these receipts with
    # immediate flushes, and schedule the next flush for this room.
    if queues_pending_flush is not None:
        logger.debug("Queuing receipt for: %r", domains)
    else:
        logger.debug("Sending receipt to: %r", domains)
        self._schedule_rr_flush_for_room(room_id, len(domains))
    for domain in domains:
        queue = self._get_per_destination_queue(domain)
        queue.queue_read_receipt(receipt)
        # if there is already a RR flush pending for this room, then make sure this
        # destination is registered for the flush
        if queues_pending_flush is not None:
            queues_pending_flush.add(queue)
        else:
            queue.flush_read_receipts_for_room(room_id)
def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None:
    """Start a delayed flush cycle for the room's pending read receipts.

    n_domains is the number of destination servers the last receipt went
    to; the backoff scales linearly with it to cap transaction frequency.
    """
    # that is going to cause approximately len(domains) transactions, so now back
    # off for that multiplied by RR_TXN_INTERVAL_PER_ROOM
    backoff_ms = self._rr_txn_interval_per_room_ms * n_domains
    logger.debug("Scheduling RR flush in %s in %d ms", room_id, backoff_ms)
    # NOTE(review): the delay passed here is in milliseconds, but
    # clock.call_later delays are conventionally in seconds -- confirm the
    # units, otherwise the flush fires ~1000x later than intended.
    self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id)
    # an empty set marks "flush scheduled, no queues registered yet"
    self._queues_awaiting_rr_flush_by_room[room_id] = set()
def _flush_rrs_for_room(self, room_id: str) -> None:
queues = self._queues_awaiting_rr_flush_by_room.pop(room_id)
logger.debug("Flushing RRs in %s to %s", room_id, queues)
if not queues:
# no more RRs arrived for this room; we are done.
return
# schedule the next flush
self._schedule_rr_flush_for_room(room_id, len(queues))
for queue in queues:
queue.flush_read_receipts_for_room(room_id)
def send_presence_to_destinations(
self, states: Iterable[UserPresenceState], destinations: Iterable[str]
) -> None:
"""Send the given presence states to the given destinations.
destinations (list[str])
"""
if not states or not self.hs.config.server.use_presence:
# No-op if presence is disabled.
return
# Ensure we only send out presence states for local users.
for state in states:
assert self.is_mine_id(state.user_id)
for destination in destinations:
if destination == self.server_name:
continue
if not self._federation_shard_config.should_handle(
self._instance_name, destination
| |
#!/usr/bin/env python
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, <NAME> <<EMAIL>>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import multiprocessing
import unittest
import subprocess
import time
import signal
import sys
import os
import pexpect
from . import PexpectTestCase
from .utils import no_coverage_env
# Many of these test cases blindly assume that sequential directory
# listings of the /bin directory will yield the same results.
# This may not be true, but seems adequate for testing now.
# I should fix this at some point.
# Translation table: map each of the 256 code points to itself when printable
# (its repr is exactly three characters, e.g. "'a'"), otherwise to '.'.
FILTER = ''.join(chr(x) if len(repr(chr(x))) == 3 else '.' for x in range(256))

def hex_dump(src, length=16):
    """Return a hexdump-style rendering of *src*, *length* bytes per row.

    Accepts str or bytes.  Bytes are decoded as latin-1 (one byte maps to
    exactly one character) before processing; this fixes a Python 3 defect
    where iterating bytes yields ints, so ``ord(x)`` raised TypeError and
    ``translate(FILTER)`` could not be applied.
    """
    if not isinstance(src, str):
        # latin-1 is a lossless 1:1 byte->char mapping.
        src = src.decode('latin-1')
    result = []
    for i in range(0, len(src), length):
        s = src[i:i+length]
        hexa = ' '.join(["%02X" % ord(x) for x in s])
        printable = s.translate(FILTER)
        result.append("%04X %-*s %s\n" % (i, length*3, hexa, printable))
    return ''.join(result)
def hex_diff(left, right):
    """Return a '<'/'>' style diff of the hex dumps of *left* and *right*."""
    differing = []
    for left_line, right_line in zip(hex_dump(left).splitlines(),
                                     hex_dump(right).splitlines()):
        if left_line != right_line:
            differing.append('< %s\n> %s' % (left_line, right_line))
    return '\n' + '\n'.join(differing)
class ExpectTestCase (PexpectTestCase.PexpectTestCase):
def test_expect_basic (self):
p = pexpect.spawn('cat', echo=False, timeout=5)
p.sendline (b'Hello')
p.sendline (b'there')
p.sendline (b'Mr. Python')
p.expect (b'Hello')
p.expect (b'there')
p.expect (b'Mr. Python')
p.sendeof ()
p.expect (pexpect.EOF)
def test_expect_exact_basic (self):
p = pexpect.spawn('cat', echo=False, timeout=5)
p.sendline (b'Hello')
p.sendline (b'there')
p.sendline (b'Mr. Python')
p.expect_exact (b'Hello')
p.expect_exact (b'there')
p.expect_exact (b'Mr. Python')
p.sendeof ()
p.expect_exact (pexpect.EOF)
def test_expect_ignore_case(self):
'''This test that the ignorecase flag will match patterns
even if case is different using the regex (?i) directive.
'''
p = pexpect.spawn('cat', echo=False, timeout=5)
p.sendline (b'HELLO')
p.sendline (b'there')
p.expect (b'(?i)hello')
p.expect (b'(?i)THERE')
p.sendeof ()
p.expect (pexpect.EOF)
def test_expect_ignore_case_flag(self):
'''This test that the ignorecase flag will match patterns
even if case is different using the ignorecase flag.
'''
p = pexpect.spawn('cat', echo=False, timeout=5)
p.ignorecase = True
p.sendline (b'HELLO')
p.sendline (b'there')
p.expect (b'hello')
p.expect (b'THERE')
p.sendeof ()
p.expect (pexpect.EOF)
def test_expect_order (self):
'''This tests that patterns are matched in the same order as given in the pattern_list.
(Or does it? Doesn't it also pass if expect() always chooses
(one of the) the leftmost matches in the input? -- grahn)
... agreed! -jquast, the buffer ptr isn't forwarded on match, see first two test cases
'''
p = pexpect.spawn('cat', echo=False, timeout=5)
self._expect_order(p)
def test_expect_order_exact (self):
'''Like test_expect_order(), but using expect_exact().
'''
p = pexpect.spawn('cat', echo=False, timeout=5)
p.expect = p.expect_exact
self._expect_order(p)
    def _expect_order (self, p):
        """Drive *p* (a `cat` child) and assert which pattern index matches
        at each step as the input is consumed."""
        # Feed four lines followed by EOF; `cat` echoes them back in order.
        p.sendline (b'1234')
        p.sendline (b'abcd')
        p.sendline (b'wxyz')
        p.sendline (b'7890')
        p.sendeof ()
        # '1234' is first in both the pattern list and the stream.
        index = p.expect ([
            b'1234',
            b'abcd',
            b'wxyz',
            pexpect.EOF,
            b'7890' ])
        assert index == 0, (index, p.before, p.after)
        # '1234' has been consumed; 'abcd' (list index 3) matches next.
        index = p.expect ([
            b'54321',
            pexpect.TIMEOUT,
            b'1234',
            b'abcd',
            b'wxyz',
            pexpect.EOF], timeout=5)
        assert index == 3, (index, p.before, p.after)
        # Same list again: 'wxyz' (list index 4) is next in the stream.
        index = p.expect ([
            b'54321',
            pexpect.TIMEOUT,
            b'1234',
            b'abcd',
            b'wxyz',
            pexpect.EOF], timeout=5)
        assert index == 4, (index, p.before, p.after)
        # Only '7890' remains before EOF, so list index 3 matches.
        index = p.expect ([
            pexpect.EOF,
            b'abcd',
            b'wxyz',
            b'7890' ])
        assert index == 3, (index, p.before, p.after)
        # Stream exhausted: EOF (list index 3) matches last.
        index = p.expect ([
            b'abcd',
            b'wxyz',
            b'7890',
            pexpect.EOF])
        assert index == 3, (index, p.before, p.after)
def test_expect_setecho_off(self):
'''This tests that echo may be toggled off.
'''
p = pexpect.spawn('cat', echo=True, timeout=5)
try:
self._expect_echo_toggle(p)
except IOError:
if sys.platform.lower().startswith('sunos'):
if hasattr(unittest, 'SkipTest'):
raise unittest.SkipTest("Not supported on this platform.")
return 'skip'
raise
def test_expect_setecho_off_exact(self):
p = pexpect.spawn('cat', echo=True, timeout=5)
p.expect = p.expect_exact
try:
self._expect_echo_toggle(p)
except IOError:
if sys.platform.lower().startswith('sunos'):
if hasattr(unittest, 'SkipTest'):
raise unittest.SkipTest("Not supported on this platform.")
return 'skip'
raise
def test_waitnoecho(self):
" Tests setecho(False) followed by waitnoecho() "
p = pexpect.spawn('cat', echo=False, timeout=5)
try:
p.setecho(False)
p.waitnoecho()
except IOError:
if sys.platform.lower().startswith('sunos'):
if hasattr(unittest, 'SkipTest'):
raise unittest.SkipTest("Not supported on this platform.")
return 'skip'
raise
    def test_waitnoecho_order(self):
        ''' This tests that we can wait on a child process to set echo mode.
        For example, this tests that we could wait for SSH to set ECHO False
        when asking of a password. This makes use of an external script
        echo_wait.py. '''
        # presumably echo_wait.py disables ECHO after a short delay (the
        # asserted 2..10s window below) -- verify against the script itself.
        p1 = pexpect.spawn('%s echo_wait.py' % self.PYTHONBIN)
        start = time.time()
        try:
            p1.waitnoecho(timeout=10)
        except IOError:
            # Solaris ptys do not support waiting on echo mode.
            if sys.platform.lower().startswith('sunos'):
                if hasattr(unittest, 'SkipTest'):
                    raise unittest.SkipTest("Not supported on this platform.")
                return 'skip'
            raise
        end_time = time.time() - start
        # waitnoecho() must return only after the child turned echo off.
        assert end_time < 10 and end_time > 2, "waitnoecho did not set ECHO off in the expected window of time."
        # test that we actually timeout and return False if ECHO is never set off.
        # `cat` never touches the tty modes, so echo stays on.
        p1 = pexpect.spawn('cat')
        start = time.time()
        retval = p1.waitnoecho(timeout=4)
        end_time = time.time() - start
        assert end_time > 3, "waitnoecho should have waited longer than 2 seconds. retval should be False, retval=%d"%retval
        assert retval==False, "retval should be False, retval=%d"%retval
        # This one is mainly here to test default timeout for code coverage.
        p1 = pexpect.spawn('%s echo_wait.py' % self.PYTHONBIN)
        start = time.time()
        p1.waitnoecho()
        end_time = time.time() - start
        assert end_time < 10, "waitnoecho did not set ECHO off in the expected window of time."
def test_expect_echo (self):
'''This tests that echo is on by default.
'''
p = pexpect.spawn('cat', echo=True, timeout=5)
self._expect_echo(p)
def test_expect_echo_exact (self):
'''Like test_expect_echo(), but using expect_exact().
'''
p = pexpect.spawn('cat', echo=True, timeout=5)
p.expect = p.expect_exact
self._expect_echo(p)
    def _expect_echo (self, p):
        """Assert that with echo enabled a sent line is matched twice:
        once from the tty echo and once from `cat` repeating it."""
        p.sendline (b'1234') # Should see this twice (once from tty echo and again from cat).
        # First occurrence: the tty echo.
        index = p.expect ([
            b'1234',
            b'abcd',
            b'wxyz',
            pexpect.EOF,
            pexpect.TIMEOUT])
        # NOTE(review): "\n"+p.before mixes str and bytes on Python 3, so the
        # failure message itself would raise TypeError -- confirm.
        assert index == 0, "index="+str(index)+"\n"+p.before
        # Second occurrence: `cat` writing the line back.
        index = p.expect ([
            b'1234',
            b'abcd',
            b'wxyz',
            pexpect.EOF])
        assert index == 0, "index="+str(index)
    def _expect_echo_toggle(self, p):
        """Assert that lines appear twice while echo is on, once while echo
        is off, and twice again after echo is re-enabled."""
        p.sendline (b'1234') # Should see this twice (once from tty echo and again from cat).
        index = p.expect ([
            b'1234',
            b'abcd',
            b'wxyz',
            pexpect.EOF,
            pexpect.TIMEOUT])
        # NOTE(review): "\n"+p.before mixes str and bytes on Python 3, so the
        # failure message itself would raise TypeError -- confirm.
        assert index == 0, "index="+str(index)+"\n"+p.before
        index = p.expect ([
            b'1234',
            b'abcd',
            b'wxyz',
            pexpect.EOF])
        assert index == 0, "index="+str(index)
        p.setecho(0) # Turn off tty echo
        p.waitnoecho()
        p.sendline (b'abcd') # Now, should only see this once.
        p.sendline (b'wxyz') # Should also be only once.
        # With echo off, each line matches exactly once.
        index = p.expect ([
            pexpect.EOF,
            pexpect.TIMEOUT,
            b'abcd',
            b'wxyz',
            b'1234'])
        assert index == 2, "index="+str(index)
        index = p.expect ([
            pexpect.EOF,
            b'abcd',
            b'wxyz',
            b'7890'])
        assert index == 2, "index="+str(index)
        p.setecho(1) # Turn on tty echo
        p.sendline (b'7890') # Should see this twice.
        # Echo restored: '7890' matches on both passes.
        index = p.expect ([pexpect.EOF,b'abcd',b'wxyz',b'7890'])
        assert index == 3, "index="+str(index)
        index = p.expect ([pexpect.EOF,b'abcd',b'wxyz',b'7890'])
        assert index == 3, "index="+str(index)
        p.sendeof()
def test_expect_index (self):
'''This tests that mixed list of regex strings, TIMEOUT, and EOF all
return the correct index when matched.
'''
p = pexpect.spawn('cat', echo=False, timeout=5)
self._expect_index(p)
def test_expect_index_exact (self):
'''Like test_expect_index(), but using expect_exact().
'''
p = pexpect.spawn('cat', echo=False, timeout=5)
p.expect = p.expect_exact
self._expect_index(p)
    def _expect_index (self, p):
        """Assert the returned index for pattern, TIMEOUT and EOF entries."""
        p.sendline (b'1234')
        index = p.expect ([b'abcd',b'wxyz',b'1234',pexpect.EOF])
        assert index == 2, "index="+str(index)
        p.sendline (b'abcd')
        index = p.expect ([pexpect.TIMEOUT,b'abcd',b'wxyz',b'1234',pexpect.EOF])
        assert index == 1, "index="+str(index)+str(p)
        p.sendline (b'wxyz')
        index = p.expect ([b'54321',pexpect.TIMEOUT,b'abcd',b'wxyz',b'1234',pexpect.EOF])
        assert index == 3, "index="+str(index) # Expect 'wxyz'
        # None of the listed patterns occur in this junk line, so the
        # 1-second expect must resolve to the TIMEOUT entry.
        p.sendline (b'$*!@?')
        index = p.expect ([b'54321',pexpect.TIMEOUT,b'abcd',b'wxyz',b'1234',pexpect.EOF],
                timeout=1)
        assert index == 1, "index="+str(index) # Expect TIMEOUT
        p.sendeof ()
        index = p.expect ([b'54321',pexpect.TIMEOUT,b'abcd',b'wxyz',b'1234',pexpect.EOF])
        assert index == 5, "index="+str(index) # Expect EOF
def test_expect (self):
the_old_way = subprocess.Popen(args=['ls', '-l', '/bin'],
stdout=subprocess.PIPE).communicate()[0].rstrip()
p = pexpect.spawn('ls -l /bin')
the_new_way = b''
while 1:
i = p.expect ([b'\n', pexpect.EOF])
the_new_way = the_new_way + p.before
if i == 1:
break
the_new_way = the_new_way.rstrip()
the_new_way = the_new_way.replace(b'\r\n', b'\n'
).replace(b'\r', b'\n').replace(b'\n\n', b'\n').rstrip()
the_old_way = the_old_way.replace(b'\r\n', b'\n'
).replace(b'\r', b'\n').replace(b'\n\n', b'\n').rstrip()
assert the_old_way == the_new_way, hex_diff(the_old_way, the_new_way)
def test_expect_exact (self):
the_old_way = subprocess.Popen(args=['ls', '-l', '/bin'],
stdout=subprocess.PIPE).communicate()[0].rstrip()
p = pexpect.spawn('ls -l /bin')
the_new_way = b''
while 1:
i = p.expect_exact ([b'\n', pexpect.EOF])
the_new_way = the_new_way + p.before
if i == 1:
break
the_new_way = the_new_way.replace(b'\r\n', b'\n'
).replace(b'\r', b'\n').replace(b'\n\n', b'\n').rstrip()
the_old_way = the_old_way.replace(b'\r\n', b'\n'
).replace(b'\r', b'\n').replace(b'\n\n', b'\n').rstrip()
| |
<gh_stars>1-10
# coding: utf-8
# In[2]:
# errorCorrection_PatchDS_3D(Sim_posmod, Ti_primod, wel_obs_data, i_dim, j_dim, k_dim, run_smooth)
# Authors: <NAME>, <NAME>
# Contact: <EMAIL>
# Date: Oct 22, 2018
# This is the function to correct the posterior residual errors at mismatched wells using "MPS Direct Sampling" for 3D models of posterior.
# This function returns the DS corrected 3D models posterior.
# e.g. corrected_posterior = errorCorrection_PatchDS_3D(Sim_posmod, Ti_primod, wel_obs_data, i_dim, j_dim, k_dim, True)
# Sim_posmod: the 2D array matrix that contains all the posterior realizations(each realization is vectorized) that need to be corrected,
# Sim_posmod = realization_number x total_grid_number_per_model(i_dim x j_dim x k_dim)
# Ti_primod: the 2D array matrix that contains all the prior realizations(each realization is vectorized), used as training image, same dimension as the "Sim_posmod"
# wel_obs_data: the 3D arrays that contains the well observation data,
# wel_obs_data = well_number x well_sample_points x 4 (i_location, j_location, k_location, and observation value)
# i_dim, j_dim, k_dim: the i, j, k dimension of the model
# run_smooth: False or True.
# If "True", the input "Sim_posmod" will be smoothed to remove the noise, before running the DS error correction.
# If "False", the “Sim_posmod” will be directly used for DS error correction.
import numpy as np
import random
from tqdm import tqdm
def errorCorrection_PatchDS_3D(Sim_posmod, Ti_primod, wel_obs_data, i_dim, j_dim, k_dim, run_smooth):
###################################
####set up some constant variables
##################################
realnums = len(Sim_posmod)
d_obs_loc_val = wel_obs_data
Well_Amount = len(wel_obs_data[:,0,0])
Well_Depth = len(wel_obs_data[0,:,0])
# Realization_Index = layer_num
Realization_Layer = k_dim
Realization_Height = j_dim
Realization_Width = i_dim
# TI_Index = layer_num
TI_Layer = Realization_Layer
TI_Height = Realization_Height
TI_Width = Realization_Width
corrected_posterior = []
for real_num in tqdm(range(realnums)):
### Posterior to re-construct
if run_smooth == True:
# use the statistical filter to smooth out noise (isolated points)
realization = Sim_posmod[real_num].reshape(Realization_Layer,Realization_Height,Realization_Width)
old_realization = np.zeros((Realization_Layer,Realization_Height,Realization_Width))
smooth_x = 1
smooth_y = 1
noisedPattern = []
for realization_z in range(Realization_Layer):
for realization_y in range(Realization_Height):
for realization_x in range(Realization_Width):
bottom_y = max(realization_y-smooth_y,0)
up_y = min(realization_y+smooth_y+1,Realization_Height)
bottom_x = max(realization_x-smooth_x,0)
up_x = min(realization_x+smooth_x+1,Realization_Width)
noisedPattern = []
for sample_y in range(bottom_y,up_y):
for sample_x in range(bottom_x,up_x):
noisedPattern.append(realization[realization_z][sample_y][sample_x])
noisedPattern.sort();
sample_value = noisedPattern[int(len(noisedPattern)/2)];
old_realization[realization_z][realization_y][realization_x] = sample_value
else:
old_realization = Sim_posmod[real_num].reshape(Realization_Layer,Realization_Height,Realization_Width)
### Prior as training image
TI = Ti_primod[real_num].reshape(TI_Layer,TI_Height,TI_Width)
#############################################
##### set up some temporary variables #####
#############################################
well_index = 0
well_depth_index = 0
well_value = 0
well_x = 0
well_y = 0
well_z = 0
realization_value = 0
realization_z = 0
realization_y = 0
realization_x = 0
point_x = 0
point_y = 0
point_z = 0
# this is the new realization
new_realization = np.empty((Realization_Layer,Realization_Height,Realization_Width))
new_realization_operation = np.empty((Realization_Layer,Realization_Height,Realization_Width)) # 0 no operation 1 mismatch 2 match
# initial the new realization
for realization_z in range(Realization_Layer):
for realization_y in range(Realization_Height):
for realization_x in range(Realization_Width):
new_realization[realization_z][realization_y][realization_x] = -1.0;
#############################################
##### find the mismatch location #####
#############################################
for well_index in range(Well_Amount):
# attention: the well data in d_obs_loc_val is reservely ranked
well_x = int(d_obs_loc_val[well_index][well_depth_index][0]) # this is the x corridinate of well data
well_y = int(d_obs_loc_val[well_index][well_depth_index][1]) # this is the y corridinate of well data
#print("the " + str(well_index)+"-th well x: "+ str(well_x)+" y: "+str(well_y))
for well_depth_index in range(Well_Depth):
well_z = int(d_obs_loc_val[well_index][well_depth_index][2]) # this is the z corridinate of well data
well_value = d_obs_loc_val[well_index][well_depth_index][3] # this is the value of well data
new_realization[well_z][well_y][well_x] = well_value;
new_realization_operation[well_z][well_y][well_x] = 2;
realization_value = old_realization[well_z][well_y][well_x] # this is the value of old realization
# key 1: determine the points that need to be resimulated
if well_value != realization_value:
bottom_z = max(well_z-1,0)
up_z = min(well_z+2,Realization_Layer)
#bottom_z = max(well_z,0)
#up_z = min(well_z+1,Realization_Layer)
bottom_y = max(well_y-1,0)
up_y = min(well_y+2,Realization_Height)
bottom_x = max(well_x-1,0)
up_x = min(well_x+2,Realization_Width)
for point_z in range(bottom_z,up_z):
for point_y in range (bottom_y,up_y):
for point_x in range (bottom_x,up_x):
if(new_realization_operation[point_z][point_y][point_x]==0):
new_realization_operation[point_z][point_y][point_x] = 1;
#############################################
###### set up the parameter of DS ##########
#############################################
Neighborhood = 30
Threshold = 0.01
Fraction = Realization_Layer * Realization_Height * Realization_Width * 0.6
weight_new = 0.6
weight_old = 0.4
#############################################
####### perform DS to correct mismatch ######
#############################################
# this cell is used for defining variables
probability = 20
probability_step = 3
pattern_new_value = []
pattern_new_x = []
pattern_new_y = []
pattern_new_z = []
pattern_old_value = []
pattern_old_x = []
pattern_old_y = []
pattern_old_z = []
circle = 1
continueGather = True
distance = 10.0
distance_new = 10.0
distance_old = 10.0
distance_min = 10.0
sample_x = 0
sample_y = 0
sample_z = 0
sample_value = 0
sample_Amount = 0
conditioning_pattern_value = 0
training_pattern_value = 0
pattern_index = 0
bottom_z = 0
up_z = 0
bottom_y = 0
up_y = 0
bottom_x = 0
up_x = 0
loop_status = True
########################################################
######### MPS - DIRECT SAMPLING MAIN FUNCTION #########
########################################################
# this is the solution and patch direct sampling: (1) a limited searching area (2) paste a patch at a time
# core idea: (1) the solution of the closest simulated point privides the guidance (2) simulate a patch at a time
# control parameter
loop_CloseSolution_Patch_count = 0
loop_CloseSolution_Patch_MaxCount = 10
# parameter: the template stripe to collect the informed points
stride_x = 1
stride_y = 1
stride_z = 4
circle_x = 0
circle_y = 0
circle_z = 0
# parameter: the size of simulated patch
radius_simulationPatch_x = 4 # the size of simulation patch is 2*radius+1
radius_simulationPatch_y = 4
radius_simulationPatch_z = 1
radius_simulationPatch_min_x = 1
radius_simulationPatch_min_y = 1
radius_simulationPatch_min_z = 0
radius_simulationPatch_slope = 1
# parameter: the size of previous soluation search
radius_solution_x = radius_simulationPatch_x + 1
radius_solution_y = radius_simulationPatch_y + 1
radius_solution_z = 0
# parameter: the size of current searching area: closest solution method
radius_search_solution_x = 3
radius_search_solution_y = 3
radius_search_solution_z = 1
# parameter: the size of current searching area: exhaustive search method
radius_search_exhaustive_x = 10
radius_search_exhaustive_y = 10
radius_search_exhaustive_z = 3
new_realization_temporary = np.zeros((Realization_Layer,Realization_Height,Realization_Width))
new_realization_operation_temporary = np.zeros((Realization_Layer,Realization_Height,Realization_Width))
solution_array_x = np.zeros((Realization_Layer,Realization_Height,Realization_Width))
solution_array_y = np.zeros((Realization_Layer,Realization_Height,Realization_Width))
solution_array_z = np.zeros((Realization_Layer,Realization_Height,Realization_Width))
# initial the new realization
for realization_z in range(Realization_Layer):
for realization_y in range(Realization_Height):
for realization_x in range(Realization_Width):
solution_array_x[realization_z][realization_y][realization_x] = -1;
solution_array_y[realization_z][realization_y][realization_x] = -1;
solution_array_z[realization_z][realization_y][realization_x] = -1;
solution_x = 0
solution_y = 0
solution_z = 0
previous_x = 0
previous_y = 0
previous_z = 0
distance_min_x = 0
distance_min_y = 0
distance_min_z = 0
patch_realization_z = 0
patch_realization_y = 0
patch_realization_x = 0
patch_TI_z = 0
patch_TI_y = 0
patch_TI_x = 0
loop_status = True
while loop_CloseSolution_Patch_count < loop_CloseSolution_Patch_MaxCount and loop_status == True:
loop_CloseSolution_Patch_count += 1
loop_status = False
#print("start to correct the error: extend the circle")
# print(str(loop_CloseSolution_Patch_count)+"-th loop:")
radius_simulationPatch_x = max(radius_simulationPatch_min_x,radius_simulationPatch_x-radius_simulationPatch_slope)
radius_simulationPatch_y = max(radius_simulationPatch_min_y,radius_simulationPatch_y-radius_simulationPatch_slope)
radius_simulationPatch_z = max(radius_simulationPatch_min_z,radius_simulationPatch_z-radius_simulationPatch_slope)
probability += probability_step
for realization_z in range(Realization_Layer):
for realization_y in range(Realization_Height):
for realization_x in range(Realization_Width):
new_realization_temporary[realization_z][realization_y][realization_x] = -1
new_realization_operation_temporary[realization_z][realization_y][realization_x] = 0
# '''
# print("before processing:")
# for realization_z in range(Realization_Layer):
# print("slice_z: "+str(realization_z))
# plt.imshow(new_realization[realization_z],vmin=-1, vmax=3)
# plt.show()
# plt.imshow(new_realization_operation[realization_z],vmin=-1, vmax=3)
# plt.show()
# '''
for realization_z in range(Realization_Layer):
for realization_y in range(Realization_Height):
for realization_x in range(Realization_Width):
# if this point needs to be simulated
if(new_realization_operation[realization_z][realization_y][realization_x]==1 and new_realization_temporary[realization_z][realization_y][realization_x]==-1.0):
pattern_new_value.clear()
pattern_new_z.clear()
pattern_new_y.clear()
pattern_new_x.clear()
pattern_old_value.clear()
pattern_old_z.clear()
pattern_old_y.clear()
pattern_old_x.clear()
# gather the conditioning points
circle = 1
continueGather = True
while continueGather :
circle_x = int((circle-1)/stride_x);
circle_y = int((circle-1)/stride_y);
circle_z = int((circle-1)/stride_z);
#print("circle:"+str(circle))
#print("circle_z:"+str(circle_z))
circle += 1
bottom_z = max(realization_z-circle_z,0)
up_z = min(realization_z+circle_z+1,Realization_Layer)
bottom_y = max(realization_y-circle_y,0)
up_y = min(realization_y+circle_y+1,Realization_Height)
bottom_x = max(realization_x-circle_x,0)
up_x = min(realization_x+circle_x+1,Realization_Width)
for point_z in range(bottom_z,up_z):
for point_y in range(bottom_y,up_y):
for point_x in range(bottom_x,up_x):
if continueGather == False:
continue;
if(abs(point_z-realization_z)==circle_z or
abs(point_y-realization_y)==circle_y or
abs(point_x-realization_x)==circle_x):
if(new_realization[point_z][point_y][point_x]!=-1):
pattern_new_value.append(new_realization[point_z][point_y][point_x])
pattern_new_z.append(point_z-realization_z)
pattern_new_y.append(point_y-realization_y)
pattern_new_x.append(point_x-realization_x)
elif (old_realization[point_z][point_y][point_x]!=-1):
if random.randint(0,99) < probability :
pattern_old_value.append(old_realization[point_z][point_y][point_x])
pattern_old_z.append(point_z-realization_z)
pattern_old_y.append(point_y-realization_y)
pattern_old_x.append(point_x-realization_x)
if len(pattern_new_value)+len(pattern_old_value) > Neighborhood :
continueGather = False
#find the closest simulated point
distance = 10.0
distance_min = 10000.0
bottom_z = max(realization_z-radius_solution_z,0)
up_z = min(realization_z+radius_solution_z+1,Realization_Height)
bottom_y = max(realization_y-radius_solution_y,0)
up_y = min(realization_y+radius_solution_y+1,Realization_Height)
bottom_x = max(realization_x-radius_solution_x,0)
up_x = min(realization_x+radius_solution_x+1,Realization_Width)
for point_z in range(bottom_z,up_z):
for point_y in range(bottom_y,up_y):
for point_x in range(bottom_x,up_x):
if solution_array_z[point_z][point_y][point_x] != -1:
distance = (point_z-realization_z)*(point_z-realization_z)
+ (point_y-realization_y)*(point_y-realization_y)
+ (point_x-realization_x)*(point_x-realization_x)
if distance < distance_min:
solution_x = int(solution_array_x[point_z][point_y][point_x])
solution_y = int(solution_array_y[point_z][point_y][point_x])
solution_z = | |
X = np.stack(X_arrays, axis = 0)
Y = np.stack(Y_arrays, axis = 0)
# Generate data
return X, Y
class generator_df_LSTM(keras.utils.all_utils.Sequence):
    """ Class that generates batches of data ready for training an LSTM model. The data is expected to
    come from a dataframe and to follow event style (cues and outcomes separated by underscores)

    Attributes
    ----------
    data: dataframe
        dataframe with the first column containing the cues and second column containing the outcomes
    batch_size: int
        number of examples in each batch
    num_cues: int
        number of allowed cues
    num_outcomes: int
        number of allowed outcomes
    cue_index: dict
        mapping from cues to indices
    outcome_index: dict
        mapping from outcomes to indices
    max_len: int
        Consider only 'max_len' first tokens in a sequence
    vector_encoding: str
        Whether to use one-hot encoding (='onehot') or embedding (='embedding'). Default: 'onehot'
    shuffle_epoch: Boolean
        whether to shuffle the data after every epoch

    Returns
    -------
    class object
        generator for keras. It inherits from keras.utils.all_utils.Sequence
    """

    def __init__(self, data, batch_size, num_cues, num_outcomes,
                 cue_index, outcome_index, max_len,
                 vector_encoding = 'onehot', shuffle_epoch = False):
        'Initialization'
        self.data = data
        self.batch_size = batch_size
        self.num_cues = num_cues
        self.num_outcomes = num_outcomes
        self.cue_index = cue_index
        self.outcome_index = outcome_index
        self.max_len = max_len
        self.vector_encoding = vector_encoding
        self.shuffle_epoch = shuffle_epoch
        self.on_epoch_end()

    def __len__(self):
        'Denotes the number of batches per epoch'
        # Incomplete trailing batches are dropped (floor division).
        return int(np.floor(len(self.data) / self.batch_size))

    def __getitem__(self, index):
        'Generate one batch of data'
        # Generate indices of the batch
        indexes_batch = self.indexes[index*self.batch_size:(index+1)*self.batch_size]
        # Generate data
        X, Y = self.__data_generation(indexes_batch)
        return X, Y

    def on_epoch_end(self):
        'Updates indexes after each epoch'
        self.indexes = np.arange(len(self.data))
        # Truthiness test instead of the '== True' anti-idiom.
        if self.shuffle_epoch:
            np.random.shuffle(self.indexes)

    def __data_generation(self, indexes_batch):
        'Generates data containing batch_size samples'  # X : (batch_size, *dim, n_channels)
        # Select the cue encoder according to the requested vector encoding.
        if self.vector_encoding == 'onehot': # One-hot encoding
            seq_to_vec = seq_to_onehot_2darray
        else: # Embedding
            seq_to_vec = seq_to_integers_1darray
        X_arrays = [seq_to_vec(cue_seq, self.cue_index, self.num_cues, self.max_len) for cue_seq in self.data.loc[self.data.index[indexes_batch], 'cues']]
        Y_arrays = [seq_to_onehot_1darray(outcome_seq, self.outcome_index, self.num_outcomes) for outcome_seq in self.data.loc[self.data.index[indexes_batch], 'outcomes']]
        # Stack X before Y for consistency with the sibling generator classes.
        X = np.stack(X_arrays, axis=0)
        Y = np.stack(Y_arrays, axis=0)
        return X, Y
def train_LSTM(data_train, data_valid, cue_index, outcome_index,
               shuffle_epoch = False, num_threads = 1, verbose = 1,
               metrics = ['accuracy', 'precision', 'recall', 'f1score'],
               params = {'max_len': 10,
                         'embedding_input': None,
                         'embedding_dim': None,
                         'epochs': 1, # number of iterations on the full set
                         'batch_size': 128,
                         'hidden_neuron':64, # number of neurons in the input layer
                         'lr': 0.0001, # learning rate
                         'dropout': 0,
                         'optimizer': optimizers.RMSprop,
                         'losses': losses.binary_crossentropy,
                         'last_activation': 'sigmoid'}):
    """ Train an LSTM

    Parameters
    ----------
    data_train: dataframe or class
        dataframe, path to a '.gz' event file or indexed text file containing training data
    data_valid: class or dataframe
        dataframe, path to a '.gz' event file or indexed text file containing validation data
    cue_index: dict
        mapping from cues to indices. The dictionary should include only the cues to keep in the data
    outcome_index: dict
        mapping from outcomes to indices. The dictionary should include only the outcomes to keep in the data
    shuffle_epoch: Boolean
        whether to shuffle the data after every epoch
    num_threads: int
        maximum number of processes to use - it should be >= 1. Default: 1
    verbose: int (0, 1, or 2)
        verbosity mode. 0 = silent, 1 = one line per epoch, 2 = detailed. Default: 1
    metrics: list
        metric names ('accuracy', 'precision', 'recall', 'f1score') or callables to monitor
    params: dict
        model parameters:
        'max_len': int
            Consider only 'max_len' first tokens in a cue sequence. Default: 10
        'embedding_input': str, numpy matrix or None
            There are 3 possible choices: (1) if embedding_input = 'learn', learn embedding vectors from scratch while
            training the model. An embedding layer will be added to the network; (2) if embedding_input = 'path',
            extract embedding vectors from an embedding text file given in 'path' (it is important that it is a
            text file); (3) Use the already prepared embedding matrix for training. You can use
            prepare_embedding_matrix() from the preprocessing module. Default: None
        'embedding_dim': int or None
            Length of the cue embedding vectors. Default: 50
        'epochs': int
            Number of passes through the entire training dataset that has to be completed. Default: 1
        'batch_size': int
            Number of training examples to use for each update
        'hidden_neuron': int
            Number of neurons in the LSTM layer
        'lr': float
            Learning rate
        'dropout': float
            Dropout in the LSTM layer
        'optimizer': class
            Keras optimizer function
        'losses': func
            Keras loss function
        'last_activation': str
            Keras activation in the output layer

    Returns
    -------
    tuple
        keras fit history and model objects
    """
    ### Defensive copies: both default arguments are mutable and were
    ### previously modified in place ('metrics[i] = ...' below and
    ### params['embedding_dim'] = ... in the embedding branches), which
    ### silently corrupted the shared default objects across calls.
    metrics = list(metrics)
    params = dict(params)
    ### check verbose and convert to keras verbose
    if verbose == 0:
        verbose_k = 0
    elif verbose in (1, 2):
        verbose_k = 2
    else:
        raise ValueError("incorrect verbose value: choose an integer between 0 and 2")
    if verbose == 2:
        _ = sys.stdout.write('\n*** Model compilation\n\n')
        sys.stdout.flush()
        start_compile = time.time()
    ### Extract number of cues and outcomes from the index systems
    num_cues = len(cue_index)
    num_outcomes = len(outcome_index)
    ### Select the appropriate model generator based on the type of data
    # Training data
    if isinstance(data_train, pd.DataFrame):
        generator_train = generator_df_LSTM
    elif isinstance(data_train, IndexedFile):
        generator_train = generator_textfile_LSTM
    elif isinstance(data_train, str):
        data_train = IndexedFile(data_train, 'gz')
        generator_train = generator_textfile_LSTM
    else:
        raise ValueError("data_train should be either a path to an event file, a dataframe or an indexed text file")
    # Validation data
    if isinstance(data_valid, pd.DataFrame):
        generator_valid = generator_df_LSTM
    elif isinstance(data_valid, IndexedFile):
        generator_valid = generator_textfile_LSTM
    elif isinstance(data_valid, str):
        data_valid = IndexedFile(data_valid, 'gz')
        generator_valid = generator_textfile_LSTM
    else:
        raise ValueError("data_valid should be either a path to an event file, a dataframe or an indexed text file")
    # Convert the metric list to a list that can be understood by the FNN model
    # (operates on the local copy made above, never on the caller's list)
    for i, m in enumerate(metrics):
        if m == 'precision':
            metrics[i] = precision
        elif m == 'recall':
            metrics[i] = recall
        elif m == 'f1score':
            metrics[i] = f1score
    # Extract from the params dict the parameters that are used repeatedly to reduce run time
    max_len = params['max_len']
    batch_size = params['batch_size']
    ### Initialise the model
    model = Sequential()
    ### Add embedding layer if requested + decide vector encoding type
    if not params['embedding_input']:
        vector_encoding_0 = 'onehot'
    else:
        vector_encoding_0 = 'embedding'
        if params['embedding_input'] == 'learn':
            model.add(Embedding(num_cues+1, params['embedding_dim'], input_length = max_len))
        elif isinstance(params['embedding_input'], str) and not params['embedding_input'] == 'learn': # if pre-trained embedding provided
            params['embedding_dim'] = extract_embedding_dim(params['embedding_input']) # Extract embedding dimension
            embedding_mat = prepare_embedding_matrix(params['embedding_input'], cue_index)
            model.add(Embedding(num_cues+1, params['embedding_dim'], input_length = max_len,
                                weights = [embedding_mat], trainable = False))
        elif isinstance(params['embedding_input'], np.ndarray):
            params['embedding_dim'] = extract_embedding_dim(params['embedding_input']) # Extract embedding dimension
            model.add(Embedding(num_cues+1, params['embedding_dim'], input_length = max_len,
                                weights = [params['embedding_input']], trainable = False))
    # LSTM layer
    model.add(LSTM(params['hidden_neuron'], return_sequences = False, input_shape = (max_len, num_cues)))
    # Add drop out
    model.add(Dropout(params['dropout']))
    # Add output layer
    model.add(Dense(num_outcomes,
                    activation = params['last_activation']))
    # Compile the model
    model.compile(loss = params['losses'],
                  optimizer = params['optimizer'](lr = params['lr']),
                  metrics = metrics)
    if verbose == 2:
        _ = sys.stdout.write('Model compilation completed in %.1fs\n\n' \
                             % (time.time() - start_compile))
        _ = sys.stdout.write('*** Model fitting\n\n')
        sys.stdout.flush()
        start_fit = time.time()
    ### Initiate the generators for the train, valid and test data
    train_gen = generator_train(data = data_train,
                                batch_size = batch_size,
                                num_cues = num_cues,
                                num_outcomes = num_outcomes,
                                cue_index = cue_index,
                                outcome_index = outcome_index,
                                max_len = max_len,
                                vector_encoding = vector_encoding_0,
                                shuffle_epoch = shuffle_epoch)
    valid_gen = generator_valid(data = data_valid,
                                batch_size = batch_size,
                                num_cues = num_cues,
                                num_outcomes = num_outcomes,
                                cue_index = cue_index,
                                outcome_index = outcome_index,
                                max_len = max_len,
                                vector_encoding = vector_encoding_0,
                                shuffle_epoch = shuffle_epoch)
    # Fit the model
    # No parallel processing if the inputs are text files (still need to be sorted out)
    if isinstance(data_train, pd.DataFrame) and isinstance(data_valid, pd.DataFrame):
        out = model.fit_generator(generator = train_gen,
                                  validation_data = valid_gen,
                                  epochs = params['epochs'],
                                  use_multiprocessing = True,
                                  verbose = verbose_k,
                                  workers = num_threads-1)
    else:
        out = model.fit_generator(generator = train_gen,
                                  validation_data = valid_gen,
                                  epochs = params['epochs'],
                                  use_multiprocessing = False,
                                  verbose = verbose_k,
                                  workers = 0)
    hist = out.history
    if verbose == 2:
        _ = sys.stdout.write('\nModel fitting completed in %.0fs\n' \
                             % (time.time() - start_fit))
        sys.stdout.flush()
    return hist, model
def grid_search_LSTM(data_train, data_valid, cue_index, outcome_index,
params, prop_grid, tuning_output_file, shuffle_epoch = False,
shuffle_grid = | |
import numpy as np
import pandas as pd
from pprint import pprint
import argparse
from pytorch_pretrained_bert.tokenization import (BasicTokenizer,
BertTokenizer, whitespace_tokenize)
import collections
import torch
from torch.utils.data import TensorDataset
from pytorch_pretrained_bert.modeling import BertForQuestionAnswering, BertConfig
import math
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
# from tqdm import tqdm
from parafinder import ParaFinder
torch.manual_seed(123)
class SquadExample(object):
    """A single training/test example for the SQuAD dataset.

    For examples without an answer, the start and end position are -1.
    """

    def __init__(self,
                 example_id,
                 para_text,
                 qas_id,
                 question_text,
                 doc_tokens,
                 unique_id):
        # Keep the raw paragraph text alongside its whitespace-split tokens.
        self.example_id = example_id
        self.para_text = para_text
        self.qas_id = qas_id
        self.question_text = question_text
        self.doc_tokens = doc_tokens
        self.unique_id = unique_id

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Same rendering as before: question id, question text, then the
        # space-joined document tokens.
        pieces = [
            "qas_id: %s" % self.qas_id,
            "question_text: %s" % self.question_text,
            "doc_tokens: [%s]" % " ".join(self.doc_tokens),
        ]
        return ", ".join(pieces)
### Convert paragraph to tokens and returns question_text
def read_squad_examples(input_data):
    """Read a SQuAD json file into a list of SquadExample.

    Each entry contributes one SquadExample per question in entry['ques'];
    qas_id/unique_id are sequential across the whole input.
    """
    def _is_whitespace(ch):
        # Same whitespace set as before: space, tab, CR, LF, narrow NBSP.
        return ch in (" ", "\t", "\r", "\n") or ord(ch) == 0x202F

    examples = []
    next_id = 0
    for entry in input_data:
        paragraph_text = entry['text']
        # Accumulate characters into the current token until the next
        # whitespace run starts a new one.
        doc_tokens = []
        at_token_start = True
        for ch in paragraph_text:
            if _is_whitespace(ch):
                at_token_start = True
                continue
            if at_token_start:
                doc_tokens.append(ch)
            else:
                doc_tokens[-1] += ch
            at_token_start = False
        for question in entry['ques']:
            examples.append(SquadExample(example_id=entry['id'],
                                         qas_id=next_id,
                                         para_text=paragraph_text,
                                         question_text=question,
                                         doc_tokens=doc_tokens,
                                         unique_id=next_id))
            next_id += 1
    return examples
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
class InputFeatures(object):
    """A single set of features of data (one doc span of one example)."""

    def __init__(self,
                 unique_id,
                 example_index,
                 doc_span_index,
                 tokens,
                 token_is_max_context,
                 token_to_orig_map,
                 input_ids,
                 input_mask,
                 segment_ids):
        self.unique_id = unique_id                        # global feature id
        self.example_index = example_index                # owning example's index
        self.doc_span_index = doc_span_index              # which sliding window
        self.tokens = tokens                              # wordpiece tokens incl. [CLS]/[SEP]
        self.token_is_max_context = token_is_max_context  # token idx -> bool
        self.token_to_orig_map = token_to_orig_map        # token idx -> original token idx
        self.input_ids = input_ids                        # vocab ids, zero-padded
        self.input_mask = input_mask                      # 1 = real token, 0 = padding
        self.segment_ids = segment_ids                    # 0 = question, 1 = document
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 doc_stride, max_query_length):
    """Loads a data file into a list of `InputBatch`s.

    Each example is wordpiece-tokenized and split into overlapping doc spans
    (sliding window with stride `doc_stride`) so documents longer than
    `max_seq_length` are still fully covered. Every span becomes one
    InputFeatures laid out as: [CLS] query [SEP] doc-span [SEP], zero-padded
    to `max_seq_length`.
    """
    features = []
    unique_id = 1
    for (example_index, example) in enumerate(examples):
        query_tokens = tokenizer.tokenize(example.question_text)
        ### Truncate the query if query length > max_query_length..
        if len(query_tokens) > max_query_length:
            query_tokens = query_tokens[0:max_query_length]
        # Bidirectional maps between whitespace tokens and their wordpiece
        # sub-tokens, needed later to project predictions back to raw text.
        tok_to_orig_index = []
        orig_to_tok_index = []
        all_doc_tokens = []
        for (i, token) in enumerate(example.doc_tokens):
            orig_to_tok_index.append(len(all_doc_tokens))
            sub_tokens = tokenizer.tokenize(token)
            for sub_token in sub_tokens:
                tok_to_orig_index.append(i)
                all_doc_tokens.append(sub_token)
        # NOTE(review): tok_start_position / tok_end_position are never used
        # in this prediction-only path.
        tok_start_position = None
        tok_end_position = None
        # -3 accounts for [CLS], the query/doc [SEP], and the final [SEP].
        max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
        # We can have documents that are longer than the maximum sequence length.
        # To deal with this we do a sliding window approach, where we take chunks
        # of the up to our max length with a stride of `doc_stride`.
        _DocSpan = collections.namedtuple(  # pylint: disable=invalid-name
            "DocSpan", ["start", "length"])
        doc_spans = []
        start_offset = 0
        while start_offset < len(all_doc_tokens):
            length = len(all_doc_tokens) - start_offset
            if length > max_tokens_for_doc:
                length = max_tokens_for_doc
            doc_spans.append(_DocSpan(start=start_offset, length=length))
            if start_offset + length == len(all_doc_tokens):
                break
            start_offset += min(length, doc_stride)
        for (doc_span_index, doc_span) in enumerate(doc_spans):
            tokens = []
            token_to_orig_map = {}
            token_is_max_context = {}
            segment_ids = []
            # Segment 0: [CLS] + query + [SEP].
            tokens.append("[CLS]")
            segment_ids.append(0)
            for token in query_tokens:
                tokens.append(token)
                segment_ids.append(0)
            tokens.append("[SEP]")
            segment_ids.append(0)
            # Segment 1: this doc span + trailing [SEP].
            for i in range(doc_span.length):
                split_token_index = doc_span.start + i
                token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
                is_max_context = _check_is_max_context(doc_spans, doc_span_index,
                                                       split_token_index)
                token_is_max_context[len(tokens)] = is_max_context
                tokens.append(all_doc_tokens[split_token_index])
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)
            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            features.append(InputFeatures(unique_id=unique_id,
                                          example_index=example_index,
                                          doc_span_index=doc_span_index,
                                          tokens=tokens,
                                          token_is_max_context=token_is_max_context,
                                          token_to_orig_map=token_to_orig_map,
                                          input_ids=input_ids,
                                          input_mask=input_mask,
                                          segment_ids=segment_ids))
            unique_id += 1
    return features
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text.

    Args:
        pred_text: de-tokenized predicted span text.
        orig_text: original (untokenized) document text covering the span.
        do_lower_case: passed to BasicTokenizer for re-tokenizing orig_text.
        verbose_logging: if True, log why a projection fell back to orig_text.

    Returns:
        The substring of `orig_text` aligned with `pred_text`, or `orig_text`
        itself when alignment fails.
    """
    # Bug fix: this module never defined `logger`, so every verbose_logging
    # branch raised NameError. Create one locally (function-scope import keeps
    # the block self-contained; module has no logging setup of its own).
    import logging
    logger = logging.getLogger(__name__)

    def _strip_spaces(text):
        # Remove spaces, remembering where each kept char came from.
        ns_chars = []
        ns_to_s_map = collections.OrderedDict()
        for (i, c) in enumerate(text):
            if c == " ":
                continue
            ns_to_s_map[len(ns_chars)] = i
            ns_chars.append(c)
        ns_text = "".join(ns_chars)
        return (ns_text, ns_to_s_map)

    tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
    tok_text = " ".join(tokenizer.tokenize(orig_text))
    start_position = tok_text.find(pred_text)
    if start_position == -1:
        if verbose_logging:
            logger.info(
                "Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
        return orig_text
    end_position = start_position + len(pred_text) - 1
    (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
    (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
    if len(orig_ns_text) != len(tok_ns_text):
        if verbose_logging:
            logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
                        orig_ns_text, tok_ns_text)
        return orig_text
    # We then project the characters in `pred_text` back to `orig_text` using
    # the character-to-character alignment.
    tok_s_to_ns_map = {}
    for (i, tok_index) in tok_ns_to_s_map.items():
        tok_s_to_ns_map[tok_index] = i
    orig_start_position = None
    if start_position in tok_s_to_ns_map:
        ns_start_position = tok_s_to_ns_map[start_position]
        if ns_start_position in orig_ns_to_s_map:
            orig_start_position = orig_ns_to_s_map[ns_start_position]
    if orig_start_position is None:
        if verbose_logging:
            logger.info("Couldn't map start position")
        return orig_text
    orig_end_position = None
    if end_position in tok_s_to_ns_map:
        ns_end_position = tok_s_to_ns_map[end_position]
        if ns_end_position in orig_ns_to_s_map:
            orig_end_position = orig_ns_to_s_map[ns_end_position]
    if orig_end_position is None:
        if verbose_logging:
            logger.info("Couldn't map end position")
        return orig_text
    output_text = orig_text[orig_start_position:(orig_end_position + 1)]
    return output_text
# Lightweight record for one candidate (start, end) answer span, before text
# projection and de-duplication in `predict`.
_PrelimPrediction = collections.namedtuple(  # pylint: disable=invalid-name
    "PrelimPrediction",
    ["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
# Final-form candidate: projected answer text plus its span logits.
_NbestPrediction = collections.namedtuple(  # pylint: disable=invalid-name
    "NbestPrediction", ["text", "start_logit", "end_logit"])
def predict(examples, all_features, all_results, max_answer_length):
    """Pick the best answer text (plus probability) for every example.

    Args:
        examples: list of SquadExample.
        all_features: InputFeatures derived from `examples`.
        all_results: RawResult tuples carrying per-feature start/end logits.
        max_answer_length: longest span (in wordpieces) considered valid.

    Returns:
        OrderedDict mapping each example to a (text, probability) pair for
        its top-ranked prediction.
    """
    n_best_size = 10
    ### Adding index to feature ###
    example_index_to_features = collections.defaultdict(list)
    for feature in all_features:
        example_index_to_features[feature.example_index].append(feature)
    unique_id_to_result = {}
    for result in all_results:
        unique_id_to_result[result.unique_id] = result
    all_predictions = collections.OrderedDict()
    # (Removed the original dead `index` counter: `index = +1` assigned 1
    # each iteration — a `+=` typo — and the variable was never read.)
    for example in examples:
        # NOTE(review): features are grouped by example_index above but
        # looked up by example.unique_id here; the two coincide because both
        # count examples sequentially — confirm if id assignment changes.
        features = example_index_to_features[example.unique_id]
        prelim_predictions = []
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]
            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            for start_index in start_indexes:
                for end_index in end_indexes:
                    #### we remove the indexes which are invalid @
                    if start_index >= len(feature.tokens):
                        continue
                    if end_index >= len(feature.tokens):
                        continue
                    if start_index not in feature.token_to_orig_map:
                        continue
                    if end_index not in feature.token_to_orig_map:
                        continue
                    if not feature.token_is_max_context.get(start_index, False):
                        continue
                    if end_index < start_index:
                        continue
                    length = end_index - start_index + 1
                    if length > max_answer_length:
                        continue
                    prelim_predictions.append(
                        _PrelimPrediction(
                            feature_index=feature_index,
                            start_index=start_index,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index]))
        # Rank candidate spans by summed start/end logit.
        prelim_predictions = sorted(
            prelim_predictions,
            key=lambda x: (x.start_logit + x.end_logit),
            reverse=True)
        seen_predictions = {}
        nbest = []
        for pred in prelim_predictions:
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            if pred.start_index > 0:  # this is a non-null prediction
                tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
                tok_text = " ".join(tok_tokens)
                # De-tokenize WordPieces that have been split off.
                tok_text = tok_text.replace(" ##", "")
                tok_text = tok_text.replace("##", "")
                # Clean whitespace
                tok_text = tok_text.strip()
                tok_text = " ".join(tok_text.split())
                orig_text = " ".join(orig_tokens)
                final_text = get_final_text(tok_text, orig_text, True)
                if final_text in seen_predictions:
                    continue
                seen_predictions[final_text] = True
            else:
                final_text = ""
                seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit))
        if not nbest:
            nbest.append(
                _NbestPrediction(text="No result found", start_logit=0.0, end_logit=0.0))
        assert len(nbest) >= 1
        total_scores = []
        # NOTE(review): best_non_null_entry is computed but never used below.
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
            if not best_non_null_entry:
                if entry.text:
                    best_non_null_entry = entry
        probs = _compute_softmax(total_scores)
        nbest_json = []
        for (i, entry) in enumerate(nbest):
            output = collections.OrderedDict()
            output["text"] = entry.text
            output["probability"] = probs[i]
            output["start_logit"] = entry.start_logit
            output["end_logit"] = entry.end_logit
            nbest_json.append(output)
        assert len(nbest_json) >= 1
        # Keep only the top-1 text/probability per example.
        all_predictions[example] = (nbest_json[0]["text"], nbest_json[0]["probability"])
    return all_predictions
# Raw model output for one feature: per-token start/end answer logits.
RawResult = collections.namedtuple("RawResult",
                                   ["unique_id", "start_logits", "end_logits"])
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--paragraph", | |
# <gh_stars>1-10  (dataset scaffolding artifact, not Python; kept as a comment)
from index import db, bcrypt
import datetime
import decimal
import json
from sqlalchemy.orm import relationship
from flask import jsonify
from sqlalchemy.dialects.mysql import LONGTEXT
from sqlalchemy import JSON
# Many-to-many join table linking users to the rules that apply to them.
user_rule_association = db.Table(
    'user_rule_association', db.Model.metadata,
    db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
    db.Column('rule_id', db.Integer, db.ForeignKey('rule.id')),
    info={'bind_key': 'wehomeproperty'}
)
class User(db.Model):
    """Application user; linked many-to-many to Rule via user_rule_association."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    name = db.Column(db.String(255), index=True, unique=True)
    rules = relationship("Rule", secondary=user_rule_association, back_populates="users")
    created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime(), default=datetime.datetime.now, onupdate=datetime.datetime.now)
    @staticmethod
    def get_user_with_name_or_id(id=None, name=None):
        """Look up a user by id (preferred) or by name.

        Returns:
            The matching User, or None when no argument is given or nothing
            matches. (Bug fix: previously raised UnboundLocalError when
            called with neither id nor name, since `user` was never bound.)
        """
        user = None
        if id:
            user = User.query.filter_by(id=id).first()
        elif name:
            user = User.query.filter_by(name=name).first()
        return user
class Rule(db.Model):
    """API access rule; shared across users via user_rule_association."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    users = relationship("User", secondary=user_rule_association, back_populates="rules")
    api_id = db.Column(db.String(255))
    # Throttle limits at three granularities; stored as strings — presumably
    # parsed by callers. TODO confirm expected format.
    throttle_day = db.Column(db.String(255))
    throttle_hour = db.Column(db.String(255))
    throttle_min = db.Column(db.String(255))
    includes = db.Column(db.Text())
    excludes = db.Column(db.Text())
    statistics = db.Column(db.Boolean(), default=False)
    notes = db.Column(db.String(255))
    extra = db.Column(db.Text())
class Picture(db.Model):
    """Image attached to another record, addressed by (type, picturable_id)."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    # type: 0 => avatar, 1 => "linkinde" url per the original note —
    # presumably a LinkedIn url; confirm.
    type = db.Column(db.Integer())
    picturable_id = db.Column(db.Integer())
    filename = db.Column(db.String(255))
    created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime(), default=datetime.datetime.now, onupdate=datetime.datetime.now)
    __table_args__ = (
        db.Index("idx_type_picturable_id", "type", "picturable_id"),
    )
    def __init__(self, type, picturable_id, filename):
        self.type = type
        self.picturable_id = picturable_id
        self.filename = filename
    @staticmethod
    def get_filename_with_user_id(user_id):
        """Return the newest avatar filename (type=0) for `user_id`, or None."""
        picture = Picture.query.filter_by(type=0, picturable_id=user_id).order_by(Picture.id.desc()).first()
        if picture:
            return picture.filename
        return None
class Administrator(db.Model):
    """Marks a user as an admin (one row per admin user)."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    user_id = db.Column(db.Integer(), db.ForeignKey("user.id", ondelete="CASCADE"),
                        nullable=False, index=True, unique=True)
    created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime(), default=datetime.datetime.now, onupdate=datetime.datetime.now)
    @staticmethod
    def is_user_admin(user_id):
        """Return True iff an Administrator row exists for `user_id`."""
        admin = Administrator.query.filter_by(user_id=user_id).first()
        return admin is not None
class File(db.Model):
    """Generic file attachment, addressed by (foreign_id, item_id, type)."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    # 0 => s3 file
    type = db.Column(db.Integer())
    is_active = db.Column(db.Boolean(), default=True, index=True)
    foreign_id = db.Column(db.Integer(), nullable=False, index=True)
    # item id within the owning record (e.g. a report's item id)
    item_id = db.Column(db.Integer(), nullable=False, index=True)
    filename = db.Column(db.String(255))
    # the raw name of upload
    raw_name = db.Column(db.String(255))
    created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime(), default=datetime.datetime.now, onupdate=datetime.datetime.now)
    __table_args__ = (
        db.Index("idx_foreign_id_item_id_type_is_active", "foreign_id", 'item_id', 'type', 'is_active'),
    )
    def __init__(self, type, foreign_id, item_id, filename, raw_name, is_active=True):
        self.type = type
        self.foreign_id = foreign_id
        self.item_id = item_id
        self.filename = filename
        self.is_active = is_active
        self.raw_name = raw_name
class ExtraFile(db.Model):
    """Extra file (report/contract) attached to a user or property."""
    __bind_key__ = 'wehomeproperty'
    # NOTE(review): the string below is not a docstring (it is not the first
    # statement) — it is a harmless no-op statement, kept as-is.
    '''for user or property extra file'''
    id = db.Column(db.Integer(), index=True, primary_key=True)
    # user_id or property_id
    foreign_id = db.Column(db.Integer(), nullable=False, index=True)
    file_id = db.Column(db.Integer())
    # 0 => user inspection report 1 => user contract file
    type = db.Column(db.Integer())
    url = db.Column(db.String(255))
    # 0 => discard 1 => used
    is_active = db.Column(db.Boolean(), default=True, index=True)
    created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime(), default=datetime.datetime.now, onupdate=datetime.datetime.now)
    def __init__(self, foreign_id, file_id, type, url, is_active=1):
        self.foreign_id = foreign_id
        self.file_id = file_id
        self.type = type
        self.url = url
        self.is_active = is_active
class Neighborhood(db.Model):
    """Neighborhood-level market metrics keyed by region_id."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    name = db.Column(db.String(255))
    region_id = db.Column(db.Integer(), index=True, unique=True)
    city = db.Column(db.String(255))
    state = db.Column(db.String(255))
    # Historical vs forecast rent/appreciation ratios.
    past_rent_ratio = db.Column(db.Float())
    past_increase_ratio = db.Column(db.Float())
    forecast_rent_ratio = db.Column(db.Float())
    forecast_increase_ratio = db.Column(db.Float())
    # Serialized price-history series — format not visible here; confirm.
    home_value_rent_price_history = db.Column(db.Text())
    home_value_sale_price_history = db.Column(db.Text())
    market_health_index = db.Column(db.Float())
    rent_final_point = db.Column(db.Float())
    zestimate = db.Column(db.Float())
    neighborhood_score = db.Column(db.Float())
    area_geoid = db.Column(db.Integer(), index=True)
    # for IRR (internal-rate-of-return calculation assumptions)
    hoa = db.Column(db.Float())
    property_tax = db.Column(db.Float())
    vaccancy_rate = db.Column(db.Float())
    property_management_fee = db.Column(db.Float())
    leasing_commission = db.Column(db.Float())
    insurance_cost = db.Column(db.Float())
    repair = db.Column(db.Float())
    cap_ex = db.Column(db.Float())
    acquisition_cost = db.Column(db.Float())
    disposition_cost = db.Column(db.Float())
    rent_growth = db.Column(db.Float())
    properties = db.Column(LONGTEXT())
    created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime(), default=datetime.datetime.now, onupdate=datetime.datetime.now)
class Area(db.Model):
    """Geographic area (keyed by geoid) with market stats and IRR defaults."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    geoid = db.Column(db.String(255), index=True, unique=True)
    cities = db.relationship('City', backref='area')
    name = db.Column(db.String(255), index=True)
    eng_name = db.Column(db.String(255), index=True)
    lat = db.Column(db.Float())
    lng = db.Column(db.Float())
    layer_type = db.Column(db.String(255), index=True)
    properties = db.Column(JSON())
    # for IRR (internal-rate-of-return calculation assumptions)
    property_tax = db.Column(db.Float())
    vaccancy_rate = db.Column(db.Float())
    property_management_fee = db.Column(db.Float())
    leasing_commission = db.Column(db.Float())
    insurance_cost = db.Column(db.Float())
    repair = db.Column(db.Float())
    cap_ex = db.Column(db.Float())
    acquisition_cost = db.Column(db.Float())
    disposition_cost = db.Column(db.Float())
    rent_growth = db.Column(db.Float())
    # Financing assumptions.
    down_payment = db.Column(db.Float())
    loan_interest_rate = db.Column(db.Float())
    expenses = db.Column(db.Float())
    closing_costs_misc = db.Column(db.Float())
    # Aggregated market statistics.
    history = db.Column(db.Text())
    block_villa_median = db.Column(db.Float())
    block_apartment_median = db.Column(db.Float())
    deal_average_price = db.Column(db.Float())
    list_average_price = db.Column(db.Float())
    occ_rate_long = db.Column(db.Float())
    occ_rate_airbnb = db.Column(db.Float())
    return_long = db.Column(db.Float())
    return_airbnb = db.Column(db.Float())
    created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime(), default=datetime.datetime.now, onupdate=datetime.datetime.now)
class State(db.Model):
    """US state, keyed by geoid; parent of City rows."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    geoid = db.Column(db.String(255), index=True, unique=True)
    name = db.Column(db.String(255))
    name_abbr = db.Column(db.String(255))
    lat = db.Column(db.Float())
    lng = db.Column(db.Float())
    properties = db.Column(JSON)
    cities = db.relationship('City', backref='state')
    created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime(), default=datetime.datetime.now, onupdate=datetime.datetime.now)
class CensusReport(db.Model):
    """Census payload for a geo unit, addressed by (type, geoid)."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    type = db.Column(db.Integer()) # Refer utils.type.CENSUS_REPORT
    geoid = db.Column(db.String(255))
    census = db.Column(JSON)
    created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime(), default=datetime.datetime.now, onupdate=datetime.datetime.now)
    __table_args__ = (
        db.Index("idx_census_report", "type", 'geoid'),
    )
    def __init__(self, type, geoid, census):
        self.type = type
        self.geoid = geoid
        self.census = census
class City(db.Model):
    """City keyed by geoid; child of Area/State, parent of Zipcode/Neighbor."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    geoid = db.Column(db.String(255), index=True, unique=True)
    area_geoid = db.Column(db.String(255), db.ForeignKey('area.geoid'), index=True)
    state_geoid = db.Column(db.String(255), db.ForeignKey('state.geoid'),index=True)
    zipcodes = db.relationship('Zipcode', backref='city')
    neighbors = db.relationship('Neighbor', backref='city_')
    name = db.Column(db.String(255), index=True)
    lat = db.Column(db.Float())
    lng = db.Column(db.Float())
    properties = db.Column(JSON)
    created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime(), default=datetime.datetime.now, onupdate=datetime.datetime.now)
class Zipcode(db.Model):
    """ZIP code keyed by geoid; child of City (FK nulls on city changes)."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    geoid = db.Column(db.String(255), index=True, unique=True)
    city_geoid = db.Column(db.String(255), db.ForeignKey('city.geoid',onupdate="SET NULL", ondelete="SET NULL"), index=True)
    lat = db.Column(db.Float())
    lng = db.Column(db.Float())
    properties = db.Column(JSON)
    created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime(), default=datetime.datetime.now, onupdate=datetime.datetime.now)
class IpQuery(db.Model):
    """Per-day log of home lookups made from an IP address."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    ip = db.Column(db.String(255), index=True)
    home_id = db.Column(db.String(255))
    source_name = db.Column(db.String(255))
    date = db.Column(db.Date())
    created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime(), default=datetime.datetime.now, onupdate=datetime.datetime.now)
    def __init__(self, ip, home_id, source_name, date):
        self.ip = ip
        self.home_id = home_id
        self.source_name = source_name
        self.date = date
class Neighbor(db.Model):
    """Neighborhood profile with category scores; child of City."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    # neighbor_id = db.Column(db.String(255), index=True, unique=False)
    centroid = db.Column(db.String(255))
    name = db.Column(db.String(255))
    url = db.Column(db.String(255))
    # Category scores stored as integers — rating scale not visible here;
    # confirm against the data source.
    crime = db.Column(db.Integer())
    demographic = db.Column(db.Integer())
    real_estate = db.Column(db.Integer())
    overview = db.Column(db.Integer())
    school = db.Column(db.Integer())
    property = db.Column(JSON)
    city = db.Column(db.String(255), index=True, unique=False)
    city_geoid = db.Column(db.String(255), db.ForeignKey('city.geoid', onupdate="SET NULL", ondelete="SET NULL"), index=True)
class County(db.Model):
    """County keyed by geoid, linked to its CBSA via cbsa_geoid."""
    __bind_key__ = 'wehomeproperty'
    id = db.Column(db.Integer(), index=True, primary_key=True)
    name = db.Column(db.String(255), index=True)
    geoid = db.Column(db.String(255), index=True, unique=True)
    cbsa_geoid = db.Column(db.String(255), index=True)
    lat = db.Column(db.Float())
    lng = db.Column(db.Float())
    properties = db.Column(JSON)
    created_at = db.Column(db.DateTime(), default=datetime.datetime.now)
    updated_at = db.Column(db.DateTime(), default=datetime.datetime.now, onupdate=datetime.datetime.now)
class RedfinMarket(db.Model):
__bind_key__ = 'wehomeproperty'
id = db.Column('index', db.Integer(), primary_key=True)
avg_sale_to_list_mom = db.Column('Avg Sale To List Mom', db.Float())
avg_sale_to_list_yoy = db.Column('Avg Sale To List Yoy', db.Float())
avg_sale_to_list = db.Column('Avg Sale To List', db.Float())
city = db.Column('City', db.String(255))
homes_sold_mom = db.Column('Homes Sold Mom', db.Float())
homes_sold_yoy = db.Column('Homes Sold Yoy', db.Float())
homes_sold = db.Column('Homes Sold', db.Float())
inventory_mom = db.Column('Inventory Mom', db.Float())
inventory_yoy = db.Column('Inventory Yoy', db.Float())
inventory = db.Column('Inventory', db.Float())
measure_display = db.Column('Measure Display', db.String(255))
median_dom_mom = db.Column('Median Dom Mom', db.Integer())
median_dom_yoy = db.Column('Median Dom Yoy', db.Integer())
median_dom = db.Column('Median Dom', db.Integer())
median_list_ppsf_mom = db.Column('Median List Ppsf Mom', db.Float())
median_list_ppsf_yoy = db.Column('Median List Ppsf Yoy', db.Float())
median_list_ppsf = db.Column('Median List Ppsf', db.Float())
median_list_price_mom = db.Column('Median List Price Mom', db.Float())
median_list_price_yoy = db.Column('Median List Price Yoy', db.Float())
median_list_price = db.Column('Median List Price', db.Integer())
median_ppsf_mom = db.Column('Median Ppsf Mom', db.Float())
median_ppsf_yoy = db.Column('Median Ppsf Yoy', db.Float())
median_ppsf = db.Column('Median Ppsf', db.Float())
median_sale_price_mom = db.Column('Median Sale Price Mom', db.Float())
median_sale_price_yoy = db.Column('Median Sale Price Yoy', db.Float())
median_sale_price = db.Column('Median Sale Price', db.String(31))
new_listings_mom = db.Column('New Listings Mom', db.Float())
new_listings_yoy = db.Column('New Listings Yoy', db.Float())
new_listings = db.Column('New Listings', db.Integer())
number_of_records = db.Column('Number of Records', db.Integer())
period_begin = db.Column('Period Begin', db.String(255))
period_duration = db.Column('Period Duration', db.Integer())
period_end = db.Column('Period End', db.String(255))
price_drops_mom = db.Column('Price Drops Mom', db.Float())
price_drops_yoy = db.Column('Price Drops Yoy', db.Float())
price_drops = db.Column('Price Drops', db.Float())
property_type = db.Column('Property Type', db.String(255))
region_type = db.Column('Region Type', db.String(15))
region = db.Column('Region', db.String(255))
sold_above_list_mom = db.Column('Sold Above List Mom', db.Float())
sold_above_list_yoy = db.Column('Sold Above List Yoy', db.Float())
sold_above_list = db.Column('Sold Above List', db.Float())
state_code = db.Column('State Code', db.String(2))
state = db.Column('State', db.String(255))
table_id = db.Column('Table Id', db.String(255))
worksheet_filter = db.Column('Worksheet Filter', db.String(255))
months_of_supply = db.Column('Months Of Supply', db.Float())
months_of_supply_mom = db.Column('Months Of | |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
from collections import defaultdict, deque
from typing import Any, Dict, List, Optional
import time
import numpy as np
import torch
import torch.nn.functional as F
import tqdm
from torch.optim.lr_scheduler import LambdaLR
from bps_nav.common.base_trainer import BaseRLTrainer
from bps_nav.common.env_utils import construct_envs, construct_envs_habitat
from bps_nav.common.rollout_storage import RolloutStorage
from bps_nav.common.tensorboard_utils import TensorboardWriter
from bps_nav.common.utils import (
batch_obs,
generate_video,
linear_decay,
)
from bps_nav.common.logger import logger
from bps_nav.rl.ppo.ppo import PPO
from bps_nav.common.tree_utils import (
tree_append_in_place,
tree_clone_shallow,
tree_map,
tree_select,
tree_clone_structure,
tree_copy_in_place,
)
from bps_nav.rl.ddppo.policy import ResNetPolicy
from gym import spaces
from gym.spaces import Dict as SpaceDict
@torch.jit.script
def so3_to_matrix(q, m):
    # Write the 3x3 rotation matrix for quaternion q into m in place.
    # The formula matches the standard (w, x, y, z) layout, i.e. q[..., 0]
    # is the scalar part; assumes q is unit-norm — TODO confirm at call sites.
    m[..., 0, 0] = 1.0 - 2.0 * (q[..., 2] ** 2 + q[..., 3] ** 2)
    m[..., 0, 1] = 2.0 * (q[..., 1] * q[..., 2] - q[..., 3] * q[..., 0])
    m[..., 0, 2] = 2.0 * (q[..., 1] * q[..., 3] + q[..., 2] * q[..., 0])
    m[..., 1, 0] = 2.0 * (q[..., 1] * q[..., 2] + q[..., 3] * q[..., 0])
    m[..., 1, 1] = 1.0 - 2.0 * (q[..., 1] ** 2 + q[..., 3] ** 2)
    m[..., 1, 2] = 2.0 * (q[..., 2] * q[..., 3] - q[..., 1] * q[..., 0])
    m[..., 2, 0] = 2.0 * (q[..., 1] * q[..., 3] - q[..., 2] * q[..., 0])
    m[..., 2, 1] = 2.0 * (q[..., 2] * q[..., 3] + q[..., 1] * q[..., 0])
    m[..., 2, 2] = 1.0 - 2.0 * (q[..., 1] ** 2 + q[..., 2] ** 2)
@torch.jit.script
def se3_to_4x4(se3_states):
    # Convert a batch of SE(3) states into homogeneous 4x4 transforms.
    # Expects each row as [quaternion(4), translation(...)]; columns 4+ are
    # copied into the translation column, so a 7-dim state is presumed —
    # confirm against callers.
    n = se3_states.size(0)
    mat = torch.zeros((n, 4, 4), dtype=torch.float32, device=se3_states.device)
    mat[:, 3, 3] = 1
    so3 = se3_states[:, 0:4]
    so3_to_matrix(so3, mat[:, 0:3, 0:3])
    mat[:, 0:3, 3] = se3_states[:, 4:]
    return mat
class PPOTrainer(BaseRLTrainer):
    r"""Trainer class for PPO algorithm
    Paper: https://arxiv.org/abs/1707.06347.
    """
    # Task names this trainer supports.
    supported_tasks = ["Nav-v0"]
    def __init__(self, config=None, resume_from=None):
        # NOTE(review): `resume_from` is accepted but not used in this
        # initializer — confirm whether resume handling lives elsewhere.
        super().__init__(config)
        # Populated later by _setup_actor_critic_agent / env construction.
        self.actor_critic = None
        self.agent = None
        self.envs = None
        # if config is not None:
        #     logger.info(f"config: {config}")
        self._static_encoder = False
        self._encoder = None
    def _setup_actor_critic_agent(self, ppo_cfg) -> None:
        r"""Sets up actor critic and agent for PPO.

        Args:
            ppo_cfg: config node with relevant params

        Returns:
            None
        """
        logger.add_filehandler(self.config.LOG_FILE)
        # NOTE(review): `observation_space` is not defined anywhere in this
        # scope — this raises NameError as written. It presumably should be
        # derived from the environments (cf. action_space below) — confirm.
        self.actor_critic = ResNetPolicy(
            observation_space=observation_space,
            action_space=self.envs.action_spaces[0],
            hidden_size=ppo_cfg.hidden_size,
            rnn_type=self.config.RL.DDPPO.rnn_type,
            num_recurrent_layers=self.config.RL.DDPPO.num_recurrent_layers,
            backbone=self.config.RL.DDPPO.backbone,
        )
        self.actor_critic.to(self.device)
        # Wrap the policy in the PPO update machinery.
        self.agent = PPO(
            actor_critic=self.actor_critic,
            clip_param=ppo_cfg.clip_param,
            ppo_epoch=ppo_cfg.ppo_epoch,
            num_mini_batch=ppo_cfg.num_mini_batch,
            value_loss_coef=ppo_cfg.value_loss_coef,
            entropy_coef=ppo_cfg.entropy_coef,
            lr=ppo_cfg.lr,
            eps=ppo_cfg.eps,
            max_grad_norm=ppo_cfg.max_grad_norm,
            use_normalized_advantage=ppo_cfg.use_normalized_advantage,
        )
def save_checkpoint(
self, file_name: str, extra_state: Optional[Dict] = None
) -> None:
r"""Save checkpoint with specified name.
Args:
file_name: file name for checkpoint
Returns:
None
"""
def _cast(param):
if "Half" in param.type():
param = param.to(dtype=torch.float32)
return param
checkpoint = {
"state_dict": {k: _cast(v) for k, v in self.agent.state_dict().items()},
"config": self.config,
}
if extra_state is not None:
checkpoint["extra_state"] = extra_state
torch.save(checkpoint, os.path.join(self.config.CHECKPOINT_FOLDER, file_name))
    def load_checkpoint(self, checkpoint_path: str, *args, **kwargs) -> Dict:
        r"""Load checkpoint of specified path as a dict.

        Thin wrapper around ``torch.load``; extra args/kwargs are forwarded
        verbatim (e.g. ``map_location``).

        Args:
            checkpoint_path: path of target checkpoint
            *args: additional positional args
            **kwargs: additional keyword args

        Returns:
            dict containing checkpoint info
        """
        return torch.load(checkpoint_path, *args, **kwargs)
METRICS_BLACKLIST = {"top_down_map", "collisions.is_collision"}
@classmethod
def _extract_scalars_from_info(cls, info: Dict[str, Any]) -> Dict[str, float]:
result = {}
for k, v in info.items():
if k in cls.METRICS_BLACKLIST:
continue
if isinstance(v, dict):
result.update(
{
k + "." + subk: subv
for subk, subv in cls._extract_scalars_from_info(v).items()
if (k + "." + subk) not in cls.METRICS_BLACKLIST
}
)
# Things that are scalar-like will have an np.size of 1.
# Strings also have an np.size of 1, so explicitly ban those
elif v is None:
result[k] = None
elif np.size(v) == 1 and not isinstance(v, str):
result[k] = float(v)
return result
@classmethod
def _extract_scalars_from_infos(
cls, infos: List[Dict[str, Any]]
) -> Dict[str, List[float]]:
results = defaultdict(list)
for i in range(len(infos)):
for k, v in cls._extract_scalars_from_info(infos[i]).items():
results[k].append(v)
return results
    def _inference(self, rollouts, idx):
        """Run one no-grad policy step for rollout group ``idx``.

        Selects the current step's inputs from the rollout storage,
        evaluates the policy, writes the policy outputs back into the
        rollout buffer, and returns the sampled actions on the CPU.

        Args:
            rollouts: indexable collection of rollout storages.
            idx: which rollout group to run inference for.

        Returns:
            CPU tensor of sampled actions with the trailing dim squeezed.
        """
        with torch.no_grad(), self.timing.add_time("Rollout-Step"):
            with self.timing.add_time("Inference"):
                # Gather this buffer's current-step inputs (observations,
                # hidden state, prev actions, masks).
                step_input = tree_select(
                    rollouts[idx].step, rollouts[idx].storage_buffers
                )
                (
                    values,
                    dist_result,
                    recurrent_hidden_states,
                ) = self.actor_critic.act_fast(
                    step_input["observations"],
                    step_input["recurrent_hidden_states"],
                    step_input["prev_actions"],
                    step_input["masks"],
                )
            with self.timing.add_time("Rollouts-Insert"):
                # Store policy outputs for the learner.
                rollouts[idx].insert(
                    recurrent_hidden_states=recurrent_hidden_states,
                    action_log_probs=dist_result["action_log_probs"],
                    value_preds=values,
                    actions=dist_result["actions"],
                    non_blocking=False,
                )
            with self.timing.add_time("Inference"):
                # Actions move to CPU so the simulator can consume them as
                # numpy arrays.
                cpu_actions = dist_result["actions"].squeeze(-1).to(device="cpu")
        return cpu_actions
def _step_simulation(self, cpu_actions, idx):
with self.timing.add_time("Rollout-Step"), self.timing.add_time(
"Habitat-Step-Start"
):
self.envs.step(idx, cpu_actions.numpy())
obs = self._observations[idx]
rewards = self._rewards[idx]
masks = self._masks[idx]
infos = self._rollout_infos[idx]
return obs, rewards, masks, infos
def _start_simulation(self, cpu_actions, idx):
with self.timing.add_time("Rollout-Step"), self.timing.add_time(
"Habitat-Step-Start"
):
self.envs.step_start(idx, cpu_actions.numpy())
def _wait_simulation(self, idx):
with self.timing.add_time("Rollout-Step"), self.timing.add_time(
"Habitat-Step-Wait"
):
self.envs.step_end(idx)
obs = self._observations[idx]
rewards = self._rewards[idx]
masks = self._masks[idx]
infos = self._rollout_infos[idx]
return obs, rewards, masks, infos
def _render(self, idx):
with self.timing.add_time("Rollout-Step"), self.timing.add_time(
"Renderer-Start"
):
self.envs.render(idx)
def _sync_renderer_and_insert(self, rollouts, sim_step_res, idx):
with self.timing.add_time("Rollout-Step"):
batch, rewards, masks, infos = sim_step_res
with self.timing.add_time("Renderer-Wait"):
self._syncs[idx].wait()
torch.cuda.current_stream().synchronize()
with self.timing.add_time("Rollouts-Insert"):
rollouts[idx].insert(
batch, rewards=rewards, masks=masks, non_blocking=False
)
rollouts[idx].advance()
return masks.size(0)
    def _update_stats(
        self,
        rollouts,
        current_episode_reward,
        running_episode_stats,
        sim_step_res,
        stats_inds,
        idx,
    ):
        """Accumulate episode reward/metric statistics for one env group.

        Args:
            rollouts: unused in this method (kept for a uniform signature).
            current_episode_reward: running per-env reward accumulator.
            running_episode_stats: dict of per-metric accumulators; new keys
                from ``infos`` are added lazily as zero tensors.
            sim_step_res: (batch, rewards, masks, infos) from the simulator.
            stats_inds: indices (or slice) into the stat tensors for this
                group.
            idx: env-group index; unused in this method.
        """
        with self.timing.add_time("Rollout-Step"):
            batch, rewards, masks, infos = sim_step_res
            with self.timing.add_time("Update-Stats"):
                # mask == 0 marks episodes that just finished.
                dones = masks == 0

                def _masked(v):
                    # Keep v only where the episode ended; zeros elsewhere.
                    return torch.where(dones, v, v.new_zeros(()))

                current_episode_reward[stats_inds] += rewards
                # Fold finished episodes' rewards/metrics into the running
                # sums; unfinished episodes contribute nothing yet.
                running_episode_stats["reward"][stats_inds] += _masked(
                    current_episode_reward[stats_inds]
                )
                running_episode_stats["count"][stats_inds] += dones.type_as(
                    running_episode_stats["count"]
                )
                for k, v in infos.items():
                    if k not in running_episode_stats:
                        running_episode_stats[k] = torch.zeros_like(
                            running_episode_stats["count"]
                        )
                    running_episode_stats[k][stats_inds] += _masked(v)
                # NOTE(review): this in-place fill only reaches
                # current_episode_reward if stats_inds is a slice (a view);
                # with fancy (list/tensor) indexing it would modify a
                # temporary copy — confirm stats_inds is a slice.
                current_episode_reward[stats_inds].masked_fill_(dones, 0)
def _collect_rollout_step(
self, rollouts, current_episode_reward, running_episode_stats
):
with self.timing.add_time("Rollout-Step"):
with torch.no_grad(), self.timing.add_time("Inference"):
with torch.no_grad():
step_observation = {
k: v[rollouts.step] for k, v in rollouts.observations.items()
}
(
values,
dist_result,
recurrent_hidden_states,
) = self.actor_critic.act(
step_observation,
rollouts.recurrent_hidden_states[rollouts.step],
rollouts.prev_actions[rollouts.step],
rollouts.masks[rollouts.step],
)
cpu_actions = actions.squeeze(-1).to(device="cpu")
with self.timing.add_time("Habitat-Step-Start"):
self.envs.async_step(cpu_actions)
with self.timing.add_time("Habitat-Step-Wait"):
batch, rewards, masks, infos = self.envs.wait_step()
with self.timing.add_time("Renderer-Render"):
sync = self._draw_batch(batch)
with self.timing.add_time("Update-Stats"):
current_episode_reward += rewards
running_episode_stats["reward"] += (1 - masks) * current_episode_reward
running_episode_stats["count"] += 1 - masks
for k, v in infos.items():
if k not in running_episode_stats:
running_episode_stats[k] = torch.zeros_like(
running_episode_stats["count"]
)
running_episode_stats[k] += (1 - masks) * v
current_episode_reward *= masks
with self.timing.add_time("Rollouts-Insert"):
rollouts.insert(
rewards=rewards, masks=masks,
)
with self.timing.add_time("Renderer-Wait"):
batch = self._fill_batch_result(batch, sync)
with self.timing.add_time("Rollouts-Insert"):
rollouts.insert(batch)
rollouts.advance()
return self.envs.num_envs
@staticmethod
def _update_agent_internal_fn(
rollouts, agent, actor_critic, _static_encoder, timing, warmup=False
):
actor_critic.train()
if _static_encoder:
_encoder.eval()
with timing.add_time("PPO"):
value_loss, action_loss, dist_entropy = agent.update(
rollouts, timing, warmup=warmup
)
rollouts.after_update()
return (value_loss, action_loss, dist_entropy)
def _compute_returns(self, ppo_cfg, rollouts):
with self.timing.add_time("Learning"), torch.no_grad(), self.timing.add_time(
"Inference"
):
for idx in range(len(rollouts)):
last_input = tree_select(
rollouts[idx].step, rollouts[idx].storage_buffers
)
next_value = self.actor_critic.get_value(
last_input["observations"],
last_input["recurrent_hidden_states"],
last_input["prev_actions"],
last_input["masks"],
)
with self.timing.add_time("Compute-Returns"):
rollouts[idx].compute_returns(
next_value, ppo_cfg.use_gae, ppo_cfg.gamma, ppo_cfg.tau
)
def _update_agent(self, rollouts, warmup=False):
with self.timing.add_time("Learning"):
losses = self._update_agent_internal_fn(
rollouts,
self.agent,
self.actor_critic,
self._static_encoder,
self.timing,
warmup=warmup,
)
if self.actor_critic.trt_enabled():
with self.timing.add_time("TRT Refit"):
with self.timing.add_time("TRT Weights"):
weights = self.actor_critic.get_trt_weights()
with self.timing.add_time("TRT Update"):
self.actor_critic.update_trt_weights(weights)
return losses
def _eval_checkpoint(
self,
checkpoint_path: str,
writer: TensorboardWriter,
checkpoint_index: int = 0,
) -> None:
r"""Evaluates a single checkpoint.
Args:
checkpoint_path: path of checkpoint
writer: tensorboard writer object for logging to tensorboard
checkpoint_index: index of cur checkpoint for logging
Returns:
None
"""
from habitat_baselines.common.environments import get_env_class
# Map location CPU is almost always better than mapping to a CUDA device.
ckpt_dict = self.load_checkpoint(checkpoint_path, map_location="cpu")
if self.config.EVAL.USE_CKPT_CONFIG:
config = self._setup_eval_config(ckpt_dict["config"])
else:
config = self.config.clone()
ppo_cfg = config.RL.PPO
config.defrost()
config.TASK_CONFIG.DATASET.SPLIT = config.EVAL.SPLIT
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.SHUFFLE = False
config.TASK_CONFIG.ENVIRONMENT.ITERATOR_OPTIONS.MAX_SCENE_REPEAT_STEPS = -1
config.freeze()
if len(self.config.VIDEO_OPTION) > 0:
config.defrost()
config.TASK_CONFIG.TASK.MEASUREMENTS.append("TOP_DOWN_MAP")
config.TASK_CONFIG.TASK.MEASUREMENTS.append("COLLISIONS")
config.freeze()
# logger.info(f"env config: {config}")
self.envs = construct_envs_habitat(config, get_env_class(config.ENV_NAME))
self.observation_space = SpaceDict(
{
"pointgoal_with_gps_compass": spaces.Box(
low=0.0, high=1.0, shape=(2,), dtype=np.float32
)
}
)
if self.config.COLOR:
self.observation_space = SpaceDict(
{
"rgb": spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(3, *self.config.RESOLUTION),
dtype=np.uint8,
),
**self.observation_space.spaces,
}
)
if self.config.DEPTH:
self.observation_space = SpaceDict(
{
"depth": spaces.Box(
low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(1, *self.config.RESOLUTION),
dtype=np.float32,
),
**self.observation_space.spaces,
}
)
self.action_space = self.envs.action_spaces[0]
self._setup_actor_critic_agent(ppo_cfg)
self.agent.load_state_dict(ckpt_dict["state_dict"])
self.actor_critic = self.agent.actor_critic
self.actor_critic.script_net()
self.actor_critic.eval()
observations = self.envs.reset()
batch = batch_obs(observations, device=self.device)
current_episode_reward = torch.zeros(self.envs.num_envs, 1, device=self.device)
test_recurrent_hidden_states = torch.zeros(
self.config.NUM_PROCESSES,
self.actor_critic.num_recurrent_layers,
ppo_cfg.hidden_size,
device=self.device,
)
prev_actions = torch.zeros(
self.config.NUM_PROCESSES, 1, device=self.device, dtype=torch.long
)
not_done_masks = torch.zeros(
self.config.NUM_PROCESSES, 1, device=self.device, dtype=torch.bool
)
stats_episodes = dict() # dict of dicts that stores stats per episode
rgb_frames = [
[] for _ in range(self.config.NUM_PROCESSES)
] # type: List[List[np.ndarray]]
if len(self.config.VIDEO_OPTION) > 0:
os.makedirs(self.config.VIDEO_DIR, exist_ok=True)
number_of_eval_episodes = self.config.TEST_EPISODE_COUNT
if number_of_eval_episodes == -1:
number_of_eval_episodes = sum(self.envs.number_of_episodes)
else:
total_num_eps = sum(self.envs.number_of_episodes)
if total_num_eps < number_of_eval_episodes:
logger.warn(
f"Config specified {number_of_eval_episodes} eval episodes"
", dataset only has {total_num_eps}."
)
| |
bit too complicated right now
cursor_result, _ = search_fn({
'limit': 1000,
'paginator_options': {'max_limit': 1000},
})
except ValidationError as exc:
return Response({'detail': six.text_type(exc)}, status=400)
group_list = list(cursor_result)
group_ids = [g.id for g in group_list]
is_bulk = len(group_ids) > 1
group_project_ids = {g.project_id for g in group_list}
# filter projects down to only those that have groups in the search results
projects = [p for p in projects if p.id in group_project_ids]
queryset = Group.objects.filter(
id__in=group_ids,
)
discard = result.get('discard')
if discard:
return handle_discard(request, list(queryset), projects, acting_user)
statusDetails = result.pop('statusDetails', result)
status = result.get('status')
release = None
commit = None
if status in ('resolved', 'resolvedInNextRelease'):
if status == 'resolvedInNextRelease' or statusDetails.get('inNextRelease'):
# TODO(jess): We may want to support this for multi project, but punting on it for now
if len(projects) > 1:
return Response({
'detail': 'Cannot set resolved in next release for multiple projects.'
}, status=400)
release = statusDetails.get('inNextRelease') or Release.objects.filter(
projects=projects[0],
organization_id=projects[0].organization_id,
).extra(select={
'sort': 'COALESCE(date_released, date_added)',
}).order_by('-sort')[0]
activity_type = Activity.SET_RESOLVED_IN_RELEASE
activity_data = {
# no version yet
'version': '',
}
status_details = {
'inNextRelease': True,
'actor': serialize(extract_lazy_object(request.user), request.user),
}
res_type = GroupResolution.Type.in_next_release
res_type_str = 'in_next_release'
res_status = GroupResolution.Status.pending
elif statusDetails.get('inRelease'):
# TODO(jess): We could update validation to check if release
# applies to multiple projects, but I think we agreed to punt
# on this for now
if len(projects) > 1:
return Response({
'detail': 'Cannot set resolved in release for multiple projects.'
}, status=400)
release = statusDetails['inRelease']
activity_type = Activity.SET_RESOLVED_IN_RELEASE
activity_data = {
# no version yet
'version': release.version,
}
status_details = {
'inRelease': release.version,
'actor': serialize(extract_lazy_object(request.user), request.user),
}
res_type = GroupResolution.Type.in_release
res_type_str = 'in_release'
res_status = GroupResolution.Status.resolved
elif statusDetails.get('inCommit'):
# TODO(jess): Same here, this is probably something we could do, but
# punting for now.
if len(projects) > 1:
return Response({
'detail': 'Cannot set resolved in commit for multiple projects.'
}, status=400)
commit = statusDetails['inCommit']
activity_type = Activity.SET_RESOLVED_IN_COMMIT
activity_data = {
'commit': commit.id,
}
status_details = {
'inCommit': serialize(commit, request.user),
'actor': serialize(extract_lazy_object(request.user), request.user),
}
res_type_str = 'in_commit'
else:
res_type_str = 'now'
activity_type = Activity.SET_RESOLVED
activity_data = {}
status_details = {}
now = timezone.now()
metrics.incr('group.resolved', instance=res_type_str, skip_internal=True)
# if we've specified a commit, let's see if its already been released
# this will allow us to associate the resolution to a release as if we
# were simply using 'inRelease' above
# Note: this is different than the way commit resolution works on deploy
# creation, as a given deploy is connected to an explicit release, and
# in this case we're simply choosing the most recent release which contains
# the commit.
if commit and not release:
# TODO(jess): If we support multiple projects for release / commit resolution,
# we need to update this to find the release for each project (we shouldn't assume
# it's the same)
try:
release = Release.objects.filter(
projects__in=projects,
releasecommit__commit=commit,
).extra(select={
'sort': 'COALESCE(date_released, date_added)',
}).order_by('-sort')[0]
res_type = GroupResolution.Type.in_release
res_status = GroupResolution.Status.resolved
except IndexError:
release = None
for group in group_list:
with transaction.atomic():
resolution = None
if release:
resolution_params = {
'release': release,
'type': res_type,
'status': res_status,
'actor_id': request.user.id
if request.user.is_authenticated() else None,
}
resolution, created = GroupResolution.objects.get_or_create(
group=group,
defaults=resolution_params,
)
if not created:
resolution.update(
datetime=timezone.now(), **resolution_params)
if commit:
GroupLink.objects.create(
group_id=group.id,
project_id=group.project_id,
linked_type=GroupLink.LinkedType.commit,
relationship=GroupLink.Relationship.resolves,
linked_id=commit.id,
)
affected = Group.objects.filter(
id=group.id,
).update(
status=GroupStatus.RESOLVED,
resolved_at=now,
)
if not resolution:
created = affected
group.status = GroupStatus.RESOLVED
group.resolved_at = now
assigned_to = self_subscribe_and_assign_issue(acting_user, group)
if assigned_to is not None:
result['assignedTo'] = assigned_to
if created:
activity = Activity.objects.create(
project=project_lookup[group.project_id],
group=group,
type=activity_type,
user=acting_user,
ident=resolution.id if resolution else None,
data=activity_data,
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
activity.send_notification()
issue_resolved.send_robust(
organization_id=organization_id,
user=acting_user or request.user,
group=group,
project=project_lookup[group.project_id],
resolution_type=res_type_str,
sender=update_groups,
)
kick_off_status_syncs.apply_async(kwargs={
'project_id': group.project_id,
'group_id': group.id,
})
result.update({
'status': 'resolved',
'statusDetails': status_details,
})
elif status:
new_status = STATUS_CHOICES[result['status']]
with transaction.atomic():
happened = queryset.exclude(
status=new_status,
).update(
status=new_status,
)
GroupResolution.objects.filter(
group__in=group_ids,
).delete()
if new_status == GroupStatus.IGNORED:
metrics.incr('group.ignored', skip_internal=True)
ignore_duration = (
statusDetails.pop('ignoreDuration', None) or
statusDetails.pop('snoozeDuration', None)
) or None
ignore_count = statusDetails.pop(
'ignoreCount', None) or None
ignore_window = statusDetails.pop(
'ignoreWindow', None) or None
ignore_user_count = statusDetails.pop(
'ignoreUserCount', None) or None
ignore_user_window = statusDetails.pop(
'ignoreUserWindow', None) or None
if ignore_duration or ignore_count or ignore_user_count:
if ignore_duration:
ignore_until = timezone.now() + timedelta(
minutes=ignore_duration,
)
else:
ignore_until = None
for group in group_list:
state = {}
if ignore_count and not ignore_window:
state['times_seen'] = group.times_seen
if ignore_user_count and not ignore_user_window:
state['users_seen'] = group.count_users_seen()
GroupSnooze.objects.create_or_update(
group=group,
values={
'until':
ignore_until,
'count':
ignore_count,
'window':
ignore_window,
'user_count':
ignore_user_count,
'user_window':
ignore_user_window,
'state':
state,
'actor_id':
request.user.id if request.user.is_authenticated() else None,
}
)
result['statusDetails'] = {
'ignoreCount': ignore_count,
'ignoreUntil': ignore_until,
'ignoreUserCount': ignore_user_count,
'ignoreUserWindow': ignore_user_window,
'ignoreWindow': ignore_window,
'actor': serialize(extract_lazy_object(request.user), request.user),
}
else:
GroupSnooze.objects.filter(
group__in=group_ids,
).delete()
ignore_until = None
result['statusDetails'] = {}
else:
result['statusDetails'] = {}
if group_list and happened:
if new_status == GroupStatus.UNRESOLVED:
activity_type = Activity.SET_UNRESOLVED
activity_data = {}
elif new_status == GroupStatus.IGNORED:
activity_type = Activity.SET_IGNORED
activity_data = {
'ignoreCount': ignore_count,
'ignoreDuration': ignore_duration,
'ignoreUntil': ignore_until,
'ignoreUserCount': ignore_user_count,
'ignoreUserWindow': ignore_user_window,
'ignoreWindow': ignore_window,
}
groups_by_project_id = defaultdict(list)
for group in group_list:
groups_by_project_id[group.project_id].append(group)
for project in projects:
project_groups = groups_by_project_id.get(project.id)
if project_groups:
issue_ignored.send_robust(
project=project,
user=acting_user,
group_list=project_groups,
activity_data=activity_data,
sender=update_groups)
for group in group_list:
group.status = new_status
activity = Activity.objects.create(
project=project_lookup[group.project_id],
group=group,
type=activity_type,
user=acting_user,
data=activity_data,
)
# TODO(dcramer): we need a solution for activity rollups
# before sending notifications on bulk changes
if not is_bulk:
if acting_user:
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.status_change,
)
activity.send_notification()
if new_status == GroupStatus.UNRESOLVED:
kick_off_status_syncs.apply_async(kwargs={
'project_id': group.project_id,
'group_id': group.id,
})
if 'assignedTo' in result:
assigned_actor = result['assignedTo']
if assigned_actor:
for group in group_list:
resolved_actor = assigned_actor.resolve()
GroupAssignee.objects.assign(group, resolved_actor, acting_user)
result['assignedTo'] = serialize(
assigned_actor.resolve(), acting_user, ActorSerializer())
else:
for group in group_list:
GroupAssignee.objects.deassign(group, acting_user)
is_member_map = {
project.id: project.member_set.filter(user=acting_user).exists() for project in projects
}
if result.get('hasSeen'):
for group in group_list:
if is_member_map.get(group.project_id):
instance, created = create_or_update(
GroupSeen,
group=group,
user=acting_user,
project=project_lookup[group.project_id],
values={
'last_seen': timezone.now(),
}
)
elif result.get('hasSeen') is False:
GroupSeen.objects.filter(
group__in=group_ids,
user=acting_user,
).delete()
if result.get('isBookmarked'):
for group in group_list:
GroupBookmark.objects.get_or_create(
project=project_lookup[group.project_id],
group=group,
user=acting_user,
)
GroupSubscription.objects.subscribe(
user=acting_user,
group=group,
reason=GroupSubscriptionReason.bookmark,
)
elif result.get('isBookmarked') is False:
GroupBookmark.objects.filter(
group__in=group_ids,
user=acting_user,
).delete()
# TODO(dcramer): we could make these more efficient by first
# querying for rich rows are present (if N > 2), flipping the flag
# on those rows, and then creating the missing rows
if result.get('isSubscribed') in (True, False):
is_subscribed = result['isSubscribed']
for group in group_list:
# NOTE: Subscribing without an initiating event (assignment,
# commenting, etc.) clears out the previous subscription reason
# to avoid showing confusing messaging as a result of this
# action. It'd be jarring to go directly from "you are not
# subscribed" to "you were subscribed due since you were
# assigned" just by clicking the "subscribe" button (and you
# may no longer be assigned to the issue anyway.)
GroupSubscription.objects.create_or_update(
user=acting_user,
group=group,
project=project_lookup[group.project_id],
values={
'is_active': is_subscribed,
'reason': GroupSubscriptionReason.unknown,
},
)
result['subscriptionDetails'] = {
'reason': SUBSCRIPTION_REASON_MAP.get(
GroupSubscriptionReason.unknown,
'unknown',
),
}
if 'isPublic' in result:
# We always want to delete an existing share, because triggering
# an isPublic=True even when it's already public, should trigger
# regenerating.
for group in group_list:
if GroupShare.objects.filter(group=group).delete():
result['shareId'] = None
Activity.objects.create(
project=project_lookup[group.project_id],
group=group,
type=Activity.SET_PRIVATE,
user=acting_user,
)
if result.get('isPublic'):
for group in group_list:
share, created = GroupShare.objects.get_or_create(
project=project_lookup[group.project_id],
group=group,
user=acting_user,
)
if created:
result['shareId'] = share.uuid
Activity.objects.create(
project=project_lookup[group.project_id],
group=group,
type=Activity.SET_PUBLIC,
user=acting_user,
)
# XXX(dcramer): this feels a bit shady like it should be its own
# endpoint
if result.get('merge') and len(group_list) > 1:
# don't allow merging cross project
if len(projects) > 1:
return Response({'detail': 'Merging across multiple projects is not supported'})
group_list_by_times_seen = sorted(
group_list,
key=lambda g: (g.times_seen, g.id),
reverse=True,
)
primary_group, groups_to_merge = group_list_by_times_seen[0], group_list_by_times_seen[1:]
group_ids_to_merge = [g.id for g in groups_to_merge]
eventstream_state = eventstream.start_merge(
primary_group.project_id,
group_ids_to_merge,
primary_group.id
)
Group.objects.filter(
id__in=group_ids_to_merge
).update(
status=GroupStatus.PENDING_MERGE
)
transaction_id = uuid4().hex
merge_groups.delay(
from_object_ids=group_ids_to_merge,
to_object_id=primary_group.id,
transaction_id=transaction_id,
eventstream_state=eventstream_state,
)
Activity.objects.create(
project=project_lookup[primary_group.project_id],
group=primary_group,
type=Activity.MERGE,
user=acting_user,
data={
'issues': [{
| |
= [french,finir_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
height = 30
)
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
This can be taken as the general rule for conjugating **ir** verbs in the present tense. All you need to do is find the *stem* of the verb, which was fin- in this case and then apply these endings to figure out how to conjugate the verb for every personal pronoun.
For instance, try this yourself with the verb "grandir" (to grow). The stem is "grand-", so what are the corresponding six conjugations, as in the table above?
This pattern works for most "ir" verbs, and there are hundreds of them. Some common ones are:
- applaudir (to applaud)
- bâtir (to build)
- choisir (to choose)
- désobéir (to disobey)
- finir (to finish)
- grandir (to grow up)
- grossir (to gain weight)
- guérir (to heal, to get well)
- maigrir (to lose weight)
- obéir (to obey)
- punir (to punish)
- réfléchir (to think, to reflect)
- remplir (to fill)
- réussir (to succeed)
- vieillir (to grow old)
Again, though, there will be exceptions...
## 3. The "-re" Regular Verbs
There is a general rubric for conjugating verbs that end in **re** in the present tense.
We will illustrate this with the verb "vendre" (to sell). The stem of the verb vendre is "vend-". We conjugate it by adding on the endings "s", "s", "nothing", "ons", "ez" "ent" for the corresponding pronouns, as follows:
# Conjugation table for the regular "-re" verb "vendre" (to sell).
french = ['je', 'tu', 'elle, il, on', 'nous', 'vous', 'elles, ils']
vendre_stem = ['vend-'] * 6
re_ending = ['s', 's', '', 'ons', 'ez', 'ent']
# Stem "vend" + regular -re endings gives the six conjugated forms.
vendre_conjug = ['vend' + ending for ending in re_ending]

header_style = {
    'values': ['Pronoun', 'Conjugation'],
    'line': {'color': 'rgb(0,0,0)'},
    'fill': {'color': 'rgb(0,35,48)'},
    'align': ['center', 'center'],
    'font': {'color': 'white', 'size': 16},
    'height': 40,
}
cell_style = {
    'values': [french, vendre_conjug],
    'line': {'color': 'black'},
    'fill': {'color': 'rgb(95,102,161)'},
    'align': ['center', 'center'],
    'font': {'color': 'white', 'size': 14},
    'height': 30,
}
trace0 = go.Table(
    columnorder=[1, 2],
    columnwidth=[10, 10],
    header=header_style,
    cells=cell_style,
)
layout = {'width': 500, 'height': 450}
data = [trace0]
fig = {'data': data, 'layout': layout}
iplot(fig)
This can be taken as the general rule for conjugating **re** verbs in the present tense. All you need to do is find the *stem* of the verb, which was vend- in this case and then apply these endings to figure out how to conjugate the verb for every personal pronoun.
For instance, try this yourself with the verb "attendre" (to wait). The stem is "attend-", so what are the corresponding six conjugations, as in the table above?
This pattern works for most "re" verbs, and there are many of them. Some common ones are:
attendre (to wait)
défendre (to defend)
descendre (to descend)
entendre (to hear)
étendre (to stretch)
fondre (to melt)
pendre (to hang, or suspend)
perdre (to lose)
prétendre (to claim)
rendre (to give back, or return)
répondre (to answer)
vendre (to sell)
Again, though, there will be exceptions...
## 1. Exceptions to the regular er verbs
French is filled with exceptions, which makes it a bit of a difficult language to master as one basically has to commit the exceptions to memory. An exception for a verb means that it is not (or maybe just partially) conjugated using the endings given above. Most exceptions arise as an alteration of the stem of the verb.
Thankfully there are not many exceptions for the **er** verbs. Here are three notable ones:
## 1a. The "-oyer" and "-uyer" exceptions:
For verbs like "envoyer" (to send) or "ennuyer" (to annoy) the stem changes the "y" to an "i" for all pronouns except nous and vous:
# Conjugation of "envoyer": the stem's "y" becomes "i" except for nous/vous.
french = ["j'",'tu','elle, il, on','nous','vous','elles, ils']
envoyer_conjug = ['envoie', 'envoies','envoie','envoyons','envoyez','envoient']
trace0 = go.Table(
    # Fix: the table has two columns (see columnwidth and the two value
    # lists), but columnorder listed three.
    columnorder = [1,2],
    columnwidth = [10,10],
    header = dict(
        values = ['Pronoun','Conjugation'],
        line = dict(color = 'rgb(0,0,0)'),
        fill = dict(color = 'rgb(0,35,48)'),
        align = ['center','center'],
        font = dict(color = 'white', size = 16),
        height = 40
    ),
    cells = dict(
        values = [french,envoyer_conjug],
        line = dict(color = 'black'),
        fill = dict(color = 'rgb(95,102,161)'),
        align = ['center', 'center'],
        font = dict(color = 'white', size = 14),
        height = 30
    )
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
## 1b. The "e_er" or "é_er" exceptions:
Verbs like "acheter" (to buy) or "préférer" (to prefer) also follow an exception rule. The accent aigu becomes an accent grave — that is, é becomes è (and for "e_er" verbs the unaccented e becomes è) — except in the nous and vous cases, where the stem does not change. Note this means the pronunciation of the letter changes as well.
# Conjugation of "préférer": é of the stem becomes è except for nous/vous.
preferer_conjug = ['préfère','préfères','préfère','préférons','préférez','préfèrent']
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
trace0 = go.Table(
    # Fix: the table has two columns (see columnwidth and the two value
    # lists), but columnorder listed three.
    columnorder = [1,2],
    columnwidth = [10,10],
    header = dict(
        values = ['Pronoun','Conjugation'],
        line = dict(color = 'rgb(0,0,0)'),
        fill = dict(color = 'rgb(0,35,48)'),
        align = ['center','center'],
        font = dict(color = 'white', size = 16),
        height = 40
    ),
    cells = dict(
        values = [french,preferer_conjug],
        line = dict(color = 'black'),
        fill = dict(color = 'rgb(95,102,161)'),
        align = ['center', 'center'],
        font = dict(color = 'white', size = 14),
        height = 30
    )
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
## 1c. The " –eler " and " -eter " exceptions:
For verbs like "appeler" (to call) or "rejeter" (to reject) the letters "l"
or "t" get doubled. Again, this does not hold for the nous and vous cases.
# Conjugation of "appeler": the "l" doubles except for nous/vous.
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
appeler_conjug = ['appelle','appelles','appelle','appelons','appelez','appellent']
trace0 = go.Table(
    # Fix: the table has two columns (see columnwidth and the two value
    # lists), but columnorder listed three.
    columnorder = [1,2],
    columnwidth = [10,10],
    header = dict(
        values = ['Pronoun','Conjugation'],
        line = dict(color = 'rgb(0,0,0)'),
        fill = dict(color = 'rgb(0,35,48)'),
        align = ['center','center'],
        font = dict(color = 'white', size = 16),
        height = 40
    ),
    cells = dict(
        values = [french,appeler_conjug],
        line = dict(color = 'black'),
        fill = dict(color = 'rgb(95,102,161)'),
        align = ['center', 'center'],
        font = dict(color = 'white', size = 14),
        height = 30
    )
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
It's important to be aware of these exceptions, as you will be able to identify patterns in verbs of these forms and the exceptions themselves, like how it doesn't apply for nous and vous. Knowledge of the exceptions is crucial to mastering the language!
## 2. Exceptions to the regular ir verbs
Unfortunately, with the **ir** verbs, there are many, many exceptions. Three important ones are as follows:
## 2a. Verbs like partir (to leave):
For "partir" (to leave), the key is to drop the "t" from the stem in the singular cases and add the endings "s", "s", "t". For the plural cases, you keep the "t". The conjugations go like this:
# Conjugation of "partir": the "t" is dropped in the singular (then "s",
# "s", "t" are added) and kept in the plural.
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
partir_conjug = ['pars','pars','part','partons','partez','partent']
trace0 = go.Table(
    # Fix: the table has two columns (see columnwidth and the two value
    # lists), but columnorder listed three.
    columnorder = [1,2],
    columnwidth = [10,10],
    header = dict(
        values = ['Pronoun','Conjugation'],
        line = dict(color = 'rgb(0,0,0)'),
        fill = dict(color = 'rgb(0,35,48)'),
        align = ['center','center'],
        font = dict(color = 'white', size = 16),
        height = 40
    ),
    cells = dict(
        values = [french,partir_conjug],
        line = dict(color = 'black'),
        fill = dict(color = 'rgb(95,102,161)'),
        align = ['center', 'center'],
        font = dict(color = 'white', size = 14),
        height = 30
    )
)
layout = dict(width=500, height=450)
data = [trace0]
fig = dict(data = data, layout = layout)
iplot(fig)
Other irregular ir verbs like partir include
- dormir (to sleep)
- mentir (to lie)
- partir (to leave)
- sentir (to feel)
- servir (to serve)
- sortir (to go out)
## 2b. Verbs that end in -llir, -frir, or -vrir
Curiously, these verbs conjugate like an "er" verb. Just take the stem and add the endings "e", "es", "e", "ons", "ez", "ent." For instance, here is the conjugation for ouvrir (to open):
french = ['je','tu','elle, il, on','nous','vous','elles, ils']
ouvrir_conjug = ['ouvre','ouvres','ouvre','ouvrons','ouvrez','ouvrent']
trace0 = go.Table(
columnorder = [1,2,3],
columnwidth = [10,10],
header = dict(
values = ['Pronoun','Conjugation'],
line = dict(color = 'rgb(0,0,0)'),
fill = dict(color = 'rgb(0,35,48)'),
align = ['center','center'],
font = dict(color = 'white', size = 16),
height = 40
),
cells = dict(
values = [french,ouvrir_conjug],
line = dict(color = 'black'),
fill = dict(color = 'rgb(95,102,161)'),
align = ['center', 'center'],
font = dict(color = 'white', size = 14),
| |
import collections
import importlib
import logging
from stdlib_list import stdlib_list
from ..code_fixing.code_fixer import CodeFixer
from ..code_fixing.static import standard_python_functions
LOGGER = logging.getLogger()
class PythonCodeFixer(CodeFixer):
def __init__(self, code, indents, poss_lines):
    """Build the rule tables used to recognise and fix Python source lines.

    :param code: the raw code being fixed
    :param indents: per-line indentation levels of the code
    :param poss_lines: per-line character-possibility data used for
        levenshtein-based correction
    """
    self.code = code
    self.indents = indents
    self.poss_lines = poss_lines
    self.syntax = []
    self.rules = []
    self.statements = []
    self.curr_line_n = 0
    # Global context of known names; the per-class and per-def contexts
    # below track names that are only visible inside those scopes.
    self.context = {'variables': [], 'functions': standard_python_functions(), 'classes': [], 'imports': [], 'methods': []}
    self.class_indent = 0
    self.class_context = collections.defaultdict(lambda: collections.defaultdict(list))
    self.curr_class = None
    self.def_indent = 0
    self.def_context = collections.defaultdict(lambda: collections.defaultdict(list))
    self.curr_def = None
    # Placeholder token -> regex fragment substitutions applied by compile_regex.
    self.syntax.append(('VARIABLE', '[a-z_]+'))
    self.syntax.append(('FUNCTION', '[a-z_]+'))
    self.syntax.append(('STATEMENT', '.+'))
    self.syntax.append(('PARAMETERS', '.*'))
    # Statement-level patterns: (pattern, number of fixed chars, analysis_func, fix_func).
    # BUG FIX: patterns containing `\(` are now raw strings; in plain strings
    # they are invalid escape sequences (SyntaxWarning on modern Python).
    self.statements.append((r'(FUNCTION)\((PARAMETERS)\)', 2, None, self.fix_func_or_class_call))
    self.statements.append((r'lambda (PARAMETERS): (STATEMENT)', 9, None, self.fix_lambda))
    self.statements.append((r'(VARIABLE)\.(FUNCTION)\((PARAMETERS)\)', 3, None, self.fix_method_call))
    self.statements.append((r'(self)\.(FUNCTION)\((PARAMETERS)\)', 7, None, self.fix_self_method_call))
    self.statements.append((r'(STATEMENT) and (STATEMENT)', 5, None, self.fix_and))
    self.statements.append((r'(STATEMENT) or (STATEMENT)', 4, None, self.fix_or))
    self.statements.append((r'not (STATEMENT)', 4, None, self.fix_not))
    self.statements.append((r'(STATEMENT) for (VARIABLE) in (STATEMENT)', 9, None, self.fix_generator))
    self.statements.append((r'\[(STATEMENT) for (VARIABLE) in (STATEMENT)\]', 11, None, self.fix_list_comp))
    self.statements.append(
        (r'(STATEMENT) for (VARIABLE) in range\((STATEMENT)\)', 16, None, self.fix_range_generator))
    self.statements.append(
        (r'\[(STATEMENT) for (VARIABLE) in range\((STATEMENT)\)\]', 18, None, self.fix_range_list_comp))
    self.statements.append((r'true', 4, None, lambda x, y: "True"))
    self.statements.append((r'false', 5, None, lambda x, y: "False"))
    self.statements.append((r'none', 4, None, lambda x, y: "None"))
    self.statements.append((r'(VARIABLE)', 0, None, self.fix_variable))  # default case
    # check how the size can be changed to account for spaces.
    # self.statements.append((r'\((STATEMENT)\)', 2, None, self.fix_bracketed))
    # self.statements.append((r'(STATEMENT) == (STATEMENT)', 4, None, self.fix_eq))
    # self.statements.append((r'(STATEMENT) + (STATEMENT)', 3, None, self.fix_add))
    # self.statements.append((r'(STATEMENT) - (STATEMENT)', 3, None, self.fix_sub))
    # self.statements.append((r'(STATEMENT) * (STATEMENT)', 3, None, self.fix_mul))
    # self.statements.append((r'(STATEMENT) / (STATEMENT)', 3, None, self.fix_div))
    # self.rules: list of quad-tuples (string to match, number of fixed chars, analysis_func, fix_func)
    # analysis_func -> goes over the match and records any context names
    # fix_func -> fixes the str using the recorded context
    self.rules.append((r'import (.*)', 7, None, self.fix_import))
    self.rules.append((r'import (.*?) as (.*)', 11, None, self.fix_import_as))
    self.rules.append((r'from (.*?) import (.*)', 13, None, self.fix_from_import))
    self.rules.append((r'def (VARIABLE)\((PARAMETERS)\):', 7, self.analyze_def, self.fix_def))
    self.rules.append((r'class (VARIABLE):', 7, self.analyze_class, self.fix_class))
    self.rules.append((r'if (STATEMENT):', 4, None, self.fix_if))
    self.rules.append((r'elif (STATEMENT):', 6, None, self.fix_elif))
    self.rules.append((r'return (STATEMENT)', 7, None, self.fix_return))
    self.rules.append((r'while (STATEMENT):', 7, None, self.fix_while))
    self.rules.append((r'for (VARIABLE) in (STATEMENT):', 9, self.analyze_for, self.fix_for))
    self.rules.append((r'for (VARIABLE) in range\((STATEMENT)\):', 16, self.analyze_for_range, self.fix_for_range))
    self.rules.append((r'(FUNCTION)\((PARAMETERS)\)', 2, None, self.fix_function_call))
    self.rules.append((r'(VARIABLE)\.(FUNCTION)\((PARAMETERS)\)', 3, None, self.fix_method_call))
    self.rules.append((r'self\.(FUNCTION)\((PARAMETERS)\)', 7, None, self.fix_self_method_call))
    self.rules.append((r'(VARIABLE) = (STATEMENT)', 3, self.analyze_assignment, self.fix_assignment))
    self.rules.append((r'assert (STATEMENT)', 7, None, self.fix_assert))
    self.rules.append((r'del (STATEMENT)', 4, None, self.fix_del))
    self.rules.append((r'raise (STATEMENT)', 6, None, self.fix_raise))
    self.rules.append((r'global (VARIABLE)', 7, None, self.fix_global))
    self.rules.append((r'pass', 4, None, lambda x, y: 'pass'))
    self.rules.append((r'else:', 5, None, lambda x, y: 'else:'))
    self.rules.append((r'break', 5, None, lambda x, y: 'break'))
    self.rules.append((r'continue', 8, None, lambda x, y: 'continue'))
    self.rules.append((r'(.*)', 0, None, self.fix_default))  # If nothing else works this will
    LOGGER.debug('Compiling main rules.')
    self.rules_regexes = self.compile_regex(self.rules)
    LOGGER.debug('Compiling statement rules.')
    self.statements_regexes = self.compile_regex(self.statements)
def fix(self):
    """
    Main function to be called which finds the closest regex match for each line, extracts context variables and
    function names and then attempts to fix typos.
    :return: Fixed version of the code
    """
    LOGGER.debug('Starting python3 code fixing.')
    fixed_lines = []
    closest_matches = []
    LOGGER.debug('Looking for closest matches.')
    # Pass 1: match each line against the rule regexes and run the analysis
    # callbacks so variables/functions/classes are registered in context
    # before any fixing happens.
    for i in range(len(self.poss_lines)):  # range loop OK because of indexing type
        closest_match = self.find_closest_match(self.poss_lines[i], self.rules_regexes)
        (match, analyze_func, _) = closest_match
        # At each line, check if currently in a class declaration.
        # Dropping below the recorded indent ends the current class/def scope.
        if self.indents[i] < self.class_indent and self.curr_class:
            self.curr_class = None
        if self.indents[i] < self.def_indent and self.curr_def:
            self.curr_def = None
        if analyze_func:
            analyze_func(match.groups(), i)
        closest_matches.append(closest_match)
    # Reset the scope trackers so pass 2 replays the file from the top.
    self.curr_def = None
    self.curr_class = None
    LOGGER.debug('Fixing lines.')
    # Pass 2: apply each matched rule's fix callback to its line.
    for idx, closest_match in enumerate(closest_matches):
        (match, _, fix_func) = closest_match
        # At each line, check if currently in a class declaration.
        if self.indents[idx] < self.class_indent and self.curr_class:
            self.curr_class = None
        if self.indents[idx] < self.def_indent and self.curr_def:
            self.curr_def = None
        self.curr_line_n = idx
        fixed = fix_func(match, self.poss_lines[idx])
        fixed_lines.append(self.naive_fix(fixed))
    # Re-attach the recorded indentation to every fixed line.
    return "\n".join("{indent}{code}".format(indent=" " * indent, code=line) for indent, line in
                     zip(self.indents, fixed_lines))
def naive_fix(self, line):
replace = [('--', '__'), ('\'\'', '"'), (',,', '"')]
for (find, rep) in replace:
line = line.replace(find, rep)
return line
def find_args(self, args):
    """Split a parameter string on top-level commas.

    Commas nested inside parentheses do not split. Returns a list of
    CustomMatch objects, one per argument, with leading whitespace skipped.

    :param args: the raw text between the call's parentheses
    :return: list of CustomMatch objects (empty for no/blank args)
    """
    # If no args or args empty string, return empty list
    if not args or not args.strip():
        return []
    # Init prev to first non-whitespace character
    prev = 0
    while prev < len(args) and args[prev] == " ":
        prev += 1
    # Depth of currently open parentheses; commas only split at depth 0.
    open_brackets = 0
    arguments = []
    for idx, char in enumerate(args):
        if char == "," and open_brackets == 0:
            arguments.append(CustomMatch(args[prev: idx], prev, idx))
            prev = idx + 1
            # BUG FIX: bound the whitespace skip — a trailing comma
            # (e.g. "a, ") previously ran past the end and raised IndexError.
            while prev < len(args) and args[prev] == " ":
                prev += 1
        elif char == "(":
            open_brackets += 1
        elif char == ")":
            open_brackets -= 1
    else:
        # for-else: the loop never breaks, so this always appends the
        # final (or only) argument after the last top-level comma.
        arguments.append(CustomMatch(args[prev:], prev, len(args)))
    return arguments
# ANALYSE
def analyze_class(self, groups, line_n):
    """Record a class definition: register its name and open a class scope."""
    class_name = groups[1]
    LOGGER.debug("Analysing class. Adding {} to context.".format(class_name))
    self.context['classes'].append(class_name)
    self.class_indent = self.indents[line_n]
    self.curr_class = class_name
def analyze_for_range(self, groups, line_n):
    """Register the loop variable of a `for ... in range(...)` line in the active scope."""
    loop_var = groups[1]
    LOGGER.debug("Analysing range. Adding {} to context.".format(loop_var))
    if self.curr_def:
        scope = self.def_context[self.curr_def]
    elif self.curr_class:
        scope = self.class_context[self.curr_class]
    else:
        scope = self.context
    scope['variables'].append(loop_var)
def analyze_for(self, groups, line_n):
    """Register the loop variable of a plain `for ... in ...` line in the active scope."""
    loop_var = groups[1]
    LOGGER.debug("Analysing for. Adding {} to context.".format(loop_var))
    if self.curr_def:
        scope = self.def_context[self.curr_def]
    elif self.curr_class:
        scope = self.class_context[self.curr_class]
    else:
        scope = self.context
    scope['variables'].append(loop_var)
def analyze_assignment(self, groups, line_n):
    """Register the target variable of an assignment in the active scope."""
    target = groups[1]
    LOGGER.debug("Analysing assignment. Adding {} to context.".format(target))
    if self.curr_def:
        scope = self.def_context[self.curr_def]
    elif self.curr_class:
        scope = self.class_context[self.curr_class]
    else:
        scope = self.context
    scope['variables'].append(target)
def analyze_def(self, groups, line_n):
    """Record a function definition: open a def scope, register the name,
    and record the parameter names as variables of the def's context."""
    LOGGER.debug("Analysing function def. Adding {} to context, and setting class context.".format(groups[1]))
    # differentiate between functions and class methods
    self.def_indent = self.indents[line_n]
    self.curr_def = groups[1]
    # NOTE(review): `self.curr_def` was just assigned above, so this first
    # branch is taken whenever the captured name is non-empty, and the
    # method/function branches below appear unreachable — confirm intent.
    if self.curr_def:
        self.def_context[self.curr_def]['functions'].append(groups[1])  # inline func -> only available in scope.
    elif self.curr_class:
        self.context['methods'].append(groups[1])
        self.class_context[self.curr_class]['methods'].append(groups[1])
    else:
        self.context['functions'].append(groups[1])
    # Parameter names become variables visible inside this def.
    variables = groups[2].split(',')
    LOGGER.debug("Analysing function def variables. Adding {} to def context.".format(variables))
    self.def_context[self.curr_def]['variables'].extend(variables)
def fix_default(self, match, poss_chars):
    """Fallback fixer: no rule matched, so return the line unchanged."""
    original = match.groups()[0]
    LOGGER.debug("No match found for {}. Defaulting to not fixing.".format(original))
    return original
def fix_import(self, match, poss_chars):
    """Fix an `import X` line by snapping X to the closest stdlib module name."""
    groups = match.groups()
    candidate = poss_chars[match.start(2): match.end(2)]
    closest, _ = self.levenshtein_closest(candidate, stdlib_list("3.6"))
    LOGGER.debug("Fixing import. Changing from {} to {}, and adding to context after analysis.".format(groups[1],
                                                                                                       closest))
    self.context["imports"].append(closest)
    return 'import {}'.format(closest)
def fix_import_as(self, match, poss_chars):
    """Fix an `import X as Y` line.

    The module name is snapped to the closest stdlib module name and the
    alias Y is recorded in the import context.
    """
    groups = match.groups()
    poss_import = poss_chars[match.start(2): match.end(2)]
    closest_module, _ = self.levenshtein_closest(poss_import, stdlib_list("3.6"))
    LOGGER.debug("Fixing import as. Changing from {} to {}, and adding {} "
                 "to context after analysis.".format(groups[1], closest_module, groups[2]))
    # BUG FIX: was `extend(groups[2])`, which iterated the alias string and
    # added it one character at a time; the alias is a single name, so
    # append it as one entry (matching fix_import's behavior).
    self.context["imports"].append(groups[2])
    return 'import {} as {}'.format(closest_module, groups[2])
def fix_from_import(self, match, poss_chars):
    """Fix a `from X import A, B` line: snap X to the closest stdlib module
    and record each imported name in the import context."""
    groups = match.groups()
    candidate = poss_chars[match.start(2): match.end(2)]
    closest_module, _ = self.levenshtein_closest(candidate, stdlib_list("3.6"))
    imported = [name.strip() for name in groups[2].split(",")]
    LOGGER.debug("Fixing from X import Y. Changing from {} to {}, and adding {}"
                 " to context after analysis.".format(groups[1], closest_module, imported))
    self.context["imports"].extend(imported)
    return 'from {} import {}'.format(closest_module, ", ".join(imported))
def fix_class(self, match, poss_chars):
    """Fix a `class X:` line by snapping X to the closest known class name."""
    groups = match.groups()
    candidate = poss_chars[match.start(2): match.end(2)]
    closest, _ = self.levenshtein_closest(candidate, self.context["classes"])
    LOGGER.debug("Fixing class. Changing from {} to {}.".format(groups[1], closest))
    return 'class {}:'.format(closest)
def fix_if(self, match, poss_chars):
    """Fix an `if <condition>:` line by repairing the condition statement."""
    groups = match.groups()
    cond = CustomMatch(groups[1], match.start(2), match.end(2))
    fixed_stmt = self.fix_statement(cond, poss_chars[cond.start(1): cond.end(1)])
    LOGGER.debug("Fixing if. From {} to {}.".format(groups[1], fixed_stmt))
    return 'if {}:'.format(fixed_stmt)
def fix_elif(self, match, poss_chars):
    """Fix an `elif <condition>:` line by repairing the condition statement."""
    groups = match.groups()
    cond = CustomMatch(groups[1], match.start(2), match.end(2))
    fixed_stmt = self.fix_statement(cond, poss_chars[cond.start(1): cond.end(1)])
    LOGGER.debug("Fixing elif. From {} to {}.".format(groups[1], fixed_stmt))
    return 'elif {}:'.format(fixed_stmt)
def fix_return(self, match, poss_chars):
    """Fix a `return <expr>` line by repairing the returned statement."""
    groups = match.groups()
    expr = CustomMatch(groups[1], match.start(2), match.end(2))
    fixed_stmt = self.fix_statement(expr, poss_chars[expr.start(1): expr.end(1)])
    LOGGER.debug("Fixing return. From {} to {}.".format(groups[1], fixed_stmt))
    return 'return {}'.format(fixed_stmt)
def fix_while(self, match, poss_chars):
    """Fix a `while <condition>:` line by repairing the condition statement."""
    groups = match.groups()
    cond = CustomMatch(groups[1], match.start(2), match.end(2))
    fixed_stmt = self.fix_statement(cond, poss_chars[cond.start(1): cond.end(1)])
    LOGGER.debug("Fixing while. From {} to {}.".format(groups[1], fixed_stmt))
    return 'while {}:'.format(fixed_stmt)
def fix_for(self, match, poss_chars):
    """Fix a `for <var> in <iterable>:` line, repairing both the loop
    variable and the iterable expression."""
    groups = match.groups()
    var_match = CustomMatch(groups[1], match.start(2), match.end(2))
    fixed_var = self.fix_statement(var_match, poss_chars[var_match.start(1): var_match.end(1)])
    LOGGER.debug("Fixing for loop var from {} to {}".format(groups[1], fixed_var))
    stmt_match = CustomMatch(groups[2], match.start(3), match.end(3))
    fixed_stmt = self.fix_statement(stmt_match, poss_chars[stmt_match.start(1): stmt_match.end(1)])
    LOGGER.debug("Fixing for loop stmt from {} to {}".format(groups[2], fixed_stmt))
    return 'for {} in {}:'.format(fixed_var, fixed_stmt)
def fix_for_range(self, match, poss_chars):
    """Fix a `for <var> in range(<expr>):` line, repairing both the loop
    variable and the range argument."""
    groups = match.groups()
    var_match = CustomMatch(groups[1], match.start(2), match.end(2))
    fixed_var = self.fix_statement(var_match, poss_chars[var_match.start(1): var_match.end(1)])
    LOGGER.debug("Fixing for range loop var from {} to {}".format(groups[1], fixed_var))
    stmt_match = CustomMatch(groups[2], match.start(3), match.end(3))
    fixed_stmt = self.fix_statement(stmt_match, poss_chars[stmt_match.start(1): stmt_match.end(1)])
    LOGGER.debug("Fixing for range loop stmt from {} to {}".format(groups[2], fixed_stmt))
    return 'for {} in range({}):'.format(fixed_var, fixed_stmt)
def fix_function_call(self, match, poss_chars):
groups = match.groups()
poss_func = poss_chars[match.start(2): match.end(2)]
closest, _ = self.levenshtein_closest(poss_func, self.context["functions"])
LOGGER.debug(groups)
LOGGER.debug("Fixing func call. Using {} and {}.".format(*(closest, *groups[2:])))
# use 3 not 2 because of list 0-index
new_args = self.fix_arguments(groups[2], | |
weighted by the deriv of the error fct by the output layer. We don't use Lemma 3 dircetly here, we just apply the definition of delta_error
err_errorsignals = [0]*self.layercount
if error_function is 0:
errorbyyzero = -targ/out[:-1] #Kullback-Leibler divergence derivative
else:
if error_function is 1:
errorbyyzero = out[:-1]-targ #Mean-squared-error derivative
else:
if error_function is 2:
errorbyyzero = 1/4*(1-np.sqrt(targ)/np.sqrt(out[:-1])) #Hellinger-distance derivative
else:
if error_function is 3:
errorbyyzero=self.compute_experimental_gradient(out[:-1],targ,1000)
#errorbyyzero = self.chosen_error_fct(targ,out)
for i in range(0,self.layercount):
err_errorsignals[i] = np.dot(errorbyyzero,errorsignals[i]) #this is the matrix variant of D3
#Use (2.2) to get the sought derivatives. Observe that this is an outer product, though not mentioned in the source.
errorbyweights = [0]*self.layercount #dE/dW
errorbyweights[0] = np.outer(err_errorsignals[0],testdata).T #Why do I need to transpose here???
for i in range(1,self.layercount):
errorbyweights[i] = np.outer(err_errorsignals[i-1],ys[i][:-1]) # (L1)
#Compute the change of weights, that means, then apply actualization step of Gradient Descent to weight matrices
deltaweights=[0]*self.layercount
for i in range(0,self.layercount):
deltaweights[i] =-eta*errorbyweights[i]
self.weights[i][:,:-1]= self.weights[i][:,:-1]+ deltaweights[i].T #TODO: Problem: atm we only adjust non-bias weights. Change that!
error = self.PropagateSet(trainingdata, error_function)
return error
def LearnSingleBatch(self, batch, eta=0.01, stoch_coeff=1, error_function=0):
    """Train on one batch: propagate each sampled board, accumulate the
    weight deltas over the whole batch, then apply them in one step.

    :param batch: batch object whose `dic` maps board entries to target vectors
    :param eta: learning rate for the gradient-descent step
    :param stoch_coeff: fraction of the batch to sample (stochastic selection)
    :param error_function: 0 = Kullback-Leibler, 1 = mean squared error,
        2 = Hellinger distance, 3 = experimental gradient
    :return: batch error as computed by PropagateSet
    """
    deltaweights_batch = [0]*self.layercount
    # BUG FIX throughout: integer comparisons previously used `is`
    # (e.g. `selection_size is 0`), which relies on CPython small-int
    # interning and raises a SyntaxWarning on modern Python; use `==`.
    selection_size = int(np.round(len(batch.dic)*stoch_coeff))
    if selection_size == 0:  # prevent empty selection
        selection_size = 1
    random_selection = random.sample(list(batch.dic.keys()), selection_size)
    for entry in random_selection:
        testdata = self.convert_input(Hashable.unwrap(entry))  # input
        targ = batch.dic[entry].reshape(9*9+1)  # target output, this is to be approximated
        if np.sum(targ) > 0:  # we can only learn if there are actual target vectors
            targ = targ/np.linalg.norm(targ, ord=1)  # normalize (L1-norm)
            y = np.append(testdata, [1])  # append 1 for the bias
            ys = [0]*self.layercount  # y values saved for backpropagation
            # Forward-propagate
            for i in range(0, self.layercount):
                W = self.weights[i]
                s = W.dot(y)
                if i == self.layercount-1:  # softmax activation only in the last layer
                    y = np.append(softmax(s), [1])  # append 1 for the bias
                elif self.activation_function == 0:  # hidden layers: tanh
                    y = np.append(np.tanh(s), [1])
                elif self.activation_function == 1:  # hidden layers: relu
                    y = np.append(relu(s), [1])
                ys[i] = y  # save the y values for backpropagation
            out = y
            # Backpropagation
            # Calculate the Jacobian of the softmax activation in the last layer only.
            Jacobian_Softmax = [0]*self.layercount
            for i in range(self.layercount-1, self.layercount):
                yt = ys[i][:-1]  # drop the bias entry
                le = len(yt)
                Jacobian_Softmax_temporary = np.ones((le, le))
                for j in range(0, le):
                    Jacobian_Softmax_temporary[j, :] *= yt[j]
                Jacobian_Softmax_temporary = np.identity(le) - Jacobian_Softmax_temporary
                for j in range(0, le):
                    Jacobian_Softmax_temporary[:, j] *= yt[j]
                Jacobian_Softmax[i] = Jacobian_Softmax_temporary
            # Jacobian_Softmax is quadratic and symmetric.
            if self.activation_function == 0:
                # Jacobian of tanh: diag(1 - y^2)
                Jacobian_tanh = [0]*self.layercount
                for i in range(0, self.layercount):
                    yt = ys[i][:-1]  # drop the bias entry
                    u = 1 - yt*yt
                    Jacobian_tanh[i] = np.diag(u)
                Jacobian_hidden = Jacobian_tanh
            if self.activation_function == 1:
                # Jacobian of relu: diag of the positive-support indicator.
                Jacobian_relu = [0]*self.layercount
                for i in range(0, self.layercount):
                    yt = ys[i][:-1]  # NOTE: this slice is a view, so the
                    # in-place write below also mutates ys[i] (as before).
                    yt[yt > 0] = 1  # zeros stay zero; subgradient choice at 0
                    Jacobian_relu[i] = np.diag(yt)
                Jacobian_hidden = Jacobian_relu
            # Use (L2) and (L3) to get the error signals of the layers.
            errorsignals = [0]*self.layercount
            errorsignals[self.layercount-1] = Jacobian_Softmax[self.layercount-1]  # (L2)
            for i in range(2, self.layercount+1):
                w = self.weights[self.layercount-i+1]
                DFt = Jacobian_hidden[self.layercount-i]
                errdet = np.matmul(w[:, :-1], DFt)  # temporary
                errorsignals[self.layercount-i] = np.dot(errorsignals[self.layercount-i+1], errdet)  # (L3)
            # Use (D3): weight the error signals by the derivative of the
            # chosen error function with respect to the output layer.
            err_errorsignals = [0]*self.layercount
            if error_function == 0:
                errorbyyzero = -targ/out[:-1]  # Kullback-Leibler divergence derivative
            elif error_function == 1:
                errorbyyzero = out[:-1]-targ  # mean-squared-error derivative
            elif error_function == 2:
                errorbyyzero = 1/4*(1-np.sqrt(targ)/np.sqrt(out[:-1]))  # Hellinger-distance derivative
            elif error_function == 3:
                errorbyyzero = self.compute_experimental_gradient(out[:-1], targ, 1000)
            # NOTE(review): any other error_function value leaves
            # errorbyyzero unset and raises NameError below (as before).
            for i in range(0, self.layercount):
                err_errorsignals[i] = np.dot(errorbyyzero, errorsignals[i])  # matrix variant of (D3)
            # Use (2.2) to get the sought derivatives (an outer product).
            errorbyweights = [0]*self.layercount  # dE/dW
            errorbyweights[0] = np.outer(err_errorsignals[0], testdata).T
            for i in range(1, self.layercount):
                errorbyweights[i] = np.outer(err_errorsignals[i-1], ys[i][:-1])  # (L1)
            # Accumulate this sample's weight deltas into the batch deltas.
            for i in range(0, self.layercount):
                if type(deltaweights_batch[i]) is int:  # still the initial 0 placeholder
                    deltaweights_batch[i] = -eta*errorbyweights[i]
                else:
                    deltaweights_batch[i] -= eta*errorbyweights[i]
    # Apply the accumulated deltas; an int placeholder means no board in
    # the batch had a target, so there is nothing to apply.
    for i in range(0, self.layercount):
        if type(deltaweights_batch[i]) is not int:
            # TODO: only the non-bias weights are adjusted at the moment.
            self.weights[i][:, :-1] = self.weights[i][:, :-1] + deltaweights_batch[i].T
    error = self.PropagateSet(batch, error_function)
    return error
def LearnSingleBatchAdaptive(self, batch, eta_start=0.01, stoch_coeff=1, error_function=0): #takes a batch, propagates all boards in that batch while accumulating deltaweights. Then sums the deltaweights up and the adjustes the weights of the Network.
deltaweights_batch = [0]*self.layercount
selection_size = int(np.round(len(batch.dic)*stoch_coeff))
if selection_size is 0: #prevent empty selection
selection_size = 1
random_selection = random.sample(list(batch.dic.keys()),selection_size)
for entry in random_selection:
testdata = self.convert_input(Hashable.unwrap(entry)) #input
targ = batch.dic[entry].reshape(9*9+1) #target output, this is to be approximated
if(np.sum(targ)>0): #We can only learn if there are actual target vectors
targ_sum=np.sum(targ) #save this for the adaptive eta
targ = targ/np.linalg.norm(targ, ord=1) #normalize (L1-norm)
y = np.append(testdata,[1]) #We append 1 for the bias
ys = [0]*self.layercount #y_saved for backpropagation
#Forward-propagate
for i in range(0,self.layercount):
W = self.weights[i] #<NAME>?
s = W.dot(y)
if i==self.layercount-1: #softmax as activationfct only in last layer
y = np.append(softmax(s),[1]) #We append 1 for the bias
else: #in all other hidden layers we use tanh as activation fct
if self.activation_function is 0:
y = np.append(np.tanh(s),[1]) #We append 1 for the bias
else:
if self.activation_function is 1:
y = np.append(relu(s),[1]) #We append 1 for the bias
ys[i]=y #save the y values for backpropagation
out=y
#Backpropagation
#Calculate Jacobian of the softmax activationfct in last layer only
Jacobian_Softmax = [0]*self.layercount
for i in range(self.layercount-1,self.layercount): #please note that I think this is pure witchcraft happening here
yt=ys[i] #load y from ys and lets call it yt y_temporary
yt=yt[:-1] #the last entry is from the offset, we don't need this
le=len(yt)
Jacobian_Softmax_temporary = np.ones((le,le)) #alloc storage temporarily
for j in range(0,le):
Jacobian_Softmax_temporary[j,:]*=yt[j]
Jacobian_Softmax_temporary=np.identity(le) - Jacobian_Softmax_temporary
for j in range(0,le):
Jacobian_Softmax_temporary[:,j]*=yt[j]
Jacobian_Softmax[i]=Jacobian_Softmax_temporary
#Jacobian_Softmax is quadratic and symmetric.
if self.activation_function is 0:
#Calc Jacobian of tanh
Jacobian_tanh = [0]*self.layercount
for i in range(0,self.layercount): #please note that I think this is pure witchcraft happening here
yt=ys[i] #load y from ys and lets call it yt
yt=yt[:-1] #the last entry is from the offset, we don't need this
u=1-yt*yt
Jacobian_tanh[i]=np.diag(u)
Jacobian_hidden = Jacobian_tanh
if self.activation_function is 1:
#Calc Jacobian of relu
Jacobian_relu = [0]*self.layercount
for i in range(0,self.layercount): #please note that I think this is pure witchcraft happening here
yt=ys[i] #load y from ys and lets call it yt
yt=yt[:-1] #the last entry is from the offset, we don't need this
yt[yt>0]=1#actually 0 values go to 1 also. this is not so easy, thus I leave it like that for now
Jacobian_relu[i]=np.diag(yt)
Jacobian_hidden = Jacobian_relu
#Use (L2) and (L3) to get the error signals of the layers
errorsignals = [0]*self.layercount
errorsignals[self.layercount-1] = | |
# repository: tharvik/nessus
"""
sub modules for everything about the scans
"""
from enum import Enum
from uuid import uuid4
from typing import Iterable, Mapping, Union, Optional, MutableMapping
from nessus.base import LibNessusBase
from nessus.editor import NessusTemplate
from nessus.model import lying_exist, lying_type, Object, lying_exist_and_type, allow_to_exist
from nessus.permissions import NessusPermission
from nessus.policies import NessusPolicy
class NessusScanType(Enum):
    """
    type of scan

    Values mirror the strings returned by the nessus API.
    """
    local = 'local'
    remote = 'remote'
    agent = 'agent'
class NessusScanStatus(Enum):
    """
    current status of scan

    Values mirror the strings returned by the nessus API.

    lies:
    - `empty` was added because sometimes, nessus return it (but it is not documented)
    - `canceled` is returned instead of `cancelled`
    - `processing` was added because sometimes, nessus return it (but it is not documented)
    """
    completed = 'completed'
    aborted = 'aborted'
    imported = 'imported'
    pending = 'pending'
    running = 'running'
    resuming = 'resuming'
    canceling = 'canceling'
    cancelled = 'cancelled'
    pausing = 'pausing'
    paused = 'paused'
    stopping = 'stopping'
    stopped = 'stopped'
    # undocumented / misspelled values actually seen on the wire
    empty = 'empty'
    canceled = 'canceled'
    processing = 'processing'
class NessusScan(Object):
    """
    A scan as listed by the nessus API.

    nessus is lying with:
    - `type` which is none but should be NessusScanType (str)
    - `status` which can be 'empty' but should be one of NessusScanStatus
    - `use_dashboard` which do not always exists

    Identity (equality and hashing) is based on the scan id alone.
    """
    def __init__(self, scan_id: int, uuid: str, name: str, type: NessusScanType, owner: str, enabled: bool,
                 folder_id: int,
                 read: bool, status: NessusScanStatus, shared: bool, user_permissions: int, creation_date: int,
                 last_modification_date: int, control: bool, starttime: str, timezone: str, rrules: str,
                 use_dashboard: bool) -> None:
        self.id = scan_id
        self.uuid = uuid
        self.name = name
        self.type = type
        self.owner = owner
        self.enabled = enabled
        self.folder_id = folder_id
        self.read = read
        self.status = status
        self.shared = shared
        self.user_permissions = user_permissions
        self.creation_date = creation_date
        self.last_modification_date = last_modification_date
        self.control = control
        self.starttime = starttime
        self.timezone = timezone
        self.rrules = rrules
        self.use_dashboard = use_dashboard

    def __eq__(self, other):
        if not isinstance(other, NessusScan):
            return False
        return self.id == other.id

    def __hash__(self):
        return hash(self.id)

    @staticmethod
    def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScan':
        """Build a NessusScan from the API's JSON dict, coercing each field
        and working around the documented lies."""
        return NessusScan(
            scan_id=int(json_dict['id']),
            uuid=str(json_dict['uuid']),
            name=str(json_dict['name']),
            type=lying_type(json_dict['type'], NessusScanType),
            owner=str(json_dict['owner']),
            enabled=bool(json_dict['enabled']),
            folder_id=int(json_dict['folder_id']),
            read=bool(json_dict['read']),
            status=NessusScanStatus(json_dict['status']),
            shared=bool(json_dict['shared']),
            user_permissions=int(json_dict['user_permissions']),
            creation_date=int(json_dict['creation_date']),
            last_modification_date=int(json_dict['last_modification_date']),
            control=bool(json_dict['control']),
            starttime=str(json_dict['starttime']),
            timezone=str(json_dict['timezone']),
            rrules=str(json_dict['rrules']),
            use_dashboard=lying_exist(json_dict, 'use_dashboard', bool),
        )
class NessusScanCreated(Object):
    """
    The response payload of a scan-creation request.

    lies:
    - `notification_filter_type` does not always exist
    - `tag_id` does not always exist

    NOTE: `default_permisssions` (three s's) intentionally mirrors the
    misspelled key used by the nessus JSON API — do not "fix" it.
    """
    def __init__(self, creation_date: int, custom_targets: str, default_permisssions: int, description: str,
                 emails: str, scan_id: int, last_modification_date: int, name: str, notification_filter_type: str,
                 notification_filters: str, owner: str, owner_id: int, policy_id: int, enabled: bool, rrules: str,
                 scanner_id: int, shared: int, starttime: str, tag_id: int, timezone: str, scan_type: str,
                 user_permissions: int, uuid: str, use_dashboard: bool) -> None:
        self.creation_date = creation_date
        self.custom_targets = custom_targets
        self.default_permisssions = default_permisssions
        self.description = description
        self.emails = emails
        self.id = scan_id
        self.last_modification_date = last_modification_date
        self.name = name
        self.notification_filter_type = notification_filter_type
        self.notification_filters = notification_filters
        self.owner = owner
        self.owner_id = owner_id
        self.policy_id = policy_id
        self.enabled = enabled
        self.rrules = rrules
        self.scanner_id = scanner_id
        self.shared = shared
        self.starttime = starttime
        self.tag_id = tag_id
        self.timezone = timezone
        self.type = scan_type
        self.user_permissions = user_permissions
        self.uuid = uuid
        self.use_dashboard = use_dashboard

    @staticmethod
    def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanCreated':
        """Build a NessusScanCreated from the API's JSON dict, coercing each
        field and tolerating the optional keys listed in the class docstring."""
        creation_date = int(json_dict['creation_date'])
        custom_targets = str(json_dict['custom_targets'])
        default_permisssions = int(json_dict['default_permisssions'])
        description = str(json_dict['description'])
        emails = str(json_dict['emails'])
        scan_id = int(json_dict['id'])
        last_modification_date = int(json_dict['last_modification_date'])
        name = str(json_dict['name'])
        # optional key (see class docstring)
        notification_filter_type = lying_exist(json_dict, 'notification_filter_type', str)
        notification_filters = str(json_dict['notification_filters'])
        owner = str(json_dict['owner'])
        owner_id = int(json_dict['owner_id'])
        policy_id = int(json_dict['policy_id'])
        enabled = bool(json_dict['enabled'])
        rrules = str(json_dict['rrules'])
        scanner_id = int(json_dict['scanner_id'])
        shared = int(json_dict['shared'])
        starttime = str(json_dict['starttime'])
        # optional key (see class docstring)
        tag_id = lying_exist(json_dict, 'tag_id', int)
        timezone = str(json_dict['timezone'])
        scan_type = str(json_dict['type'])
        user_permissions = int(json_dict['user_permissions'])
        uuid = str(json_dict['uuid'])
        use_dashboard = bool(json_dict['use_dashboard'])
        return NessusScanCreated(creation_date, custom_targets, default_permisssions, description, emails, scan_id,
                                 last_modification_date, name, notification_filter_type, notification_filters, owner,
                                 owner_id, policy_id, enabled, rrules, scanner_id, shared, starttime, tag_id, timezone,
                                 scan_type, user_permissions, uuid, use_dashboard)
class NessusScanDetailsInfo(Object):
    """
    The `info` part of a scan-details response.

    lies:
    - `edit_allowed` is not always existing
    - `policy` is not always existing
    - `pci_can_upload` is not always existing
    - `hasaudittrail` is not always existing
    - `folder_id` is sometimes None
    - `targets` is not always existing
    - `timestamp` is not always existing
    - `haskb` is not always existing
    - `uuid` is not always existing
    - `hostcount` is not always existing
    - `scan_end` is not always existing
    """
    def __init__(self, acls: Iterable[NessusPermission], edit_allowed: bool, status: str, policy: str,
                 pci_can_upload: bool, hasaudittrail: bool,
                 scan_start: str, folder_id: int, targets: str, timestamp: int, object_id: int, scanner_name: str,
                 haskb: bool, uuid: str, hostcount: int, scan_end: str, name: str, user_permissions: int,
                 control: bool) -> None:
        self.acls = acls
        self.edit_allowed = edit_allowed
        self.status = status
        self.policy = policy
        self.pci_can_upload = pci_can_upload
        self.hasaudittrail = hasaudittrail
        self.scan_start = scan_start
        self.folder_id = folder_id
        self.targets = targets
        self.timestamp = timestamp
        self.object_id = object_id
        self.scanner_name = scanner_name
        self.haskb = haskb
        self.uuid = uuid
        self.hostcount = hostcount
        self.scan_end = scan_end
        self.name = name
        self.user_permissions = user_permissions
        self.control = control

    @staticmethod
    def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanDetailsInfo':
        """Build a NessusScanDetailsInfo from the API's JSON dict, tolerating
        the optional keys listed in the class docstring."""
        acls = {NessusPermission.from_json(acl) for acl in json_dict['acls']}
        edit_allowed = lying_exist(json_dict, 'edit_allowed', bool)
        status = str(json_dict['status'])
        policy = lying_exist(json_dict, 'policy', str)
        # NOTE: the JSON key really is hyphenated, unlike the attribute name.
        pci_can_upload = lying_exist(json_dict, 'pci-can-upload', bool)
        hasaudittrail = lying_exist(json_dict, 'hasaudittrail', bool)
        scan_start = str(json_dict['scan_start'])
        folder_id = lying_type(json_dict['folder_id'], int)  # it's None actually
        targets = lying_exist(json_dict, 'targets', str)
        timestamp = lying_exist(json_dict, 'timestamp', int)
        object_id = int(json_dict['object_id'])
        scanner_name = str(json_dict['scanner_name'])
        haskb = lying_exist(json_dict, 'haskb', bool)
        uuid = lying_exist(json_dict, 'uuid', str)
        hostcount = lying_exist(json_dict, 'hostcount', int)
        scan_end = lying_exist(json_dict, 'scan_end', str)
        name = str(json_dict['name'])
        user_permissions = int(json_dict['user_permissions'])
        control = bool(json_dict['control'])
        return NessusScanDetailsInfo(acls, edit_allowed, status, policy, pci_can_upload, hasaudittrail, scan_start,
                                     folder_id, targets, timestamp, object_id, scanner_name, haskb, uuid, hostcount,
                                     scan_end, name, user_permissions, control)
class NessusScanHost(Object):
    """One host entry in a scan result.

    lies:
    - `hostname` can be str
    """
    def __init__(self, host_id: int, host_index: str, hostname: int, progress: str, critical: int, high: int,
                 medium: int, low: int, info: int, totalchecksconsidered: int, numchecksconsidered: int,
                 scanprogresstotal: int, scanprogresscurrent: int, score: int) -> None:
        self.host_id = host_id
        self.host_index = host_index
        self.hostname = hostname
        self.progress = progress
        self.critical = critical
        self.high = high
        self.medium = medium
        self.low = low
        self.info = info
        self.totalchecksconsidered = totalchecksconsidered
        self.numchecksconsidered = numchecksconsidered
        self.scanprogresstotal = scanprogresstotal
        self.scanprogresscurrent = scanprogresscurrent
        self.score = score

    @staticmethod
    def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanHost':
        """Build a NessusScanHost from a decoded JSON mapping."""
        return NessusScanHost(
            host_id=int(json_dict['host_id']),
            host_index=str(json_dict['host_index']),
            # `hostname` is declared int but the server may send str.
            hostname=lying_type(json_dict['hostname'], int, str),
            progress=str(json_dict['progress']),
            critical=int(json_dict['critical']),
            high=int(json_dict['high']),
            medium=int(json_dict['medium']),
            low=int(json_dict['low']),
            info=int(json_dict['info']),
            totalchecksconsidered=int(json_dict['totalchecksconsidered']),
            numchecksconsidered=int(json_dict['numchecksconsidered']),
            scanprogresstotal=int(json_dict['scanprogresstotal']),
            scanprogresscurrent=int(json_dict['scanprogresscurrent']),
            score=int(json_dict['score']),
        )
class NessusScanNote(Object):
    """Note entry with a title, a message and a severity."""

    def __init__(self, title: str, message: str, severity: int) -> None:
        self.title = title
        self.message = message
        self.severity = severity

    @staticmethod
    def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanNote':
        """Build a NessusScanNote from a decoded JSON mapping."""
        return NessusScanNote(
            title=str(json_dict['title']),
            message=str(json_dict['message']),
            severity=int(json_dict['severity']),
        )
class NessusScanRemediation(Object):
    """A remediation entry with its affected host and vulnerability counts."""

    def __init__(self, value: str, remediation: str, hosts: int, vulns: int) -> None:
        self.value = value
        self.remediation = remediation
        self.hosts = hosts
        self.vulns = vulns

    @staticmethod
    def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanRemediation':
        """Build a NessusScanRemediation from a decoded JSON mapping."""
        return NessusScanRemediation(
            value=str(json_dict['value']),
            remediation=str(json_dict['remediation']),
            hosts=int(json_dict['hosts']),
            vulns=int(json_dict['vulns']),
        )
class NessusScanDetailsRemediations(Object):
    """Remediations summary section of a scan-details response.

    lies:
    - `remediations` can be None
    """
    def __init__(self, remediations: Iterable[NessusScanRemediation], num_hosts: int, num_cves: int,
                 num_impacted_hosts: int, num_remediated_cves: int) -> None:
        self.remediations = remediations
        self.num_hosts = num_hosts
        self.num_cves = num_cves
        self.num_impacted_hosts = num_impacted_hosts
        self.num_remediated_cves = num_remediated_cves
    @staticmethod
    def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanDetailsRemediations':
        """Build a NessusScanDetailsRemediations from a decoded JSON mapping.

        A None `remediations` value from the server yields an empty set.
        """
        # BUG FIX: each raw entry must be parsed with
        # NessusScanRemediation.from_json(); the previous code invoked the
        # 4-argument constructor with a single dict, which raises TypeError.
        remediations = {NessusScanRemediation.from_json(remediation) for remediation in
                        lying_type(json_dict['remediations'], list, lambda x: None, list())}
        num_hosts = int(json_dict['num_hosts'])
        num_cves = int(json_dict['num_cves'])
        num_impacted_hosts = int(json_dict['num_impacted_hosts'])
        num_remediated_cves = int(json_dict['num_remediated_cves'])
        return NessusScanDetailsRemediations(remediations, num_hosts, num_cves, num_impacted_hosts,
                                             num_remediated_cves)
class NessusScanVulnerability(Object):
def __init__(self, plugin_id: int, plugin_name: str, plugin_family: str, count: int, vuln_index: int,
severity_index: int) -> None:
self.plugin_id = plugin_id
self.plugin_name = plugin_name
self.plugin_family = plugin_family
self.count = count
self.vuln_index = vuln_index
self.severity_index = severity_index
@staticmethod
def from_json(json_dict: Mapping[str, Union[int, str, bool]]) -> 'NessusScanVulnerability':
plugin_id = int(json_dict['plugin_id'])
plugin_name = str(json_dict['plugin_name'])
plugin_family = str(json_dict['plugin_family'])
count = | |
constant string,
- a parameter value defined as ``$parameter_name``,
- an original parameter value defined as
``$parameter_name.original``,
- a parameter value from some context defined as
``#context_name.parameter_name``.
default_value (str):
Optional. The default value to use when the ``value`` yields
an empty result. Default values can be extracted from
contexts by using the following syntax:
``#context_name.parameter_name``.
entity_type_display_name (str):
Optional. The name of the entity type, prefixed with ``@``,
that describes values of the parameter. If the parameter is
required, this must be provided.
mandatory (bool):
Optional. Indicates whether the parameter is
required. That is, whether the intent cannot be
completed without collecting the parameter
value.
prompts (Sequence[str]):
Optional. The collection of prompts that the
agent can present to the user in order to
collect a value for the parameter.
is_list (bool):
Optional. Indicates whether the parameter
represents a list of values.
"""
name = proto.Field(proto.STRING, number=1)
display_name = proto.Field(proto.STRING, number=2)
value = proto.Field(proto.STRING, number=3)
default_value = proto.Field(proto.STRING, number=4)
entity_type_display_name = proto.Field(proto.STRING, number=5)
mandatory = proto.Field(proto.BOOL, number=6)
prompts = proto.RepeatedField(proto.STRING, number=7)
is_list = proto.Field(proto.BOOL, number=8)
class Message(proto.Message):
r"""Corresponds to the ``Response`` field in the Dialogflow console.
Attributes:
text (~.gcd_intent.Intent.Message.Text):
Returns a text response.
image (~.gcd_intent.Intent.Message.Image):
Displays an image.
quick_replies (~.gcd_intent.Intent.Message.QuickReplies):
Displays quick replies.
card (~.gcd_intent.Intent.Message.Card):
Displays a card.
payload (~.struct.Struct):
A custom platform-specific response.
simple_responses (~.gcd_intent.Intent.Message.SimpleResponses):
Returns a voice or text-only response for
Actions on Google.
basic_card (~.gcd_intent.Intent.Message.BasicCard):
Displays a basic card for Actions on Google.
suggestions (~.gcd_intent.Intent.Message.Suggestions):
Displays suggestion chips for Actions on
Google.
link_out_suggestion (~.gcd_intent.Intent.Message.LinkOutSuggestion):
Displays a link out suggestion chip for
Actions on Google.
list_select (~.gcd_intent.Intent.Message.ListSelect):
Displays a list card for Actions on Google.
carousel_select (~.gcd_intent.Intent.Message.CarouselSelect):
Displays a carousel card for Actions on
Google.
telephony_play_audio (~.gcd_intent.Intent.Message.TelephonyPlayAudio):
Plays audio from a file in Telephony Gateway.
telephony_synthesize_speech (~.gcd_intent.Intent.Message.TelephonySynthesizeSpeech):
Synthesizes speech in Telephony Gateway.
telephony_transfer_call (~.gcd_intent.Intent.Message.TelephonyTransferCall):
Transfers the call in Telephony Gateway.
rbm_text (~.gcd_intent.Intent.Message.RbmText):
Rich Business Messaging (RBM) text response.
RBM allows businesses to send enriched and
branded versions of SMS. See
https://jibe.google.com/business-messaging.
rbm_standalone_rich_card (~.gcd_intent.Intent.Message.RbmStandaloneCard):
Standalone Rich Business Messaging (RBM) rich
card response.
rbm_carousel_rich_card (~.gcd_intent.Intent.Message.RbmCarouselCard):
Rich Business Messaging (RBM) carousel rich
card response.
browse_carousel_card (~.gcd_intent.Intent.Message.BrowseCarouselCard):
Browse carousel card for Actions on Google.
table_card (~.gcd_intent.Intent.Message.TableCard):
Table card for Actions on Google.
media_content (~.gcd_intent.Intent.Message.MediaContent):
The media content card for Actions on Google.
platform (~.gcd_intent.Intent.Message.Platform):
Optional. The platform that this message is
intended for.
"""
class Platform(proto.Enum):
    r"""Represents different platforms that a rich message can be
    intended for.
    """

    PLATFORM_UNSPECIFIED = 0
    FACEBOOK = 1
    SLACK = 2
    TELEGRAM = 3
    KIK = 4
    SKYPE = 5
    LINE = 6
    VIBER = 7
    ACTIONS_ON_GOOGLE = 8
    # Note: 9 is unused in this enum. The numeric values are the protobuf
    # enum numbers and must not be renumbered.
    TELEPHONY = 10
    GOOGLE_HANGOUTS = 11
class Text(proto.Message):
    r"""The text response message.
    Attributes:
        text (Sequence[str]):
            Optional. The collection of the agent's
            responses.
    """

    # Repeated string field; `number` is the protobuf field tag.
    text = proto.RepeatedField(proto.STRING, number=1)
class Image(proto.Message):
    r"""The image response message.
    Attributes:
        image_uri (str):
            Optional. The public URI to an image file.
        accessibility_text (str):
            A text description of the image to be used for
            accessibility, e.g., screen readers. Required if image_uri
            is set for CarouselSelect.
    """

    # `number` values are protobuf field tags; keep them stable.
    image_uri = proto.Field(proto.STRING, number=1)
    accessibility_text = proto.Field(proto.STRING, number=2)
class QuickReplies(proto.Message):
    r"""The quick replies response message.
    Attributes:
        title (str):
            Optional. The title of the collection of
            quick replies.
        quick_replies (Sequence[str]):
            Optional. The collection of quick replies.
    """

    # `number` values are protobuf field tags; keep them stable.
    title = proto.Field(proto.STRING, number=1)
    quick_replies = proto.RepeatedField(proto.STRING, number=2)
class Card(proto.Message):
    r"""The card response message.
    Attributes:
        title (str):
            Optional. The title of the card.
        subtitle (str):
            Optional. The subtitle of the card.
        image_uri (str):
            Optional. The public URI to an image file for
            the card.
        buttons (Sequence[~.gcd_intent.Intent.Message.Card.Button]):
            Optional. The collection of card buttons.
    """

    class Button(proto.Message):
        r"""Optional. Contains information about a button.
        Attributes:
            text (str):
                Optional. The text to show on the button.
            postback (str):
                Optional. The text to send back to the
                Dialogflow API or a URI to open.
        """

        # Field tags restart at 1 inside the nested Button message.
        text = proto.Field(proto.STRING, number=1)
        postback = proto.Field(proto.STRING, number=2)

    title = proto.Field(proto.STRING, number=1)
    subtitle = proto.Field(proto.STRING, number=2)
    image_uri = proto.Field(proto.STRING, number=3)
    # The message type is a dotted-string forward reference resolved by
    # the proto framework.
    buttons = proto.RepeatedField(
        proto.MESSAGE, number=4, message="Intent.Message.Card.Button",
    )
class SimpleResponse(proto.Message):
    r"""The simple response message containing speech or text.
    Attributes:
        text_to_speech (str):
            One of text_to_speech or ssml must be provided. The plain
            text of the speech output. Mutually exclusive with ssml.
        ssml (str):
            One of text_to_speech or ssml must be provided. Structured
            spoken response to the user in the SSML format. Mutually
            exclusive with text_to_speech.
        display_text (str):
            Optional. The text to display.
    """

    # `number` values are protobuf field tags; keep them stable.
    text_to_speech = proto.Field(proto.STRING, number=1)
    ssml = proto.Field(proto.STRING, number=2)
    display_text = proto.Field(proto.STRING, number=3)
class SimpleResponses(proto.Message):
    r"""The collection of simple response candidates. This message in
    ``QueryResult.fulfillment_messages`` and
    ``WebhookResponse.fulfillment_messages`` should contain only one
    ``SimpleResponse``.
    Attributes:
        simple_responses (Sequence[~.gcd_intent.Intent.Message.SimpleResponse]):
            Required. The list of simple responses.
    """

    # Repeated nested message; the type is a forward reference by name.
    simple_responses = proto.RepeatedField(
        proto.MESSAGE, number=1, message="Intent.Message.SimpleResponse",
    )
class BasicCard(proto.Message):
    r"""The basic card message. Useful for displaying information.
    Attributes:
        title (str):
            Optional. The title of the card.
        subtitle (str):
            Optional. The subtitle of the card.
        formatted_text (str):
            Required, unless image is present. The body
            text of the card.
        image (~.gcd_intent.Intent.Message.Image):
            Optional. The image for the card.
        buttons (Sequence[~.gcd_intent.Intent.Message.BasicCard.Button]):
            Optional. The collection of card buttons.
    """

    class Button(proto.Message):
        r"""The button object that appears at the bottom of a card.
        Attributes:
            title (str):
                Required. The title of the button.
            open_uri_action (~.gcd_intent.Intent.Message.BasicCard.Button.OpenUriAction):
                Required. Action to take when a user taps on
                the button.
        """

        class OpenUriAction(proto.Message):
            r"""Opens the given URI.
            Attributes:
                uri (str):
                    Required. The HTTP or HTTPS scheme URI.
            """

            uri = proto.Field(proto.STRING, number=1)

        # Field tags restart at 1 inside the nested Button message.
        title = proto.Field(proto.STRING, number=1)
        open_uri_action = proto.Field(
            proto.MESSAGE,
            number=2,
            message="Intent.Message.BasicCard.Button.OpenUriAction",
        )

    title = proto.Field(proto.STRING, number=1)
    subtitle = proto.Field(proto.STRING, number=2)
    formatted_text = proto.Field(proto.STRING, number=3)
    image = proto.Field(
        proto.MESSAGE, number=4, message="Intent.Message.Image",
    )
    buttons = proto.RepeatedField(
        proto.MESSAGE, number=5, message="Intent.Message.BasicCard.Button",
    )
class Suggestion(proto.Message):
    r"""The suggestion chip message that the user can tap to quickly
    post a reply to the conversation.
    Attributes:
        title (str):
            Required. The text shown in the
            suggestion chip.
    """

    title = proto.Field(proto.STRING, number=1)
class Suggestions(proto.Message):
    r"""The collection of suggestions.
    Attributes:
        suggestions (Sequence[~.gcd_intent.Intent.Message.Suggestion]):
            Required. The list of suggested replies.
    """

    # Repeated nested message; the type is a forward reference by name.
    suggestions = proto.RepeatedField(
        proto.MESSAGE, number=1, message="Intent.Message.Suggestion",
    )
class LinkOutSuggestion(proto.Message):
    r"""The suggestion chip message that allows the user to jump out
    to the app or website associated with this agent.
    Attributes:
        destination_name (str):
            Required. The name of the app or site this
            chip is linking to.
        uri (str):
            Required. The URI of the app or site to open
            when the user taps the suggestion chip.
    """

    # `number` values are protobuf field tags; keep them stable.
    destination_name = proto.Field(proto.STRING, number=1)
    uri = proto.Field(proto.STRING, number=2)
class ListSelect(proto.Message):
    r"""The card for presenting a list of options to select from.
    Attributes:
        title (str):
            Optional. The overall title of the list.
        items (Sequence[~.gcd_intent.Intent.Message.ListSelect.Item]):
            Required. List items.
        subtitle (str):
            Optional. Subtitle of the list.
    """

    class Item(proto.Message):
        r"""An item in the list.
        Attributes:
            info (~.gcd_intent.Intent.Message.SelectItemInfo):
                Required. Additional information about this
                option.
            title (str):
                Required. The title of the list item.
            description (str):
                Optional. The main text describing the item.
            image (~.gcd_intent.Intent.Message.Image):
                Optional. The image to display.
        """

        # Field tags restart at 1 inside the nested Item message.
        info = proto.Field(
            proto.MESSAGE, number=1, message="Intent.Message.SelectItemInfo",
        )
        title = proto.Field(proto.STRING, number=2)
        description = proto.Field(proto.STRING, number=3)
        image = proto.Field(
            proto.MESSAGE, number=4, message="Intent.Message.Image",
        )

    title = proto.Field(proto.STRING, number=1)
    items = proto.RepeatedField(
        proto.MESSAGE, number=2, message="Intent.Message.ListSelect.Item",
    )
    subtitle = proto.Field(proto.STRING, number=3)
class CarouselSelect(proto.Message):
    r"""The card for presenting a carousel of options to select from.
    Attributes:
        items (Sequence[~.gcd_intent.Intent.Message.CarouselSelect.Item]):
            Required. Carousel items.
    """

    class Item(proto.Message):
        r"""An item in the carousel.
        Attributes:
            info (~.gcd_intent.Intent.Message.SelectItemInfo):
                Required. Additional info about the option
                item.
            title (str):
                Required. Title of the carousel item.
            description (str):
                Optional. The body text of the card.
            image (~.gcd_intent.Intent.Message.Image):
                Optional. The image to display.
        """

        # Field tags restart at 1 inside the nested Item message.
        info = proto.Field(
            proto.MESSAGE, number=1, message="Intent.Message.SelectItemInfo",
        )
        title = proto.Field(proto.STRING, number=2)
        description = proto.Field(proto.STRING, number=3)
        image = proto.Field(
            proto.MESSAGE, number=4, message="Intent.Message.Image",
        )

    items = proto.RepeatedField(
        proto.MESSAGE, number=1, message="Intent.Message.CarouselSelect.Item",
    )
class SelectItemInfo(proto.Message):
r"""Additional info about the select item for when it is
triggered in a dialog.
Attributes:
key (str):
Required. A unique key that will be sent back
to the agent if this response is given.
synonyms (Sequence[str]):
Optional. A list of synonyms that | |
# real_meta_fmt = [('rmsval', 'f')]
# self.grid_meta_int = self._buffer.read_struct(NamedStruct(integer_meta_fmt,
# self.prefmt,
# 'GridMetaInt'))
# self.grid_meta_real = self._buffer.read_struct(NamedStruct(real_meta_fmt,
# self.prefmt,
# 'GridMetaReal'))
# grid_start = self._buffer.set_mark()
else:
raise NotImplementedError('No method for unknown grid packing {}'
.format(packing_type.name))
def gdxarray(self, parameter=None, date_time=None, coordinate=None,
             level=None, date_time2=None, level2=None):
    """Select grids and output as list of xarray DataArrays.

    Subset the data by parameter values. The default is to not
    subset and return the entire dataset.

    Parameters
    ----------
    parameter : str or array-like of str
        Name of GEMPAK parameter.
    date_time : datetime or array-like of datetime
        Valid datetime of the grid. Alternatively
        can be a string with the format YYYYmmddHHMM.
    coordinate : str or array-like of str
        Vertical coordinate.
    level : float or array-like of float
        Vertical level.
    date_time2 : datetime or array-like of datetime
        Secondary valid datetime of the grid. Alternatively
        can be a string with the format YYYYmmddHHMM.
    level2: float or array_like of float
        Secondary vertical level. Typically used for layers.

    Returns
    -------
    list
        List of xarray.DataArray objects for each grid.

    Raises
    ------
    KeyError
        If no grid matches the given selection criteria.
    """
    # Normalize each selection argument to a list. New lists are built so
    # the caller's objects are never mutated (the previous implementation
    # converted datetime strings in place inside caller-supplied lists).
    if parameter is not None:
        if (not isinstance(parameter, Iterable)
                or isinstance(parameter, str)):
            parameter = [parameter]
        parameter = [p.upper() for p in parameter]

    if date_time is not None:
        if (not isinstance(date_time, Iterable)
                or isinstance(date_time, str)):
            date_time = [date_time]
        date_time = [datetime.strptime(dt, '%Y%m%d%H%M') if isinstance(dt, str) else dt
                     for dt in date_time]

    if coordinate is not None:
        if (not isinstance(coordinate, Iterable)
                or isinstance(coordinate, str)):
            coordinate = [coordinate]
        coordinate = [c.upper() for c in coordinate]

    if level is not None and not isinstance(level, Iterable):
        level = [level]

    if date_time2 is not None:
        if (not isinstance(date_time2, Iterable)
                or isinstance(date_time2, str)):
            date_time2 = [date_time2]
        date_time2 = [datetime.strptime(dt, '%Y%m%d%H%M') if isinstance(dt, str) else dt
                      for dt in date_time2]

    if level2 is not None and not isinstance(level2, Iterable):
        level2 = [level2]

    # Figure out which columns to extract from the file. Plain boolean
    # predicates replace the previous `grid if cond else False`
    # conditional expressions, which were equivalent but indirect.
    matched = list(self._gdinfo)
    if parameter is not None:
        matched = [g for g in matched if g.PARM in parameter]
    if date_time is not None:
        matched = [g for g in matched if g.DATTIM1 in date_time]
    if coordinate is not None:
        matched = [g for g in matched if g.COORD in coordinate]
    if level is not None:
        matched = [g for g in matched if g.LEVEL1 in level]
    if date_time2 is not None:
        matched = [g for g in matched if g.DATTIM2 in date_time2]
    if level2 is not None:
        matched = [g for g in matched if g.LEVEL2 in level2]

    if not matched:
        raise KeyError('No grids were matched with given parameters.')

    # Set for O(1) membership tests in the column loop below.
    gridno = {g.GRIDNO for g in matched}

    grids = []
    irow = 0  # Only one row for grids
    for icol, col_head in enumerate(self.column_headers):
        if icol not in gridno:
            continue
        for iprt, part in enumerate(self.parts):
            # Word offset of this (row, column, part) data pointer.
            pointer = (self.prod_desc.data_block_ptr
                       + (irow * self.prod_desc.columns * self.prod_desc.parts)
                       + (icol * self.prod_desc.parts + iprt))
            self._buffer.jump_to(self._start, _word_to_position(pointer))
            self.data_ptr = self._buffer.read_int(4, self.endian, False)
            self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
            self.data_header_length = self._buffer.read_int(4, self.endian, False)
            data_header = self._buffer.set_mark()
            self._buffer.jump_to(data_header,
                                 _word_to_position(part.header_length + 1))
            packing_type = PackingType(self._buffer.read_int(4, self.endian, False))

            full_name = col_head.GPM1 + col_head.GPM2 + col_head.GPM3
            ftype, ftime = col_head.GTM1
            valid = col_head.GDT1 + ftime
            gvcord = col_head.GVCD.lower() if col_head.GVCD is not None else 'none'
            var = (GVCORD_TO_VAR[full_name]
                   if full_name in GVCORD_TO_VAR
                   else full_name.lower()
                   )
            data = self._unpack_grid(packing_type, part)
            if data is not None:
                # Flat arrays are reshaped to the (ky, kx) grid; missing
                # values are masked out.
                if data.ndim < 2:
                    data = np.ma.array(data.reshape((self.ky, self.kx)),
                                       mask=data == self.prod_desc.missing_float,
                                       dtype=np.float32)
                else:
                    data = np.ma.array(data, mask=data == self.prod_desc.missing_float,
                                       dtype=np.float32)

                xrda = xr.DataArray(
                    data=data[np.newaxis, np.newaxis, ...],
                    coords={
                        'time': [valid],
                        gvcord: [col_head.GLV1],
                        'x': self.x,
                        'y': self.y,
                        'lat': (['y', 'x'], self.lat),
                        'lon': (['y', 'x'], self.lon),
                    },
                    dims=['time', gvcord, 'y', 'x'],
                    name=var,
                    attrs={
                        **self.crs.to_cf(),
                        'grid_type': ftype,
                    }
                )
                grids.append(xrda)
            else:
                logger.warning('Unable to read grid for %s', col_head.GPM1)
    return grids
class GempakSounding(GempakFile):
"""Subclass of GempakFile specific to GEMPAK sounding data."""
def __init__(self, file, *args, **kwargs):
    """Instantiate GempakSounding object from file.

    Reads the row and column header tables referenced by the product
    description, then records a ``Sounding`` entry for every
    (row, column) cell that has a non-zero data pointer.
    """
    # NOTE(review): *args/**kwargs are accepted but not forwarded to the
    # base class -- confirm this is intentional.
    super().__init__(file)

    # Row Headers
    self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.row_headers_ptr))
    self.row_headers = []
    # DATE/TIME words are decoded into date/time objects; every other row
    # key is read as a plain integer.
    row_headers_info = [(key, 'i', self._make_date) if key == 'DATE'
                        else (key, 'i', self._make_time) if key == 'TIME'
                        else (key, 'i')
                        for key in self.row_keys]
    row_headers_info.extend([(None, None)])
    row_headers_fmt = NamedStruct(row_headers_info, self.prefmt, 'RowHeaders')
    for _ in range(1, self.prod_desc.rows + 1):
        # Only header slots flagged as used contain valid data.
        if self._buffer.read_int(4, self.endian, False) == USED_FLAG:
            self.row_headers.append(self._buffer.read_struct(row_headers_fmt))

    # Column Headers
    self._buffer.jump_to(self._start, _word_to_position(self.prod_desc.column_headers_ptr))
    self.column_headers = []
    # Station identifiers are 4-character strings; SLAT/SLON are stored
    # scaled by 100 and divided back out here.
    column_headers_info = [(key, '4s', self._decode_strip) if key == 'STID'
                           else (key, 'i') if key == 'STNM'
                           else (key, 'i', lambda x: x / 100) if key == 'SLAT'
                           else (key, 'i', lambda x: x / 100) if key == 'SLON'
                           else (key, 'i') if key == 'SELV'
                           else (key, '4s', self._decode_strip) if key == 'STAT'
                           else (key, '4s', self._decode_strip) if key == 'COUN'
                           else (key, '4s', self._decode_strip) if key == 'STD2'
                           else (key, 'i')
                           for key in self.column_keys]
    column_headers_info.extend([(None, None)])
    column_headers_fmt = NamedStruct(column_headers_info, self.prefmt, 'ColumnHeaders')
    for _ in range(1, self.prod_desc.columns + 1):
        if self._buffer.read_int(4, self.endian, False) == USED_FLAG:
            self.column_headers.append(self._buffer.read_struct(column_headers_fmt))

    # A file is considered merged when it carries an SNDT part.
    self.merged = 'SNDT' in (part.name for part in self.parts)

    self._sninfo = []
    for irow, row_head in enumerate(self.row_headers):
        for icol, col_head in enumerate(self.column_headers):
            # Word offset of the first part's data pointer for this cell.
            pointer = (self.prod_desc.data_block_ptr
                       + (irow * self.prod_desc.columns * self.prod_desc.parts)
                       + (icol * self.prod_desc.parts))
            self._buffer.jump_to(self._start, _word_to_position(pointer))
            data_ptr = self._buffer.read_int(4, self.endian, False)
            if data_ptr:
                self._sninfo.append(
                    Sounding(
                        irow,
                        icol,
                        datetime.combine(row_head.DATE, row_head.TIME),
                        col_head.STID,
                        col_head.STNM,
                        col_head.SLAT,
                        col_head.SLON,
                        col_head.SELV,
                        col_head.STAT,
                        col_head.COUN,
                    )
                )
def sninfo(self):
    """Return sounding information.

    Returns the list of ``Sounding`` records assembled in ``__init__``,
    one per (row, column) cell that had a non-zero data pointer.
    """
    return self._sninfo
def _unpack_merged(self, sndno):
    """Unpack merged sounding data.

    Parameters
    ----------
    sndno : collection of tuple of int
        (row, column) index pairs selecting which soundings to unpack.

    Returns
    -------
    list of dict
        One dict per selected sounding containing station metadata plus
        one value array per merged parameter.
    """
    soundings = []
    for irow, row_head in enumerate(self.row_headers):
        for icol, col_head in enumerate(self.column_headers):
            if (irow, icol) not in sndno:
                continue
            # Station metadata comes straight from the headers.
            sounding = {'STID': col_head.STID,
                        'STNM': col_head.STNM,
                        'SLAT': col_head.SLAT,
                        'SLON': col_head.SLON,
                        'SELV': col_head.SELV,
                        'STAT': col_head.STAT,
                        'COUN': col_head.COUN,
                        'DATE': row_head.DATE,
                        'TIME': row_head.TIME,
                        }
            for iprt, part in enumerate(self.parts):
                # Word offset of this (row, column, part) data pointer.
                pointer = (self.prod_desc.data_block_ptr
                           + (irow * self.prod_desc.columns * self.prod_desc.parts)
                           + (icol * self.prod_desc.parts + iprt))
                self._buffer.jump_to(self._start, _word_to_position(pointer))
                self.data_ptr = self._buffer.read_int(4, self.endian, False)
                if not self.data_ptr:
                    # No data stored for this part of the cell.
                    continue
                self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
                self.data_header_length = self._buffer.read_int(4, self.endian, False)
                data_header = self._buffer.set_mark()
                self._buffer.jump_to(data_header,
                                     _word_to_position(part.header_length + 1))
                # Payload length (in words) after the part header.
                lendat = self.data_header_length - part.header_length

                fmt_code = {
                    DataTypes.real: 'f',
                    DataTypes.realpack: 'i',
                    DataTypes.character: 's',
                }.get(part.data_type)

                if fmt_code is None:
                    raise NotImplementedError('No methods for data type {}'
                                              .format(part.data_type))
                if fmt_code == 's':
                    # Character data is sized in bytes, not words.
                    lendat *= BYTES_PER_WORD

                packed_buffer = (
                    self._buffer.read_struct(
                        struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
                    )
                )

                parameters = self.parameters[iprt]
                nparms = len(parameters['name'])
                if part.data_type == DataTypes.realpack:
                    unpacked = self._unpack_real(packed_buffer, parameters, lendat)
                    for iprm, param in enumerate(parameters['name']):
                        # Values are interleaved by parameter; take every
                        # nparms-th element starting at this parameter.
                        sounding[param] = unpacked[iprm::nparms]
                else:
                    for iprm, param in enumerate(parameters['name']):
                        sounding[param] = np.array(
                            packed_buffer[iprm::nparms], dtype=np.float32
                        )

            soundings.append(sounding)
    return soundings
def _unpack_unmerged(self, sndno):
    """Unpack unmerged sounding data.

    Parameters
    ----------
    sndno : collection of tuple of int
        (row, column) index pairs selecting which soundings to unpack.

    Returns
    -------
    list of dict
        One merged sounding dict per selection; each part is first
        unpacked into its own nested dict and then combined by
        ``_merge_sounding``.
    """
    soundings = []
    for irow, row_head in enumerate(self.row_headers):
        for icol, col_head in enumerate(self.column_headers):
            if (irow, icol) not in sndno:
                continue
            # Station metadata comes straight from the headers.
            sounding = {'STID': col_head.STID,
                        'STNM': col_head.STNM,
                        'SLAT': col_head.SLAT,
                        'SLON': col_head.SLON,
                        'SELV': col_head.SELV,
                        'STAT': col_head.STAT,
                        'COUN': col_head.COUN,
                        'DATE': row_head.DATE,
                        'TIME': row_head.TIME,
                        }
            for iprt, part in enumerate(self.parts):
                # Word offset of this (row, column, part) data pointer.
                pointer = (self.prod_desc.data_block_ptr
                           + (irow * self.prod_desc.columns * self.prod_desc.parts)
                           + (icol * self.prod_desc.parts + iprt))
                self._buffer.jump_to(self._start, _word_to_position(pointer))
                self.data_ptr = self._buffer.read_int(4, self.endian, False)
                if not self.data_ptr:
                    # No data stored for this part of the cell.
                    continue
                self._buffer.jump_to(self._start, _word_to_position(self.data_ptr))
                self.data_header_length = self._buffer.read_int(4, self.endian, False)
                data_header = self._buffer.set_mark()
                self._buffer.jump_to(data_header,
                                     _word_to_position(part.header_length + 1))
                # Payload length (in words) after the part header.
                lendat = self.data_header_length - part.header_length

                fmt_code = {
                    DataTypes.real: 'f',
                    DataTypes.realpack: 'i',
                    DataTypes.character: 's',
                }.get(part.data_type)

                if fmt_code is None:
                    raise NotImplementedError('No methods for data type {}'
                                              .format(part.data_type))
                if fmt_code == 's':
                    # Character data is sized in bytes, not words.
                    lendat *= BYTES_PER_WORD

                packed_buffer = (
                    self._buffer.read_struct(
                        struct.Struct(f'{self.prefmt}{lendat}{fmt_code}')
                    )
                )

                parameters = self.parameters[iprt]
                nparms = len(parameters['name'])
                # Unmerged data keeps each part under its own key.
                sounding[part.name] = {}
                if part.data_type == DataTypes.realpack:
                    unpacked = self._unpack_real(packed_buffer, parameters, lendat)
                    for iprm, param in enumerate(parameters['name']):
                        sounding[part.name][param] = unpacked[iprm::nparms]
                elif part.data_type == DataTypes.character:
                    for iprm, param in enumerate(parameters['name']):
                        sounding[part.name][param] = (
                            self._decode_strip(packed_buffer[iprm])
                        )
                else:
                    # Values are interleaved by parameter; stride to split.
                    for iprm, param in enumerate(parameters['name']):
                        sounding[part.name][param] = (
                            np.array(packed_buffer[iprm::nparms], dtype=np.float32)
                        )

            # Combine the per-part pieces into one merged profile.
            soundings.append(self._merge_sounding(sounding))
    return soundings
def _merge_sounding(self, parts):
"""Merge unmerged sounding data."""
merged = {'STID': parts['STID'],
'STNM': parts['STNM'],
'SLAT': parts['SLAT'],
'SLON': parts['SLON'],
'SELV': parts['SELV'],
'STAT': parts['STAT'],
'COUN': parts['COUN'],
'DATE': parts['DATE'],
'TIME': parts['TIME'],
'PRES': [],
'HGHT': [],
'TEMP': [],
'DWPT': [],
'DRCT': [],
'SPED': [],
}
# Number of parameter levels
num_man_levels = len(parts['TTAA']['PRES']) if 'TTAA' in parts else 0
num_man_wind_levels | |
<filename>entmoot/optimizer/optimizer.py
"""
Copyright (c) 2016-2020 The scikit-optimize developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
NOTE: Changes were made to the scikit-optimize source code included here.
For the most recent version of scikit-optimize we refer to:
https://github.com/scikit-optimize/scikit-optimize/
Copyright (c) 2019-2020 <NAME>.
"""
import warnings
import copy
import inspect
from numbers import Number
import numpy as np
from sklearn.base import clone
from entmoot.acquisition import _gaussian_acquisition
import sys
class Optimizer(object):
"""Run bayesian optimisation loop.
An `Optimizer` represents the steps of a bayesian optimisation loop. To
use it you need to provide your own loop mechanism. The various
optimisers provided by `skopt` use this class under the hood.
Use this class directly if you want to control the iterations of your
bayesian optimisation loop.
Parameters
----------
dimensions : list, shape (n_dims,)
List of search space dimensions.
Each search dimension can be defined either as
- a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
dimensions),
- a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
dimensions),
- as a list of categories (for `Categorical` dimensions), or
- an instance of a `Dimension` object (`Real`, `Integer` or
`Categorical`).
base_estimator : string, default: "GBRT",
        A default LightGBM surrogate model of the corresponding type is used
        to minimize `func`.
The following model types are available:
- "GBRT" for gradient-boosted trees
- "RF" for random forests
*NOTE: More model types will be added in the future
std_estimator : string, default: "BDD",
A model is used to estimate uncertainty of `base_estimator`.
Different types can be classified as exploration measures, i.e. move
as far as possible from reference points, and penalty measures, i.e.
        stay as close as possible to reference points. Within these types, the
following uncertainty estimators are available:
- exploration:
- "BDD" for bounded-data distance, which uses squared euclidean
distance to standardized data points
- "L1BDD" for bounded-data distance, which uses manhattan
distance to standardized data points
- "MP" for Misic proximity, which uses the number of trees
which match leaves with reference data points
- penalty:
- "DDP" for data distance, which uses squared euclidean
distance to standardized data points
- "L1DDP" for data distance, which uses manhattan
distance to standardized data points
*NOTE: More model uncertainty estimators will be added in the future
n_initial_points : int, default: 50
Number of evaluations of `func` with initialization points
before approximating it with `base_estimator`. Initial point
generator can be changed by setting `initial_point_generator`. For
`n_initial_points` <= 20 we need to set `min_child_samples` <= 20 in
`base_estimator_kwargs` so LightGBM can train tree models based on small
data sets.
initial_point_generator : str, InitialPointGenerator instance, \
default: "random"
        Sets an initial point generator. Can be either
- "random" for uniform random numbers,
- "sobol" for a Sobol sequence,
- "halton" for a Halton sequence,
- "hammersly" for a Hammersly sequence,
- "lhs" for a latin hypercube sequence,
- "grid" for a uniform grid sequence
acq_func : string, default: "LCB"
Function to minimize over the posterior distribution. Can be either
- "LCB" for lower confidence bound.
acq_optimizer : string, default: "sampling"
Method to minimize the acquisition function. The fit model
is updated with the optimal value obtained by optimizing `acq_func`
with `acq_optimizer`.
- If set to "sampling", then `acq_func` is optimized by computing
`acq_func` at `n_points` randomly sampled points.
- If set to "global", then `acq_optimizer` is optimized by using
global solver to find minimum of `acq_func`.
random_state : int, RandomState instance, or None (default)
Set random state to something other than None for reproducible
results.
acq_func_kwargs : dict
Additional arguments to be passed to the acquisition function.
acq_optimizer_kwargs : dict
Additional arguments to be passed to the acquisition optimizer.
base_estimator_kwargs : dict
Additional arguments to be passed to the base_estimator.
std_estimator_kwargs : dict
Additional arguments to be passed to the std_estimator.
model_queue_size : int or None, default: None
Keeps list of models only as long as the argument given. In the
case of None, the list has no capped length.
verbose : bool or int:
- If it is `True`, general solver information is printed at every
iteration
- If it is `False`, no output is printed
- If it is 0, same as if `False`
- If it is 1, same as if `True`
- If it is 2, general solver information is printed at every iteration
as well as detailed solver information, i.e. gurobi log
Attributes
----------
Xi : list
Points at which objective has been evaluated.
yi : scalar
Values of objective at corresponding points in `Xi`.
models : list
Regression models used to fit observations and compute acquisition
function.
space : Space
An instance of :class:`skopt.space.Space`. Stores parameter search
space used to sample points, bounds, and type of parameters.
"""
def __init__(self,
dimensions,
base_estimator="GBRT",
std_estimator="BDD",
n_initial_points=50,
initial_point_generator="random",
acq_func="LCB",
acq_optimizer="global",
random_state=None,
acq_func_kwargs=None,
acq_optimizer_kwargs=None,
base_estimator_kwargs=None,
std_estimator_kwargs=None,
model_queue_size=None,
verbose=False
):
from entmoot.utils import is_supported
from entmoot.utils import cook_estimator
from entmoot.utils import cook_initial_point_generator
from entmoot.utils import cook_std_estimator
self.specs = {"args": copy.copy(inspect.currentframe().f_locals),
"function": "Optimizer"}
from sklearn.utils import check_random_state
self.rng = check_random_state(random_state)
# Configure acquisition function
# Store and create acquisition function set
self.acq_func = acq_func
if acq_func_kwargs is None:
self.acq_func_kwargs = dict()
else:
self.acq_func_kwargs = acq_func_kwargs
allowed_acq_funcs = ["LCB"]
if self.acq_func not in allowed_acq_funcs:
raise ValueError("expected acq_func to be in %s, got %s" %
(",".join(allowed_acq_funcs), self.acq_func))
# Configure counter of points
if n_initial_points < 0:
raise ValueError(
"Expected `n_initial_points` >= 0, got %d" % n_initial_points)
self._n_initial_points = n_initial_points
self.n_initial_points_ = n_initial_points
# Configure search space
# initialize search space
import numpy as np
from entmoot.space.space import Space
from entmoot.space.space import Categorical
self.space = Space(dimensions)
self._initial_samples = None
self._initial_point_generator = cook_initial_point_generator(
initial_point_generator)
if self._initial_point_generator is not None:
transformer = self.space.get_transformer()
self._initial_samples = self._initial_point_generator.generate(
self.space.dimensions, n_initial_points,
random_state=self.rng.randint(0, np.iinfo(np.int32).max))
self.space.set_transformer(transformer)
# Configure std estimator
self.std_estimator = std_estimator
if std_estimator_kwargs is None:
std_estimator_kwargs = dict()
allowed_std_est = ["BDD","BCD","DDP","L1BDD","L1DDP","MP"]
if self.std_estimator not in allowed_std_est:
raise ValueError("expected std_estimator to be in %s, got %s" %
(",".join(allowed_std_est), self.std_estimator))
self.scaled = self.acq_func_kwargs.get("scaled", False)
# build std_estimator
import numpy as np
est_random_state = self.rng.randint(0, np.iinfo(np.int32).max)
self.std_estimator = cook_std_estimator(
std_estimator,
space=self.space,
random_state=est_random_state,
std_estimator_params=std_estimator_kwargs)
# Configure estimator
# check support of base_estimator if exists
if not is_supported(base_estimator):
raise ValueError(
"Estimator type: %s is not supported." % base_estimator)
if base_estimator_kwargs is None:
base_estimator_kwargs = dict()
# build base_estimator
base_estimator = cook_estimator(
base_estimator,
self.std_estimator,
space=self.space,
random_state=est_random_state,
base_estimator_params=base_estimator_kwargs)
self.base_estimator_ = base_estimator
# Configure Optimizer
self.acq_optimizer = acq_optimizer
# record other arguments
if acq_optimizer_kwargs is None:
acq_optimizer_kwargs = dict()
if std_estimator_kwargs is None:
std_estimator_kwargs = dict()
self.acq_optimizer_kwargs = acq_optimizer_kwargs
self.n_points = acq_optimizer_kwargs.get("n_points", 10000)
self.gurobi_env = acq_optimizer_kwargs.get("env", None)
self.gurobi_timelimit = acq_optimizer_kwargs.get("gurobi_timelimit", None)
# Initialize storage for optimization
if not isinstance(model_queue_size, (int, type(None))):
raise TypeError("model_queue_size should be an int or | |
= set(indices).intersection(set(data['subset']))
if float(len(indices)) != 0.0:
pre = float(len(xx)) / float(len(indices))
else:
pre = 0.0
rec = float(len(xx)) / float(len(data['subset']))
item = (trial_id, fold_id, k_fold, num_passes, num_tr, mu, posi_ratio, s)
results[item] = {'algo_para': [trial_id, fold_id, s, para_r, para_g],
'auc_wt': roc_auc_score(y_true=data['y_tr'][te_index],
y_score=np.dot(data['x_tr'][te_index], wt)),
'f1_score': 2. * pre * rec / (pre + rec) if (pre + rec) > 0 else 0.0,
'aucs': aucs, 'rts': rts, 'wt': wt, 'nonzero_wt': np.count_nonzero(wt)}
print('trial-%d fold-%d %s p-ratio:%.2f auc: %.4f para_r:%.4f para_g:%.4f' %
(trial_id, fold_id, s, posi_ratio, results[item]['auc_wt'], para_r, para_g))
sys.stdout.flush()
return results
def cv_sht_am(para):
    """Cross-validate the SHT-AM solver over a (sparsity, block-size) grid.

    Loads one pickled synthetic data set selected by ``para``, then for every
    outer fold and every candidate ``(para_s, para_b)`` pair runs an inner
    k-fold CV on the training indices and records the mean AUC, keeping the
    best-scoring parameters per ``(trial_id, fold_id)``.

    :param para: tuple ``(trial_id, k_fold, num_passes, num_tr, mu,
        posi_ratio, s)`` identifying one experiment configuration.
    :return: ``(para, auc_wt, cv_wt_results)`` where ``auc_wt`` maps
        ``(trial_id, fold_id)`` to the best mean AUC / parameters found and
        ``cv_wt_results`` is the mean-AUC grid indexed by ``(ind_s, ind_b)``.
    """
    trial_id, k_fold, num_passes, num_tr, mu, posi_ratio, s = para
    f_name = data_path + 'data_trial_%02d_tr_%03d_mu_%.1f_p-ratio_%.2f.pkl'
    data = pkl.load(open(f_name % (trial_id, num_tr, mu, posi_ratio), 'rb'))[s]
    # placeholder array passed to the C extension for unused inputs
    __ = np.empty(shape=(1,), dtype=float)
    # candidate parameters: sparsity levels and block sizes
    # NOTE(review): under Python 3, `640 / _` yields floats — presumably block
    # sizes were meant to be integers; confirm the C extension accepts floats.
    list_s, list_b = range(10, 101, 10), [640 / _ for _ in [1, 2, 4, 8, 10]]
    auc_wt, cv_wt_results = dict(), np.zeros((len(list_s), len(list_b)))
    # solver configuration flags packed into one float array for the C call
    step_len, verbose, record_aucs, stop_eps = 1e8, 0, 0, 1e-4
    global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
    for fold_id, (ind_s, para_s), (ind_b, para_b) in product(range(k_fold), enumerate(list_s), enumerate(list_b)):
        s_time = time.time()
        algo_para = (para_s, para_b, (trial_id, fold_id, s, num_passes, posi_ratio, stop_eps))
        tr_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['tr_index']
        if (trial_id, fold_id) not in auc_wt:  # cross validate based on tr_index
            auc_wt[(trial_id, fold_id)] = {'auc': 0.0, 'para': algo_para, 'num_nonzeros': 0.0}
        list_auc_wt = np.zeros(k_fold)
        list_num_nonzeros_wt = np.zeros(k_fold)
        list_epochs = np.zeros(k_fold)
        # inner CV split over the outer-fold training indices (deterministic: no shuffle)
        kf = KFold(n_splits=k_fold, shuffle=False)
        for ind, (sub_tr_ind, sub_te_ind) in enumerate(kf.split(np.zeros(shape=(len(tr_index), 1)))):
            sub_x_tr = np.asarray(data['x_tr'][tr_index[sub_tr_ind]], dtype=float)
            sub_y_tr = np.asarray(data['y_tr'][tr_index[sub_tr_ind]], dtype=float)
            sub_x_te = data['x_tr'][tr_index[sub_te_ind]]
            sub_y_te = data['y_tr'][tr_index[sub_te_ind]]
            # external C solver; assumed to return (weights, aucs, run_times,
            # epochs) — TODO confirm against the extension's signature
            _ = c_algo_sht_auc(sub_x_tr, __, __, __, sub_y_tr, 0, data['p'], global_paras, 0,
                               para_s, para_b, 1.0, 0.1)
            wt, aucs, rts, epochs = _
            list_auc_wt[ind] = roc_auc_score(y_true=sub_y_te, y_score=np.dot(sub_x_te, wt))
            list_num_nonzeros_wt[ind] = np.count_nonzero(wt)
            list_epochs[ind] = epochs[0]
        cv_wt_results[ind_s, ind_b] = np.mean(list_auc_wt)
        # keep the best (highest mean inner-CV AUC) parameters for this fold
        if auc_wt[(trial_id, fold_id)]['auc'] < np.mean(list_auc_wt):
            auc_wt[(trial_id, fold_id)]['auc'] = float(np.mean(list_auc_wt))
            auc_wt[(trial_id, fold_id)]['para'] = algo_para
            auc_wt[(trial_id, fold_id)]['num_nonzeros'] = float(np.mean(list_num_nonzeros_wt))
        print("trial-%d fold-%d s: %d para_s:%03d para_b:%03d auc:%.4f epochs:%02d run_time: %.6f" %
              (trial_id, fold_id, s, para_s, para_b, float(np.mean(list_auc_wt)),
               float(np.mean(list_epochs)), time.time() - s_time))
        sys.stdout.flush()
    return para, auc_wt, cv_wt_results
def test_sht_am(para):
    """Evaluate SHT-AM on held-out folds with the model-selected parameters.

    Loads the data set selected by ``para`` and the model-selection results
    ('ms_00_05_sht_am.pkl'), retrains on each fold's training indices with the
    best ``(para_s, para_b)`` found during CV, and reports test AUC, feature
    precision/recall against the known support ``data['subset']``, and F1.

    :param para: tuple ``(trial_id, k_fold, num_passes, num_tr, mu,
        posi_ratio, s)`` — must match a key of the model-selection pickle.
    :return: dict mapping a per-fold item key to its metrics and weights.
    """
    trial_id, k_fold, num_passes, num_tr, mu, posi_ratio, s = para
    f_name = data_path + 'data_trial_%02d_tr_%03d_mu_%.1f_p-ratio_%.2f.pkl'
    data = pkl.load(open(f_name % (trial_id, num_tr, mu, posi_ratio), 'rb'))[s]
    # placeholder array passed to the C extension for unused inputs
    __ = np.empty(shape=(1,), dtype=float)
    ms = pkl.load(open(data_path + 'ms_00_05_sht_am.pkl', 'rb'))
    results = dict()
    # record_aucs=1: track AUC over time during training
    step_len, verbose, record_aucs, stop_eps = 1e2, 0, 1, 1e-4
    global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
    for fold_id in range(k_fold):
        # parameters chosen by cross-validation for this (trial, fold)
        para_s, para_b, _ = ms[para]['sht_am']['auc_wt'][(trial_id, fold_id)]['para']
        tr_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['tr_index']
        te_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['te_index']
        x_tr = np.asarray(data['x_tr'][tr_index], dtype=float)
        y_tr = np.asarray(data['y_tr'][tr_index], dtype=float)
        _ = c_algo_sht_auc(x_tr, __, __, __, y_tr, 0, data['p'], global_paras, 0, para_s, para_b, 1.0, 0.0)
        wt, aucs, rts, epochs = _
        item = (trial_id, fold_id, k_fold, num_passes, num_tr, mu, posi_ratio, s)
        # support-recovery metrics: overlap of selected features with the true subset
        xx = set(np.nonzero(wt)[0]).intersection(set(data['subset']))
        pre, rec = float(len(xx)) * 1. / float(len(np.nonzero(wt)[0])), float(len(xx)) / float(len(data['subset']))
        results[item] = {'algo_para': [trial_id, fold_id, s, para_s, para_b],
                         'auc_wt': roc_auc_score(y_true=data['y_tr'][te_index],
                                                 y_score=np.dot(data['x_tr'][te_index], wt)),
                         'f1_score': 2. * pre * rec / (pre + rec) if (pre + rec) > 0 else 0.0,
                         'aucs': aucs, 'rts': rts, 'wt': wt, 'nonzero_wt': np.count_nonzero(wt)}
        print('trial-%d fold-%d p-ratio:%.2f s: %d para_s: %d para_b: %d auc: %.4f para_s:%03d para_b:%03d' %
              (trial_id, fold_id, posi_ratio, s, para_s, para_b, results[item]['auc_wt'], para_s, para_b))
        sys.stdout.flush()
    return results
def cv_sto_iht(para):
    """Cross-validate the StoIHT solver over a (sparsity, block-size) grid.

    Same protocol as ``cv_sht_am`` but calling ``c_algo_sto_iht``: for every
    outer fold and every candidate ``(para_s, para_b)`` pair, run an inner
    k-fold CV on the training indices and keep the best mean-AUC parameters.

    :param para: tuple ``(trial_id, k_fold, num_passes, num_tr, mu,
        posi_ratio, fig_i)`` identifying one experiment configuration.
    :return: ``(para, auc_wt, cv_wt_results)`` — best per-fold results and
        the mean-AUC grid indexed by ``(ind_s, ind_b)``.
    """
    trial_id, k_fold, num_passes, num_tr, mu, posi_ratio, fig_i = para
    # get data
    f_name = data_path + 'data_trial_%02d_tr_%03d_mu_%.1f_p-ratio_%.2f.pkl'
    data = pkl.load(open(f_name % (trial_id, num_tr, mu, posi_ratio), 'rb'))[fig_i]
    # placeholder array passed to the C extension for unused inputs
    __ = np.empty(shape=(1,), dtype=float)
    # candidate parameters: sparsity levels and block sizes
    # NOTE(review): `640 / _` yields floats under Python 3 — confirm the C
    # extension accepts non-integer block sizes.
    list_s, list_b = range(10, 101, 10), [640 / _ for _ in [1, 2, 4, 8, 10]]
    auc_wt, cv_wt_results = dict(), np.zeros((len(list_s), len(list_b)))
    step_len, verbose, record_aucs, stop_eps = 1e8, 0, 0, 1e-4
    global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
    for fold_id, (ind_s, para_s), (ind_b, para_b) in product(range(k_fold), enumerate(list_s), enumerate(list_b)):
        s_time = time.time()
        algo_para = (para_s, para_b, (trial_id, fold_id, fig_i, num_passes, posi_ratio, stop_eps))
        tr_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['tr_index']
        if (trial_id, fold_id) not in auc_wt:  # cross validate based on tr_index
            auc_wt[(trial_id, fold_id)] = {'auc': 0.0, 'para': algo_para, 'num_nonzeros': 0.0}
        list_auc_wt = np.zeros(k_fold)
        list_num_nonzeros_wt = np.zeros(k_fold)
        list_epochs = np.zeros(k_fold)
        # deterministic inner split (no shuffle) over the outer-fold training set
        kf = KFold(n_splits=k_fold, shuffle=False)
        for ind, (sub_tr_ind, sub_te_ind) in enumerate(kf.split(np.zeros(shape=(len(tr_index), 1)))):
            sub_x_tr = np.asarray(data['x_tr'][tr_index[sub_tr_ind]], dtype=float)
            sub_y_tr = np.asarray(data['y_tr'][tr_index[sub_tr_ind]], dtype=float)
            sub_x_te = data['x_tr'][tr_index[sub_te_ind]]
            sub_y_te = data['y_tr'][tr_index[sub_te_ind]]
            # external C solver; assumed to return (weights, aucs, run_times, epochs)
            _ = c_algo_sto_iht(sub_x_tr, __, __, __, sub_y_tr, 0, data['p'], global_paras, para_s, para_b, 1., 0.0)
            wt, aucs, rts, epochs = _
            list_auc_wt[ind] = roc_auc_score(y_true=sub_y_te, y_score=np.dot(sub_x_te, wt))
            list_num_nonzeros_wt[ind] = np.count_nonzero(wt)
            list_epochs[ind] = epochs[0]
        cv_wt_results[ind_s, ind_b] = np.mean(list_auc_wt)
        # keep the best (highest mean inner-CV AUC) parameters for this fold
        if auc_wt[(trial_id, fold_id)]['auc'] < np.mean(list_auc_wt):
            auc_wt[(trial_id, fold_id)]['auc'] = float(np.mean(list_auc_wt))
            auc_wt[(trial_id, fold_id)]['para'] = algo_para
            auc_wt[(trial_id, fold_id)]['num_nonzeros'] = float(np.mean(list_num_nonzeros_wt))
        print("trial-%d fold-%d para_s: %03d para_b: %03d auc: %.4f epochs: %02d run_time: %.6f" %
              (trial_id, fold_id, para_s, para_b, float(np.mean(list_auc_wt)),
               float(np.mean(list_epochs)), time.time() - s_time))
        sys.stdout.flush()
    return para, auc_wt, cv_wt_results
def test_sto_iht(para):
    """Evaluate StoIHT on held-out folds with the model-selected parameters.

    Mirrors ``test_sht_am`` but uses ``c_algo_sto_iht`` and the
    'ms_00_05_sto_iht.pkl' model-selection results.

    :param para: tuple ``(trial_id, k_fold, num_passes, num_tr, mu,
        posi_ratio, fig_i)`` — must match a key of the model-selection pickle.
    :return: dict mapping a per-fold item key to its metrics and weights.
    """
    trial_id, k_fold, num_passes, num_tr, mu, posi_ratio, fig_i = para
    f_name = data_path + 'data_trial_%02d_tr_%03d_mu_%.1f_p-ratio_%.2f.pkl'
    data = pkl.load(open(f_name % (trial_id, num_tr, mu, posi_ratio), 'rb'))[fig_i]
    # placeholder array passed to the C extension for unused inputs
    __ = np.empty(shape=(1,), dtype=float)
    ms = pkl.load(open(data_path + 'ms_00_05_sto_iht.pkl', 'rb'))
    results = dict()
    # record_aucs=1: track AUC over time during training
    step_len, verbose, record_aucs, stop_eps = 1e2, 0, 1, 1e-4
    global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
    for fold_id in range(k_fold):
        # parameters chosen by cross-validation for this (trial, fold)
        para_s, para_b, _ = ms[para]['sto_iht']['auc_wt'][(trial_id, fold_id)]['para']
        tr_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['tr_index']
        te_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['te_index']
        x_tr = np.asarray(data['x_tr'][tr_index], dtype=float)
        y_tr = np.asarray(data['y_tr'][tr_index], dtype=float)
        _ = c_algo_sto_iht(x_tr, __, __, __, y_tr, 0, data['p'], global_paras, para_s, para_b, 1., 0.0)
        wt, aucs, rts, epochs = _
        item = (trial_id, fold_id, k_fold, num_passes, num_tr, mu, posi_ratio, fig_i)
        # support-recovery metrics: overlap of selected features with the true subset
        xx = set(np.nonzero(wt)[0]).intersection(set(data['subset']))
        pre, rec = float(len(xx)) * 1. / float(len(np.nonzero(wt)[0])), float(len(xx)) / float(len(data['subset']))
        results[item] = {'algo_para': [trial_id, fold_id, para_s, para_b],
                         'auc_wt': roc_auc_score(y_true=data['y_tr'][te_index],
                                                 y_score=np.dot(data['x_tr'][te_index], wt)),
                         'f1_score': 2. * pre * rec / (pre + rec) if (pre + rec) > 0 else 0.0,
                         'aucs': aucs, 'rts': rts, 'wt': wt, 'nonzero_wt': np.count_nonzero(wt)}
        print('trial-%d fold-%d %s p-ratio:%.2f auc: %.4f para_s:%03d para_b:%03d' %
              (trial_id, fold_id, fig_i, posi_ratio, results[item]['auc_wt'], para_s, para_b))
        sys.stdout.flush()
    return results
def cv_hsg_ht(para):
    """Cross-validate the HSG-HT solver over a (sparsity, tau) grid.

    Same protocol as ``cv_sht_am`` but calling ``c_algo_hsg_ht`` with fixed
    ``para_c``/``para_zeta`` and sweeping ``para_s`` (sparsity) and
    ``para_tau`` instead of a block size.

    :param para: tuple ``(trial_id, k_fold, num_passes, num_tr, mu,
        posi_ratio, s)`` identifying one experiment configuration.
    :return: ``(para, auc_wt, cv_wt_results)`` — best per-fold results and
        the mean-AUC grid indexed by ``(ind_s, ind_c)``.
    """
    trial_id, k_fold, num_passes, num_tr, mu, posi_ratio, s = para
    f_name = data_path + 'data_trial_%02d_tr_%03d_mu_%.1f_p-ratio_%.2f.pkl'
    data = pkl.load(open(f_name % (trial_id, num_tr, mu, posi_ratio), 'rb'))[s]
    # placeholder array passed to the C extension for unused inputs
    __ = np.empty(shape=(1,), dtype=float)
    # candidate parameters: sparsity levels and tau values
    list_s = range(10, 101, 10)
    list_tau = [1., 10., 100., 1000.]
    auc_wt, cv_wt_results = dict(), np.zeros((len(list_s), len(list_tau)))
    step_len, verbose, record_aucs, stop_eps = 1e8, 0, 0, 1e-4
    global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
    for fold_id, (ind_s, para_s), (ind_c, para_tau) in product(range(k_fold), enumerate(list_s), enumerate(list_tau)):
        s_time = time.time()
        algo_para = (para_s, para_tau, (trial_id, fold_id, s, num_passes, posi_ratio, stop_eps))
        tr_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['tr_index']
        if (trial_id, fold_id) not in auc_wt:  # cross validate based on tr_index
            auc_wt[(trial_id, fold_id)] = {'auc': 0.0, 'para': algo_para, 'num_nonzeros': 0.0}
        list_auc_wt = np.zeros(k_fold)
        list_num_nonzeros_wt = np.zeros(k_fold)
        list_epochs = np.zeros(k_fold)
        # deterministic inner split (no shuffle) over the outer-fold training set
        kf = KFold(n_splits=k_fold, shuffle=False)
        for ind, (sub_tr_ind, sub_te_ind) in enumerate(kf.split(np.zeros(shape=(len(tr_index), 1)))):
            sub_x_tr = np.asarray(data['x_tr'][tr_index[sub_tr_ind]], dtype=float)
            sub_y_tr = np.asarray(data['y_tr'][tr_index[sub_tr_ind]], dtype=float)
            sub_x_te = data['x_tr'][tr_index[sub_te_ind]]
            sub_y_te = data['y_tr'][tr_index[sub_te_ind]]
            # fixed solver constants; not tuned in this sweep
            para_c, para_zeta = 3.0, 1.033
            # external C solver; assumed to return (weights, aucs, run_times, epochs)
            _ = c_algo_hsg_ht(sub_x_tr, __, __, __, sub_y_tr, 0, data['p'], global_paras,
                              para_s, para_tau, para_zeta, para_c, 0.0)
            wt, aucs, rts, epochs = _
            list_auc_wt[ind] = roc_auc_score(y_true=sub_y_te, y_score=np.dot(sub_x_te, wt))
            list_num_nonzeros_wt[ind] = np.count_nonzero(wt)
            list_epochs[ind] = epochs[0]
        cv_wt_results[ind_s, ind_c] = np.mean(list_auc_wt)
        # keep the best (highest mean inner-CV AUC) parameters for this fold
        if auc_wt[(trial_id, fold_id)]['auc'] < np.mean(list_auc_wt):
            auc_wt[(trial_id, fold_id)]['auc'] = float(np.mean(list_auc_wt))
            auc_wt[(trial_id, fold_id)]['para'] = algo_para
            auc_wt[(trial_id, fold_id)]['num_nonzeros'] = float(np.mean(list_num_nonzeros_wt))
        print("trial-%d fold-%d para_s: %03d para_c: %.3e auc: %.4f epochs: %02d run_time: %.6f" %
              (trial_id, fold_id, para_s, para_tau, float(np.mean(list_auc_wt)),
               float(np.mean(list_epochs)), time.time() - s_time))
        sys.stdout.flush()
    return para, auc_wt, cv_wt_results
def test_hsg_ht(para):
trial_id, k_fold, num_passes, num_tr, mu, posi_ratio, fig_i = para
f_name = data_path + 'data_trial_%02d_tr_%03d_mu_%.1f_p-ratio_%.2f.pkl'
data = pkl.load(open(f_name % (trial_id, num_tr, mu, posi_ratio), 'rb'))[fig_i]
__ = np.empty(shape=(1,), dtype=float)
ms = pkl.load(open(data_path + 'ms_00_05_hsg_ht.pkl', 'rb'))
results = dict()
step_len, verbose, record_aucs, stop_eps = 1e2, 0, 1, 1e-4
global_paras = np.asarray([num_passes, step_len, verbose, record_aucs, stop_eps], dtype=float)
for fold_id in range(k_fold):
para_s, para_tau, _ = ms[para]['hsg_ht']['auc_wt'][(trial_id, fold_id)]['para']
tr_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['tr_index']
te_index = data['trial_%d_fold_%d' % (trial_id, fold_id)]['te_index']
x_tr = np.asarray(data['x_tr'][tr_index], dtype=float)
y_tr = np.asarray(data['y_tr'][tr_index], dtype=float)
para_c, para_zeta = 3.0, 1.033
_ = c_algo_hsg_ht(x_tr, __, __, __, y_tr, 0, data['p'], global_paras, para_s, para_tau, para_zeta, para_c, 0.0)
wt, aucs, rts, epochs = _
item | |
result:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Error de tipos: tipo " + col.tipo.dato + " columna " + col.nombre + " valor a insertar " + str(
val.tipo)))
flag = False
break
else:
bas1 = validaCheck(
col, val, columnas, nodo.listValores)
if (bas1 == 0):
if validarUnique(col, val.valor, tabla):
if validarPK(col, val.valor, tabla):
if validarFK(col, val.valor, tabla, tablaSimbolos):
flag = True
else:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "El valor " + str(
val.valor) + " no corresponde a ningún valor de llave foránea"))
else:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "El valor " + str(
val.valor) + " infringe la condición de llave primaria"))
else:
listaSemanticos.append(Error.ErrorS(
"Error Semantico",
"El valor " + val.valor + " infringe la condición de columna única"))
elif bas1 == 1:
listaSemanticos.append(Error.ErrorS(
"Error Semantico",
"La columna " + col.nombre + " no superó la condición CHECK"))
return
elif bas1 == 2:
flag = False
listaSemanticos.append(Error.ErrorS("Error Semantico", "La columna " + col.nombre +
" en su condición CHECK contienen un operario inexistente dentro de la tabla actual "))
return
if flag:
flag = False
tupla = validarDefault2(columnas, nodo.listValores, tabla, tablaSimbolos)
rs = jBase.insert(useActual, tabla.nombre, tupla)
if rs == 0:
consola += "Se insertó con éxito la tupla" + str(tupla) + "\n"
elif rs == 1:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "Fallo al insertar la tupla: " + str(tupla)))
elif rs == 2:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Fallo al insertar, la base de datos '%s' no existe " % useActual))
elif rs == 3:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Fallo al insertar, la tabla '%s' no existe" % tabla.nombre))
elif rs == 4:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "Fallo al insertar, Llaves duplicadas"))
elif rs == 5:
listaSemanticos.append(Error.ErrorS("Error Semantico",
"Fallo al insertar, La tupla excede el número de columnas"))
elif b["cod"] == 1:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "La columna " + b["col"] + "no existe en la tabla"))
elif b["cod"] == 2:
listaSemanticos.append(Error.ErrorS(
"Error Semantico", "La columna " + b["col"] + " no puede ser nula"))
else:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "El numero de columnas a insertar no coincide"))
else:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "la base de datos " + useActual + " no ha sido encontrada"))
else:
listaSemanticos.append(
Error.ErrorS("Error Semantico", "la base de datos " + useActual + " no ha sido encontrada"))
def validarUpdate(tupla, nombres, tablaSimbolos, tabla, diccionario, pk):
    """Validate an UPDATE statement's values and, if valid, apply it.

    Checks that every referenced column exists and is nullable-compatible,
    type-checks each value, evaluates CHECK and FK constraints, and finally
    calls ``jBase.update``. Errors are appended to the module-level
    ``listaSemanticos``; success messages are appended to ``consola``.

    :param tupla: expressions (one per column) with the new values.
    :param nombres: names of the columns being updated.
    :param tablaSimbolos: symbol table used to resolve expressions and FKs.
    :param tabla: table object being updated.
    :param diccionario: column->value mapping passed to the storage layer.
    :param pk: primary-key value identifying the row to update.
    :return: False on a failed CHECK evaluation, otherwise None.
    """
    result = False
    flag = False
    global consola
    # Verify column existence / non-null constraints up front.
    columnas = nombres
    b = tabla.comprobarNulas2(nombres)
    if b["cod"] == 0:
        # Type-check each (column, value) pair.
        for i in range(len(columnas)):
            # BUGFIX: reset per column — previously a success from an earlier
            # column leaked through when a later column's type branch did not
            # assign `result` (BOOLEAN mismatch or unknown type).
            result = False
            col = tabla.getColumna(columnas[i])
            val = Interpreta_Expresion(tupla[i], tablaSimbolos, tabla)
            if col.tipo.tipo == TipoDato.NUMERICO:
                result = validarTiposNumericos(
                    col.tipo.dato.lower(), val)
            elif col.tipo.tipo == TipoDato.CHAR:
                if val.tipo == Expresion.CADENA:
                    result = validarTiposChar(col.tipo, val)
                else:
                    result = False
                    listaSemanticos.append(Error.ErrorS(
                        "Error Semantico",
                        "Error de tipos: tipo " + col.tipo.dato + " columna " + col.nombre + " valor a insertar " + str(
                            val.tipo)))
            elif col.tipo.tipo == TipoDato.FECHA:
                result = validarTiposFecha(
                    col.tipo.dato.lower(), val)
            elif col.tipo.tipo == TipoDato.BOOLEAN:
                if val.tipo == Expresion.BOOLEAN:
                    result = True
            if not result:
                listaSemanticos.append(Error.ErrorS("Error Semantico",
                                                    "Error de tipos: tipo " + col.tipo.dato + " columna " + col.nombre + " valor a insertar " + str(
                                                        val.tipo)))
                break
            else:
                bas1 = validaCheck(
                    col, val, columnas, tupla)
                if bas1 == 0:
                    # NOTE(review): unlike the INSERT path, the UNIQUE and
                    # PRIMARY KEY checks were disabled here (dead `if True:`
                    # placeholders, now removed); only the FK check runs.
                    if validarFK(col, val.valor, tabla, tablaSimbolos):
                        flag = True
                    else:
                        listaSemanticos.append(Error.ErrorS(
                            "Error Semantico",
                            "El valor " + str(val.valor) + " no corresponde a ningún valor de llave foránea"))
                elif bas1 == 1:
                    listaSemanticos.append(Error.ErrorS(
                        "Error Semantico", "La columna " + col.nombre + " no superó la condición CHECK"))
                    return False
                elif bas1 == 2:
                    flag = False
                    listaSemanticos.append(Error.ErrorS("Error Semantico", "La columna " + col.nombre +
                                                        " en su condición CHECK contienen un operario inexistente dentro de la tabla actual "))
                    return False
        # NOTE(review): `flag` stays True if ANY column passed FK validation,
        # so a later type error (break) does not prevent the update — this
        # mirrors the original control flow; confirm it is intended.
        if flag:
            flag = False
            # Value is unused, but the call may have side effects in
            # Interpreta_Expresion — kept for behavioral parity.
            tuplas = validarDefault2(columnas, tupla, tabla, tablaSimbolos)
            rs = jBase.update(useActual, tabla.nombre, diccionario, pk)
            if rs == 0:
                consola += "Se actualizó con éxito la tupla" + str(tupla) + "\n"
            elif rs == 1:
                listaSemanticos.append(Error.ErrorS("Error Semantico", "Fallo al insertar la tupla: " + str(tupla)))
            elif rs == 2:
                listaSemanticos.append(
                    Error.ErrorS("Error Semantico", "Fallo al insertar, la base de datos '%s' no existe " % useActual))
            elif rs == 3:
                listaSemanticos.append(
                    Error.ErrorS("Error Semantico", "Fallo al insertar, la tabla '%s' no existe" % tabla.nombre))
            elif rs == 4:
                listaSemanticos.append(
                    Error.ErrorS("Error Semantico", "Fallo al insertar, La llave primaria '%s' no existe" % str(pk)))
    elif b["cod"] == 1:
        # BUGFIX: added the missing space before "no existe".
        listaSemanticos.append(Error.ErrorS(
            "Error Semantico", "La columna " + b["col"] + " no existe en la tabla"))
    elif b["cod"] == 2:
        listaSemanticos.append(Error.ErrorS(
            "Error Semantico", "La columna " + b["col"] + " no puede ser nula"))
# METHOD TO BUILD THE COMPLETE TUPLE (explicit values + defaults)
def validarDefault(listaC, listaV, tabla, tablaSimbolos):
    """Build the full row for an insert from explicit values and defaults.

    Walks the table's columns in index order; a column named in ``listaC``
    takes the matching value from ``listaV``, otherwise its DEFAULT
    expression is evaluated (or None is used when there is no default).

    BUGFIX: the original never reset ``encontrado`` between columns and only
    advanced ``indice`` when an explicit value was found, so any column after
    the first match could never receive its default and the function returned
    None. Both flags are now maintained per column.

    :param listaC: column-name nodes (each with a ``.valor`` name).
    :param listaV: value nodes (each with a ``.valor``), parallel to listaC.
    :param tabla: table whose columns define order and defaults.
    :param tablaSimbolos: symbol table for evaluating DEFAULT expressions.
    :return: the complete tuple, or None if it could not be completed.
    """
    tupla = []
    indice = 0
    for i in tabla.columnas:
        if tabla.columnas[i].index == indice:
            encontrado = False  # reset per column (was set once, globally)
            for j in range(len(listaC)):
                if tabla.columnas[i].nombre == listaC[j].valor:
                    tupla.append(listaV[j].valor)
                    encontrado = True
                    break
            if not encontrado:
                # No explicit value: fall back to the column DEFAULT or NULL.
                if tabla.columnas[i].default != None:
                    tupla.append(Interpreta_Expresion(tabla.columnas[i].default, tablaSimbolos, tabla).valor)
                else:
                    tupla.append(None)
            indice += 1  # advance regardless of how the value was obtained
    if (len(tabla.columnas) == len(tupla)):
        return tupla
# METHOD TO BUILD THE COMPLETE TUPLE (plain column names; values are expressions)
def validarDefault2(listaC, listaV, tabla, tablaSimbolos):
    """Build the full row from plain column names and value expressions.

    Like ``validarDefault`` but ``listaC`` holds plain name strings and every
    explicit value in ``listaV`` is evaluated through
    ``Interpreta_Expresion`` before being stored.

    BUGFIX: the original never reset ``encontrado`` between columns and only
    advanced ``indice`` on an explicit match, so defaults after the first
    matched column were never filled and the function returned None. Both
    are now maintained per column.

    :param listaC: column names (strings).
    :param listaV: value expressions, parallel to listaC.
    :param tabla: table whose columns define order and defaults.
    :param tablaSimbolos: symbol table for evaluating expressions.
    :return: the complete tuple, or None if it could not be completed.
    """
    tupla = []
    indice = 0
    for i in tabla.columnas:
        if tabla.columnas[i].index == indice:
            encontrado = False  # reset per column (was set once, globally)
            for j in range(len(listaC)):
                if tabla.columnas[i].nombre == listaC[j]:
                    tupla.append(Interpreta_Expresion(listaV[j], tablaSimbolos, tabla).valor)
                    encontrado = True
                    break
            if not encontrado:
                # No explicit value: fall back to the column DEFAULT or NULL.
                if tabla.columnas[i].default != None:
                    tupla.append(Interpreta_Expresion(tabla.columnas[i].default, tablaSimbolos, tabla).valor)
                else:
                    tupla.append(None)
            indice += 1  # advance regardless of how the value was obtained
    if (len(tabla.columnas) == len(tupla)):
        return tupla
# METHOD TO VALIDATE FOREIGN KEYS
def validarFK(col, val, tabla, tablaSimbolos):
    """Return True when ``val`` satisfies ``col``'s FK constraint (or none exists).

    When the column declares a foreign key, the referenced table is loaded
    via ``jBase.extractTable`` and ``val`` must match the referenced column
    in at least one row; a missing or empty referenced table fails.

    :param col: column object; ``col.foreign_key`` is None or a dict with
        'tabla' and 'columna' keys.
    :param val: value being inserted/updated.
    :param tabla: table being modified (unused here; kept for interface parity).
    :param tablaSimbolos: symbol table used to resolve the referenced column.
    """
    if col.foreign_key is None:  # idiom: identity comparison with None
        return True  # no FK constraint on this column
    registro = jBase.extractTable(useActual, col.foreign_key["tabla"])
    indice = tablaSimbolos.getColumna(
        useActual, col.foreign_key["tabla"], col.foreign_key["columna"]).index
    if not registro:  # None or empty: nothing can match
        return False
    return any(val == fila[indice] for fila in registro)
# METHOD TO VALIDATE CHECK CONSTRAINTS
def validaCheck(col, val, columnas, valores):
    """Evaluate the column's CHECK constraint against ``val``.

    Returns 0 when the constraint passes (or there is none), 1 when it
    fails, and 2 when the constraint's right operand names a column that is
    not present in ``columnas``.
    """
    if col.check is None:
        return 0  # no CHECK constraint to satisfy
    condicion = col.check["condicion"]
    derecho = condicion.opDer
    if derecho.tipo == Expresion.ID:
        # Right operand is a column reference: resolve it among the
        # columns supplied by this statement.
        for pos, nombre in enumerate(columnas):
            if nombre == derecho.valor:
                operacion = SOperacion(val, valores[pos], condicion.operador)
                return 0 if Interpreta_Expresion(operacion, None, None).valor else 1
        return 2  # referenced column not part of this statement
    # Right operand is a literal: evaluate the comparison directly.
    operacion = SOperacion(val, derecho, condicion.operador)
    return 0 if Interpreta_Expresion(operacion, None, None).valor else 1
# METHOD TO VALIDATE UNIQUE CONSTRAINTS
def validarUnique(col, val, tabla):
    """Return True when ``val`` does not collide in a UNIQUE column.

    :param col: column object with ``.unique`` flag and ``.index`` position.
    :param val: candidate value.
    :param tabla: table whose stored rows are scanned for duplicates.
    """
    global useActual
    if col.unique != True:
        return True  # no UNIQUE constraint: nothing to check
    # Only fetch the table contents when the check is actually needed
    # (the original extracted the whole table even for non-unique columns).
    registros = jBase.extractTable(useActual, tabla.nombre)
    indice = col.index
    for fila in registros:
        if fila[indice] == val:
            return False  # duplicate found
    return True
# METHOD TO VALIDATE PRIMARY KEYS
def validarPK(col, val, tabla):
    """Return True when ``val`` does not duplicate an existing PRIMARY KEY.

    :param col: column object with ``.primary_key`` flag and ``.index``.
    :param val: candidate key value.
    :param tabla: table whose stored rows are scanned for duplicates.
    """
    global useActual
    if col.primary_key != True:
        return True  # not a PK column: nothing to check
    # Only fetch the table contents when the check is actually needed
    # (the original extracted the whole table even for non-PK columns).
    registros = jBase.extractTable(useActual, tabla.nombre)
    if registros != None:  # an empty/absent table cannot contain duplicates
        indice = col.index
        for fila in registros:
            if fila[indice] == val:
                return False  # key already present
    return True
def validarTiposNumericos(dato, expresion):
    """Check that ``expresion`` is a numeric literal compatible with ``dato``.

    Integer types additionally enforce their storage range; the remaining
    numeric type names only constrain the literal's kind.
    """
    # Signed integer types and their inclusive upper bound (lower bound is
    # -(bound + 1), i.e. the usual two's-complement range).
    limites_enteros = {
        "smallint": 32767,
        "integer": 2147483647,
        "bigint": 9223372036854775807,
    }
    if dato in limites_enteros:
        if expresion.tipo in (Expresion.ENTERO, Expresion.NEGATIVO):
            tope = limites_enteros[dato]
            return -tope - 1 <= expresion.valor <= tope
        return False
    if dato in ("decimal", "numeric", "real", "double"):
        # Floating/decimal types accept decimal or negated literals.
        return expresion.tipo in (Expresion.DECIMAL, Expresion.NEGATIVO)
    if dato == "money":
        return expresion.tipo in (Expresion.DECIMAL, Expresion.ENTERO)
    return False
def validarTiposChar(dato, expresion):
    """Check that a string literal fits the declared character type.

    ``text`` accepts any length; the sized character types enforce the
    declared maximum length; anything else is rejected.
    """
    tipo = dato.dato.lower()
    if tipo == "text":
        return True
    if tipo in ("varying", "varchar", "character", "char"):
        return len(expresion.valor) <= dato.cantidad
    return False
def validarTiposFecha(dato, expresion):
if dato == "date":
if expresion.tipo == Expresion.FECHA:
return True
elif dato == "timestamp":
if | |
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright 2020, <NAME>, PKU.
"""
import queue
import sys
import math
import tensorflow as tf
import numpy as np
from tools.common import Notify
from loss import mvsnet_regression_loss
sys.path.append("../")
from cnn_wrapper.mvsnet import *
from homography_warping import *
from lstm import *
FLAGS = tf.app.flags.FLAGS
def deconv_gn(input_tensor,
              kernel_size,
              filters,
              strides,
              name,
              relu=False,
              center=False,
              scale=False,
              channel_wise=True,
              group=32,
              group_channel=8,
              padding='same',
              biased=False,
              reuse=tf.AUTO_REUSE):
    """Transposed convolution followed by group normalization (GN).

    :param input_tensor: 4-D NHWC tensor.
    :param kernel_size, filters, strides, padding: conv2d_transpose params.
    :param name: variable-scope name; GN variables live under ``name + '/gn'``.
    :param relu: apply a trailing ReLU when True.
    :param center, scale: learn GN beta / gamma when True; constants otherwise.
    :param channel_wise: when True, the group count is ``C // group_channel``
        (at least 1); otherwise ``min(group, C)``.
    :param biased: unused; kept for interface compatibility.
    :return: the normalized (optionally activated) NHWC tensor.
    """
    assert len(input_tensor.get_shape()) == 4
    # transposed convolution
    res = tf.layers.conv2d_transpose(input_tensor, kernel_size=kernel_size, filters=filters, padding=padding,
                                     strides=strides, reuse=reuse, name=name)
    # group normalization operates on NCHW
    x = tf.transpose(res, [0, 3, 1, 2])
    shape = tf.shape(x)
    N = shape[0]
    C = x.get_shape()[1]
    H = shape[2]
    W = shape[3]
    if channel_wise:
        # BUGFIX: use floor division — under Python 3 `/` yields a float,
        # which breaks the integer group count required by tf.reshape below.
        G = max(1, C // group_channel)
    else:
        G = min(group, C)
    # normalization: mean/variance per (group, spatial) slice
    x = tf.reshape(x, [N, G, C // G, H, W])
    mean, var = tf.nn.moments(x, [2, 3, 4], keep_dims=True)
    x = (x - mean) / tf.sqrt(var + 1e-5)
    # per channel scale and bias (gamma and beta)
    with tf.variable_scope(name + '/gn', reuse=reuse):
        if scale:
            gamma = tf.get_variable('gamma', [C], dtype=tf.float32, initializer=tf.ones_initializer())
        else:
            gamma = tf.constant(1.0, shape=[C])
        if center:
            beta = tf.get_variable('beta', [C], dtype=tf.float32, initializer=tf.zeros_initializer())
        else:
            beta = tf.constant(0.0, shape=[C])
        gamma = tf.reshape(gamma, [1, C, 1, 1])
        beta = tf.reshape(beta, [1, C, 1, 1])
    output = tf.reshape(x, [-1, C, H, W]) * gamma + beta
    # transpose back from NCHW to NHWC
    output = tf.transpose(output, [0, 2, 3, 1])
    if relu:
        output = tf.nn.relu(output, name + '/relu')
    return output
def conv_gn(input_tensor,
            kernel_size,
            filters,
            strides,
            name,
            relu=False,
            center=False,
            scale=False,
            channel_wise=True,
            group=32,
            group_channel=8,
            padding='same',
            biased=False,
            reuse=tf.AUTO_REUSE,
            dilation=1):
    """2-D convolution followed by group normalization (GN).

    Args:
        input_tensor: 4-D NHWC input tensor.
        kernel_size: convolution kernel size.
        filters: number of output channels.
        strides: convolution stride.
        name: variable-scope name for the conv and its GN parameters.
        relu: apply ReLU after normalization if True.
        center: learn a per-channel bias (beta) if True, else beta = 0.
        scale: learn a per-channel scale (gamma) if True, else gamma = 1.
        channel_wise: if True the number of groups is derived from
            ``group_channel`` (channels per group); otherwise ``group``
            groups are used.
        group: number of groups when ``channel_wise`` is False.
        group_channel: channels per group when ``channel_wise`` is True.
        padding: padding mode for the convolution.
        biased: unused; kept for signature compatibility.
        reuse: variable reuse mode.
        dilation: dilation rate of the convolution.

    Returns:
        The normalized (optionally ReLU-activated) 4-D NHWC tensor.
    """
    assert len(input_tensor.get_shape()) == 4
    # convolution (the original comment said "deconvolution" — this is a
    # forward conv; see deconv_gn for the transposed variant)
    res = tf.layers.conv2d(input_tensor, kernel_size=kernel_size,
                           filters=filters, padding=padding, strides=strides,
                           reuse=reuse, name=name, dilation_rate=dilation)
    # group normalization works per-channel-group, so go to NCHW first
    x = tf.transpose(res, [0, 3, 1, 2])
    shape = tf.shape(x)
    N = shape[0]
    C = x.get_shape()[1]
    H = shape[2]
    W = shape[3]
    if channel_wise:
        # BUGFIX: use floor division. Under Python 3, `C / group_channel`
        # is true division; a non-integer group count breaks the reshape
        # into [N, G, C // G, H, W] below.
        G = max(1, C // group_channel)
    else:
        G = min(group, C)
    # normalize within each group over (channels-in-group, H, W)
    x = tf.reshape(x, [N, G, C // G, H, W])
    mean, var = tf.nn.moments(x, [2, 3, 4], keep_dims=True)
    x = (x - mean) / tf.sqrt(var + 1e-5)
    # per-channel scale and bias (gamma and beta)
    with tf.variable_scope(name + '/gn', reuse=reuse):
        if scale:
            gamma = tf.get_variable('gamma', [C], dtype=tf.float32,
                                    initializer=tf.ones_initializer())
        else:
            gamma = tf.constant(1.0, shape=[C])
        if center:
            beta = tf.get_variable('beta', [C], dtype=tf.float32,
                                   initializer=tf.zeros_initializer())
        else:
            beta = tf.constant(0.0, shape=[C])
        gamma = tf.reshape(gamma, [1, C, 1, 1])
        beta = tf.reshape(beta, [1, C, 1, 1])
    output = tf.reshape(x, [-1, C, H, W]) * gamma + beta
    # transpose back: [bs, c, h, w] to [bs, h, w, c]
    output = tf.transpose(output, [0, 2, 3, 1])
    if relu:
        output = tf.nn.relu(output, name + '/relu')
    return output
def resnet_block_gn(input, kernel_size=3, filters=32, padding='same', strides=1,
                    reuse=tf.AUTO_REUSE, name=None, group=32, group_channel=8):
    """Two-layer residual block built from group-normalized convolutions.

    The first conv is ReLU-activated; the second is linear. Their outputs
    are summed and passed through a final ReLU.
    """
    branch = conv_gn(input, kernel_size, filters, strides,
                     relu=True, dilation=1, name=name + "_conv_0")
    residual = conv_gn(branch, kernel_size, filters, strides,
                       relu=False, dilation=1, name=name + "_conv_1")
    return tf.nn.relu(branch + residual)
def gateNet(input, input_channel, name):
    """Predict a per-pixel gating weight in (0, 1) for a cost volume.

    Note: ``input_channel`` is not used by the current implementation;
    it is kept for signature compatibility with callers.
    """
    hidden = conv_gn(input, kernel_size=3, filters=8, strides=1,
                     name=name + '_gate_conv_0', reuse=tf.AUTO_REUSE)
    hidden = resnet_block_gn(hidden, kernel_size=1, filters=8, name=name)
    logits = tf.layers.conv2d(hidden, kernel_size=3, filters=1,
                              padding='same', strides=1, reuse=tf.AUTO_REUSE,
                              name=name + "_gate_conv_1", dilation_rate=1)
    return tf.nn.sigmoid(logits)
def inference_prob_recurrent(images, cams, depth_num, depth_start, depth_interval, is_master_gpu=True):
    """Build the recurrent cost-regularization graph for MVS depth inference.

    Sweeps ``depth_num`` fronto-parallel planes; at each plane, source-view
    features are homography-warped onto the reference view, combined into a
    gated variance-style cost, and regularized by a U-Net-shaped stack of
    ConvLSTM cells that carries state across depth planes.

    Args:
        images: 5-D tensor of input views; sliced as
            [batch, view_num, height, width, 3].
        cams: camera parameters; sliced as [batch, view_num, 2, 4, 4].
        depth_num: number of depth hypotheses.
        depth_start: first depth hypothesis.
        depth_interval: spacing between depth hypotheses.
        is_master_gpu: unused by this implementation.

    Returns:
        prob_volume: softmax over depth hypotheses,
        shape [batch, depth_num, height, width, 1].
    """
    batch_size=FLAGS.batch_size
    # assumes the input images are already resized to FLAGS.max_h x
    # FLAGS.max_w — TODO confirm against the data pipeline
    height=FLAGS.max_h
    width=FLAGS.max_w
    # reference view is index 0 along the view axis
    ref_image=tf.squeeze(tf.slice(images,[0,0,0,0,0],[-1,1,-1,-1,-1]),1)
    # fold all views into the batch axis so the feature tower runs once
    images=tf.reshape(images,[-1,height,width,3])
    feature_tower=SNetDS2GN_1({'data':images}, is_training=True, reuse=tf.AUTO_REUSE)
    # 32-channel feature maps per view
    features=tf.reshape(feature_tower.get_output(),[FLAGS.batch_size,FLAGS.view_num,height,width,32])
    ref_feature=tf.squeeze(tf.slice(features,[0,0,0,0,0],[-1,1,-1,-1,-1]),1)
    view_features=tf.slice(features,[0,1,0,0,0],[-1,-1,-1,-1,-1])
    depth_end = depth_start + (tf.cast(depth_num, tf.float32) - 1) * depth_interval
    # reference camera (intrinsics + extrinsics)
    ref_cam = tf.squeeze(tf.slice(cams, [0, 0, 0, 0, 0], [-1, 1, 2, 4, 4]), axis=1)
    # precompute one homography per (source view, depth plane)
    view_homographies = []
    for view in range(1, FLAGS.view_num):
        view_cam = tf.squeeze(tf.slice(cams, [0, view, 0, 0, 0], [-1, 1, 2, 4, 4]), axis=1)
        if FLAGS.inverse_depth:
            homographies = get_homographies_inv_depth(ref_cam, view_cam, depth_num=depth_num,
                                                      depth_start=depth_start, depth_end=depth_end)
        else:
            homographies = get_homographies(ref_cam, view_cam, depth_num=depth_num,
                                            depth_start=depth_start, depth_interval=depth_interval)
        view_homographies.append(homographies)
    feature_shape = [FLAGS.batch_size, FLAGS.max_h, FLAGS.max_w, 32]
    batch_size,height,width,channel=feature_shape
    # Encoder/decoder stack of ConvLSTM cells; state persists across the
    # depth sweep so regularization is recurrent in depth.
    # NOTE(review): height/2 and width/2 are float under Python 3 true
    # division — presumably this code targets Python 2 or ConvLSTMCell
    # tolerates float shapes; confirm.
    cell0=ConvLSTMCell(
        conv_ndims=2,
        input_shape=[height, width,32],
        output_channels=16,
        kernel_shape=[3, 3],
        name="conv_lstm_cell0"
    )
    cell1=ConvLSTMCell(
        conv_ndims=2,
        input_shape=[height/2, width/2, 16],
        output_channels=16,
        kernel_shape=[3, 3],
        name="conv_lstm_cell1"
    )
    cell2=ConvLSTMCell(
        conv_ndims=2,
        input_shape=[height/4, width/4, 16],
        output_channels=16,
        kernel_shape=[3, 3],
        name="conv_lstm_cell2"
    )
    cell3=ConvLSTMCell(
        conv_ndims=2,
        input_shape=[height/2, width/2, 32],
        output_channels=16,
        kernel_shape=[3, 3],
        name="conv_lstm_cell3"
    )
    cell4=ConvLSTMCell(
        conv_ndims=2,
        input_shape=[height, width, 32],
        output_channels=8,
        kernel_shape=[3, 3],
        name="conv_lstm_cell4"
    )
    initial_state0 = cell0.zero_state(batch_size, dtype=tf.float32)
    initial_state1 = cell1.zero_state(batch_size, dtype=tf.float32)
    initial_state2 = cell2.zero_state(batch_size, dtype=tf.float32)
    initial_state3 = cell3.zero_state(batch_size, dtype=tf.float32)
    initial_state4 = cell4.zero_state(batch_size, dtype=tf.float32)
    with tf.name_scope('cost_volume_homography') as scope:
        # one regularized cost slice per depth plane
        costs=[]
        depth_maps=[]
        for d in range(depth_num):
            # NOTE(review): ave_feature / ave_feature2 are computed but never
            # used below (leftover from a variance-metric formulation).
            ave_feature =ref_feature+0.0
            ave_feature2 = tf.square(ave_feature)
            # gated weighted average of per-view squared feature differences
            warped_view_volumes = tf.zeros([batch_size,height,width,1])
            weight_sum = tf.zeros([batch_size,height,width,1])
            for view in range(0, FLAGS.view_num - 1):
                view_feature=tf.squeeze(tf.slice(view_features,[0,view,0,0,0],[-1,1,-1,-1,-1]),1)
                homographies = view_homographies[view]
                homographies = tf.transpose(homographies, perm=[1, 0, 2, 3])
                homography = homographies[d]
                warped_view_feature = tf_transform_homography(view_feature, homography)
                warped_view_volume = tf.square(warped_view_feature-ref_feature)
                # shared gate (reused across views and depths via AUTO_REUSE)
                weight = gateNet(warped_view_volume,32,name='gate')
                # +1 keeps every view contributing even when the gate is ~0
                warped_view_volumes += (weight+1)*warped_view_volume
                weight_sum += (weight+1)
            cost=warped_view_volumes/weight_sum #= ave_feature2 - tf.square(ave_feature)
            with tf.variable_scope("rnn/", reuse=tf.AUTO_REUSE):
                # encoder: ConvLSTM + 2x max-pool, twice
                cost0,initial_state0=cell0(cost,state=initial_state0)
                cost1=tf.nn.max_pool2d(cost0,(2,2),2,'SAME')
                cost1,initial_state1=cell1(cost1,state=initial_state1)
                cost2=tf.nn.max_pool2d(cost1,(2,2),2,'SAME')
                cost2,initial_state2=cell2(cost2,state=initial_state2)
                # decoder: upsample and concat skip connections
                # NOTE(review): positional args map to kernel_size=16,
                # filters=3 in deconv_gn's signature; the skip-connection
                # channel arithmetic (16 + 16 = 32 expected by cell3/cell4)
                # suggests kernel_size=3, filters=16 was intended — confirm
                # before changing, as it alters checkpoint variable shapes.
                cost2=deconv_gn(cost2, 16, 3, padding='same',strides=2, reuse=tf.AUTO_REUSE, name='cost_upconv0' )
                cost2=tf.concat([cost2,cost1],-1)
                cost3,initial_state3=cell3(cost2,state=initial_state3)
                cost3=deconv_gn(cost3, 16, 3, padding='same',strides=2, reuse=tf.AUTO_REUSE, name='cost_upconv1' )
                cost3=tf.concat([cost3,cost0],-1)
                cost4,initial_state4=cell4(cost3,state=initial_state4)
                # project to a single cost channel
                cost = tf.layers.conv2d(
                    cost4, 1, 3, padding='same', reuse=tf.AUTO_REUSE, name='prob_conv')
                costs.append(cost)
        prob_volume = tf.stack(costs, axis=1)
        # lower cost -> higher probability
        prob_volume = tf.nn.softmax(-prob_volume, axis=1, name='prob_volume')
    return prob_volume
def inference_winner_take_all(images, cams, depth_num, depth_start, depth_end,
is_master_gpu=True, reg_type='GRU', inverse_depth=False):
""" infer disparity image from stereo images and cameras """
if not inverse_depth:
depth_interval = (depth_end - depth_start) / (tf.cast(depth_num, tf.float32) - 1)
# reference image
ref_image = tf.squeeze(tf.slice(images, [0, 0, 0, 0, 0], [-1, 1, -1, -1, 3]), axis=1)
ref_cam = tf.squeeze(tf.slice(cams, [0, 0, 0, 0, 0], [-1, 1, 2, 4, 4]), axis=1)
height=tf.shape(images)[2]
width=tf.shape(images)[3]
images=tf.reshape(images,[-1,height,width,3])
feature_tower=SNetDS2GN_1({'data':images}, is_training=True, reuse=tf.AUTO_REUSE,dilation=1).get_output()
height=tf.shape(feature_tower)[1]
width=tf.shape(feature_tower)[2]
features=tf.reshape(feature_tower,[FLAGS.batch_size,FLAGS.view_num,height,width,32])
ref_feature=tf.squeeze(tf.slice(features,[0,0,0,0,0],[-1,1,-1,-1,-1]),1)
view_features=tf.slice(features,[0,1,0,0,0],[-1,-1,-1,-1,-1])
# get all homographies
view_homographies = []
for view in range(1, FLAGS.view_num):
view_cam = tf.squeeze(tf.slice(cams, [0, view, 0, 0, 0], [-1, 1, 2, 4, 4]), axis=1)
if inverse_depth:
homographies = get_homographies_inv_depth(ref_cam, view_cam, depth_num=depth_num,
depth_start=depth_start, depth_end=depth_end)
else:
homographies = get_homographies(ref_cam, view_cam, depth_num=depth_num,
depth_start=depth_start, depth_interval=depth_interval)
view_homographies.append(homographies)
feature_shape = [FLAGS.batch_size, FLAGS.max_h, FLAGS.max_w, 32]
batch_size,height,width,channel=feature_shape
gru_input_shape = [feature_shape[1], feature_shape[2]]
cell0=ConvLSTMCell(
conv_ndims=2,
input_shape=[height, width,32],
output_channels=16,
kernel_shape=[3, 3],
name="conv_lstm_cell0"
)
cell1=ConvLSTMCell(
conv_ndims=2,
input_shape=[height/2, width/2, 16],
output_channels=16,
kernel_shape=[3, 3],
name="conv_lstm_cell1"
)
cell2=ConvLSTMCell(
conv_ndims=2,
input_shape=[height/4, width/4, 16],
output_channels=16,
kernel_shape=[3, 3],
name="conv_lstm_cell2"
)
cell3=ConvLSTMCell(
conv_ndims=2,
input_shape=[height/2, width/2, 32],
output_channels=16,
kernel_shape=[3, 3],
name="conv_lstm_cell3"
)
cell4=ConvLSTMCell(
conv_ndims=2,
input_shape=[height, width, 32],
output_channels=8,
kernel_shape=[3, 3],
name="conv_lstm_cell4"
)
initial_state0 = cell0.zero_state(batch_size, dtype=tf.float32)
initial_state1 = cell1.zero_state(batch_size, dtype=tf.float32)
initial_state2 = cell2.zero_state(batch_size, dtype=tf.float32)
initial_state3 = cell3.zero_state(batch_size, dtype=tf.float32)
initial_state4 = cell4.zero_state(batch_size, dtype=tf.float32)
# initialize variables
exp_sum = tf.Variable(tf.zeros(
[FLAGS.batch_size, feature_shape[1], feature_shape[2], 1]),
name='exp_sum', trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
depth_image = tf.Variable(tf.zeros(
[FLAGS.batch_size, feature_shape[1], feature_shape[2], 1]),
name='depth_image', trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
max_prob_image = tf.Variable(tf.zeros(
[FLAGS.batch_size, feature_shape[1], feature_shape[2], 1]),
name='max_prob_image', trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
init_map = tf.zeros([FLAGS.batch_size, feature_shape[1], feature_shape[2], 1])
weights=tf.reshape(tf.constant([1.0,0.5,0.1],dtype=tf.float32),[1,1,1,1,3])
# define winner take all loop
def body(depth_index, initial_state0, initial_state1, initial_state2, initial_state3, initial_state4, depth_image, max_prob_image, exp_sum, incre):
"""Loop body."""
# calculate cost
ave_feature =ref_feature
ave_feature2 = tf.square(ref_feature)
warped_view_volumes = tf.zeros([batch_size,height,width,1])
weight_sum = tf.zeros([batch_size,height,width,1])
for view in range(0, FLAGS.view_num - 1):
homographies = view_homographies[view]
homographies = tf.transpose(homographies, perm=[1, 0, 2, 3])
homography = homographies[depth_index]
view_feature=tf.squeeze(tf.slice(view_features,[0,view,0,0,0],[-1,1,-1,-1,-1]),1)
warped_view_feature = tf_transform_homography(view_feature, homography)
warped_view_volume = tf.square(warped_view_feature-ref_feature)
weight = gateNet(warped_view_volume,32,name='gate')
warped_view_volumes += (weight+1)*warped_view_volume
weight_sum += (weight+1)
cost=warped_view_volumes/weight_sum
with tf.name_scope('cost_volume_homography') as scope:
with tf.variable_scope("rnn/", reuse=tf.AUTO_REUSE):
cost0,initial_state0=cell0(cost,state=initial_state0)
cost1=tf.nn.max_pool2d(cost0,(2,2),2,'SAME')
cost1,initial_state1=cell1(cost1,state=initial_state1)
cost2=tf.nn.max_pool2d(cost1,(2,2),2,'SAME')
cost2,initial_state2=cell2(cost2,state=initial_state2)
cost2=deconv_gn(cost2, 16, 3, padding='same',strides=2, reuse=tf.AUTO_REUSE, name='cost_upconv0' )
cost2=tf.concat([cost2,cost1],-1)
cost3,initial_state3=cell3(cost2,state=initial_state3)
cost3=deconv_gn(cost3, 16, 3, padding='same',strides=2, reuse=tf.AUTO_REUSE, name='cost_upconv1' )
cost3=tf.concat([cost3,cost0],-1)
cost4,initial_state4=cell4(cost3,state=initial_state4)
cost = tf.layers.conv2d(
cost4, 1, 3, padding='same', reuse=tf.AUTO_REUSE, name='prob_conv')
prob = tf.exp(-cost)
# index
d_idx = tf.cast(depth_index, tf.float32)
if inverse_depth:
inv_depth_start = tf.div(1.0, depth_start)
inv_depth_end = tf.div(1.0, depth_end)
inv_interval = (inv_depth_start - inv_depth_end) / (tf.cast(depth_num, 'float32') - 1)
inv_depth = inv_depth_start - d_idx * inv_interval
depth = tf.div(1.0, inv_depth)
else:
depth = depth_start + d_idx * depth_interval
temp_depth_image = tf.reshape(depth, [FLAGS.batch_size, 1, 1, 1])
temp_depth_image = tf.tile(
temp_depth_image, [1, feature_shape[1], feature_shape[2], 1])
# update the best
update_flag_image = tf.cast(tf.less(max_prob_image, prob), dtype='float32')
new_max_prob_image = update_flag_image * prob + (1 - update_flag_image) * max_prob_image
new_depth_image = update_flag_image * temp_depth_image + (1 - update_flag_image) * depth_image
max_prob_image = tf.assign(max_prob_image, new_max_prob_image)
depth_image = tf.assign(depth_image, new_depth_image)
# update counter
exp_sum = tf.assign_add(exp_sum, prob)
depth_index = tf.add(depth_index, incre)
return depth_index, initial_state0, initial_state1, initial_state2, initial_state3, initial_state4, depth_image, max_prob_image, exp_sum, incre
# run forward loop
exp_sum = tf.assign(exp_sum, init_map)
depth_image = tf.assign(depth_image, init_map)
max_prob_image = tf.assign(max_prob_image, init_map)
depth_index = tf.constant(0)
incre = tf.constant(1)
cond = lambda depth_index, *_: tf.less(depth_index, depth_num)
_, initial_state0, initial_state1, initial_state2, initial_state3, initial_state4, depth_image, max_prob_image, exp_sum, incre = tf.while_loop(
cond, body
, | |
np.sum(np.multiply(Q_tilde[link], self.zeta_gamma[link]))
vv21 = self.nu_tilde[link[0]] * self.nu_prime_tilde[link[1]] * self.theta_tilde[link[0]]
vv22 = np.multiply(self.T - self.A[link], np.exp(-self.theta_tilde[link[0]] * self.theta_prime_tilde[link[1]] * (self.T - self.A[link])))
vv2 = vv21 * np.sum(vv22)
den_theta_prime[link[1]] += vv + vv2
## Update theta_prime_tilde
self.theta_prime_tilde = num_nu_theta_prime / den_theta_prime
## Convert to likelihood parametrisation
if self.main_effects:
self.alpha = np.copy(self.alpha_tilde)
self.beta = np.copy(self.beta_tilde)
if not self.poisson_me:
self.mu = np.multiply(self.mu_tilde, self.phi_tilde)
self.phi = self.phi_tilde - self.mu
self.mu_prime = np.multiply(self.mu_prime_tilde, self.phi_prime_tilde)
self.phi_prime = self.phi_prime_tilde - self.mu_prime
if self.interactions:
self.gamma = np.copy(self.gamma_tilde) if self.D > 1 else self.gamma_tilde[:,0]
self.gamma_prime = np.copy(self.gamma_prime_tilde) if self.D > 1 else self.gamma_prime_tilde[:,0]
if not self.poisson_int:
self.nu = np.multiply(self.nu_tilde, self.theta_tilde)
self.theta = self.theta_tilde - self.nu
self.nu_prime = np.multiply(self.nu_prime_tilde, self.theta_prime_tilde)
self.theta_prime = self.theta_prime_tilde - self.nu_prime
## Setup likelihood calculations
self.likelihood_calculation_setup(verbose=False)
## Calculate likelihood for evaluating convergence
if ((self.interactions and self.hawkes_int) or (self.main_effects and self.hawkes_me)) and (not self.poisson_me or not self.poisson_int):
self.psi_calculation(verbose=False)
## Calculate zeta
self.zeta_calculation(verbose=False)
## Calculate compensator
self.compensator_T()
## Use zeta to calculate the likelihood correctly
log_likelihood = 0.0
for link in self.A:
log_likelihood += np.sum(np.log(list(self.zeta[link].values())))
log_likelihood -= self.Lambda_T[link]
## Add back missing elements
if self.main_effects and self.full_links:
log_likelihood -= (self.n2 if self.bipartite else self.n) * np.sum(self.alpha_compensator)
log_likelihood -= (self.n1 if self.bipartite else self.n) * np.sum(self.beta_compensator if self.directed else self.alpha_compensator)
if self.interactions and self.full_links:
if self.D == 1:
log_likelihood -= self.T * np.sum(self.gamma) * np.sum(self.gamma_prime if self.directed else self.gamma)
else:
log_likelihood -= self.T * np.inner(np.sum(self.gamma,axis=0), np.sum(self.gamma_prime if self.directed else self.gamma, axis=0))
ll += [log_likelihood]
## Calculate the criterion
if iteration > 2 and ll[-1] - ll[-2] > 0:
tcrit = (np.abs((ll[-1] - ll[-2]) / ll[-2]) > tolerance)
if verbose:
print("")
return ll
## Calculation of the compensator at time T (useful for the log-likelihood) - Approximation for the discrete process
def compensator_T(self):
    """Compute the compensator of each link's counting process at horizon T.

    Fills ``self.Lambda_T[link]`` for every observed link, and (when
    ``self.full_links``) the per-node main-effect compensators
    ``self.alpha_compensator`` / ``self.beta_compensator``.

    The expressions are closed-form integrals of exponential excitation
    kernels under the model's likelihood parametrisation (mu/phi for main
    effects, nu/theta for interactions); they are taken as given here and
    not re-derived. NOTE(review): assumes self.mu/phi/nu/theta etc. have
    already been set by the parameter-update step — confirm call order.
    """
    self.Lambda_T = {}
    ## Main effects if full links
    if self.full_links:
        if self.main_effects:
            # baseline part integrates to alpha * T (resp. beta * T)
            self.alpha_compensator = self.alpha * self.T
            if self.directed:
                self.beta_compensator = self.beta * self.T
            if not self.poisson_me:
                # self-exciting correction per source node
                for i in range(self.n1 if self.bipartite else self.n):
                    mu = self.mu[i]
                    phi = self.phi[i]
                    if i in self.node_ts:
                        if self.hawkes_me:
                            # Hawkes kernel: integrate from each event to T
                            self.alpha_compensator[i] -= mu / (mu+phi) * np.sum(np.exp(-(mu+phi) * (self.T - self.node_ts[i])) - 1)
                        else:
                            # Markov variant: integrate only to the next event
                            self.alpha_compensator[i] -= mu / (mu+phi) * np.sum((np.exp(-(mu+phi) * np.diff(np.append(self.node_ts[i],self.T))) - 1))
                if self.directed:
                    # same correction for destination nodes (primed params)
                    for j in range(self.n2 if self.bipartite else self.n):
                        mu_prime = self.mu_prime[j]
                        phi_prime = self.phi_prime[j]
                        if j in self.node_ts_prime:
                            if self.hawkes_me:
                                self.beta_compensator[j] -= mu_prime / (mu_prime+phi_prime) * np.sum(np.exp(-(mu_prime+phi_prime) * (self.T - self.node_ts_prime[j])) - 1)
                            else:
                                self.beta_compensator[j] -= mu_prime / (mu_prime+phi_prime) * np.sum((np.exp(-(mu_prime + phi_prime) * \
                                    np.diff(np.append(self.node_ts_prime[j],self.T))) - 1))
    for link in self.A:
        self.Lambda_T[link] = 0
        ## Select parameters
        if self.main_effects and not self.poisson_me:
            mu = self.mu[link[0]]
            # undirected graphs share one parameter set across both endpoints
            mu_prime = self.mu_prime[link[1]] if self.directed else self.mu[link[1]]
            phi = self.phi[link[0]]
            phi_prime = self.phi_prime[link[1]] if self.directed else self.phi[link[1]]
        if self.interactions and not self.poisson_int:
            nu = self.nu[link[0]]
            nu_prime = self.nu_prime[link[1]] if self.directed else self.nu[link[1]]
            theta = self.theta[link[0]]
            theta_prime = self.theta_prime[link[1]] if self.directed else self.theta[link[1]]
        if not self.full_links:
            ## Update the main effects
            if self.main_effects:
                # baseline contribution on [Tau, T] for this link
                self.Lambda_T[link] += (self.alpha[link[0]] + (self.beta[link[1]] if self.directed else self.alpha[link[1]])) * (self.T - self.Tau[link])
                if not self.poisson_me:
                    if self.hawkes_me:
                        self.Lambda_T[link] -= mu / (mu+phi) * np.sum(np.exp(-(mu+phi) * (self.T - self.node_ts[link[0]])) -
                                            np.exp(-(mu+phi) * np.maximum(0, self.Tau[link] - self.node_ts[link[0]])))
                        self.Lambda_T[link] -= mu_prime / (mu_prime+phi_prime) * np.sum(np.exp(-(mu_prime+phi_prime) * (self.T - \
                                            (self.node_ts_prime[link[1]] if self.directed else self.node_ts[link[1]]))) - \
                                            np.exp(-(mu_prime+phi_prime) * np.maximum(0, self.Tau[link] - \
                                            (self.node_ts_prime[link[1]] if self.directed else self.node_ts[link[1]]))))
                    else:
                        # only node events after the link's start time count
                        zero_out = (self.node_ts[link[0]] >= self.Tau[link])
                        self.Lambda_T[link] -= mu / (mu+phi) * np.sum((np.exp(-(mu+phi) * np.diff(np.append(self.node_ts[link[0]],self.T))) - 1)[zero_out])
                        zero_out_prime = ((self.node_ts_prime[link[1]] if self.directed else self.node_ts[link[1]]) >= self.Tau[link])
                        self.Lambda_T[link] -= mu_prime / (mu_prime+phi_prime) * np.sum((np.exp(-(mu_prime + phi_prime) * \
                                            np.diff(np.append(self.node_ts_prime[link[1]] if self.directed else self.node_ts[link[1]],self.T))) - 1)[zero_out_prime])
        ## Update the interactions
        if self.interactions:
            if self.D == 1:
                # scalar latent dimension
                if not self.full_links:
                    self.Lambda_T[link] += (self.gamma[link[0]] * (self.gamma_prime[link[1]] if self.directed else self.gamma[link[1]])) * (self.T - self.Tau[link])
                if not self.poisson_int:
                    if self.hawkes_int:
                        self.Lambda_T[link] -= (nu * nu_prime) / ((nu+theta) * (nu_prime+theta_prime)) * np.sum(np.exp(-(nu+theta) * (nu_prime+theta_prime) * (self.T - self.A[link])) - 1)
                    else:
                        # Markov variant: inter-event gaps plus the tail to T
                        self.Lambda_T[link] -= (nu * nu_prime) / ((nu+theta) * (nu_prime+theta_prime)) * np.sum(np.exp(-(nu+theta) * (nu_prime+theta_prime) * self.A_diff[link]) - 1)
                        self.Lambda_T[link] -= (nu * nu_prime) / ((nu+theta) * (nu_prime+theta_prime)) * (np.exp(-(nu+theta) * (nu_prime+theta_prime) * (self.T - self.A[link][-1])) - 1)
            else:
                # D-dimensional latent features: sum the contribution of each
                # dimension k
                if not self.full_links:
                    self.Lambda_T[link] += np.sum(self.gamma[link[0]] * (self.gamma_prime[link[1]] if self.directed else self.gamma[link[1]])) * (self.T - self.Tau[link])
                if not self.poisson_int:
                    if self.hawkes_int:
                        self.Lambda_T[link] -= np.sum([(nu[k] * nu_prime[k]) / ((nu[k]+theta[k]) * (nu_prime[k]+theta_prime[k])) * np.sum(np.exp(-(nu[k]+theta[k]) * (nu_prime[k]+theta_prime[k]) * (self.T - self.A[link])) - 1) for k in range(self.D)])
                    else:
                        self.Lambda_T[link] -= np.sum([(nu[k] * nu_prime[k]) / ((nu[k]+theta[k]) * (nu_prime[k]+theta_prime[k])) * np.sum(np.exp(-(nu[k]+theta[k]) * (nu_prime[k]+theta_prime[k]) * self.A_diff[link]) - 1) for k in range(self.D)])
                        self.Lambda_T[link] -= np.sum([(nu[k] * nu_prime[k]) / ((nu[k]+theta[k]) * (nu_prime[k]+theta_prime[k])) * (np.exp(-(nu[k]+theta[k]) * (nu_prime[k]+theta_prime[k]) * (self.T - self.A[link][-1])) - 1) for k in range(self.D)])
## Calculation of gradients
def gradient(self, prior_penalisation=False, verbose=True):
if verbose:
prop = 0
## Initialise rows and columns to create COO sparse matrices
rows = []
cols = []
if self.main_effects:
vals = []
if not self.poisson_me:
vals_mu = []
vals_phi = []
if self.directed:
vals_mu_prime = []
vals_phi_prime = []
if self.interactions:
if self.D > 1:
rows_int = []
cols_int = []
dims_int = []
vals_gamma = []
if not self.poisson_int:
vals_nu = []
vals_theta = []
if self.directed:
vals_gamma_prime = []
if not self.poisson_int:
vals_nu_prime = []
vals_theta_prime = []
## Calculate the components separately for full links
if self.full_links and self.main_effects:
if not self.poisson_me:
mu_component = np.zeros(self.n1 if self.bipartite else self.n)
phi_component = np.zeros(self.n1 if self.bipartite else self.n)
for i in range(self.n1 if self.bipartite else self.n):
## Obtain parameters
mu = self.mu[i]
phi = self.phi[i]
if i in self.node_ts:
if self.hawkes_me:
## Updates for mu and phi
t_diff = self.T - self.node_ts[i]
sum_1 = np.sum(np.exp(-(mu+phi) * t_diff) - 1)
sum_2 = np.sum((np.multiply(t_diff, np.exp(-(mu+phi) * t_diff))))
mu_component[i] = phi / ((mu+phi) ** 2) * sum_1 - mu / (mu+phi) * sum_2
phi_component[i] = - mu / ((mu+phi) ** 2) * sum_1 - 1 / (mu+phi) * sum_2
else:
t_diff = np.diff(np.append(self.node_ts[i],self.T))
sum_1 = np.sum((np.exp(-(mu+phi) * t_diff) - 1))
sum_2 = np.sum(np.multiply(t_diff,np.exp(-(mu+phi) * t_diff)))
mu_component[i] = phi / ((mu+phi) ** 2) * sum_1 - mu / (mu+phi) * sum_2
phi_component[i] = - mu / ((mu+phi) ** 2) * sum_1 - 1 / (mu+phi) * sum_2
mu_prime_component = np.zeros(self.n2 if self.bipartite else self.n)
phi_prime_component = np.zeros(self.n2 if self.bipartite else self.n)
if self.directed:
for j in range(self.n2 if self.bipartite else self.n):
mu_prime = self.mu_prime[j]
phi_prime = self.phi_prime[j]
if j in self.node_ts_prime:
if self.hawkes_me:
## Repeat for mu_prime and phi_prime
t_diff_prime = self.T - self.node_ts_prime[j]
sum_1_prime = np.sum(np.exp(-(mu_prime+phi_prime) * t_diff_prime) - 1)
sum_2_prime = np.sum(np.multiply(t_diff_prime, np.exp(-(mu_prime+phi_prime) * t_diff_prime)))
mu_prime_component[j] = phi_prime / ((mu_prime+phi_prime) ** 2) * sum_1_prime - mu_prime / (mu_prime+phi_prime) * sum_2_prime
phi_prime_component[j] = - mu_prime / ((mu_prime+phi_prime) ** 2) * sum_1_prime - 1 / (mu_prime+phi_prime) * sum_2_prime
else:
t_diff_prime = np.diff(np.append(self.node_ts_prime[j],self.T))
sum_1_prime = np.sum((np.exp(-(mu_prime+phi_prime) * t_diff_prime) - 1))
sum_2_prime = np.sum(np.multiply(t_diff_prime,np.exp(-(mu_prime+phi_prime) * t_diff_prime)))
mu_prime_component[j] = phi_prime / ((mu_prime+phi_prime) ** 2) * sum_1_prime - mu_prime / (mu_prime+phi_prime) * sum_2_prime
phi_prime_component[j] = - mu_prime / ((mu_prime+phi_prime) ** 2) * sum_1_prime - 1 / (mu_prime+phi_prime) * sum_2_prime
## Calculate gradient from sparse matrices (indexed by edge)
for link in self.A:
## Update rows and columns
rows += [link[0]]
cols += [link[1]]
if not self.directed:
rows += [link[1]]
cols += [link[0]]
if self.main_effects:
## Updates for alpha and beta
kk = 0 if self.full_links else (self.T - self.Tau[link])
vals += (1 if self.directed else 2) * [- kk + np.sum([1.0 / self.zeta[link][k] for k in range(self.nij[link])])]
if not self.poisson_me:
## Obtain parameters
mu = self.mu[link[0]]
mu_prime = self.mu_prime[link[1]] if self.directed else self.mu[link[1]]
phi = self.phi[link[0]]
phi_prime = self.phi_prime[link[1]] if self.directed else self.phi[link[1]]
if self.hawkes_me:
## Updates for mu and phi
psi_sum = np.sum([self.psi[link][k] / self.zeta[link][k] for k in range(self.nij[link])])
psi_derivative_sum = np.sum([self.psi_derivative[link][k] / self.zeta[link][k] for k in range(self.nij[link])])
if not self.full_links:
t_diff = self.T - self.node_ts[link[0]]
t_diff_tau = np.maximum(0, self.Tau[link] - self.node_ts[link[0]])
## zero_out = (self.node_ts[link[0]] >= self.Tau[link]).astype(int)
sum_1 = np.sum(np.exp(-(mu+phi) * t_diff) - np.exp(-(mu+phi) * t_diff_tau))
sum_2 = np.sum((np.multiply(t_diff, np.exp(-(mu+phi) * t_diff)) - np.multiply(t_diff_tau,np.exp(-(mu+phi) * t_diff_tau))))
vals_mu += [phi / ((mu+phi) ** 2) * sum_1 - mu / (mu+phi) * sum_2 + psi_sum + mu * psi_derivative_sum]
vals_phi += [- mu / ((mu+phi) ** 2) * sum_1 - 1 / (mu+phi) * sum_2 + mu * psi_derivative_sum]
else:
vals_mu += [psi_sum + mu * psi_derivative_sum]
vals_phi += [mu * psi_derivative_sum]
## Repeat for mu_prime and phi_prime
psi_prime_sum = np.sum([self.psi_prime[link][k] / self.zeta[link][k] for k in range(self.nij[link])])
psi_prime_derivative_sum = np.sum([self.psi_prime_derivative[link][k] / self.zeta[link][k] for k in range(self.nij[link])])
if not self.full_links:
t_diff_prime = self.T - (self.node_ts_prime[link[1]] if self.directed else self.node_ts[link[1]])
t_diff_tau_prime = np.maximum(0, self.Tau[link] - (self.node_ts_prime[link[1]] if self.directed else self.node_ts[link[1]]))
## zero_out_prime = ((self.node_ts_prime[link[1]] if self.directed else self.node_ts[link[1]]) >= self.Tau[link]).astype(int)
sum_1_prime = np.sum(np.exp(-(mu_prime+phi_prime) * t_diff_prime) - np.exp(-(mu_prime+phi_prime) * t_diff_tau_prime))
sum_2_prime = np.sum((np.multiply(t_diff_prime, np.exp(-(mu_prime+phi_prime) * t_diff_prime)) - \
np.multiply(t_diff_tau_prime, np.exp(-(mu_prime+phi_prime) * t_diff_tau_prime))))
res_mu = [phi_prime / ((mu_prime+phi_prime) ** 2) * sum_1_prime - mu_prime / (mu_prime+phi_prime) * sum_2_prime + psi_prime_sum + mu_prime * psi_prime_derivative_sum]
res_phi = [- mu_prime / ((mu_prime+phi_prime) ** 2) * sum_1_prime - 1 / (mu_prime+phi_prime) * sum_2_prime + mu_prime * psi_prime_derivative_sum]
else:
res_mu = [psi_prime_sum + mu_prime * psi_prime_derivative_sum]
res_phi = [mu_prime * psi_prime_derivative_sum]
else:
## Updates for mu and phi
condition = (self.nij[link] != len(self.A_bar[link])) * (self.equal_start[link] if self.discrete else 1)
t_bar = self.A_bar[link]
t_diff = np.diff(np.append(self.node_ts[link[0]],self.T))
psi_sum = np.sum([np.exp(-(mu+phi) * t_bar[k - condition]) / self.zeta[link][k] for k in range(condition, self.nij[link])])
psi_derivative_sum = np.sum([np.exp(-(mu+phi) * t_bar[k - condition]) * t_bar[k - condition] | |
<reponame>morepath/dectate
import abc
import logging
import sys
import inspect
from .error import (
ConflictError,
ConfigError,
DirectiveError,
DirectiveReportError,
)
from .toposort import topological_sort
from .sentinel import NOT_FOUND
# Module-level counter; presumably incremented elsewhere in the module to
# record directive creation order — the incrementing site is not visible in
# this chunk, confirm before relying on it.
order_count = 0
class Configurable:
"""Object to which configuration actions apply.
This object is normally tucked away as the ``dectate`` class
attribute on an :class:`dectate.App` subclass.
Actions are registered per configurable during the import phase;
actions are not actually created or performed, so their
``__init__`` and ``perform`` are not yet called.
During the commit phase the configurable is executed. This expands
any composite actions, groups actions into action groups and sorts
them by depends so that they are executed in the correct order,
and then executes each action group, which performs them.
"""
app_class = None
def __init__(self, extends, config):
"""
:param extends:
the configurables that this configurable extends.
:type extends: list of configurables.
:param config:
the object that will contains the actual configuration.
Normally it's the ``config`` class attribute of the
:class:`dectate.App` subclass.
"""
self.extends = extends
self.config = config
# all action classes known
self._action_classes = {}
# directives used with configurable
self._directives = []
# have we ever been committed
self.committed = False
def register_directive(self, directive, obj):
"""Register a directive with this configurable.
Called during import time when directives are used.
:param directive: the directive instance, which contains
information about the arguments, line number it was invoked, etc.
:param obj: the object the directive was invoked on; typically
a function or a class it was invoked on as a decorator.
"""
self._directives.append((directive, obj))
def _fixup_directive_names(self):
"""Set up correct name for directives."""
app_class = self.app_class
for name, method in app_class.get_directive_methods():
func = method.__func__
func.__name__ = name
# As of Python 3.5, the repr of bound methods uses
# __qualname__ instead of __name__.
# See http://bugs.python.org/issue21389#msg217566
if hasattr(func, "__qualname__"):
func.__qualname__ = type(app_class).__name__ + "." + name
def get_action_classes(self):
"""Get all action classes registered for this app.
This includes action classes registered for its base class.
:return: a dict with action class keys and name values.
"""
result = {}
app_class = self.app_class
for name, method in app_class.get_directive_methods():
result[method.__func__.action_factory] = name
# add any action classes defined by base classes
for configurable in self.extends:
for action_class, name in configurable._action_classes.items():
if action_class not in result:
result[action_class] = name
return result
def setup(self):
"""Set up config object and action groups.
This happens during the start of the commit phase.
Takes inheritance of apps into account.
"""
self._fixup_directive_names()
self._action_classes = self.get_action_classes()
grouped_action_classes = sort_action_classes(
group_action_classes(self._action_classes.keys())
)
# delete any old configuration in case we run this a second time
for action_class in grouped_action_classes:
self.delete_config(action_class)
# now we create ActionGroup objects for each action class group
self._action_groups = d = {}
# and we track what config factories we've seen for consistency
# checking
self._factories_seen = {}
for action_class in grouped_action_classes:
self.setup_config(action_class)
d[action_class] = ActionGroup(
action_class, self.action_extends(action_class)
)
def setup_config(self, action_class):
"""Set up the config objects on the ``config`` attribute.
:param action_class: the action subclass to setup config for.
"""
# sort the items in order of creation
items = topological_sort(action_class.config.items(), factory_key)
# this introduces all dependencies, including those only
# mentioned in factory_arguments. we want to create those too
# if they weren't already created
seen = self._factories_seen
config = self.config
for name, factory in items:
# if we already have this set up, we don't want to create
# it anew
configured = getattr(config, name, None)
if configured is not None:
if seen[name] is not factory:
raise ConfigError(
"Inconsistent factories for config %r (%r and %r)"
% ((name, seen[name], factory))
)
continue
seen[name] = factory
kw = get_factory_arguments(
action_class, config, factory, self.app_class
)
setattr(config, name, factory(**kw))
def delete_config(self, action_class):
    """Delete config objects on the ``config`` attribute.

    Removes the config created for ``action_class`` along with any config
    objects named by its factories' ``factory_arguments`` dependencies.

    :param action_class: the action class subclass to delete config for.
    """
    config = self.config
    for name, factory in action_class.config.items():
        if hasattr(config, name):
            delattr(config, name)
        factory_arguments = getattr(factory, "factory_arguments", None)
        if factory_arguments is None:
            continue
        # Distinct loop variable: the original shadowed the outer ``name``.
        for dep_name in factory_arguments.keys():
            if hasattr(config, dep_name):
                delattr(config, dep_name)
def group_actions(self):
    """Sort this configurable's actions into their action groups."""
    # Turn the collected directives into concrete actions first.
    pairs = [
        (directive.action(), obj) for (directive, obj) in self._directives
    ]
    groups = self._action_groups
    for action, obj in expand_actions(pairs):
        # An action may delegate grouping to another class via group_class.
        group_class = action.group_class
        if group_class is None:
            group_class = action.__class__
        groups[group_class].add(action, obj)
def get_action_group(self, action_class):
    """Return the ActionGroup registered for ``action_class``.

    :param action_class: the action class to look up.
    :return: the matching ``ActionGroup``, or ``None`` when absent.
    """
    return self._action_groups.get(action_class)
def action_extends(self, action_class):
    """Get the ActionGroups for ``action_class`` in ``extends``.

    :param action_class: the action class.
    :return: list of ``ActionGroup`` instances that this action group
      extends (an empty placeholder group for configurables that have
      no group registered for ``action_class``).
    """
    result = []
    for configurable in self.extends:
        group = configurable._action_groups.get(action_class)
        if group is None:
            # Build the empty placeholder lazily; the original passed it as
            # a ``dict.get`` default, constructing a throwaway ActionGroup
            # on every lookup even when the group existed.
            group = ActionGroup(action_class, [])
        result.append(group)
    return result
def execute(self):
    """Execute all grouped actions for this configurable."""
    self.app_class.clean()
    self.setup()
    self.group_actions()
    groups = self._action_groups
    # Groups run in dependency order.
    for action_class in sort_action_classes(groups.keys()):
        groups[action_class].execute(self)
    self.committed = True
class ActionGroup:
    """A group of actions.

    Grouped actions are all performed together.

    Normally actions are grouped by their class, but actions can also
    indicate another action class to group with using ``group_class``.
    """

    def __init__(self, action_class, extends):
        """
        :param action_class:
          the action class that identifies this action group.
        :param extends:
          list of action groups extended by this action group.
        """
        self.action_class = action_class
        self._actions = []
        self._action_map = {}
        self.extends = extends

    def add(self, action, obj):
        """Add an action and the object this action is to be performed on.

        :param action: an :class:`Action` instance.
        :param obj: the function or class the action should be performed for.
        """
        self._actions.append((action, obj))

    def prepare(self, configurable):
        """Prepare the action group for a configurable.

        Detect any conflicts between actions.
        Merges in configuration of what this action extends.

        :param configurable: The :class:`Configurable` to prepare for.
        """
        # check for conflicts and fill action map
        discriminators = {}
        self._action_map = action_map = {}
        for action, obj in self._actions:
            kw = action._get_config_kw(configurable)
            # renamed from ``id`` to avoid shadowing the builtin
            identifier = action.identifier(**kw)
            discs = [identifier]
            discs.extend(action.discriminators(**kw))
            for disc in discs:
                other_action = discriminators.get(disc)
                if other_action is not None:
                    raise ConflictError([action, other_action])
                discriminators[disc] = action
            action_map[identifier] = action, obj
        # inherit from extends
        for extend in self.extends:
            self.combine(extend)

    def get_actions(self):
        """Get all actions registered for this action group.

        :return: list of ``(action, obj)`` pairs, sorted by the actions'
          ``order`` attribute (``None`` sorts as 0).  The original
          docstring claimed registration order, which was inaccurate.
        """
        result = list(self._action_map.values())
        result.sort(key=lambda value: value[0].order or 0)
        return result

    def combine(self, actions):
        """Combine another prepared action group with this one.

        Those configuration actions that would conflict are taken to
        have precedence over those being combined with this one. This
        allows the extending actions to override actions in
        extended actions.

        :param actions: a single :class:`ActionGroup` to combine with
          this one (the original docstring incorrectly said a list).
        """
        to_combine = actions._action_map.copy()
        to_combine.update(self._action_map)
        self._action_map = to_combine

    def execute(self, configurable):
        """Perform actions for configurable.

        :param configurable: the :class:`Configurable` instance to execute
          the actions against.
        """
        self.prepare(configurable)
        kw = self.action_class._get_config_kw(configurable)
        # run the group class before operation
        self.action_class.before(**kw)
        # perform the actual actions
        for action, obj in self.get_actions():
            try:
                action._log(configurable, obj)
                action.perform(obj, **kw)
            except DirectiveError as e:
                # chain the cause so the original traceback is preserved
                raise DirectiveReportError(f"{e}", action.code_info) from e
        # run the group class after operation
        self.action_class.after(**kw)
class Action(metaclass=abc.ABCMeta):
"""A configuration action.
Base class of configuration actions.
A configuration action is performed for an object (typically a
function or a class object) and affects one or more configuration
objects.
Actions can conflict with each other based on their identifier and
discriminators. Actions can override each other based on their
identifier. Actions can only be in conflict with actions of the
same action class or actions with the same ``action_group``.
"""
config = {}
"""Describe configuration.
A dict mapping configuration names to factory functions. The
resulting configuration objects are passed into
:meth:`Action.identifier`, :meth:`Action.discriminators`,
:meth:`Action.perform`, and :meth:`Action.before` and
:meth:`Action.after`.
After commit completes, the configured objects are found
as attributes on :class:`App.config`.
"""
app_class_arg = False
"""Pass in app class as argument.
In addition to the arguments defined in :attr:`Action.config`,
pass in the app class itself as an argument into
:meth:`Action.identifier`, :meth:`Action.discriminators`,
:meth:`Action.perform`, and :meth:`Action.before` and
:meth:`Action.after`.
| |
input variables must be non-zero \(0\) "
r"for the SwapFreq, MEMC_1Freq, MEMC_2Freq, and MEMC_3Freq. "
r"The SwapFreq, MEMC_1Freq, MEMC_2Freq, and MEMC_3Freq need to be zero "
r'\(0\) for the "NVT" and "NPT" ensembles.',
):
gomc_control.write_gomc_control_file(
charmm_NPT_NVT,
"test_save_NVT_bad_variables_part_8.conf",
"NVT",
10,
300,
check_input_files_exist=False,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.0,
"MultiParticleFreq": 0.1,
"IntraMEMC-1Freq": 0.10,
"MEMC-1Freq": 0.00,
"IntraMEMC-2Freq": 0.20,
"MEMC-2Freq": 0.00,
"IntraMEMC-3Freq": 0.20,
"MEMC-3Freq": 0.10,
},
)
with pytest.raises(
ValueError,
match=r"ERROR: All the MC move input variables must be non-zero \(0\) "
r"for the SwapFreq, MEMC_1Freq, MEMC_2Freq, and MEMC_3Freq. "
r"The SwapFreq, MEMC_1Freq, MEMC_2Freq, and MEMC_3Freq need to be zero "
r'\(0\) for the "NVT" and "NPT" ensembles.',
):
gomc_control.write_gomc_control_file(
charmm_NPT_NVT,
"test_save_NVT_bad_variables_part_8.conf",
"NPT",
10,
300,
check_input_files_exist=False,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.0,
"MultiParticleFreq": 0.1,
"IntraMEMC-1Freq": 0.10,
"MEMC-1Freq": 0.10,
"IntraMEMC-2Freq": 0.20,
"MEMC-2Freq": 0.00,
"IntraMEMC-3Freq": 0.20,
"MEMC-3Freq": 0.00,
},
)
with pytest.raises(
ValueError,
match=r"ERROR: All the MC move input variables must be non-zero \(0\) "
r"for the SwapFreq, MEMC_1Freq, MEMC_2Freq, and MEMC_3Freq. "
r"The SwapFreq, MEMC_1Freq, MEMC_2Freq, and MEMC_3Freq need to be zero "
r'\(0\) for the "NVT" and "NPT" ensembles.',
):
gomc_control.write_gomc_control_file(
charmm_NPT_NVT,
"test_save_NVT_bad_variables_part_8.conf",
"NPT",
10,
300,
check_input_files_exist=False,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.0,
"MultiParticleFreq": 0.1,
"IntraMEMC-1Freq": 0.10,
"MEMC-1Freq": 0.00,
"IntraMEMC-2Freq": 0.20,
"MEMC-2Freq": 0.10,
"IntraMEMC-3Freq": 0.20,
"MEMC-3Freq": 0.00,
},
)
with pytest.raises(
ValueError,
match=r"ERROR: All the MC move input variables must be non-zero \(0\) "
r"for the SwapFreq, MEMC_1Freq, MEMC_2Freq, and MEMC_3Freq. "
r"The SwapFreq, MEMC_1Freq, MEMC_2Freq, and MEMC_3Freq need to be zero "
r'\(0\) for the "NVT" and "NPT" ensembles.',
):
gomc_control.write_gomc_control_file(
charmm_NPT_NVT,
"test_save_NVT_bad_variables_part_8.conf",
"NPT",
10,
300,
check_input_files_exist=False,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.0,
"MultiParticleFreq": 0.1,
"IntraMEMC-1Freq": 0.10,
"MEMC-1Freq": 0.00,
"IntraMEMC-2Freq": 0.20,
"MEMC-2Freq": 0.00,
"IntraMEMC-3Freq": 0.20,
"MEMC-3Freq": 0.10,
},
)
# test good values of MEMC with GCMC
try:
value = gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
check_input_files_exist=False,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"ChemPot": {"ETH": -4000, "ETO": -8000},
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.0,
"MultiParticleFreq": 0.10,
"IntraMEMC-1Freq": 0.10,
"MEMC-1Freq": 0.10,
"IntraMEMC-2Freq": 0.20,
"MEMC-2Freq": 0.00,
"IntraMEMC-3Freq": 0.20,
"MEMC-3Freq": 0.00,
},
)
except:
value = "TEST_FAILED"
assert value == "GOMC_CONTROL_FILE_WRITTEN"
try:
value = gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
check_input_files_exist=False,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"ChemPot": {"ETH": -4000, "ETO": -8000},
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.0,
"MultiParticleFreq": 0.1,
"IntraMEMC-1Freq": 0.10,
"MEMC-1Freq": 0.00,
"IntraMEMC-2Freq": 0.20,
"MEMC-2Freq": 0.10,
"IntraMEMC-3Freq": 0.20,
"MEMC-3Freq": 0.00,
},
)
except:
value = "TEST_FAILED"
assert value == "GOMC_CONTROL_FILE_WRITTEN"
try:
value = gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
check_input_files_exist=False,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]]
],
"ChemPot": {"ETH": -4000, "ETO": -8000},
"DisFreq": 0.05,
"RotFreq": 0.05,
"IntraSwapFreq": 0.05,
"SwapFreq": 0.00,
"RegrowthFreq": 0.10,
"CrankShaftFreq": 0.05,
"VolFreq": 0.0,
"MultiParticleFreq": 0.1,
"IntraMEMC-1Freq": 0.10,
"MEMC-1Freq": 0.00,
"IntraMEMC-2Freq": 0.20,
"MEMC-2Freq": 0.00,
"IntraMEMC-3Freq": 0.20,
"MEMC-3Freq": 0.10,
},
)
except:
value = "TEST_FAILED"
assert value == "GOMC_CONTROL_FILE_WRITTEN"
# try all case unspecific values
try:
value = gomc_control.write_gomc_control_file(
charmm,
"test_save_NVT_bad_variables_part_8.conf",
"GCMC",
10,
300,
check_input_files_exist=False,
input_variables_dict={
"MEMC_DataInput": [
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "C2"]],
[1, "ETH", ["C1", "C2"], "ETO", ["C1", "O1"]],
],
"ChEmPot": {"ETH": -4000, "ETO": -8000},
"DisFreQ": 0.05,
"RotFreq": 0.05,
"InTraSwapFreq": 0.05,
"SwaPFreq": 0.00,
"ReGrowthFreq": 0.10,
"crankshaftfreq": 0.05,
"VolFrEQ": 0.0,
"MULtiParticleFreq": 0.1,
"IntRAMEMC-1Freq": 0.10,
"MEMC-1FREQ": 0.00,
"IntrAMEMC-2Freq": 0.20,
"MEMC-2FReq": 0.00,
"intramemc-3Freq": 0.20,
"memc-3Freq": 0.10,
},
)
except:
value = "TEST_FAILED"
assert value == "GOMC_CONTROL_FILE_WRITTEN"
def test_charmm_object_has_proper_no_boxes_for_ensemble_part_9(
    self, ethane_gomc, ethanol_gomc
):
    """Ensembles must match the number of boxes in the Charmm object."""
    box_liq = mb.fill_box(
        compound=[ethane_gomc, ethanol_gomc],
        n_compounds=[4, 4],
        box=[4.0, 4.0, 4.0],
    )
    box_vap = mb.fill_box(
        compound=[ethane_gomc, ethanol_gomc],
        n_compounds=[1, 1],
        box=[8.0, 8.0, 8.0],
    )
    charmm_one_box = Charmm(
        box_liq,
        "ethane_ethanol_1_box_liq",
        ff_filename="ethane_ethanol",
        residues=[ethane_gomc.name, ethanol_gomc.name],
        forcefield_selection="oplsaa",
    )
    charmm_two_boxes = Charmm(
        box_liq,
        "ethane_ethanol_2_boxes_liq",
        structure_box_1=box_vap,
        filename_box_1="ethane_box_2_boxes_vap",
        ff_filename="ethane_ethanol",
        residues=[ethane_gomc.name, ethanol_gomc.name],
        forcefield_selection="oplsaa",
    )
    # Two-box ensembles must reject a one-box Charmm object.
    for ensemble in ("GEMC_NVT", "GEMC_NPT", "GCMC"):
        with pytest.raises(
            ValueError,
            match=r"ERROR: The ensemble type selection of {} is using a Charmm "
            r"object with one simulation boxes, and the {} ensemble only accepts "
            r"two boxes \(box 0 and box 1\).".format(ensemble, ensemble),
        ):
            gomc_control.write_gomc_control_file(
                charmm_one_box,
                "test_charmm_object_has_proper_no_boxes_for_ensemble_part_9_1_box",
                ensemble,
                100,
                300,
                check_input_files_exist=False,
            )
    # One-box ensembles must reject a two-box Charmm object.
    for ensemble in ("NVT", "NPT"):
        with pytest.raises(
            ValueError,
            match=r"ERROR: The ensemble type selection of {} is using a Charmm "
            r"object with two simulation boxes, and the {} ensemble only accepts "
            r"one box \(box 0\).".format(ensemble, ensemble),
        ):
            gomc_control.write_gomc_control_file(
                charmm_two_boxes,
                "test_charmm_object_has_proper_no_boxes_for_ensemble_part_9_1_box",
                ensemble,
                100,
                300,
                check_input_files_exist=False,
            )
def test_save_non_othoganol_writer(self):
    """Triclinic cells must be written as full CellBasisVector triples."""
    lattice_cif = load_cif(file_or_path=get_fn("ETV_triclinic.cif"))
    cell_1 = lattice_cif.populate(x=1, y=1, z=1)
    cell_1.name = "ETV_1"
    cell_3 = lattice_cif.populate(x=3, y=3, z=3)
    cell_3.name = "ETV_3"
    charmm = Charmm(
        cell_1,
        "ETV_triclinic_1_cell_box_0",
        structure_box_1=cell_3,
        filename_box_1="ETV_triclinic_3_cell_box_1",
        ff_filename="ETV_triclinic_FF",
        forcefield_selection={
            cell_1.name: get_fn("Charmm_writer_testing_only_zeolite.xml"),
            cell_3.name: get_fn("Charmm_writer_testing_only_zeolite.xml"),
        },
        residues=[cell_1.name, cell_3.name],
        bead_to_atom_name_dict=None,
        fix_residue=[cell_1.name, cell_3.name],
    )
    gomc_control.write_gomc_control_file(
        charmm,
        "test_save_non_othoganol_writer.conf",
        "GEMC_NVT",
        100000,
        300,
        check_input_files_exist=False,
    )
    # Expected cell-basis lines: line prefix -> (box, x, y, z) tokens.
    expected = {
        "CellBasisVector1 0": ("0", "8.7503", "0.0", "0.0"),
        "CellBasisVector2 0": ("0", "-1.179131", "9.575585", "0.0"),
        "CellBasisVector3 0": ("0", "-1.817231", "-3.027821", "9.645823"),
        "CellBasisVector1 1": ("1", "26.2509", "0.0", "0.0"),
        "CellBasisVector2 1": ("1", "-3.537381", "28.726735", "0.0"),
        "CellBasisVector3 1": ("1", "-5.451699", "-9.083469", "28.937455"),
    }
    seen = set()
    with open("test_save_non_othoganol_writer.conf", "r") as fp:
        for line in fp.readlines():
            for prefix, tokens in expected.items():
                if line.startswith(prefix):
                    seen.add(prefix)
                    assert tuple(line.split()[1:5]) == tokens
                    break
    # Every expected vector line must have appeared in the written file.
    assert seen == set(expected)
def test_box_vector_too_many_char(self):
methane = mb.Compound(name="MET")
methane_child_bead = mb.Compound(name="_CH4")
methane.add(methane_child_bead, inherit_periodicity=False)
methane_box_orth = mb.fill_box(
compound=methane, n_compounds=10, box=[1, 2, 3]
)
charmm_bad_box_0 = Charmm(
methane_box_orth,
"methane_box_0_orth",
ff_filename="methane_box_orth_bad_box_0_non_orth",
residues=[methane.name],
forcefield_selection="trappe-ua",
)
# set the vectors all too long
charmm_bad_box_0.box_0_vectors[0][0] = -0.45678901234561
charmm_bad_box_0.box_0_vectors[0][1] = -0.45678901234562
charmm_bad_box_0.box_0_vectors[0][2] = -0.45678901234563
charmm_bad_box_0.box_0_vectors[1][0] = -0.45678901234564
| |
<gh_stars>1-10
# -*- coding: utf-8 -*-
import random
import hashlib
import base64
import json
import urllib
import traceback
import sys
import time
import calendar
import re
# The next two lines are a Python 2 workaround for UnicodeDecodeError crashes
# when handling Chinese text (forces the default encoding to utf-8).
reload(sys)
sys.setdefaultencoding("utf8")
from datetime import date
import datetime
from lxml import etree
from dateutil.relativedelta import relativedelta
if __name__ == '__main__':
sys.path.append('../..')
sys.path.append('../../..')
sys.path.append('../../../..')
from crawler.base_crawler import BaseCrawler
from worker.crawler.china_telecom_tool import login_unity
else:
from worker.crawler.base_crawler import BaseCrawler
from worker.crawler.china_telecom_tool import login_unity
class Crawler(BaseCrawler):
"""
kwargs 包含
'tel': str,
'pin_pwd': str,
'id_card': str,
'full_name': unicode,
'sms_code': str,
'captcha_code': str
錯誤等級
0: 成功
1: 帳號密碼錯誤
2: 認證碼錯誤
9: 其他錯誤
"""
def __init__(self, **kwargs):
    """Initialize the crawler; all state handling is delegated to BaseCrawler."""
    super(Crawler, self).__init__(**kwargs)
def need_parameters(self, **kwargs):
    """Return the list of input parameters this crawler requires."""
    # Only the service password is needed; no captcha for this province.
    return ['pin_pwd']
def get_verify_type(self, **kwargs):
    """Return the verification type (always empty for this crawler)."""
    return ''
def login(self, **kwargs):
    """Log in via the unified China-Telecom entry point.

    Returns (level, status_key); level 0 means success.
    """
    province_id = '01'
    code, key = login_unity(self, province_id, **kwargs)
    if code != 0:
        return code, key
    # Manually follow the SSO redirect to the Beijing-Telecom bill page
    # so the site cookies for that domain get set.
    cookie_url = "http://www.189.cn/login/sso/ecs.do?method=linkTo&platNo=10001&toStUrl=http://bj.189.cn/iframe/feequery/detailBillIndex.action"
    code, key, resp = self.get(cookie_url)
    if code != 0:
        return code, key
    return 0, "success"
def send_verify_request(self, **kwargs):
    """Request that an SMS verification code be sent to the user.

    Returns a tuple of:
        level: int, error level (0 success, 9 other error)
        status_key: str, status-code key (see the status_code table)
        image_str: str, base64 captcha image; always empty for this SMS flow
    """
    send_sms_url = "http://bj.189.cn/iframe/feequery/smsRandCodeSend.action"
    send_sms_data = {
        'accNum': kwargs['tel']
    }
    header = {'Referer': 'http://bj.189.cn/iframe/feequery/detailBillIndex.action?fastcode=01390638&cityCode=bj'}
    code, key, resp = self.post(send_sms_url, data=send_sms_data, headers=header)
    if code != 0:
        return code, key, ''
    if 'SRandomCode' in resp.text:
        try:
            send_sms_res = json.loads(resp.text)
        except ValueError:  # narrowed from a bare except: json.loads raises ValueError
            error = traceback.format_exc()
            message = 'json_error :%s' % error
            self.log('crawler', message, resp)
            return 9, "json_error", ""
        # The site echoes the random code in the response; keep it for verify().
        self.sms_code = send_sms_res['SRandomCode']
        # .get() so a missing "tip" field no longer raises KeyError.
        if send_sms_res.get('tip') == u'对不起,当日使用随机短信次数过多。无法继续发送。':
            self.log('crawler', 'over_max_sms_error', resp)
            return 9, "over_max_sms_error", ""
        return 0, "success", ""
    elif u'查询失败' in resp.text:
        self.log('crawler', 'request_error', resp)
        return 9, "request_error", ""
    else:
        self.log('crawler', 'unknown_error', resp)
        return 9, "unknown_error", ""
def verify(self, **kwargs):
    """Submit the SMS verification code captured in send_verify_request.

    Returns a tuple of:
        level: int, error level (0 success, 2 wrong code, 9 other error)
        status_key: str, status-code key (see the status_code table)
    """
    check_sms_url = "http://bj.189.cn/iframe/feequery/detailValidCode.action"
    check_sms_data = {
        "requestFlag": 'asynchronism',
        "accNum": kwargs['tel'],
        "sRandomCode": self.sms_code
    }
    code, key, resp = self.post(check_sms_url, data=check_sms_data)
    if code != 0:
        return code, key
    try:
        check_sms_res = json.loads(resp.text)
    except ValueError:  # narrowed from a bare except: json.loads raises ValueError
        error = traceback.format_exc()
        message = 'json_error : %s' % error
        self.log('crawler', message, resp)
        return 9, "json_error"
    # .get() so any missing key degrades to the crawl_error branch
    # instead of raising KeyError.
    if check_sms_res.get('ILoginType') == 4 and check_sms_res.get('billDetailValidate') == "true":
        return 0, "success"
    elif check_sms_res.get('tip') == u"随机短信码错误":
        self.log('crawler', 'verify_error', resp)
        return 2, "verify_error"
    elif check_sms_res.get('billDetailValidate') == -1:
        self.log('crawler', 'user_exit', resp)
        return 9, "user_exit"
    else:
        self.log('crawler', 'crawl_error', resp)
        return 9, "crawl_error"
def crawl_info(self, **kwargs):
    """Crawl the account information.

    Returns a tuple of:
        level: int, error level (0 success, 9 other error)
        status_key: str, status-code key (see the status_code table)
        info: dict, account info (full_name, open_date, id_card, address)
    """
    change_info_url = "http://bj.189.cn/iframe/custservice/modifyUserInfo.action?fastcode=10000181&cityCode=bj"
    headers = {
        "Referer": "http://www.189.cn/dqmh/my189/initMy189home.do",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8"
    }
    code, key, resp = self.get(change_info_url, headers=headers)
    if code != 0:
        return code, key, {}
    full_name = ""
    open_date = ""
    try:
        et = etree.HTML(resp.text)
        info_list = et.xpath("//td[@class='tl']/text()")
        full_name = info_list[0]
        open_date = info_list[-1]
        # Convert "YYYY-mm-dd HH:MM:SS" into a Unix-timestamp string.
        time_type = time.strptime(open_date, "%Y-%m-%d %H:%M:%S")
        open_date = str(int(time.mktime(time_type)))
    except Exception:  # narrowed from a bare except: log and report parse errors
        error = traceback.format_exc()
        self.log("crawler", 'html_error{}'.format(error), resp)
        return 9, 'html_error', {}
    user_info = {}
    info_url = 'http://www.189.cn/dqmh/userCenter/userInfo.do?method=editUserInfo_new&fastcode=&cityCode=bj'
    code, key, resp = self.get(info_url)
    if code != 0:
        return code, key, {}
    try:
        selector = etree.HTML(resp.text)
        user_info['full_name'] = full_name
        user_info['open_date'] = open_date
        # Only the ID-card certificate type exposes the number on this page.
        if '<option selected="selected" value="1">身份证</option>' in resp.text.encode('utf8'):
            id_card = selector.xpath('//input[@name="certificateNumber"]/@value')
            user_info['id_card'] = id_card[0] if id_card else ''
        else:
            user_info['id_card'] = ''
        address = selector.xpath('//*[@id="address"]/text()')
        user_info['address'] = address[0] if address else ''
    except Exception:  # narrowed from a bare except
        error = traceback.format_exc()
        self.log('crawler', 'html_error :%s' % error, resp)
        return 9, 'html_error', {}
    return 0, "success", user_info
def time_stamp(self,old_time):
temp_month = re.findall('-(\d)-', old_time)
if temp_month:
old_time = re.sub('-\d-', '0' + temp_month[0] + '-', old_time)
temp_day = re.findall('-(\d)\s', old_time)
if temp_day:
old_time = re.sub('-\d\s', '0' + temp_day[0], old_time)
call_time = re.findall('\d{2}', old_time)
call_time_change = call_time[0] + call_time[1] + '-' + call_time[2] + '-' + call_time[3] + ' ' + call_time[
4] + ':' + call_time[5] + ':' + call_time[6]
timeArray = time.strptime(call_time_change, "%Y-%m-%d %H:%M:%S")
call_time_timeStamp = str(int(time.mktime(timeArray)))
return call_time_timeStamp
def random_sleep(self, tm, page=1, modulus=3):
    """Sleep for a randomized slice of the remaining time budget ``tm``."""
    base = tm / page / modulus
    time.sleep(random.uniform(base / 1.5, base * 1.5))
def crawl_call_log(self, **kwargs):
    """Crawl the detailed call log for the most recent six months.

    Returns a tuple of:
        level: int, error level (0 success, 9 other error)
        status_key: str, status-code key (see the status_code table)
        call_log: list, call records (see the call-log format)
        missing_list: months whose data is definitely missing
        possibly_missing_list: months that may simply have no records
        part_missing_list: months where only some pages were fetched

    NOTE(review): the early returns below yield 5-tuples while the normal
    paths yield 6-tuples -- confirm that all callers handle both shapes.
    """
    missing_list = []
    possibly_missing_list = []
    part_missing_list = []
    crawler_list = 0  # number of months that failed while parsing pages
    call_log = []
    call_log_url = "http://bj.189.cn/iframe/feequery/billDetailQuery.action"
    today = date.today()
    # Offsets for the current month and the five preceding months.
    search_month = [x for x in range(0, -6, -1)]
    for each_month in search_month:
        query_date = today + relativedelta(months=each_month)
        call_month = "%d%02d" % (query_date.year,query_date.month)
        query_month = '%d年%02d月'%(query_date.year,query_date.month)
        # Number of the last day of the month, used as the query end time.
        endDate = calendar.monthrange(query_date.year, query_date.month)[1]
        call_log_data ={
            "requestFlag": 'synchronization',
            "billDetailType": 1,
            "qryMonth": query_month,
            "startTime":'1',
            "accNum":kwargs['tel'],
            "endTime":str(endDate),
        }
        # Retry budget: self.max_retry attempts, then keep retrying while a
        # 12-second wall-clock window remains.
        start_time = time.time()
        end_time = start_time + 12
        aid_time_dict = dict()  # remaining-retries -> timestamp of that attempt
        retry_times = self.max_retry
        log_for_retry = []  # (page, remaining retries) pairs, for logging only
        while 1:
            log_for_retry.append((1, retry_times))
            retry_times -= 1
            code, key, resp = self.post(call_log_url, data=call_log_data)
            if code:
                missing_flag = True
            elif u'该号码资料不存在' in resp.text:
                # "Account does not exist": abort the whole crawl.
                self.log('user', 'user_prohibited_error', resp)
                return 9, 'user_prohibited_error', [], [], []
            elif u'尊敬的用户,您好,随机密码登录用户 无权限访问此功能' in resp.text:
                # Random-password logins may not access this feature.
                self.log('website', 'website_busy_error', resp)
                return 9, 'website_busy_error', call_log, missing_list, possibly_missing_list
            elif u'用户设置了详单禁查!' in resp.text:
                # The user has disabled call-log queries.
                self.log('user', 'user_prohibited_error', resp)
                return 9, 'user_prohibited_error', [], [], []
            elif u'抱歉!查询失败,请稍后重试' in resp.text:
                self.log('website', 'website_busy_error', resp)
                missing_flag = True
            elif u"未查询到该帐期详单!" in resp.text or u'未查询到您的详单信息' in resp.text:
                # No records for this billing period: possibly just empty.
                missing_flag = False
            elif u"系统正忙" in resp.text:
                self.log('website', 'website_busy_error', resp)
                missing_flag = True
            elif u'调用后台详单查询方法出错!' in resp.text:
                self.log('website', 'website_busy_error', resp)
                missing_flag = True
            else:
                # Looks like a real data page; leave the retry loop.
                flag = True
                break
            now_time = time.time()
            if retry_times >= 0:
                aid_time_dict.update({retry_times: time.time()})
            elif now_time < end_time:
                # Retries exhausted but time budget remains: sleep, retry.
                loop_time = aid_time_dict.get(0, time.time())
                left_time = end_time - loop_time
                self.random_sleep(left_time)
            else:
                # Both retries and time budget exhausted for this month.
                flag = False
                if missing_flag:
                    missing_list.append(call_month)
                else:
                    possibly_missing_list.append(call_month)
                break
        if not flag:
            self.log('crawler', '{}重试记录{}'.format(call_month, log_for_retry), '')
            continue
        # (A large commented-out legacy retry implementation was removed here.)
        key, level, message, data, page_num = self.call_log_get(resp.text, call_month)
        if level != 0:
            self.log('crawler', message, resp)
            if data:
                # Partial parse succeeded; keep what we have.
                part_missing_list.append(call_month)
                call_log.extend(data)
            else:
                missing_list.append(call_month)
            crawler_list += 1
            self.log('crawler', '{}重试记录{}'.format(call_month, log_for_retry), '')
            continue
        call_log.extend(data)
        if page_num <= 1:
            self.log('crawler', '{}重试记录{}'.format(call_month, log_for_retry), '')
            continue
        # Fetch the remaining pages under the same retry/time-budget scheme.
        page_list = list(range(2, page_num + 1))
        page_and_retry = [(page, self.max_retry) for page in page_list]
        while page_and_retry:
            page, retry_times = page_and_retry.pop(0)
            log_for_retry.append((page, retry_times))
            retry_times -= 1
            call_log_data['billPage'] = page
            code, key, resp = self.post(call_log_url, data=call_log_data)
            if not code:
                key, level, message, data, page = self.call_log_get(resp.text, call_month)
                if not level:
                    call_log.extend(data)
                    continue
                else:
                    crawler_list += 1
                    self.log('crawler', message, resp)
                    part_missing_list.append(call_month) if call_month not in part_missing_list else None
                    if data:
                        call_log.extend(data)
                    continue
            now_time = time.time()
            if retry_times >= 0:
                # Requeue the page for another attempt.
                page_and_retry.append((page, retry_times))
                aid_time_dict.update({retry_times: time.time()})
            elif now_time < end_time:
                page_and_retry.append((page, retry_times))
                loop_time = aid_time_dict.get(0, time.time())
                left_time = end_time - loop_time
                self.random_sleep(left_time, len(page_and_retry))
            else:
                # Give up on this page; mark the month partially missing.
                part_missing_list.append(call_month) if call_month not in part_missing_list else None
        self.log('crawler', '{}重试记录{}'.format(call_month, log_for_retry), '')
    self.log("crawler", "缺失: {}, 可能缺失: {}, 部分缺失: {}".format(missing_list, possibly_missing_list, part_missing_list), "")
    if len(missing_list + possibly_missing_list) == 6:
        # Every month failed outright.
        if crawler_list > 0:
            return 9, 'crawl_error', call_log, missing_list, possibly_missing_list, part_missing_list
        return 9, 'website_busy_error', call_log, missing_list, possibly_missing_list, part_missing_list
    return 0, "success", call_log, missing_list, possibly_missing_list, part_missing_list
def call_log_get(self, resp, call_month):
"""
| `update_time` | string | 更新时间戳 |
| `call_cost` | string | 爬取费用 |
| `call_time` | string | 通话起始时间 |
| `call_method` | string | 呼叫类型(主叫, 被叫) |
| `call_type` | string | 通话类型(本地, 长途) |
| `call_from` | string | 本机通话地 |
| `call_to` | string | 对方归属地 |
| `call_duration` | string | 通话时长 |
"""
page_number = 0
records = []
try:
selector = etree.HTML(resp.decode('utf-8'))
call_form = selector.xpath('//table[@class="ued-table"]/tr')
if not call_form:
return 'html_error', 9, 'html_error', records, page_number
for item in call_form[1:-2]:
data = {}
data['month'] = call_month
data['update_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
data['call_cost'] = item.xpath('.//td[10]/text()')[0]
# 以下几行转换成时间戳
call_time = re.findall('\d{2}', item.xpath('.//td[6]/text()')[0])
call_time_change = | |
clsend airecv;
requestAnimEcho(string) broadcast;
removeBeans(int8 []) clsend airecv;
removeBeansEcho(int8 []) broadcast;
};
// Holiday-themed reskins of the party trampoline activity.
// They inherit all fields from DistributedPartyTrampolineActivity
// and add no fields of their own.
dclass DistributedPartyValentineTrampolineActivity : DistributedPartyTrampolineActivity {
};
dclass DistributedPartyVictoryTrampolineActivity : DistributedPartyTrampolineActivity {
};
dclass DistributedPartyWinterTrampolineActivity : DistributedPartyTrampolineActivity {
};
// Party tug-of-war team activity.
// Keywords: clsend = clients may send; airecv = delivered to the AI server;
// broadcast = sent to all interested clients; ram/required = stored state.
dclass DistributedPartyTugOfWarActivity : DistributedPartyTeamActivity {
  reportKeyRateForce(uint32, int16/100) airecv clsend;  // int16/100: fixed-point value, divisor 100
  reportFallIn(uint8) airecv clsend;
  setToonsPlaying(uint32 [0-4], uint32 [0-4]) required broadcast ram;  // up to 4 toon ids per team
  updateToonKeyRate(uint32, uint32) broadcast;
  updateToonPositions(int16/1000) broadcast;  // int16/1000: fixed-point value, divisor 1000
};
// Lets a client submit a replacement inventory blob; the AI server
// receives and processes it (clsend airecv).
dclass DeleteManager : DistributedObject {
  setInventory(blob) airecv clsend;
};
// Calendar-holiday records distributed by NewsManager.
// The start/end time arrays are packed date-time components
// (presumably [month, day, hour, ...] -- confirm against the calendar code).

// A holiday that recurs weekly on a fixed day of the week.
struct weeklyCalendarHoliday {
  uint8 holidayId;
  uint8 dayOfTheWeek;
};
// A holiday that recurs every year.
struct yearlyCalendarHoliday {
  uint8 holidayId;
  uint8[] firstStartTime;
  uint8[] lastEndTime;
};
// A holiday that happens exactly once.
struct oncelyCalendarHoliday {
  uint8 holidayId;
  uint16[] firstStartTime;
  uint16[] lastEndTime;
};
// A holiday scheduled relative to another date.
struct relativelyCalendarHoliday {
  uint8 holidayId;
  uint16[] firstStartTime;
  uint16[] lastEndTime;
};
// One start/end time window.
struct startAndEndTime {
  uint16[] startTime;
  uint16[] endTime;
};
// A holiday with several independent start/end windows.
struct multipleStartHoliday {
  uint8 holidayId;
  startAndEndTime times[];
};
dclass NewsManager : DistributedObject {
setPopulation(uint32) broadcast ram;
setBingoWin(uint32) broadcast ram;
setBingoStart() broadcast;
setBingoOngoing() broadcast;
setBingoEnd() broadcast;
setCircuitRaceStart() broadcast;
setCircuitRaceOngoing() broadcast;
setCircuitRaceEnd() broadcast;
setTrolleyHolidayStart() broadcast;
setTrolleyHolidayOngoing() broadcast;
setTrolleyHolidayEnd() broadcast;
setTrolleyWeekendStart() broadcast;
setTrolleyWeekendOngoing() broadcast;
setTrolleyWeekendEnd() broadcast;
setMoreXpHolidayStart() broadcast;
setMoreXpHolidayOngoing() broadcast;
setMoreXpHolidayEnd() broadcast;
setRoamingTrialerWeekendStart() broadcast;
setRoamingTrialerWeekendOngoing() broadcast;
setRoamingTrialerWeekendEnd() broadcast;
setInvasionStatus(uint8, string, uint32, uint8) broadcast;
setHolidayIdList(uint32[]) broadcast ram;
holidayNotify() broadcast;
setWeeklyCalendarHolidays(weeklyCalendarHoliday []) required broadcast ram;
setYearlyCalendarHolidays(yearlyCalendarHoliday []) required broadcast ram;
setOncelyCalendarHolidays(oncelyCalendarHoliday []) required broadcast ram;
setRelativelyCalendarHolidays(relativelyCalendarHoliday []) required broadcast ram;
setMultipleStartHolidays(multipleStartHoliday []) required broadcast ram;
sendSystemMessage(string, uint8) broadcast ram;
};
// Post-minigame gag purchase flow for up to 4 players.
dclass PurchaseManager : DistributedObject {
  setPlayerIds(uint32, uint32, uint32, uint32) required broadcast ram;
  setNewbieIds(uint32[]) required broadcast ram;
  setMinigamePoints(uint8, uint8, uint8, uint8) required broadcast ram;
  setPlayerMoney(uint16, uint16, uint16, uint16) required broadcast ram;
  setPlayerStates(uint8, uint8, uint8, uint8) required broadcast ram;
  setCountdown(int16) required broadcast ram;
  setMetagameRound(int8) required broadcast ram;
  setVotesArray(int16[]) required broadcast ram;
  requestExit() airecv clsend;
  requestPlayAgain() airecv clsend;
  setInventory(blob, int16, uint8) airecv clsend;
  setPurchaseExit() broadcast;
};
dclass NewbiePurchaseManager : PurchaseManager {
  setOwnedNewbieId(uint32) required broadcast ram;
};
dclass SafeZoneManager : DistributedObject {
  enterSafeZone() airecv clsend;
  exitSafeZone() airecv clsend;
};
// Tutorial entry/exit handshake between client and AI.
dclass TutorialManager : DistributedObject {
  requestTutorial() airecv clsend;
  rejectTutorial() airecv clsend;
  requestSkipTutorial() airecv clsend;
  skipTutorialResponse(uint8);
  enterTutorial(uint32, uint32, uint32, uint32);
  allDone() airecv clsend;
  toonArrived() airecv clsend;
};
dclass CatalogManager : DistributedObject {
  startCatalog() airecv clsend;
  fetchPopularItems() airecv clsend;
  setPopularItems(blob);
};
dclass DistributedMyTest : DistributedObject {
  setMyTest(uint16) broadcast;
};
// Grabbable treasure: client requests a grab, server confirms or rejects.
dclass DistributedTreasure : DistributedObject {
  setTreasureType(uint16) required broadcast ram;
  // Fixed-point position (value/10).
  setPosition(int16/10, int16/10, int16/10) required broadcast ram;
  requestGrab() airecv clsend;
  setGrab(uint32) broadcast ram;
  setReject() broadcast;
};
dclass DistributedSZTreasure : DistributedTreasure {
};
dclass DistributedEFlyingTreasure : DistributedSZTreasure {
};
dclass DistributedCashbotBossTreasure : DistributedTreasure {
  setGoonId(uint32) required broadcast ram;
  setFinalPosition(int16/10, int16/10, int16/10) required broadcast ram;
  setStyle(uint16) required broadcast ram;
};
// Chunked blob transfer to one target avatar, acked per chunk.
dclass DistributedLargeBlobSender : DistributedObject {
  setMode(uint8) required broadcast ram;
  setTargetAvId(uint32) required broadcast ram;
  setChunk(blob);
  setFilename(string);
  setAck() airecv clsend;
};
// A multi-zone "level" instance (e.g. field offices): spec sync + attrib edits.
dclass DistributedLevel : DistributedObject {
  setLevelZoneId(uint32) required broadcast ram;
  setPlayerIds(uint32[]) required broadcast ram;
  setEntranceId(uint8) required broadcast ram;
  setZoneIds(uint32[]) broadcast ram;
  setStartTimestamp(int32) broadcast ram;
  setOuch(uint8) airecv clsend;
  requestCurrentLevelSpec(string, string) airecv clsend;
  setSpecDeny(blob);
  setSpecSenderDoId(uint32);
  setAttribChange(uint32, blob, blob, blob) broadcast;
};
// An entity belonging to a DistributedLevel.
dclass DistributedEntity : DistributedObject {
  setLevelDoId(uint32) required broadcast ram;
  setEntId(uint32) required broadcast ram;
};
dclass DistributedInteractiveEntity : DistributedEntity {
  setAvatarInteract(uint32) required broadcast ram;
  requestInteract() airecv clsend;
  rejectInteract();
  requestExit() airecv clsend;
  avatarExit(uint32) broadcast;
  setState(string, int32) required broadcast ram;
};
dclass DistributedTrophyMgr : DistributedObject {
  requestTrophyScore() airecv clsend;
};
// A street building that can flip between toon/suit ownership.
dclass DistributedBuilding : DistributedObject {
  setBlock(uint16, uint32) required broadcast ram;
  setSuitData(int8, int8, int8) required broadcast ram;
  setVictorList(uint32[]) broadcast ram;
  setState(string, int16) broadcast ram;
  setVictorReady() airecv clsend;
};
dclass DistributedAnimBuilding : DistributedBuilding {
};
dclass DistributedToonInterior : DistributedObject {
  setZoneIdAndBlock(uint32, uint16) required broadcast ram;
  setToonData(blob) required broadcast ram;
  setState(string, int16) required broadcast ram;
  nextSnowmanHeadPart() clsend airecv;
};
dclass DistributedToonHallInterior : DistributedToonInterior {
};
// Cog building interior: elevator/join handshakes plus floor state.
dclass DistributedSuitInterior : DistributedObject {
  setZoneId(uint32) required broadcast ram;
  setExtZoneId(uint32) required broadcast ram;
  setDistBldgDoId(uint32) required broadcast ram;
  setNumFloors(int8) required broadcast ram;
  setToons(uint32[], uint16) broadcast ram;
  setSuits(uint32[], uint32[], uint16[]) broadcast ram;
  setState(string, int16) required broadcast ram;
  setAvatarJoined() airecv clsend;
  elevatorDone() airecv clsend;
  reserveJoinDone() airecv clsend;
};
dclass DistributedCogdoBarrel : DistributedObject {
  requestGrab() airecv clsend;
  setIndex(uint32) required broadcast ram;
  setState(uint32) required broadcast ram;
  setGrab(uint32) broadcast ram;
  setReject() broadcast;
};
// Cogdo (field office) interior: suit-interior flow plus barrel-room phases.
dclass DistributedCogdoInterior : DistributedObject {
  setZoneId(uint32) required broadcast ram;
  setExtZoneId(uint32) required broadcast ram;
  setDistBldgDoId(uint32) required broadcast ram;
  setNumFloors(int8) required broadcast ram;
  setShopOwnerNpcId(uint32) required broadcast ram;
  setSOSNpcId(uint32) broadcast ram;
  setFOType(int8) broadcast ram;
  setToons(uint32[], uint16) broadcast ram;
  setSuits(uint32[], uint32[], uint16[]) broadcast ram;
  setState(string, int16) required broadcast ram;
  setAvatarJoined() airecv clsend;
  elevatorDone() airecv clsend;
  reserveJoinDone() airecv clsend;
  toonLeftBarrelRoom() airecv clsend;
  toonBarrelRoomIntroDone() airecv clsend;
  setBarrelRoomReward(uint32 [], uint8 []) broadcast;
  toonBarrelRoomRewardDone() airecv clsend;
};
dclass DistributedCogdoBattleBldg : DistributedBattleBldg {
};
// Base protocol for cogdo minigames: intro/start/finish lifecycle.
dclass DistCogdoGame : DistributedObject {
  setInteriorId(uint32) required broadcast ram;
  setExteriorZone(uint32) broadcast ram required;
  setDifficultyOverrides(int32, int32) broadcast ram required;
  setVisible() broadcast;
  setIntroStart() broadcast;
  setToonSad(uint32) broadcast;
  setToonDisconnect(uint32) broadcast;
  setAvatarReady() airecv clsend;
  setGameStart(int16) broadcast;
  setGameFinish(int16) broadcast;
};
dclass DistCogdoLevelGame : DistributedLevel, DistCogdoGame {
};
// Maze game: request/confirm pairs for gags, hits and pickups.
dclass DistCogdoMazeGame : DistCogdoGame {
  requestAction(uint8, uint32) airecv clsend;
  doAction(uint8, uint32, int16) broadcast;
  setNumSuits(uint8 [3]) required broadcast;
  requestUseGag(int16/10, int16/10, int16/10, int16) clsend airecv;
  toonUsedGag(uint32, int16/10, int16/10, int16/10, int16) broadcast;
  requestSuitHitByGag(uint8, uint8) clsend airecv;
  suitHitByGag(uint32, uint8, uint8) broadcast;
  requestHitBySuit(uint8, uint8, int16) clsend airecv;
  toonHitBySuit(uint32, uint8, uint8, int16) broadcast;
  requestHitByDrop() clsend airecv;
  toonHitByDrop(uint32) broadcast;
  requestPickUp(uint8) clsend airecv;
  pickUp(uint32, uint8, int16) broadcast;
  requestGag(uint8) clsend airecv;
  hasGag(uint32, int16) broadcast;
};
dclass DistCogdoFlyingGame : DistCogdoGame {
  requestAction(uint8, uint8) airecv clsend;
  requestPickUp(uint16, uint8) airecv clsend;
  pickUp(uint32, uint16, int16) broadcast;
  debuffPowerup(uint32, uint16, int16) broadcast;
  doAction(uint8, uint32) broadcast;
  eagleExitCooldown(uint32, int16) broadcast;
  toonSetAsEagleTarget(uint32, uint8, int16) broadcast;
  toonClearAsEagleTarget(uint32, uint8, int16) broadcast;
  toonDied(uint32, int32) broadcast;
  toonSpawn(uint32, int32) broadcast;
  toonSetBlades(uint32, int32) broadcast;
  toonBladeLost(uint32) broadcast;
};
dclass DistCogdoBoardroomGame : DistCogdoLevelGame {
};
dclass DistCogdoCraneGame : DistCogdoLevelGame {
};
dclass DistCogdoCrane : DistributedObject {
  setCraneGameId(uint32) required broadcast ram;
  setIndex(uint8) required broadcast ram;
  setState(char, uint32) broadcast ram;
  clearSmoothing(int8) broadcast clsend;
  setCablePos(uint8, int16/100, uint16%360/100, LinkPosition [3], int16) broadcast clsend;
};
// Physics object in the crane game; includes the standard smooth-node
// position/HPR field set with molecular (compound) fields.
dclass DistCogdoCraneObject : DistributedObject {
  setCraneGameId(uint32) required broadcast ram;
  setObjectState(char, uint32, uint32) broadcast ram;
  requestGrab() airecv clsend;
  rejectGrab();
  requestDrop() airecv clsend;
  hitFloor() clsend;
  requestFree(int16/10, int16/10, int16/10, uint16%360/100) airecv clsend;
  hitBoss(uint16/255) airecv clsend;
  setX(int16/10) broadcast ram clsend airecv;
  setY(int16/10) broadcast ram clsend airecv;
  setZ(int16/10) broadcast ram clsend airecv;
  setH(int16%360/10) broadcast ram clsend airecv;
  setP(int16%360/10) broadcast ram clsend airecv;
  setR(int16%360/10) broadcast ram clsend airecv;
  // Molecular fields composed of the atomic setters above.
  setPos : setX, setY, setZ;
  setHpr : setH, setP, setR;
  setPosHpr : setX, setY, setZ, setH, setP, setR;
  setXY : setX, setY;
  setXZ : setX, setZ;
  setXYH : setX, setY, setH;
  setXYZH : setX, setY, setZ, setH;
  // Smoothed-motion components (timestamped by setComponentT).
  setComponentL(uint64) broadcast ram clsend airecv;
  setComponentX(int16/10) broadcast ram clsend airecv;
  setComponentY(int16/10) broadcast ram clsend airecv;
  setComponentZ(int16/10) broadcast ram clsend airecv;
  setComponentH(int16%360/10) broadcast ram clsend airecv;
  setComponentP(int16%360/10) broadcast ram clsend airecv;
  setComponentR(int16%360/10) broadcast ram clsend airecv;
  setComponentT(int16) broadcast ram clsend airecv;
  setSmStop : setComponentT;
  setSmH : setComponentH, setComponentT;
  setSmZ : setComponentZ, setComponentT;
  setSmXY : setComponentX, setComponentY, setComponentT;
  setSmXZ : setComponentX, setComponentZ, setComponentT;
  setSmPos : setComponentX, setComponentY, setComponentZ, setComponentT;
  setSmHpr : setComponentH, setComponentP, setComponentR, setComponentT;
  setSmXYH : setComponentX, setComponentY, setComponentH, setComponentT;
  setSmXYZH : setComponentX, setComponentY, setComponentZ, setComponentH, setComponentT;
  setSmPosHpr : setComponentX, setComponentY, setComponentZ, setComponentH, setComponentP, setComponentR, setComponentT;
  setSmPosHprL : setComponentL, setComponentX, setComponentY, setComponentZ, setComponentH, setComponentP, setComponentR, setComponentT;
  clearSmoothing(int8) broadcast clsend;
};
dclass DistCogdoCraneMoneyBag : DistCogdoCraneObject {
  setIndex(uint8) required broadcast ram;
  requestInitial() airecv clsend;
};
dclass DistCogdoCraneCog : DistributedObject {
  setGameId(uint32) required broadcast ram;
  setDNAString(blob) required broadcast ram;
  setSpawnInfo(uint8, int16) required broadcast ram;
};
// Shop/HQ interiors: mostly static zone/block bindings.
dclass DistributedHQInterior : DistributedObject {
  setZoneIdAndBlock(uint32, uint16) required broadcast ram;
  setLeaderBoard(blob) required broadcast ram;
  setTutorial(uint8) required broadcast ram;
};
dclass DistributedGagshopInterior : DistributedObject {
  setZoneIdAndBlock(uint32, uint16) required broadcast ram;
};
dclass DistributedPetshopInterior : DistributedObject {
  setZoneIdAndBlock(uint32, uint16) required broadcast ram;
};
dclass DistributedKartShopInterior : DistributedObject {
  setZoneIdAndBlock(uint32, uint16) required broadcast ram;
};
// Door between zones: enter/exit request handshake with reject path.
dclass DistributedDoor : DistributedObject {
  setZoneIdAndBlock(uint32, uint32) required broadcast ram;
  setSwing(int8) required broadcast ram;
  setDoorType(uint8) required broadcast ram;
  setDoorIndex(uint8) required broadcast ram;
  setOtherZoneIdAndDoId(uint32, uint32);
  requestEnter() airecv clsend;
  requestExit() airecv clsend;
  rejectEnter(int8);
  avatarEnter(uint32) broadcast;
  avatarExit(uint32) broadcast;
  setState(string, int16) required broadcast ram;
  setExitDoorState(string, int16) required broadcast ram;
};
dclass DistributedAnimDoor : DistributedDoor {
};
dclass DistributedLightSwitch : DistributedObject {
  setInteriorDoId(uint32) required broadcast ram;
  toggleLight() clsend airecv;
  setLightState(bool) broadcast;
};
dclass DistributedHouseDoor : DistributedDoor {
};
dclass DistributedCogHQDoor : DistributedDoor {
};
dclass DistributedSellbotHQDoor : DistributedCogHQDoor {
  informPlayer(uint8) broadcast ram;
};
// Non-player toons (shopkeepers, quest givers).
dclass DistributedNPCToonBase : DistributedNode {
  setName(string) required broadcast ram;
  setDNAString(blob) required broadcast ram;
  setPositionIndex(uint8) required broadcast ram;
  setAnimState(string, int16/1000, int16) broadcast ram;
  setPageNumber(int16, int8, int16) broadcast ram clsend;
  avatarEnter() airecv clsend;
  freeAvatar();
  // Accessory fields with default values of 0 (none equipped).
  setHat(uint8 = 0, uint8 = 0, uint8 = 0) broadcast ram;
  setGlasses(uint8 = 0, uint8 = 0, uint8 = 0) broadcast ram;
  setBackpack(uint8 = 0, uint8 = 0, uint8 = 0) broadcast ram;
  setShoes(uint8 = 0, uint8 = 0, uint8 = 0) broadcast ram;
};
dclass DistributedNPCToon : DistributedNPCToonBase {
  setMovie(uint8, uint32, uint32, uint16[], int16) broadcast ram;
  setMovieDone() airecv clsend;
  chooseQuest(uint16) airecv clsend;
  chooseTrack(int8) airecv clsend;
};
dclass DistributedNPCHQOfficer : DistributedNPCToon {
};
dclass DistributedNPCSpecialQuestGiver : DistributedNPCToonBase {
setMovie(uint8, uint32, uint32, uint16[], int16) broadcast ram;
setMovieDone() airecv clsend;
chooseQuest(uint16) airecv | |
# <reponame>Xiaoyang-Lu/sst-elements <filename>src/sst/elements/memHierarchy/tests/testKingsley.py
import os
import sst
# --- Global simulation configuration -------------------------------------
quiet = True                 # suppress informational prints during setup
memCapacity = 4              # total DDR capacity, in GB
memPageSize = 4              # page size, in KB
# Floor division so the page count stays an integer on both Python 2 and
# Python 3 (true division under Py3 would yield a float here).
memNumPages = memCapacity * 1024 * 1024 // memPageSize
# 3x3 mesh of network stops.
mesh_stops_x = 3
mesh_stops_y = 3
mesh_clock = 2200            # mesh clock, in MHz
ctrl_mesh_flit = 8           # control-network flit size, in bytes
data_mesh_flit = 36          # data-network flit size, in bytes
mesh_link_latency = "100ps" # Note, used to be 50ps, didn't seem to make a difference when bumping it up to 100
# Link bandwidth = clock (Hz) * flit size (bytes), formatted for SST.
ctrl_mesh_link_bw = str( (mesh_clock * 1000 * 1000 * ctrl_mesh_flit) ) + "B/s"
data_mesh_link_bw = str( (mesh_clock * 1000 * 1000 * data_mesh_flit) ) + "B/s"
core_clock = "1800MHz"
coherence_protocol = "MESI"
ctrl_network_buffers = "32B"
data_network_buffers = "288B"
# Router parameters for the control mesh (requests/acks/forwards).
ctrl_network_params = {
    "link_bw" : ctrl_mesh_link_bw,
    "flit_size" : str(ctrl_mesh_flit) + "B",
    "input_buf_size" : ctrl_network_buffers,
}
# Router parameters for the (wider) data mesh.
data_network_params = {
    "link_bw" : data_mesh_link_bw,
    "flit_size" : str(data_mesh_flit) + "B",
    "input_buf_size" : data_network_buffers,
    "port_priority_equal" : 1,
}
# Debug parameters for memH
# Per-component debug flags; raise debugAll to enable everything at once.
debugAll = 0
debugL1 = max(debugAll, 0)
debugL2 = max(debugAll, 0)
debugDDRDC = max(debugAll, 0)
debugMemCtrl = max(debugAll, 0)
debugNIC = max(debugAll, 0)
debugLev = 3
# Verbose
verbose = 2
# Private 32KiB 8-way L1 per core (shared by two SMT threads via the shim).
l1_cache_params = {
    "cache_frequency" : core_clock,
    "coherence_protocol" : coherence_protocol,
    "replacement_policy" : "lru",
    "cache_size" : "32KiB",
    "associativity" : 8,
    "cache_line_size" : 64,
    "access_latency_cycles" : 4,
    "tag_access_latency_cycles" : 1,
    "mshr_num_entries" : 12, # Outstanding misses per core
    "maxRequestDelay" : 10000000,
    "events_up_per_cycle" : 2,
    "mshr_latency_cycles" : 2,
    "max_requests_per_cycle" : 1,
    #"request_link_width" : "72B",
    #"response_link_width" : "36B",
    "L1" : 1,
    "verbose" : verbose,
    "debug" : debugL1,
    "debug_level" : debugLev,
}
# Stride prefetcher attached to each L2.
l2_prefetch_params = {
    "prefetcher" : "cassini.StridePrefetcher",
    "prefetcher.reach" : 16,
    "prefetcher.detect_range" : 1
}
# Per-tile 1MiB 16-way L2; NIC params bind it to the four mesh subnets.
l2_cache_params = {
    "cache_frequency" : core_clock,
    "coherence_protocol" : coherence_protocol,
    "replacement_policy" : "lru",
    "cache_size" : "1MiB",
    "associativity" : 16,
    "cache_line_size" : 64,
    "access_latency_cycles" : 8, # Guess - co-processor s/w dev guide says 11 for 512KiB cache
    "tag_access_latency_cycles" : 3,
    "mshr_num_entries" : 48, # Actually 48 reads and 32 writebacks
    #"max_requests_per_cycle" : 2,
    "mshr_latency_cycles" : 4,
    #"request_link_width" : "72B",
    "response_link_width" : "72B",
    "memNIC.req.linkcontrol" : "kingsley.linkcontrol",
    "memNIC.ack.linkcontrol" : "kingsley.linkcontrol",
    "memNIC.fwd.linkcontrol" : "kingsley.linkcontrol",
    "memNIC.data.linkcontrol" : "kingsley.linkcontrol",
    "memNIC.req.network_bw" : ctrl_mesh_link_bw,
    "memNIC.ack.network_bw" : ctrl_mesh_link_bw,
    "memNIC.fwd.network_bw" : ctrl_mesh_link_bw,
    "memNIC.data.network_bw" : data_mesh_link_bw,
    "memNIC.req.network_input_buffer_size" : ctrl_network_buffers,
    "memNIC.ack.network_input_buffer_size" : ctrl_network_buffers,
    "memNIC.fwd.network_input_buffer_size" : ctrl_network_buffers,
    "memNIC.data.network_input_buffer_size" : data_network_buffers,
    "memNIC.req.network_output_buffer_size" : ctrl_network_buffers,
    "memNIC.ack.network_output_buffer_size" : ctrl_network_buffers,
    "memNIC.fwd.network_output_buffer_size" : ctrl_network_buffers,
    "memNIC.data.network_output_buffer_size" : data_network_buffers,
    "memNIC.debug" : debugNIC,
    "memNIC.debug_level" : debugLev,
    "verbose" : verbose,
    "debug" : debugL2,
    "debug_level" : debugLev
}
###### DDR Directory #######
# Directory controller fronting each DDR memory controller.
ddr_dc_params = {
    "coherence_protocol": coherence_protocol,
    "memNIC.req.network_bw" : ctrl_mesh_link_bw,
    "memNIC.ack.network_bw" : ctrl_mesh_link_bw,
    "memNIC.fwd.network_bw" : ctrl_mesh_link_bw,
    "memNIC.data.network_bw" : data_mesh_link_bw,
    "clock" : str(mesh_clock) + "MHz",
    "entry_cache_size" : 256*1024*1024, #Entry cache size of mem/blocksize
    "mshr_num_entries" : 128,
    "access_latency_cycles" : 2,
    "memNIC.req.network_input_buffer_size" : ctrl_network_buffers,
    "memNIC.req.network_output_buffer_size" : ctrl_network_buffers,
    "memNIC.ack.network_input_buffer_size" : ctrl_network_buffers,
    "memNIC.ack.network_output_buffer_size" : ctrl_network_buffers,
    "memNIC.fwd.network_input_buffer_size" : ctrl_network_buffers,
    "memNIC.fwd.network_output_buffer_size" : ctrl_network_buffers,
    "memNIC.data.network_input_buffer_size" : data_network_buffers,
    "memNIC.data.network_output_buffer_size" : data_network_buffers,
    "verbose" : verbose,
    "debug" : debugDDRDC,
    "debug_level" : debugLev
}
##### TimingDRAM #####
# DDR4-2400
# Timing-accurate DRAM backend; CL/RCD/TRP figures model DDR4-2400.
ddr_mem_timing_params = {
    "verbose" : verbose,
    "backing" : "none",
    "backend" : "memHierarchy.timingDRAM",
    "backend.clock" : "1200MHz",
    "backend.id" : 0,
    "backend.addrMapper" : "memHierarchy.simpleAddrMapper",
    "backend.channel.transaction_Q_size" : 32,
    "backend.channel.numRanks" : 2,
    "backend.channel.rank.numBanks" : 16,
    "backend.channel.rank.bank.CL" : 15,
    "backend.channel.rank.bank.CL_WR" : 12,
    "backend.channel.rank.bank.RCD" : 15,
    "backend.channel.rank.bank.TRP" : 15,
    "backend.channel.rank.bank.dataCycles" : 4,
    "backend.channel.rank.bank.pagePolicy" : "memHierarchy.simplePagePolicy",
    "backend.channel.rank.bank.transactionQ" : "memHierarchy.reorderTransactionQ",
    "backend.channel.rank.bank.pagePolicy.close" : 0,
    "memNIC.req.linkcontrol" : "kingsley.linkcontrol",
    "memNIC.ack.linkcontrol" : "kingsley.linkcontrol",
    "memNIC.fwd.linkcontrol" : "kingsley.linkcontrol",
    "memNIC.data.linkcontrol" : "kingsley.linkcontrol",
    "memNIC.req.network_bw" : ctrl_mesh_link_bw,
    "memNIC.ack.network_bw" : ctrl_mesh_link_bw,
    "memNIC.fwd.network_bw" : ctrl_mesh_link_bw,
    "memNIC.data.network_bw" : data_mesh_link_bw,
    "memNIC.req.network_input_buffer_size" : ctrl_network_buffers,
    "memNIC.ack.network_input_buffer_size" : ctrl_network_buffers,
    "memNIC.fwd.network_input_buffer_size" : ctrl_network_buffers,
    "memNIC.data.network_input_buffer_size" : data_network_buffers,
    "memNIC.req.network_output_buffer_size" : ctrl_network_buffers,
    "memNIC.ack.network_output_buffer_size" : ctrl_network_buffers,
    "memNIC.fwd.network_output_buffer_size" : ctrl_network_buffers,
    "memNIC.data.network_output_buffer_size" : data_network_buffers,
}
# Miranda STREAM Bench params
thread_iters = 1000          # STREAM iterations per thread
cpu_params = {
    "verbose" : 0,
    "clock" : core_clock,
    "printStats" : 1
}
gen_params = {
    "verbose" : 0,
    "n" : thread_iters,
    "operandWidth" : 8,      # bytes per STREAM operand (doubles)
}
class DDRBuilder:
    """Builds the DDR memory controllers, one per build() call.

    The total capacity is split evenly across 4 controllers; address ranges
    are interleaved in 64B blocks (every 4th block maps to the same DDR).
    """

    def __init__(self, capacity):
        # capacity: total DDR capacity in bytes across all 4 controllers.
        self.next_ddr_id = 0
        self.mem_capacity = capacity

    def build(self, nodeID):
        """Create the next DDR controller on mesh node `nodeID`.

        Returns the four (component, port, latency) endpoint tuples for the
        req/ack/fwd/data mesh networks, in that order.
        """
        if not quiet:
            # print() with a single argument is valid on both Py2 and Py3
            # (the original used Py2 print statements).
            print("Creating DDR controller " + str(self.next_ddr_id) + " out of 4 on node " + str(nodeID) + "...")
            print(" - Capacity: " + str(self.mem_capacity // 4) + " per DDR.")
        mem = sst.Component("ddr_" + str(self.next_ddr_id), "memHierarchy.MemController")
        mem.addParams(ddr_mem_timing_params)
        # Floor division keeps the size an integer so the param string is
        # e.g. "1073741824B" rather than "1073741824.0B" under Python 3.
        mem.addParams({
            "backend.mem_size" : str(self.mem_capacity // 4) + "B",
        })
        mem.addParams({
            "memNIC.req.linkcontrol" : "kingsley.linkcontrol",
            "memNIC.ack.linkcontrol" : "kingsley.linkcontrol",
            "memNIC.fwd.linkcontrol" : "kingsley.linkcontrol",
            "memNIC.data.linkcontrol" : "kingsley.linkcontrol",
            "memNIC.req.network_bw" : ctrl_mesh_link_bw,
            "memNIC.ack.network_bw" : ctrl_mesh_link_bw,
            "memNIC.fwd.network_bw" : ctrl_mesh_link_bw,
            "memNIC.data.network_bw" : data_mesh_link_bw,
            # 64B blocks, striped across the 4 DDRs by controller id.
            "memNIC.addr_range_start" : (64 * self.next_ddr_id),
            "memNIC.addr_range_end" : (self.mem_capacity - (64 * self.next_ddr_id)),
            "memNIC.interleave_step" : str(4 * 64) + "B",
            "memNIC.interleave_size" : "64B",
            "memNIC.accept_region" : 0,
        })
        self.next_ddr_id = self.next_ddr_id + 1
        return (mem, "network", mesh_link_latency), (mem, "network_ack", mesh_link_latency), (mem, "network_fwd", mesh_link_latency), (mem, "network_data", mesh_link_latency)
class DDRDCBuilder:
    """Builds the directory controllers (DCs) fronting the DDRs.

    Addresses are interleaved in 64B blocks across 8 DCs, with every 4th
    block mapping to the same DDR (two DCs per DDR).
    """

    def __init__(self, capacity):
        self.next_ddr_dc_id = 0
        self.memCapacity = capacity

    def build(self, nodeID):
        """Create a directory controller for mesh node `nodeID` and return
        its four (component, port, latency) endpoints (req/ack/fwd/data)."""
        # Stripe addresses across each mem & stripe those across each DC for the mem
        # Interleave 64B blocks across 8 DCs (and then map every 4th to the same DDR)
        dcNum = nodeID % 2
        # Fixed node -> DDR mapping for the 3x3 mesh (node 0 hosts no DC).
        if nodeID == 1 or nodeID == 2:
            memId = 0
        elif nodeID == 3 or nodeID == 6:
            memId = 1
        elif nodeID == 4 or nodeID == 7:
            memId = 2
        elif nodeID == 5 or nodeID == 8:
            memId = 3
        else:
            # Original code fell through to a NameError on `memId`; fail
            # loudly with a clear message instead.
            raise ValueError("No DDR mapping for node " + str(nodeID))
        myStart = 0 + (memId * 64) + (dcNum * 64 * 4)
        myEnd = self.memCapacity - 64 * (8 - memId - 4 * dcNum) + 63
        if not quiet:
            # print() form works on both Python 2 and Python 3.
            print("\tCreating ddr dc with start: " + str(myStart) + " end: " + str(myEnd))
        dc = sst.Component("ddr_dc_" + str(self.next_ddr_dc_id), "memHierarchy.DirectoryController")
        dc.addParams(ddr_dc_params)
        dc.addParams({
            "memNIC.req.linkcontrol" : "kingsley.linkcontrol",
            "memNIC.ack.linkcontrol" : "kingsley.linkcontrol",
            "memNIC.fwd.linkcontrol" : "kingsley.linkcontrol",
            "memNIC.data.linkcontrol" : "kingsley.linkcontrol",
            "memNIC.addr_range_start" : myStart,
            "memNIC.addr_range_end" : myEnd,
            "memNIC.interleave_step" : str(8 * 64) + "B",
            "memNIC.interleave_size" : "64B",
            "net_memory_name" : "ddr_" + str(memId),
        })
        self.next_ddr_dc_id = self.next_ddr_dc_id + 1
        return (dc, "network", mesh_link_latency), (dc, "network_ack", mesh_link_latency), (dc, "network_fwd", mesh_link_latency), (dc, "network_data", mesh_link_latency)
class TileBuilder:
def __init__(self):
self.next_tile_id = 0
self.next_core_id = 0
self.next_addr_id = 0
self.base_a = 0
self.base_b = thread_iters * 8 * 36
self.base_c = self.base_b + thread_iters * 8 * 36
def build(self, nodeID):
# L2
tileL2cache = sst.Component("l2cache_" + str(self.next_tile_id), "memHierarchy.Cache")
tileL2cache.addParams(l2_cache_params)
tileL2cache.addParams(l2_prefetch_params)
tileL2cache.addParams({
})
l2bus = sst.Component("l2cachebus_" + str(self.next_tile_id), "memHierarchy.Bus")
l2bus.addParams({
"bus_frequency" : core_clock,
})
l2busLink = sst.Link("l2bus_link_" + str(self.next_tile_id))
l2busLink.connect( (l2bus, "low_network_0", mesh_link_latency),
(tileL2cache, "high_network_0", mesh_link_latency))
l2busLink.setNoCut()
self.next_tile_id = self.next_tile_id + 1
# Left Core L1
tileLeftL1 = sst.Component("l1cache_" + str(self.next_core_id), "memHierarchy.Cache")
tileLeftL1.addParams(l1_cache_params)
if not quiet:
print "Creating core " + str(self.next_core_id) + " on tile: " + str(self.next_tile_id) + "..."
# Left SMT
leftSMT = sst.Component("smt_" + str(self.next_core_id), "memHierarchy.multithreadL1")
leftSMT.addParams({
"clock" : core_clock,
"requests_per_cycle" : 2,
"responses_per_cycle" : 2,
})
# Left Core
mirandaL0 = sst.Component("thread_" + str(self.next_core_id), "miranda.BaseCPU")
mirandaL1 = sst.Component("thread_" + str(self.next_core_id + 18), "miranda.BaseCPU")
mirandaL0.addParams(cpu_params)
mirandaL1.addParams(cpu_params)
genL0 = mirandaL0.setSubComponent("generator", "miranda.STREAMBenchGenerator")
genL1 = mirandaL1.setSubComponent("generator", "miranda.STREAMBenchGenerator")
genL0.addParams(gen_params)
genL1.addParams(gen_params)
genL0.addParams({
"start_a" : self.base_a + self.next_core_id * thread_iters * 8,
"start_b" : self.base_b + self.next_core_id * thread_iters * 8,
"start_c" : self.base_c + self.next_core_id * thread_iters * 8
})
genL1.addParams({
"start_a" : self.base_a + (self.next_core_id + 18) * thread_iters * 8,
"start_b" : self.base_b + (self.next_core_id + 18) * thread_iters * 8,
"start_c" : self.base_c + (self.next_core_id + 18) * thread_iters * 8
})
# Thread 0
leftSMTCPUlink0 = sst.Link("smt_cpu_" + str(self.next_core_id))
leftSMTCPUlink0.connect( (mirandaL0, "cache_link", mesh_link_latency), (leftSMT, "thread0", mesh_link_latency) )
# Thread 1
leftSMTCPUlink1 = sst.Link("smt_cpu_" + str(self.next_core_id + 18))
leftSMTCPUlink1.connect( (mirandaL1, "cache_link", mesh_link_latency), (leftSMT, "thread1", mesh_link_latency) )
# SMT Shim <-> L1
leftSMTL1link = sst.Link("l1cache_smt_" + str(self.next_core_id))
leftSMTL1link.connect( (leftSMT, "cache", mesh_link_latency), (tileLeftL1, "high_network_0", mesh_link_latency) )
leftSMTCPUlink0.setNoCut()
leftSMTCPUlink1.setNoCut()
leftSMTL1link.setNoCut()
leftL1L2link = sst.Link("l1cache_link_" + str(self.next_core_id))
leftL1L2link.connect( (l2bus, "high_network_0", mesh_link_latency),
(tileLeftL1, "low_network_0", mesh_link_latency))
leftL1L2link.setNoCut()
self.next_core_id = self.next_core_id + 1
tileRightL1 = sst.Component("l1cache_" + str(self.next_core_id), "memHierarchy.Cache")
tileRightL1.addParams(l1_cache_params)
if not quiet:
print "Creating core " + str(self.next_core_id) + " on tile: " + str(self.next_tile_id) + "..."
# Right SMT
rightSMT = sst.Component("smt_" + str(self.next_core_id), "memHierarchy.multithreadL1")
rightSMT.addParams({
"clock" : core_clock,
"requests_per_cycle" : 2,
"responses_per_cycle" : 2,
})
# Right Core
mirandaR0 = sst.Component("thread_" + str(self.next_core_id), "miranda.BaseCPU")
mirandaR1 = sst.Component("thread_" + str(self.next_core_id + 18), "miranda.BaseCPU")
mirandaR0.addParams(cpu_params)
mirandaR1.addParams(cpu_params)
genR0 = mirandaR0.setSubComponent("generator", "miranda.STREAMBenchGenerator")
genR1 = mirandaR1.setSubComponent("generator", "miranda.STREAMBenchGenerator")
genR0.addParams(gen_params)
genR1.addParams(gen_params)
genR0.addParams({
"start_a" : self.base_a + self.next_core_id * thread_iters * 8,
"start_b" : self.base_b + self.next_core_id * thread_iters * 8,
"start_c" : self.base_c + self.next_core_id * thread_iters * 8
})
genR1.addParams({
"start_a" : self.base_a + (self.next_core_id + 18) * | |
# <filename>astylo/iolib.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Input & Output library
"""
import sys, logging
logging.disable(sys.maxsize)
import subprocess as SP
import numpy as np
from astropy.io import fits
import h5py as H5
import csv
# Default file-name extensions appended by the read/write helpers below.
# NOTE: the original `global fitsext, h5ext, ascext, csvext` line was
# dropped — a `global` statement at module scope is a no-op.
fitsext = '.fits'
h5ext = '.h5'
ascext = '.txt'
csvext = '.csv'
def fclean(file, *alert):
    '''
    Clean folder/files: recursively force-remove `file`, then print any
    courtesy messages.

    ------ INPUT ------
    file                path to remove (file or directory tree)
    alert               optional message strings printed after removal
    ------ OUTPUT ------
    '''
    # Pass the path as an argv list (no shell) so that metacharacters in
    # `file` cannot be interpreted by a shell — the original
    # `SP.call('rm -rf ' + file, shell=True)` was injectable.
    # NOTE(review): shell glob patterns in `file` are no longer expanded;
    # confirm no caller relied on globbing.
    SP.call(['rm', '-rf', file])
    for text in alert:
        print(text)
def write_fits(file, header, data, wave=None, wmod=0, **hdrl):
    '''
    Write fits file

    Writes `data` (with `header`, plus any extra keyword cards given via
    **hdrl) to the primary HDU of `file` + fitsext, overwriting any
    existing file. If `wave` is given, it is appended as a second HDU in
    either Image ('WAVE-TAB') or BinTable ('WCS-TAB ') form.

    ------ INPUT ------
    file                output FITS filename (extension appended)
    header              header of primary HDU
    data                data in primary HDU
    wave                data in table 1 (ndarray. Default: None)
    wmod                wave table format (0 - Image; 1 - BinTable. Default: 0)
    hdrl                extra header keyword/value pairs written into `header`
    ------ OUTPUT ------
    '''
    # Fold extra keyword cards into the primary header (mutates `header`).
    for key, value in hdrl.items():
        header[key] = value
    primary_hdu = fits.PrimaryHDU(header=header, data=data)
    hdul = fits.HDUList(primary_hdu)
    ## Add table
    if wave is not None:
        ## Convert wave format
        if isinstance(wave, fits.fitsrec.FITS_rec):
            # Already a BinTable record: for image output, flatten the
            # (1, Nw) column back to a 1-D wavelength array.
            if wmod==0:
                wave = wave[0][0][:,0]
        else:
            Nw = len(wave)
            if wmod==1:
                # Build the (1, Nw) 'WAVE-TAB' column expected by the
                # FITS -TAB WCS convention, then keep its FITS_rec data.
                wave = np.array(wave).reshape((Nw,1))
                col = fits.Column(array=[wave], format=str(Nw)+'E',
                    name='WAVE-TAB', unit='um', dim='(1,{})'.format(Nw))
                tab = fits.BinTableHDU.from_columns([col], name='WCS-TAB ')
                wave = tab.data
        ## Create table
        if wmod==0:
            hdu = fits.ImageHDU(data=wave, name='WAVE-TAB')
        elif wmod==1:
            hdu = fits.BinTableHDU(data=wave, name='WCS-TAB ')
        hdul.append(hdu)
    hdul.writeto(file+fitsext, overwrite=True)
def read_fits(file, file_unc=None, wmod=0):
    '''
    Read fits file (auto detect dim)

    ------ INPUT ------
    file                input FITS filename (extension appended)
    file_unc            input uncertainty file
    wmod                output wave mode (Default: 0)
                          0 - 1darray;
                          1 - FITS_rec.
    ------ OUTPUT ------
    ds                  output dataset
      ds.header           header of primary HDU
      ds.data             data in primary HDU
      ds.header_w         header of W-TAB
      ds.wave             data in table 1 (None if does not exist)
      ds.unc              uncertainty array (None if file_unc not given)
    '''
    ## Initialize output object
    # Ad-hoc anonymous namespace object used as the result container.
    ds = type('', (), {})()
    ds.header_w = None
    ds.wave = None
    ds.unc = None
    ## Read header & data
    with fits.open(file+fitsext) as hdul:
        # NOTE(review): ds.HDUL keeps a reference to an HDUList whose file
        # is closed once this block exits — lazily-loaded data may not be
        # accessible through it afterwards; verify callers only use the
        # eagerly-read attributes below.
        ds.HDUL = hdul
        hdr = hdul[0].header
        ds.data = hdul[0].data
        ds.header = hdr
        ## Read wavelength
        # A second HDU, when present, is interpreted as the wavelength table.
        if len(hdul)==2:
            ds.header_w = hdul[1].header
            wave = hdul[1].data
            if isinstance(hdul[1], fits.BinTableHDU):
                if wmod==0:
                    wave = wave[0][0][:,0] ## Convert FITS_rec to 1darray
            elif isinstance(hdul[1], fits.ImageHDU):
                Nw = len(wave)
                if wmod==1:
                    # Wrap the 1-D image wavelengths into a (1, Nw)
                    # 'WAVE-TAB' BinTable column (FITS -TAB convention).
                    wave = np.array(wave).reshape((Nw,1))
                    col = fits.Column(array=[wave], format=str(Nw)+'E',
                        name='WAVE-TAB', unit='um', dim='(1,{})'.format(Nw))
                    tab = fits.BinTableHDU.from_columns([col], name='WCS-TAB ')
                    wave = tab.data
            ds.wave = wave
    if file_unc is not None:
        ## Read uncertainty data
        with fits.open(file_unc+fitsext) as hdul:
            ds.unc = hdul[0].data
    return ds
def write_hdf5(file, name, data, group='/',
               ind1=None, ind2=None, ind3=None, ind4=None,
               ind5=None, ind6=None, ind7=None,
               append=False, verbose=False):
    '''
    Write dataset into a h5 file (a single name/data_array per time, dim < 7)
    Inspired by SwING inout library

    ------ INPUT ------
    file        output h5 filename
    name        name of the dataset (len <= 80)
    data        dataset (dim < 7)
    group       name of the group (Default: '/')
    indx        array index ([idimx_inf,idimx_sup] or idimx if data is scalar,
                  Default: None)
    append      True: if not overwrite (Default: False)
    verbose     courtesy notification (Default: False)
    ------ OUTPUT ------
    '''
    ## Preliminaries
    ##---------------
    hf = H5.File(file+h5ext, 'a' if append else 'w')
    h5 = hf.require_group(group) # Default: group = '/'
    ## Check if it is a subarray write
    ## BUGFIX: ind7 was previously left out of this test, so a pure-ind7
    ## sub-write silently replaced the whole dataset.
    inds = [ind1, ind2, ind3, ind4, ind5, ind6, ind7]
    subarr = any(ind is not None for ind in inds)
    ## Convert all data to array format
    darr = np.array(data)
    sharr = darr.shape
    if darr.dtype.kind == 'U':
        ## create_dataset does not support lists of UTF-8 yet
        ## "these strings are supposed to store only ASCII-encoded text"
        ## See http://docs.h5py.org/en/stable/strings.html
        asciiList = [n.encode('ascii','ignore') for n in darr.flatten()]
        darr = np.array(asciiList).reshape(sharr)
    ## Write dataset (dset)
    ##----------------------
    ## Case 0: check if the dataset already exists
    try:
        dset = h5[name]
        hasdset = True
    except KeyError:  # narrowed from a bare except
        hasdset = False
    if hasdset:
        createdset = False
        ## No indx input: replace the whole existing dataset
        if not subarr:
            del h5[name]
            createdset = True
    else:
        createdset = True
        if subarr:
            ## BUGFIX: close the file handle; the original called h5.close()
            ## on the Group, which has no close() and raised AttributeError.
            hf.close()
            raise ValueError('Dataset does not exist')
    ## Case 1: no dataset OR no subarr
    if createdset:
        dset = h5.create_dataset(name, data=darr)
    ## Case 2: sub-array filling
    if subarr:
        ## Dimension of the data to write
        Ndim = len(sharr)
        ## Normalize each index to an [idimx_inf, idimx_sup] pair
        for d in range(Ndim):
            if inds[d] is None:
                inds[d] = [0, sharr[d]]
            elif np.size(inds[d]) == 1:
                inds[d] = [inds[d], inds[d]+1]
        if Ndim == 0:
            ## Scalar data: the scalar indices give the target position.
            ## BUGFIX: was "dset = arr[0]" -- an undefined name, and a plain
            ## rebinding would not have written anything anyway.
            pos = tuple(np.ravel(ind)[0] for ind in inds if ind is not None)
            dset[pos] = darr
        else:
            ## BUGFIX: the original reshape extents were (inf-sup), i.e.
            ## negative sizes, for dimensions >= 3; use (sup-inf) throughout.
            slices = tuple(slice(lo, hi) for lo, hi in inds[:Ndim])
            extents = tuple(hi - lo for lo, hi in inds[:Ndim])
            dset[slices] = np.reshape(darr, extents)
    hf.flush()
    hf.close()
    ## Courtesy notification
    ##-----------------------
    if verbose:
        if append:
            if subarr:
                print('[write_hdf5] Dataset {}/{} in the file:'.format(group,name))
                print(' ', file+h5ext, ' has been modified.')
            else:
                print('[write_hdf5] Dataset {}/{} has been added in the file:'.format(group,name))
                print(' ', file+h5ext, '.')
        else:
            print('[write_hdf5] Dataset {}/{} has been written in the new file:'.format(group,name))
            print(' ', file+h5ext, '.')
def read_hdf5(file, name, group='/',
ind1=None, ind2=None, ind3=None, ind4=None,
ind5=None, ind6=None, ind7=None):
'''
Read h5 file (a single name/data_array per time, dim < 7)
Inspired by SwING inout library
------ INPUT ------
file input h5 filename
name name of the dataset (len <= 80)
group name of the group (Default: '/')
indx array index ([idimx_inf,idimx_sup] or idimx if data is scalar, Default: None)
------ OUTPUT ------
dset dataset
'''
## Preliminaries
##---------------
hf = H5.File(file+h5ext, 'r')
h5 = hf.require_group(group) # Default: group = '/'
Ndim = h5[name].ndim
shapIN = h5[name].shape
## Read dataset (dset)
##----------------------
## Indices indx[idimx_inf,idimx_sup]
if (Ndim > 0):
if ind1 is None:
ind1 = [0,shapIN[0]]
elif (np.size(ind1) == 1):
ind1 = [ind1, ind1+1]
if (Ndim > 1):
if ind2 is None:
ind2 = [0,shapIN[1]]
elif (np.size(ind2) == 1):
ind2 = [ind2, ind2+1]
if (Ndim > 2):
if ind3 is None:
ind3 = [0,shapIN[2]]
elif (np.size(ind3) == 1):
ind3 = [ind3, ind3+1]
if (Ndim > 3):
if ind4 is None:
ind4 = [0,shapIN[3]]
elif (np.size(ind4) == 1):
ind4 = [ind4, ind4+1]
if (Ndim > 4):
if ind5 is None:
ind5 = [0,shapIN[4]]
elif (np.size(ind5) == 1):
ind5 = [ind5, ind5+1]
if (Ndim > 5):
if ind6 is None:
ind6 = [0,shapIN[5]]
elif (np.size(ind6) == 1):
ind6 = [ind6, ind6+1]
if (Ndim > 6):
if ind7 is None:
ind7 = [0,shapIN[6]]
elif (np.size(ind7) == 1):
ind7 = [ind7, ind7+1]
# Read the array or the sub-array if some indx are set
if Ndim==0:
dset = h5[name]
if (str(dset.dtype).startswith('|S')):
dset = np.array(dset, dtype='unicode')
elif Ndim==1:
dset = h5[name][ind1[0]:ind1[1]]
if (str(dset.dtype).startswith('|S')):
dset = np.array(dset, dtype='unicode')
elif Ndim==2:
dset = h5[name][ind1[0]:ind1[1],ind2[0]:ind2[1]]
if (str(dset.dtype).startswith('|S')):
dset = np.array(dset, dtype='unicode')
elif Ndim==3:
dset = h5[name][ind1[0]:ind1[1],ind2[0]:ind2[1],ind3[0]:ind3[1]]
if (str(dset.dtype).startswith('|S')):
dset = np.array(dset, dtype='unicode')
elif Ndim==4:
dset = h5[name][ind1[0]:ind1[1],ind2[0]:ind2[1],ind3[0]:ind3[1], \
ind4[0]:ind4[1]]
if (str(dset.dtype).startswith('|S')):
dset = np.array(dset, dtype='unicode')
elif Ndim==5:
dset = h5[name][ind1[0]:ind1[1],ind2[0]:ind2[1],ind3[0]:ind3[1], \
ind4[0]:ind4[1],ind5[0]:ind5[1]]
if (str(dset.dtype).startswith('|S')):
dset = np.array(dset, dtype='unicode')
elif Ndim==6:
dset = h5[name][ind1[0]:ind1[1],ind2[0]:ind2[1],ind3[0]:ind3[1], \
ind4[0]:ind4[1],ind5[0]:ind5[1],ind6[0]:ind6[1]]
if (str(dset.dtype).startswith('|S')):
dset = np.array(dset, dtype='unicode')
elif Ndim==7:
dset = h5[name][ind1[0]:ind1[1],ind2[0]:ind2[1],ind3[0]:ind3[1], | |
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
get_names.metadata = {'url': '/deviceupdate/{instanceId}/v2/updates/providers/{provider}/names'} # type: ignore
    def get_versions(
        self,
        provider, # type: str
        name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.PageableListOfStrings"]
        """Get a list of all update versions that match the specified provider and name.

        :param provider: Update provider.
        :type provider: str
        :param name: Update name.
        :type name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PageableListOfStrings or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.iot.deviceupdate.models.PageableListOfStrings]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PageableListOfStrings"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        # Build the GET request for the first page (from the operation URL
        # template) or a continuation page (from the service's next_link).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.get_versions.metadata['url'] # type: ignore
                path_format_arguments = {
                    'accountEndpoint': self._serialize.url("self._config.account_endpoint", self._config.account_endpoint, 'str', skip_quote=True),
                    'instanceId': self._serialize.url("self._config.instance_id", self._config.instance_id, 'str', skip_quote=True),
                    'provider': self._serialize.url("provider", provider, 'str'),
                    'name': self._serialize.url("name", name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link may itself contain path placeholders, so the same
                # path arguments are re-applied (generated-code convention).
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                path_format_arguments = {
                    'accountEndpoint': self._serialize.url("self._config.account_endpoint", self._config.account_endpoint, 'str', skip_quote=True),
                    'instanceId': self._serialize.url("self._config.instance_id", self._config.instance_id, 'str', skip_quote=True),
                    'provider': self._serialize.url("provider", provider, 'str'),
                    'name': self._serialize.url("name", name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserialize one page into (continuation token, iterator of items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('PageableListOfStrings', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetch a single page; any status other than 200 raises.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    get_versions.metadata = {'url': '/deviceupdate/{instanceId}/v2/updates/providers/{provider}/names/{name}/versions'} # type: ignore
    def get_files(
        self,
        provider, # type: str
        name, # type: str
        version, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.PageableListOfStrings"]
        """Get a list of all update file identifiers for the specified version.

        :param provider: Update provider.
        :type provider: str
        :param name: Update name.
        :type name: str
        :param version: Update version.
        :type version: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PageableListOfStrings or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.iot.deviceupdate.models.PageableListOfStrings]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PageableListOfStrings"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        # Build the GET request for the first page (from the operation URL
        # template) or a continuation page (from the service's next_link).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.get_files.metadata['url'] # type: ignore
                path_format_arguments = {
                    'accountEndpoint': self._serialize.url("self._config.account_endpoint", self._config.account_endpoint, 'str', skip_quote=True),
                    'instanceId': self._serialize.url("self._config.instance_id", self._config.instance_id, 'str', skip_quote=True),
                    'provider': self._serialize.url("provider", provider, 'str'),
                    'name': self._serialize.url("name", name, 'str'),
                    'version': self._serialize.url("version", version, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link may itself contain path placeholders, so the same
                # path arguments are re-applied (generated-code convention).
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                path_format_arguments = {
                    'accountEndpoint': self._serialize.url("self._config.account_endpoint", self._config.account_endpoint, 'str', skip_quote=True),
                    'instanceId': self._serialize.url("self._config.instance_id", self._config.instance_id, 'str', skip_quote=True),
                    'provider': self._serialize.url("provider", provider, 'str'),
                    'name': self._serialize.url("name", name, 'str'),
                    'version': self._serialize.url("version", version, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserialize one page into (continuation token, iterator of items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('PageableListOfStrings', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetch a single page; any status other than 200 raises.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    get_files.metadata = {'url': '/deviceupdate/{instanceId}/v2/updates/providers/{provider}/names/{name}/versions/{version}/files'} # type: ignore
def get_file(
self,
provider, # type: str
name, # type: str
version, # type: str
file_id, # type: str
access_condition=None, # type: Optional["_models.AccessCondition"]
**kwargs # type: Any
):
# type: (...) -> Optional["_models.File"]
"""Get a specific update file from the version.
:param provider: Update provider.
:type provider: str
:param name: Update name.
:type name: str
:param version: Update version.
:type version: str
:param file_id: File identifier.
:type file_id: str
:param access_condition: Parameter group.
:type access_condition: ~azure.iot.deviceupdate.models.AccessCondition
:keyword callable cls: A custom type or function that will be passed the direct response
:return: File, or the result of cls(response)
:rtype: ~azure.iot.deviceupdate.models.File or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.File"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_if_none_match = None
if access_condition is not None:
_if_none_match = access_condition.if_none_match
accept = "application/json"
# Construct URL
url = self.get_file.metadata['url'] # type: ignore
path_format_arguments = {
'accountEndpoint': self._serialize.url("self._config.account_endpoint", self._config.account_endpoint, 'str', skip_quote=True),
'instanceId': self._serialize.url("self._config.instance_id", self._config.instance_id, 'str', skip_quote=True),
'provider': self._serialize.url("provider", provider, 'str'),
'name': self._serialize.url("name", name, 'str'),
'version': self._serialize.url("version", version, 'str'),
'fileId': self._serialize.url("file_id", file_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _if_none_match is not None:
header_parameters['If-None-Match'] = self._serialize.header("if_none_match", _if_none_match, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 304]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('File', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_file.metadata = {'url': '/deviceupdate/{instanceId}/v2/updates/providers/{provider}/names/{name}/versions/{version}/files/{fileId}'} # type: ignore
    def get_operations(
        self,
        filter=None, # type: Optional[str]
        top=None, # type: Optional[int]
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.PageableListOfOperations"]
        """Get a list of all import update operations. Completed operations are kept for 7 days before
        auto-deleted. Delete operations are not returned by this API version.

        :param filter: Restricts the set of operations returned. Only one specific filter is supported:
         "status eq 'NotStarted' or status eq 'Running'".
        :type filter: str
        :param top: Specifies a non-negative integer n that limits the number of items returned from a
         collection. The service returns the number of available items up to but not greater than the
         specified value n.
        :type top: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PageableListOfOperations or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.iot.deviceupdate.models.PageableListOfOperations]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PageableListOfOperations"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        accept = "application/json"
        # Build the GET request for the first page (from the operation URL
        # template) or a continuation page (from the service's next_link).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.get_operations.metadata['url'] # type: ignore
                path_format_arguments = {
                    'accountEndpoint': self._serialize.url("self._config.account_endpoint", self._config.account_endpoint, 'str', skip_quote=True),
                    'instanceId': self._serialize.url("self._config.instance_id", self._config.instance_id, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters ($filter/$top only apply to the first
                # page; the continuation URL already encodes them)
                query_parameters = {} # type: Dict[str, Any]
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                if top is not None:
                    query_parameters['$top'] = self._serialize.query("top", top, 'int')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                path_format_arguments = {
                    'accountEndpoint': self._serialize.url("self._config.account_endpoint", self._config.account_endpoint, 'str', skip_quote=True),
                    'instanceId': self._serialize.url("self._config.instance_id", self._config.instance_id, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserialize one page into (continuation token, iterator of items).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('PageableListOfOperations', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetch a single page; any status other than 200 raises.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    get_operations.metadata = {'url': '/deviceupdate/{instanceId}/v2/updates/operations'} # type: ignore
def get_operation(
self,
operation_id, # type: str
access_condition=None, # type: Optional["_models.AccessCondition"]
**kwargs # type: Any
):
# type: (...) -> Optional["_models.Operation"]
"""Retrieve operation status.
:param operation_id: Operation identifier.
:type operation_id: str
:param access_condition: Parameter group.
:type access_condition: ~azure.iot.deviceupdate.models.AccessCondition
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Operation, or the result of cls(response)
:rtype: ~azure.iot.deviceupdate.models.Operation or None
:raises: | |
# Copyright (c) 2009, Motorola, Inc
#
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Motorola nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from .Resampler import ResamplerRR, ResamplerRC, ResamplerCR, ResamplerCC
def enumdims(ary, dims=(0,), complement=False):
    """Enumerate over the given array dimensions,
    yielding the index tuple and resulting array in sequence,
    using ":" for each of the complementary dimensions.

    For example, if x is an array of shape (2,3,4),
      enumdims(x, [0])     yields 2 pairs, arrays of shape (3,4)
      enumdims(x, [1])     yields 3 pairs, arrays of shape (2,4)
      enumdims(x, [2])     yields 4 pairs, arrays of shape (2,3)
      enumdims(x, [0,1])   yields 6 pairs, arrays of shape (4,)
      enumdims(x, [0,2])   yields 8 pairs, arrays of shape (3,)
      enumdims(x, [1,2])   yields 12 pairs, arrays of shape (2,)
      enumdims(x, [0,1,2]) yields 24 pairs, arrays of shape () (i.e., 0-D)
      enumdims(x, [])      yields 1 pair with an array of shape (2,3,4)

    Negative entries in dims count from the last dimension, as usual.
    If complement is True, iterate over the dimensions NOT listed in dims.
    (The docstring previously referred to these examples as "iterdims";
    they describe this function, enumdims.)
    """
    ndim = len(ary.shape)
    # Normalize negative dimension indices (e.g. -1 -> ndim-1).
    dims = tuple(d if d >= 0 else ndim + d for d in dims)
    cdims = tuple(i for i in range(ndim) if i not in dims)
    if complement:
        dims, cdims = cdims, dims
    # Move the iterated dimensions to the front, then walk their index space;
    # x[idx] leaves the complementary dimensions intact (":" semantics).
    x = ary.transpose(dims + cdims)
    for idx in np.ndindex(*x.shape[:len(dims)]):
        yield idx, x[idx]
def iterdims(ary, dims=(0,), complement=False):
    """Like enumdims, but yield only the partial arrays and drop the
    accompanying index tuples.

    for xi in iterdims(x):        <-- equivalent to "for xi in x:"
    for xi in iterdims(x,[-1]):   <-- yields x[...,0], x[...,1], ... etc
    for xi in iterdims(x,[0,-1]): <-- yields x[0,...,0], x[0,...,1], ... etc
    for xi in iterdims(x,[0,1]):  <-- yields x[0,0,...], x[0,1,...], ... etc
    """
    for _, subarray in enumdims(ary, dims, complement):
        yield subarray
def full_index(x):
    """Return a list of index arrays that jointly address every element of x.

    Entry i is arange(x.shape[i]) reshaped with trailing singleton axes so
    that the entries broadcast against each other to x's full shape.
    """
    ndim = len(x.shape)
    return [np.arange(n).reshape((n,) + (1,) * (ndim - i - 1))
            for i, n in enumerate(x.shape)]
def dim2back(x, xdim=-1):
    """
    Transpose ndarray so that a given dimension is moved to the back.

    Parameters
    ----------
    x : ndarray
        Input array.
    xdim : int, optional
        Dimension to put at back "x" input array. (default=-1)

    Returns
    -------
    y : ndarray
        view of x transposed
    """
    nd = len(x.shape)
    if xdim < 0:
        xdim += nd
    # Remove xdim from the axis order and re-append it at the end.
    order = list(range(nd))
    order.append(order.pop(xdim))
    return x.transpose(order)
def back2dim(x, xdim=-1):
    """
    Transpose ndarray so that the back dimension is moved to a given position.

    Parameters
    ----------
    x : ndarray
        Input array.
    xdim : int, optional
        Dimension at which to put the back "x" input array dimension.
        (default=-1)

    Returns
    -------
    y : ndarray
        view of x transposed
    """
    nd = len(x.shape)
    if xdim < 0:
        xdim += nd
    # Take the axis order without the last axis, then insert it at xdim.
    order = list(range(nd - 1))
    order.insert(xdim, nd - 1)
    return x.transpose(order)
# Index into Resampler object type switchyard is
# (signal type complex, coefficient type complex) booleans,
# e.g. a complex signal filtered with real coefficients -> ResamplerCR.
_SWITCH_YARD = {
    (False, False): ResamplerRR,
    (False, True): ResamplerRC,
    (True, False): ResamplerCR,
    (True, True): ResamplerCC
}
def klass_lookup(signal=1., coefficients=1.):
    """Return the Resampler class matching the complexity of the inputs.

    The defaults (real scalars) select the real-signal/real-coefficient
    resampler.
    """
    key = (np.iscomplexobj(signal), np.iscomplexobj(coefficients))
    return _SWITCH_YARD[key]
class ResamplerBank(object):
    """
    A bank of Resampler objects.

    Holds one stateful Resampler per broadcast "channel" so that an ND array
    of signals can be resampled against an ND array of filters.
    """
    def __init__(self, x, h, uprate=1, downrate=1, xdim=-1, hdim=-1):
        """
        Construct the ResamplerBank object.

        Parameters
        ----------
        x : array-like
            Input signal array. May be multi-dimensional (ND). The signals
            will be operated on along the "xdim" dimension of x.
            This is needed to determine how many Resamplers need to be created,
            since each one needs to retain state.
        h : array-like
            FIR (finite-impulse response) filter coefficients array. May be ND.
            The filters are along the "hdim" dimension of h.
        uprate : int, optional
            Upsampling rate. (default=1)
        downrate : int, optional
            Downsampling rate. (default=1)
        xdim : int, optional
            Dimension for "x" input signal array. (default=-1)
        hdim : int, optional
            Dimension for "h" coefficient array. (default=-1)
        """
        x = np.atleast_1d(x)
        h = np.atleast_1d(h)
        # Pick Resampler{RR,RC,CR,CC} from the input complexities.
        klass = klass_lookup(x, h)
        # Move the sample/filter dimensions to the back for broadcasting.
        x = dim2back(x, xdim)
        h = dim2back(h, hdim)
        # Broadcast a length-1 sample slice of x against h to obtain the
        # common "channel" shape without materializing the full signal.
        # NOTE(review): x[xi] indexes with a *list* of arrays (legacy
        # advanced indexing) -- confirm against current numpy versions.
        xi = full_index(x)
        xi[-1] = xi[-1][0:1]
        # xx is ignored
        xx, hh = np.broadcast_arrays(x[xi], h)
        self.hh = hh
        # One stateful Resampler per leading (non-sample) index.
        bank = np.zeros(self.hh.shape[:-1], dtype=object)
        for idx, hi in enumdims(self.hh, (-1,), complement=True):
            bank[idx] = klass(uprate, downrate, hi)
        self.bank = bank
        # Representative resampler, used for output-size queries in apply().
        self.r0 = self.bank.flat[0]
        # ceil(filter_length / uprate): taps per polyphase branch.
        self.coefs_per_phase = (h.shape[-1] + uprate - 1) // uprate
        self.xdim = xdim
        if np.iscomplexobj(x) or np.iscomplexobj(h):
            self.output_type = complex
        else:
            self.output_type = float
    def apply(self, x, all_samples=False):
        """
        Upsample, FIR filter, and downsample a signal or array of signals using
        the bank of Resampler objects.

        Parameters
        ----------
        x : array-like
            Input signal array. May be multi-dimensional (ND). The signals
            will be operated on along the "xdim" dimension of x.
        all_samples : bool, optional
            If True, feeds in zeros after the input signal to "drain" the
            resampler and get all the non-zero samples. (default=False)

        Returns
        -------
        y : float ndarray
        """
        x = np.atleast_1d(x)
        x = dim2back(x, self.xdim)
        # htemp is ignored
        xx, htemp = np.broadcast_arrays(x, self.hh[..., 0:1])
        in_count = xx.shape[-1]
        if all_samples:
            # Extra zero input flushes the remaining filter state.
            in_count += self.coefs_per_phase-1
            z = np.zeros((self.coefs_per_phase-1,))
        needed_out_count = self.r0.neededOutCount(in_count)
        y = np.zeros(xx.shape[:-1] + (needed_out_count,), \
                     dtype=self.output_type)
        for idx, xi in enumdims(xx, (-1,), complement=True):
            out_count = self.bank[idx].apply(xi, y[idx])
            if all_samples:
                # Drain: append the tail samples after the main output.
                self.bank[idx].apply(z, y[idx][out_count:])
        return back2dim(y, self.xdim)
def upfirdn(x, h, uprate=1, downrate=1, xdim=-1, hdim=-1, all_samples=True):
"""
Upsample, FIR filter, and downsample a signal or array of signals.
Parameters
----------
x : array-like
Input signal array. May be multi-dimensional (ND). The signals
will be operated on along the "xdim" dimension of x.
h : array-like
FIR (finite-impulse response) filter coefficients array. May be ND.
The filters are along the "hdim" dimension of h.
uprate : int, optional
Upsampling rate. (default=1)
downrate : int, optional
Downsampling rate. (default=1)
xdim : int, optional
Dimension for "x" input signal array. (default=-1)
hdim : int, optional
Dimension for "h" coefficient array. (default=-1)
all_samples : bool, optional
If True, feeds in zeros after the input signal to "drain" the resampler
and get all the non-zero samples. (default=True)
Returns
-------
y : float ndarray
The output signal array. The results of each upfirdn operation are
along the "xdim" dimension; the array is discontinuous if xdim is not
the last dimension.
Notes
-----
The standard rules of broadcasting apply to the input ND arrays x and h,
for those dimensions other than the "sample" dimension specified by
xdim and hdim. upfirdn operates along a single dimension, and
supports multiple such operations for all the other dimensions using
broadcasting; this allows you to, for example, operate on multiple signal
columns with a single filter, or apply multiple filters to a single signal.
The uprate and downrate however are scalar and apply to ALL operations.
In the case of ND, the most efficient choice of xdim is -1, that is, the
last dimension (assuming C-style input x); otherwise each signal is copied
prior to operating.
Examples
--------
>>> upfirdn([1,1,1], [1,1,1]) # FIR filter
array([ 1., 2., | |
<reponame>apaszke/jax<filename>jax/experimental/general_map.py<gh_stars>1-10
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import threading
import contextlib
from collections import namedtuple
from typing import Callable, Iterable, List, Tuple, Optional, Dict, Any
from warnings import warn
from functools import wraps, partial
import jax
from .. import numpy as jnp
from .. import core
from .. import linear_util as lu
from ..api import _mapped_axis_size, _check_callable, _check_arg
from ..tree_util import tree_flatten, tree_unflatten
from ..api_util import flatten_fun
from ..interpreters import partial_eval as pe
from ..interpreters import batching
from ..util import safe_map, safe_zip, curry
map = safe_map  # deliberately shadow builtins with length-checked variants
zip = safe_zip  # safe_zip raises if its arguments have different lengths
# Multi-dimensional generalized map
# TODO: Use a more lax type annotation (we need == and hash)
# Aliases used for documentation purposes only (both are plain str).
AxisName = str
ResourceAxisName = str
# TODO: At least support sequential mapping
class ResourceEnv(threading.local):
    """Thread-local store of the active resource environment.

    ``axes`` maps a resource axis name to its size; being thread-local,
    each thread sees an independent environment.
    """
    def __init__(self):
        self.axes : Dict[ResourceAxisName, int] = {}
# Module-level singleton consulted by `resources` and `xmap`.
thread_resource_env = ResourceEnv()
@contextlib.contextmanager
def resources(**axes):
    """Context manager that installs ``axes`` as this thread's resource
    environment, restoring the previous environment on exit -- even when the
    managed block raises.
    """
    env = thread_resource_env
    saved = env.axes
    env.axes = axes
    try:
        yield
    finally:
        env.axes = saved
# This is really a Dict[AxisName, int], but we don't define a
# pytree instance for it, so that it is treated as a leaf.
class AxisNamePos(dict):
    """Mapping from logical axis name to positional array dimension."""
    pass
# Short alias for use in in_axes/out_axes specifications.
A = AxisNamePos
# TODO: Some syntactic sugar to make the API more usable in a single-axis case?
# TODO: Are the resource axes scoped lexically or dynamically? Dynamically for now!
def xmap(fun: Callable,
         in_axes, # PyTree[AxisNamePos]
         out_axes, # PyTree[AxisNamePos],
         schedule: Iterable[Tuple[AxisName, ResourceAxisName]]):
  """Map ``fun`` over multiple named axes, scheduling each onto a resource.

  Args:
    fun: the function to map. It is traced to a jaxpr whose inputs have the
      mapped dims stripped.
    in_axes: a pytree of AxisNamePos dicts (axis name -> positional dim)
      naming the dims of each flat input that should be mapped.
    out_axes: a pytree of AxisNamePos dicts giving the dim at which each
      mapped axis reappears in every flat output.
    schedule: an iterable of (axis name, resource name) pairs. A resource
      name is either a key of the thread's resource environment (see
      ``resources``) or the special name 'vectorize'.

  Returns:
    A function with the same calling convention as ``fun`` (but with the
    mapped dims present on its arguments) that evaluates it according to
    the schedule.
  """
  warn("xmap is an experimental feature and probably has bugs!")
  _check_callable(fun)
  def fun_mapped(*args, **kwargs):
    args_flat, in_tree = tree_flatten((args, kwargs))
    for arg in args_flat: _check_arg(arg)
    # TODO: Check that:
    #   - every scheduled axis name appears in at least one input
    #   - every used resource axis name appears in the resource env
    #   - every axis name is scheduled to a single resource axis only once
    #   - every out axis has a distinct index
    #   - two axes mapped to the same resource never coincide (even inside f)
    in_axes_flat, in_axes_tree = tree_flatten(in_axes)
    # TODO: Verify that in_axes are equal, or better expand their prefix
    # assert in_axes_tree == in_tree
    out_axes_flat, out_axes_tree = tree_flatten(out_axes)
    # TODO: Verify that out_axes are equal, or better expand their prefix
    # assert out_axes_tree == in_tree
    # Group the scheduled axis names by the resource they are assigned to.
    resource_to_axis: Dict[ResourceAxisName, List[AxisName]] = dict()
    for (axis, resource) in schedule:
      if resource not in resource_to_axis:
        resource_to_axis[resource] = []
      resource_to_axis[resource].append(axis)
    # TODO: The order of maps should be derived from the schedule, not from the
    #       resource env. This doesn't really matter for as long as we only support
    #       vmap, but will be important (e.g. for tiling).
    #       We should be able to do that by building a graph of dependencies between
    #       resources based on the order in which they appear within each axis.
    #       If it has cycles then we cannot realize it. Otherwise, if the DAG doesn't
    #       uniquely identify a linear order, we should use the order of entries in
    #       the schedule to break ties.
    # Priority (nesting order) and size of every resource; the synthetic
    # 'vectorize' resource comes last and has no fixed size.
    resource_map = {resource: (pri, size)
                    for pri, (resource, size) in enumerate(thread_resource_env.axes.items())}
    resource_map['vectorize'] = (len(resource_map), None)
    map_sequence = sorted(resource_to_axis.items(),
                          key=lambda item: resource_map[item[0]][0])
    # Compute the fresh names each logical axis is known by inside the jaxpr:
    # one per scheduled resource ('v_<axis>' for vectorize, 'r_<resource>').
    axis_subst = {}
    for axis, resource in schedule:
      if axis not in axis_subst:
        axis_subst[axis] = []
      if resource == 'vectorize':
        resource = f'v_{axis}'
      else:
        resource = f'r_{resource}'
      axis_subst[axis].append(resource)
    axis_subst = {axis: tuple(resources) for axis, resources in axis_subst.items()}
    # Infer the named axis sizes from the inputs and trace fun to a jaxpr in
    # which the mapped dims have been removed from the input avals.
    axis_sizes = _get_axis_sizes(args_flat, in_axes_flat)
    jaxpr, out_tree = _trace_mapped_jaxpr(fun, args_flat, in_axes_flat, axis_sizes, in_tree)
    jaxpr = jaxpr.map_jaxpr(partial(subst_axis_names, axis_subst=axis_subst))
    f = lu.wrap_init(core.jaxpr_as_fun(jaxpr))
    f = hide_mapped_axes(f, in_axes_flat, out_axes_flat)
    # Wrap f in one vtile per (resource axis, logical axes) pair; iterating the
    # sequence in reverse means the first wrap applied becomes the innermost map.
    for resource, resource_axes in map_sequence[::-1]:
      # TODO: Support sequential
      # XXX: Even though multiple axes might be mapped to the 'vectorized'
      #      resource, we cannot vectorize them jointly, because they
      #      might require different axis sizes.
      if resource == 'vectorize':
        maps = [(f'v_{name}', [name]) for i, name in enumerate(resource_axes)]
      else:
        maps = [(f'r_{resource}', resource_axes)]
      for raxis_name, axes in maps:
        map_in_axes = map(lambda spec: lookup_exactly_one_of(spec, axes), in_axes_flat)
        map_out_axes = map(lambda spec: lookup_exactly_one_of(spec, axes), out_axes_flat)
        map_size = resource_map[resource][1]
        f = vtile(f, map_in_axes, map_out_axes, tile_size=map_size, axis_name=raxis_name)
    flat_out = f.call_wrapped(*args_flat)
    return tree_unflatten(out_tree, flat_out)
  return fun_mapped
def _delete_aval_axes(aval, axes: AxisNamePos):
  """Return a ShapedArray like ``aval`` with the dims named in ``axes`` removed.

  Positions are deleted from highest to lowest so that the remaining indices
  stay valid while we delete.
  """
  assert isinstance(aval, core.ShapedArray)
  dims = list(aval.shape)
  for position in sorted(axes.values(), reverse=True):
    del dims[position]
  return core.ShapedArray(tuple(dims), aval.dtype)
def _with_axes(axes: Iterable[Tuple[AxisName, int]], f):
  """Call ``f()`` with every (name, size) pair pushed onto the core axis env."""
  wrapped = f
  for axis_name, axis_size in axes:
    wrapped = core.extend_axis_env(axis_name, axis_size, None)(wrapped)
  return wrapped()
def _trace_mapped_jaxpr(fun,
                        args_flat,
                        in_axes_flat: List[AxisNamePos],
                        axis_sizes: Dict[AxisName, int],
                        in_tree):
  """Trace ``fun`` to a closed jaxpr whose inputs have the mapped dims removed.

  Args:
    fun: the user function (unflattened calling convention).
    args_flat: flat list of concrete arguments.
    in_axes_flat: one AxisNamePos dict per flat argument.
    axis_sizes: size of every named axis (see ``_get_axis_sizes``).
    in_tree: pytree structure of ``(args, kwargs)``.

  Returns:
    A ``(core.ClosedJaxpr, out_tree)`` pair, where ``out_tree`` is the
    output pytree definition.
  """
  fun_flat, out_tree = flatten_fun(lu.wrap_init(fun), in_tree)
  avals_flat = [core.raise_to_shaped(core.get_aval(arg)) for arg in args_flat]
  # Abstract values with the named (mapped) dims deleted: the traced function
  # sees the per-element view of every argument.
  mapped_pvals = [pe.PartialVal.unknown(_delete_aval_axes(aval, in_axes))
                  for aval, in_axes in zip(avals_flat, in_axes_flat)]
  # Trace inside an axis env that records every named axis and its size.
  jaxpr, _, consts = _with_axes(axis_sizes.items(),
                                lambda: pe.trace_to_jaxpr(fun_flat, mapped_pvals))
  return core.ClosedJaxpr(jaxpr, consts), out_tree()
def _get_axis_sizes(args_flat: Iterable[Any], in_axes_flat: Iterable[AxisNamePos]):
  """Infer the size of every named axis from the arguments it is mapped over.

  Asserts that all occurrences of the same name refer to dims of equal size.
  """
  sizes: Dict[AxisName, int] = {}
  for arg, arg_axes in zip(args_flat, in_axes_flat):
    for axis_name, dim in arg_axes.items():
      dim_size = arg.shape[dim]
      if axis_name not in sizes:
        sizes[axis_name] = dim_size
      else:
        assert sizes[axis_name] == dim_size
  return sizes
def lookup_exactly_one_of(d: AxisNamePos, names: List[AxisName]) -> Optional[int]:
  """Return the position ``d`` stores for the single name of ``names`` it holds.

  Returns None when none of the names is present; raises ValueError when more
  than one of them is.
  """
  found = None
  for candidate in names:
    if candidate not in d:
      continue
    if found is not None:
      raise ValueError("An input was mapped to the same resource twice")
    found = d[candidate]
  return found
def _squeeze_mapped_axes(arg, axes: AxisNamePos):
  """Drop each mapped (size-1) dim of ``arg``, working from the back so that
  the positions of the dims still to be removed remain valid."""
  for mapped_dim in sorted(axes.values(), reverse=True):
    arg = arg.squeeze(mapped_dim)
  return arg
def _unsqueeze_mapped_axes(out, axes: AxisNamePos):
  """Insert a size-1 dim for every mapped axis of ``out``, lowest dim first
  (the inverse of ``_squeeze_mapped_axes``)."""
  for mapped_dim in sorted(axes.values()):
    out = jnp.expand_dims(out, mapped_dim)
  return out
@lu.transformation
def hide_mapped_axes(flat_in_axes, flat_out_axes, *flat_args):
  """lu transformation: squeeze the mapped (size-1) dims off every input
  before the wrapped call and unsqueeze them back onto every output after."""
  squeezed_args = map(_squeeze_mapped_axes, flat_args, flat_in_axes)
  flat_outputs = yield squeezed_args, {}
  yield map(_unsqueeze_mapped_axes, flat_outputs, flat_out_axes)
@curry
def tile_axis(arg, axis: Optional[int], tile_size):
  """Split dim ``axis`` of ``arg`` (of size d) into (tile_size, d // tile_size).

  A None axis means the argument is not mapped; it is returned unchanged.
  """
  if axis is None:
    return arg
  tiled_shape = list(arg.shape)
  tiled_shape[axis:axis + 1] = [tile_size, tiled_shape[axis] // tile_size]
  return arg.reshape(tiled_shape)
def untile_axis(out, axis: Optional[int]):
  """Merge the tile dim at ``axis`` back into the dim that follows it
  (the inverse of ``tile_axis``). A None axis returns ``out`` unchanged."""
  if axis is None:
    return out
  merged_shape = list(out.shape)
  merged_shape[axis:axis + 2] = [merged_shape[axis] * merged_shape[axis + 1]]
  return out.reshape(merged_shape)
# NOTE: This divides the in_axes by the tile_size and multiplies the out_axes by it.
def vtile(f_flat, in_axes_flat, out_axes_flat, tile_size: Optional[int], axis_name):
  """Batch ``f_flat`` over tiles of size ``tile_size`` along the given dims.

  Every mapped input dim of size d is reshaped into (tile_size,
  d // tile_size) before batching under ``axis_name``, and the corresponding
  output dims are reshaped back afterwards. If ``tile_size`` is None, it is
  taken from the full extent of the first mapped argument's dim.
  """
  @lu.transformation
  def _map_to_tile(*args_flat):
    real_tile_size = tile_size
    # No explicit tile size: use the first mapped argument's dim extent.
    for arg, in_axis in zip(args_flat, in_axes_flat):
      if real_tile_size is not None:
        break
      if in_axis is None:
        continue
      real_tile_size = arg.shape[in_axis]
    assert real_tile_size is not None, "No mapped arguments?"
    outputs_flat = yield map(tile_axis(tile_size=real_tile_size), args_flat, in_axes_flat), {}
    yield map(untile_axis, outputs_flat, out_axes_flat)
  return _map_to_tile(
    batching.batch_fun(f_flat,
                       in_axes_flat,
                       out_axes_flat,
                       axis_name=axis_name))
# Single-dimensional generalized map
def gmap(fun: Callable, schedule, axis_name = None) -> Callable:
  """Map ``fun`` over a single leading axis according to a loop ``schedule``.

  Args:
    fun: the function to map over the leading (0th) dim of every argument.
    schedule: a sequence of (loop type, size) pairs; loop types are
      'vectorized', 'parallel' or 'sequential' (or LoopType members). At
      most one size may be None, in which case it is inferred — see
      ``_normalize_schedule``.
    axis_name: optional name to bind the mapped axis to inside ``fun``;
      when None, a fresh private name is used and no name is bound.

  Returns:
    A wrapped function that evaluates ``fun`` via the ``gmap_p`` primitive
    (defined elsewhere in this module).
  """
  warn("gmap is an experimental feature and probably has bugs!")
  _check_callable(fun)
  binds_axis_name = axis_name is not None
  # Use a fresh private axis name when the caller didn't bind one.
  axis_name = core._TempAxisName(fun) if axis_name is None else axis_name
  @wraps(fun)
  def f_gmapped(*args, **kwargs):
    f = lu.wrap_init(fun)
    args_flat, in_tree = tree_flatten((args, kwargs))
    # Every flat argument is mapped, along its leading (0th) dimension.
    mapped_invars = (True,) * len(args_flat)
    axis_size = _mapped_axis_size(in_tree, args_flat, (0,) * len(args_flat), "gmap")
    parsed_schedule = _normalize_schedule(schedule, axis_size, binds_axis_name)
    for arg in args_flat: _check_arg(arg)
    flat_fun, out_tree = flatten_fun(f, in_tree)
    outs = gmap_p.bind(
        flat_fun, *args_flat,
        axis_name=axis_name,
        axis_size=axis_size,
        mapped_invars=mapped_invars,
        schedule=parsed_schedule,
        binds_axis_name=binds_axis_name)
    return tree_unflatten(out_tree(), outs)
  return f_gmapped
class LoopType(enum.Enum):
  """How one component of a gmap schedule is executed."""
  # Explicit values match what enum.auto() assigned in declaration order.
  vectorized = 1
  parallel = 2
  sequential = 3
# One normalized schedule component: the loop's LoopType and its trip count.
Loop = namedtuple('Loop', ['type', 'size'])
def _normalize_schedule(schedule, axis_size, binds_axis_name):
  """Validate ``schedule`` and resolve it into a tuple of ``Loop`` entries.

  At most one component may have a None size; it is replaced by ``axis_size``
  divided by the product of the explicitly given sizes. Raises ValueError for
  an empty schedule, a second None size, a vectorized loop that is not the
  last component, or a sequential loop when the gmap binds an axis name.
  """
  if not schedule:
    raise ValueError("gmap expects a non-empty schedule")
  known_product = 1
  none_seen = False
  for entry in schedule:
    size = entry[1]
    if size is not None:
      known_product *= size
    elif none_seen:
      raise ValueError("gmap schedule can only contain at most a single None size specification")
    else:
      none_seen = True
  inferred_size = axis_size // known_product
  normalized = []
  last = len(schedule) - 1
  for pos, entry in enumerate(schedule):
    kind = _parse_name(entry[0])
    if kind is LoopType.vectorized and pos < last:
      raise ValueError("vectorized loops can only appear as the last component of the schedule")
    if kind is LoopType.sequential and binds_axis_name:
      raise ValueError("gmaps that bind a new axis name cannot have sequential components in the schedule")
    normalized.append(Loop(kind, entry[1] or inferred_size))
  return tuple(normalized)
def _parse_name(name):
  """Return ``name`` as a LoopType, accepting a member or its string name."""
  if isinstance(name, LoopType):
    return name
  try:
    parsed = LoopType[name]
  except KeyError as err:
    raise ValueError(f"Unrecognized loop type: {name}") from err
  return parsed
def gmap_impl(fun: lu.WrappedFun, *args, axis_size, axis_name, binds_axis_name, mapped_invars, schedule):
  """Evaluation rule for the gmap primitive: build the scheduled version of
  ``fun`` for the given abstract values and apply it to ``args``."""
  avals = [core.raise_to_shaped(core.get_aval(arg)) for arg in args]
  # _apply_schedule is defined elsewhere in this module (not visible here);
  # it lowers the parsed schedule into an executable callable.
  scheduled_fun = _apply_schedule(fun, axis_size, axis_name, binds_axis_name,
                                  mapped_invars, schedule, *avals)
  return scheduled_fun(*args)
class _GMapSubaxis:
def __init__(self, axis_name, index):
self.axis_name = axis_name
self.index = index
def __repr__(self):
return f'<subaxis {self.index} of {self.axis_name}>'
def __hash__(self):
return hash((self.axis_name, self.index))
def __eq__(self, other):
return (isinstance(other, | |
'b'],
['solare', 'noun', 'b'],
['solco', 'noun', 'b'],
['soldato', 'noun', 'a'],
['soldo', 'noun', 'a'],
['sole', 'noun', 'a'],
['solenne', 'adjective', 'b'],
['solidarietà', 'noun', 'b'],
['solido', 'adjective', 'b'],
['solido', 'noun', 'b'],
['solitamente', 'adverb', 'b'],
['solitario', 'adjective', 'b'],
['solitario', 'noun', 'b'],
['solito', 'adjective', 'a'],
['solito', 'noun', 'a'],
['solitudine', 'noun', 'b'],
['solletico', 'noun', 'c'],
['sollevare', 'verb', 'a'],
['sollievo', 'noun', 'b'],
['solo', 'adjective', 'a'],
['solo', 'noun', 'a'],
['solo', 'adverb', 'a'],
['solo', 'conjunction', 'a'],
['soltanto', 'adverb', 'a'],
['soltanto', 'conjunction', 'a'],
['soluzione', 'noun', 'a'],
['somigliare', 'verb', 'b'],
['somma', 'noun', 'a'],
['sommare', 'verb', 'b'],
['sondaggio', 'noun', 'a'],
['sonno', 'noun', 'a'],
['sonoro', 'adjective', 'b'],
['sonoro', 'noun', 'b'],
['soppalco', 'noun', 'c'],
['sopportare', 'verb', 'a'],
['sopra', 'preposition', 'a'],
['sopra', 'adverb', 'a'],
['sopra', 'adjective', 'a'],
['sopra', 'noun', 'a'],
['soprabito', 'noun', 'c'],
['sopracciglio', 'noun', 'c'],
['soprammobile', 'noun', 'c'],
['soprannome', 'noun', 'c'],
['soprattutto', 'adverb', 'a'],
['sopravvalutare', 'verb', 'c'],
['sopravvivenza', 'noun', 'b'],
['sopravvivere', 'verb', 'a'],
['sorcio', 'noun', 'c'],
['sordo', 'adjective', 'b'],
['sordo', 'noun', 'b'],
['sorella', 'noun', 'a'],
['sorgente', 'pres_part', 'b'],
['sorgente', 'adjective', 'b'],
['sorgente', 'noun', 'b'],
['sorgere', 'verb', 'b'],
['sorpassare', 'verb', 'c'],
['sorpasso', 'noun', 'c'],
['sorprendente', 'pres_part', 'b'],
['sorprendente', 'adjective', 'b'],
['sorprendere', 'verb', 'b'],
['sorpresa', 'noun', 'a'],
['sorridente', 'pres_part', 'c'],
['sorridente', 'adjective', 'c'],
['sorridere', 'verb', 'a'],
['sorriso', 'noun', 'a'],
['sorso', 'noun', 'c'],
['sorta', 'noun', 'a'],
['sorte', 'noun', 'b'],
['sorteggiare', 'verb', 'c'],
['sorteggio', 'noun', 'c'],
['sorvegliare', 'verb', 'b'],
['sospendere', 'verb', 'b'],
['sospensione', 'noun', 'b'],
['sospeso', 'past_part', 'b'],
['sospeso', 'adjective', 'b'],
['sospeso', 'noun', 'b'],
['sospettare', 'verb', 'b'],
['sospetto', 'noun', 'a'],
['sospetto', 'adjective', 'a'],
['sospetto', 'noun', 'a'],
['sospirare', 'verb', 'b'],
['sospiro', 'noun', 'b'],
['sosta', 'noun', 'b'],
['sostanza', 'noun', 'a'],
['sostanzialmente', 'adverb', 'b'],
['sostare', 'verb', 'c'],
['sostegno', 'noun', 'b'],
['sostenere', 'verb', 'a'],
['sostenitore', 'adjective', 'b'],
['sostenitore', 'noun', 'b'],
['sostituire', 'verb', 'a'],
['sostituzione', 'noun', 'b'],
['sottaceto', 'adjective', 'c'],
['sottaceto', 'adverb', 'c'],
['sottaceto', 'noun', 'c'],
['sotterraneo', 'adjective', 'b'],
['sotterraneo', 'noun', 'b'],
['sottile', 'adjective', 'a'],
['sottile', 'noun', 'a'],
['sottile', 'adverb', 'a'],
['sottinteso', 'past_part', 'c'],
['sottinteso', 'adjective', 'c'],
['sottinteso', 'noun', 'c'],
['sotto', 'preposition', 'a'],
['sotto', 'adverb', 'a'],
['sotto', 'adjective', 'a'],
['sotto', 'noun', 'a'],
['sottofondo', 'noun', 'b'],
['sottolineare', 'verb', 'a'],
['sottolio', 'adverb', 'c'],
['sottolio', 'adjective', 'c'],
['sottomarino', 'adjective', 'c'],
['sottomarino', 'noun', 'c'],
['sottopassaggio', 'noun', 'c'],
['sottoporre', 'verb', 'a'],
['sottoscrivere', 'verb', 'b'],
['sottovalutare', 'verb', 'b'],
['sottrarre', 'verb', 'b'],
['sovietico', 'adjective', 'b'],
['sovietico', 'noun', 'b'],
['sovrano', 'adjective', 'b'],
['sovrano', 'noun', 'b'],
['sovrapporre', 'verb', 'b'],
['spaccare', 'verb', 'b'],
['spaccatura', 'noun', 'c'],
['spacciare', 'verb', 'b'],
['spacciatore', 'noun', 'c'],
['spaccio', 'noun', 'c'],
['spada', 'noun', 'b'],
['spaghetto', 'noun', 'b'],
['spagnolo', 'adjective', 'a'],
['spagnolo', 'noun', 'a'],
['spago', 'noun', 'c'],
['spalancare', 'verb', 'b'],
['spalla', 'noun', 'a'],
['spalmabile', 'adjective', 'c'],
['spalmare', 'verb', 'c'],
['spam', 'noun', 'b'],
['sparare', 'verb', 'a'],
['sparecchiare', 'verb', 'c'],
['spargere', 'verb', 'b'],
['sparire', 'verb', 'a'],
['sparo', 'noun', 'b'],
['sparso', 'past_part', 'b'],
['sparso', 'adjective', 'b'],
['spassare', 'verb', 'b'],
['spasso', 'noun', 'c'],
['spavaldo', 'adjective', 'c'],
['spaventare', 'verb', 'a'],
['spaventato', 'past_part', 'b'],
['spaventato', 'adjective', 'b'],
['spaventoso', 'adjective', 'b'],
['spaziale', 'adjective', 'b'],
['spazio', 'noun', 'a'],
['spazioso', 'adjective', 'c'],
['spazzare', 'verb', 'b'],
['spazzatura', 'noun', 'b'],
['spazzino', 'noun', 'c'],
['spazzola', 'noun', 'c'],
['spazzolare', 'verb', 'c'],
['spazzolino', 'noun', 'c'],
['spazzolone', 'noun', 'c'],
['specchiarsi', 'verb', 'c'],
['specchio', 'noun', 'a'],
['speciale', 'adjective', 'a'],
['speciale', 'noun', 'a'],
['specialista', 'noun', 'b'],
['specializzato', 'past_part', 'b'],
['specializzato', 'adjective', 'b'],
['specializzato', 'noun', 'b'],
['specialmente', 'adverb', 'b'],
['specie', 'noun', 'a'],
['specie', 'adverb', 'a'],
['specificare', 'verb', 'b'],
['specifico', 'adjective', 'a'],
['specifico', 'noun', 'a'],
['speck', 'noun', 'c'],
['spedire', 'verb', 'b'],
['spedizione', 'noun', 'b'],
['spegnere', 'verb', 'a'],
['spellare', 'verb', 'c'],
['spendere', 'verb', 'a'],
['spennare', 'verb', 'c'],
['spensierato', 'adjective', 'c'],
['spento', 'past_part', 'b'],
['spento', 'adjective', 'b'],
['speranza', 'noun', 'a'],
['sperare', 'verb', 'a'],
['sperimentale', 'adjective', 'b'],
['sperimentare', 'verb', 'b'],
['sperimentazione', 'noun', 'b'],
['sperone', 'noun', 'c'],
['spesa', 'noun', 'a'],
['spesso', 'adjective', 'b'],
['spesso', 'adverb', 'a'],
['spessore', 'noun', 'b'],
['spettacolare', 'adjective', 'b'],
['spettacolo', 'noun', 'a'],
['spettare', 'verb', 'b'],
['spettatore', 'noun', 'b'],
['spettinare', 'verb', 'c'],
['spettro', 'noun', 'b'],
['spezia', 'noun', 'c'],
['spezzare', 'verb', 'b'],
['spia', 'noun', 'b'],
['spiacere', 'verb', 'b'],
['spiaggia', 'noun', 'a'],
['spianare', 'verb', 'c'],
['spiare', 'verb', 'b'],
['spiazzo', 'noun', 'c'],
['spiccare', 'verb', 'b'],
['spicciolo', 'adjective', 'c'],
['spicciolo', 'noun', 'c'],
['spiedino', 'noun', 'c'],
['spiedo', 'noun', 'c'],
['spiegare', 'verb', 'a'],
['spiegazione', 'noun', 'a'],
['spietato', 'adjective', 'b'],
['spiga', 'noun', 'c'],
['spigolo', 'noun', 'c'],
['spillo', 'noun', 'c'],
['spina', 'noun', 'b'],
['spinacio', 'noun', 'c'],
['spingere', 'verb', 'a'],
['spinta', 'noun', 'b'],
['spionaggio', 'noun', 'c'],
['spirito', 'noun', 'a'],
['spiritoso', 'adjective', 'c'],
['spirituale', 'adjective', 'b'],
['spirituale', 'noun', 'b'],
['splendente', 'pres_part', 'c'],
['splendente', 'adjective', 'c'],
['splendere', 'verb', 'b'],
['splendido', 'adjective', 'b'],
['splendore', 'noun', 'b'],
['spogliare', 'verb', 'b'],
['spogliatoio', 'noun', 'c'],
['spoglio', 'noun', 'c'],
['spolverare', 'verb', 'c'],
['sponda', 'noun', 'b'],
['spontaneo', 'adjective', 'b'],
['sporcare', 'verb', 'b'],
['sporcizia', 'noun', 'c'],
['sporco', 'adjective', 'a'],
['sporco', 'noun', 'a'],
['sporgente', 'pres_part', 'c'],
['sporgente', 'adjective', 'c'],
['sporgente', 'noun', 'c'],
['sporgere', 'verb', 'b'],
['sport', 'noun', 'a'],
['sport', 'adjective', 'a'],
['sportello', 'noun', 'b'],
['sportivo', 'adjective', 'a'],
['sportivo', 'noun', 'a'],
['sposare', 'verb', 'a'],
['sposato', 'past_part', 'b'],
['sposato', 'adjective', 'b'],
['sposato', 'noun', 'b'],
['sposo', 'noun', 'b'],
['spostamento', 'noun', 'b'],
['spostare', 'verb', 'a'],
['spot', 'noun', 'b'],
['spranga', 'noun', 'c'],
['spray', 'adjective', 'c'],
['spray', 'noun', 'c'],
['sprecare', 'verb', 'b'],
['spreco', 'noun', 'c'],
['spremere', 'verb', 'c'],
['spremuta', 'noun', 'c'],
['sprofondare', 'verb', 'b'],
['sproposito', 'noun', 'c'],
['spruzzare', 'verb', 'c'],
['spuma', 'noun', 'c'],
['spumante', 'pres_part', 'c'],
['spumante', 'adjective', 'c'],
['spumante', 'noun', 'c'],
['spuntare', 'verb', 'b'],
['spuntino', 'noun', 'c'],
['spunto', 'noun', 'b'],
['sputare', 'verb', 'b'],
['sputo', 'noun', 'c'],
['squadra', 'noun', 'a'],
['squallido', 'adjective', 'c'],
['squalo', 'noun', 'c'],
['squarcio', 'noun', 'c'],
['squillare', 'verb', 'b'],
['squisito', 'adjective', 'c'],
['stabile', 'adjective', 'b'],
['stabile', 'noun', 'b'],
['stabilire', 'verb', 'a'],
['stabilità', 'noun', 'b'],
['staccare', 'verb', 'a'],
['stacco', 'noun', 'c'],
['stadio', 'noun', 'b'],
['staffa', 'noun', 'c'],
['stagione', 'noun', 'a'],
['stagno', 'noun', 'c'],
['stalla', 'noun', 'b'],
['stallone', 'noun', 'c'],
['stamattina', 'adverb', 'b'],
['stampa', 'noun', 'a'],
['stampare', 'verb', 'b'],
['stampatello', 'noun', 'c'],
['stampato', 'past_part', 'b'],
['stampato', 'adjective', 'b'],
['stampato', 'noun', 'b'],
['stampella', 'noun', 'c'],
['stampo', 'noun', 'c'],
['stancare', 'verb', 'b'],
['stanchezza', 'noun', 'b'],
['stanco', 'adjective', 'a'],
['standard', 'noun', 'b'],
['standard', 'adjective', 'b'],
['stanga', 'noun', 'c'],
['stanotte', 'adverb', 'b'],
['stanza', 'noun', 'a'],
['star', 'noun', 'b'],
['stare', 'verb', 'a'],
['stasera', 'adverb', 'a'],
['statale', 'adjective', 'b'],
['statale', 'noun', 'b'],
['statistica', 'noun', 'b'],
['statistico', 'adjective', 'b'],
['statistico', 'noun', 'b'],
['stato', 'noun', 'a'],
['stato', 'noun', 'a'],
['statua', 'noun', 'b'],
['statunitense', 'adjective', 'b'],
['statunitense', 'noun', 'b'],
['status', 'noun', 'b'],
['stavolta', 'adverb', 'b'],
['stazione', 'noun', 'a'],
['stella', 'noun', 'a'],
['stellare', 'adjective', 'b'],
['stendere', 'verb', 'b'],
['stendibiancheria', 'noun', 'c'],
['stereo', 'adjective', 'c'],
['stereo', 'noun', 'c'],
['sterlina', 'noun', 'b'],
['sterzare', 'verb', 'c'],
['sterzo', 'noun', 'c'],
['stesso', 'adjective', 'a'],
['stesso', 'pronoun', 'a'],
['stile', 'noun', 'a'],
['stima', 'noun', 'b'],
['stimare', 'verb', 'b'],
['stimolare', 'verb', 'b'],
['stimolo', 'noun', 'b'],
['stinco', 'noun', 'c'],
['stipendiare', 'verb', 'c'],
['stipendio', 'noun', 'a'],
['stirare', 'verb', 'b'],
['stivaletto', 'noun', 'c'],
['stoffa', 'noun', 'b'],
['stomaco', 'noun', 'b'],
['stonare', 'verb', 'c'],
['stop', 'loc-comando', 'c'],
['stop', 'noun', 'c'],
['stoppa', 'noun', 'c'],
['storcere', 'verb', 'c'],
['storia', 'noun', 'a'],
['storico', 'adjective', 'a'],
['storico', 'noun', 'a'],
['stornello', 'noun', 'c'],
['storta', 'noun', 'c'],
['storto', 'past_part', 'b'],
['storto', 'adjective', 'b'],
['storto', 'adverb', 'b'],
['storto', 'noun', 'b'],
['stoviglia', 'noun', 'c'],
['stracchino', 'noun', 'c'],
['straccio', 'noun', 'b'],
['strada', 'noun', 'a'],
['stradale', 'adjective', 'b'],
['stradale', 'noun', 'b'],
['strage', 'noun', 'b'],
['strangolare', 'verb', 'c'],
['straniero', 'adjective', 'a'],
['straniero', 'noun', 'a'],
['strano', 'adjective', 'a'],
['straordinario', 'adjective', 'a'],
['straordinario', 'noun', 'a'],
['strappare', 'verb', 'b'],
['strategia', 'noun', 'a'],
['strategico', 'adjective', 'b'],
['strato', 'noun', 'b'],
['strega', 'noun', 'a'],
['stregare', 'verb', 'b'],
['stregone', 'noun', 'c'],
['stress', 'noun', 'b'],
['stretta', 'noun', 'b'],
['strettamente', 'adverb', 'b'],
['stretto', 'past_part', 'a'],
['stretto', 'adjective', 'a'],
['stretto', 'noun', 'a'],
['strillare', 'verb', 'b'],
['strillo', 'noun', 'c'],
['stringa', 'noun', 'c'],
['stringere', 'verb', 'a'],
['striscia', 'noun', 'b'],
['strisciare', 'verb', 'b'],
['strofinaccio', 'noun', 'c'],
['stronzata', 'noun', 'b'],
['stronzo', 'noun', 'a'],
['stronzo', 'adjective', 'a'],
['strumento', 'noun', 'a'],
['strutto', 'past_part', 'c'],
['strutto', 'adjective', 'c'],
['strutto', 'noun', 'c'],
['struttura', 'noun', 'a'],
['strutturale', 'adjective', 'b'],
['struzzo', 'noun', 'c'],
['studente', 'noun', 'a'],
['studiare', 'verb', 'a'],
['studio', 'noun', 'a'],
['studioso', 'adjective', 'b'],
['studioso', 'noun', 'b'],
['stufa', 'noun', 'c'],
['stuoia', 'noun', 'c'],
['stupefacente', 'pres_part', 'b'],
['stupefacente', 'adjective', 'b'],
['stupefacente', 'noun', 'b'],
['stupendo', 'adjective', 'b'],
['stupido', 'adjective', 'a'],
['stupido', 'noun', 'a'],
['stupire', 'verb', 'b'],
['stupito', 'past_part', 'b'],
['stupito', 'adjective', 'b'],
['stupore', 'noun', 'b'],
['stuzzicadenti', 'noun', 'c'],
['stuzzicare', 'verb', 'c'],
['style', 'noun', 'b'],
['su', 'preposition', 'a'],
['su', 'adverb', 'a'],
['su', 'exclamation', 'a'],
['su', 'noun', 'a'],
['subire', 'verb', 'a'],
['subito', 'adverb', 'a'],
['succedere', 'verb', 'a'],
['successione', 'noun', 'b'],
['successivamente', 'adverb', 'b'],
['successivo', 'adjective', 'a'],
['successo', 'noun', 'a'],
['succhiare', 'verb', 'b'],
['succo', 'noun', 'b'],
['sud', 'noun', 'a'],
['sud', 'adjective', 'a'],
['sudamericano', 'adjective', 'c'],
['sudamericano', 'noun', 'c'],
['sudare', 'verb', 'b'],
['sudato', 'past_part', 'c'],
['sudato', 'adjective', 'c'],
['suddito', 'noun', 'b'],
['suddito', 'adjective', 'b'],
['suddividere', 'verb', 'b'],
['sudicio', 'adjective', 'c'],
['sudicio', 'noun', 'c'],
['sudore', 'noun', 'b'],
['sudtirolese', 'adjective', 'c'],
['sudtirolese', 'noun', 'c'],
['sufficiente', 'adjective', 'a'],
['suggerimento', 'noun', 'b'],
['suggerire', 'verb', 'a'],
['suggestivo', 'adjective', 'b'],
['sughero', 'noun', 'c'],
['sugo', 'noun', 'b'],
['suicidio', 'noun', 'b'],
['suino', 'noun', 'c'],
['suino', 'adjective', 'c'],
['suo', 'adjective', 'a'],
['suo', 'pronoun', 'a'],
['suocera', 'noun', 'c'],
['suocero', 'noun', 'c'],
['suola', 'noun', 'c'],
['suolo', 'noun', 'b'],
['suonare', 'verb', 'a'],
['suono', 'noun', 'a'],
['suora', 'noun', 'a'],
['super', 'adjective', 'b'],
['super', 'noun', 'b'],
['superare', 'verb', 'a'],
['superbia', 'noun', 'c'],
['superficiale', 'adjective', 'b'],
['superficie', 'noun', 'a'],
['superiore', 'adjective', 'a'],
['superiore', 'noun', 'a'],
['supermercato', 'noun', 'b'],
['supporre', 'verb', 'b'],
['supportare', 'verb', 'b'],
['supporto', 'noun', 'a'],
['supremo', 'adjective', 'b'],
['surgelato', 'past_part', 'c'],
['surgelato', 'adjective', 'c'],
['surgelato', 'noun', 'c'],
['suscitare', 'verb', 'b'],
['susina', 'noun', 'c'],
['susino', 'noun', 'c'],
['susseguirsi', 'verb', 'c'],
['sussurrare', 'verb', 'b'],
['svanire', 'verb', 'b'],
['svedese', 'adjective', 'c'],
['svedese', 'noun', 'c'],
['sveglia', 'noun', 'c'],
['svegliare', 'verb', 'a'],
['svegliarsi', 'verb', 'c'],
['sveglio', 'past_part', 'b'],
['sveglio', 'adjective', 'b'],
['svelare', 'verb', 'b'],
['svelto', 'adjective', 'c'],
['svenire', 'verb', 'b'],
['sventola', 'noun', 'c'],
['sviluppare', 'verb', 'a'],
['sviluppato', 'past_part', 'b'],
['sviluppato', 'adjective', 'b'],
['sviluppo', 'noun', 'a'],
['svizzero', 'adjective', 'b'],
['svizzero', 'noun', 'b'],
['svolazzare', 'verb', 'c'],
['svolgere', 'verb', 'a'],
['svolgimento', 'noun', 'c'],
['svolta', 'noun', 'b'],
['svuotare', 'verb', 'b'],
['tabaccaio', 'noun', 'c'],
['tabella', 'noun', 'b'],
['tacca', 'noun', 'c'],
['tacchino', 'noun', 'c'],
['tacco', 'noun', 'b'],
['tacere', 'verb', 'a'],
['tacere', 'noun', 'a'],
['tag', 'noun', 'b'],
['taglia', 'noun', 'b'],
['tagliare', 'verb', 'a'],
['tagliatella', 'noun', 'c'],
['tagliato', 'past_part', 'b'],
['tagliato', 'adjective', 'b'],
['tagliere', 'noun', 'c'],
['taglio', 'noun', 'a'],
['tagliola', 'noun', 'c'],
['talco', 'noun', 'c'],
['tale', 'adjective', 'a'],
['tale', 'pronoun', 'a'],
['tale', 'adverb', 'a'],
['taleggio', 'noun', 'c'],
['talento', 'noun', 'b'],
['talmente', 'adverb', 'a'],
['talpa', 'noun', 'c'],
['talpa', 'adjective', 'c'],
['talpa', 'noun', 'c'],
['talvolta', 'adverb', 'b'],
['tamburo', 'noun', 'c'],
['tamponare', 'verb', 'c'],
['tangente', 'pres_part', 'b'],
['tangente', 'adjective', 'b'],
['tangente', 'noun', 'b'],
['tanto', 'adjective', 'a'],
['tanto', 'pronoun', 'a'],
['tanto', 'noun', 'a'],
['tanto', 'adverb', 'a'],
['tanto', 'conjunction', 'a'],
['tappa', 'noun', 'b'],
['tappare', 'verb', 'b'],
['tappetino', 'noun', 'c'],
['tappeto', 'noun', 'b'],
['tappezzare', 'verb', 'c'],
['tappo', 'noun', 'c'],
['tarallo', 'noun', 'c'],
['tarantella', 'noun', 'c'],
['tardi', 'adverb', 'a'],
['tardo', 'adjective', 'a'],
['tardo', 'adverb', 'a'],
['targa', 'noun', 'b'],
['tariffa', 'noun', 'b'],
['tarlo', 'noun', 'c'],
['tartaruga', 'noun', 'c'],
['tartufo', 'noun', 'c'],
['tasca', 'noun', 'a'],
['tassa', 'noun', 'a'],
['tassare', 'verb', 'c'],
['tassello', 'noun', 'c'],
['tasso', 'noun', 'b'],
['tastiera', 'noun', 'b'],
['tasto', 'noun', 'b'],
['tatto', 'noun', 'c'],
['tatuaggio', 'noun', 'b'],
['taverna', 'noun', 'c'],
['tavola', 'noun', 'a'],
['tavoletta', 'noun', 'c'],
['tavolino', 'noun', 'b'],
['tavolo', 'noun', 'a'],
['taxi', 'noun', 'b'],
['tazza', 'noun', 'b'],
['tè', 'noun', 'b'],
['te', 'pronoun', 'a'],
['team', 'noun', 'a'],
['teatrale', 'adjective', 'b'],
['teatro', 'noun', 'a'],
['tecnica', 'noun', 'a'],
['tecnicamente', 'adverb', 'b'],
['tecnico', 'adjective', 'a'],
['tecnico', 'noun', 'a'],
['tecnologia', 'noun', 'a'],
['tecnologico', 'adjective', 'b'],
['tedesco', 'adjective', 'a'],
['tedesco', 'noun', 'a'],
['tegame', 'noun', 'c'],
['teglia', 'noun', 'c'],
['tegola', 'noun', 'c'],
['tela', 'noun', 'b'],
['telaio', 'noun', 'c'],
['telecamera', 'noun', 'b'],
['telecomandato', 'past_part', 'c'],
['telecomandato', 'adjective', 'c'],
['telecronaca', 'noun', 'c'],
['telecronista', 'noun', 'c'],
['telefilm', 'noun', 'b'],
['telefonare', 'verb', 'a'],
['telefonata', 'noun', 'a'],
['telefonico', 'adjective', 'a'],
['telefonino', 'noun', 'b'],
['telefono', 'noun', 'a'],
['telegiornale', 'noun', 'b'],
['telegrafico', 'adjective', 'c'],
['telegrafo', 'noun', 'c'],
['telegramma', 'noun', 'c'],
['telescopio', 'noun', 'b'],
['televisione', 'noun', 'a'],
['televisivo', 'adjective', 'a'],
['televisore', 'noun', 'b'],
['tema', 'noun', 'a'],
['temere', 'verb', 'a'],
['temperatura', 'noun', 'a'],
['tempesta', 'noun', 'b'],
['tempio', 'noun', 'b'],
['tempo', 'noun', 'a'],
['temporale', 'noun', 'b'],
['temporaneo', 'adjective', 'b'],
['tenaglia', 'noun', 'c'],
['tenda', 'noun', 'a'],
['tendenza', 'noun', 'a'],
['tendere', 'verb', 'a'],
['tenebra', 'noun', | |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""A module for creating orographic smoothing coefficients"""
import operator
from typing import Dict, Optional
import iris
import numpy as np
from iris.cube import Cube, CubeList
from numpy import ndarray
from improver import BasePlugin
from improver.metadata.constants.attributes import MANDATORY_ATTRIBUTE_DEFAULTS
from improver.metadata.utilities import create_new_diagnostic_cube
from improver.utilities.cube_manipulation import enforce_coordinate_ordering
from improver.utilities.spatial import GradientBetweenAdjacentGridSquares
class OrographicSmoothingCoefficients(BasePlugin):
"""
Class to generate smoothing coefficients for recursive filtering based on
orography gradients.
A smoothing coefficient determines how much "value" of a cell
undergoing filtering is comprised of the current value at that cell and
how much comes from the adjacent cell preceding it in the direction in
which filtering is being applied. A larger smoothing_coefficient results in
a more significant proportion of a cell's new value coming from its
neighbouring cell.
The smoothing coefficients are calculated from the orography gradient using
a simple equation with the user defined value for the power:
.. math::
\\rm{smoothing\\_coefficient} = \\rm{gradient}^{\\rm{power}}
The resulting values are scaled between min_gradient_smoothing_coefficient and
max_gradient_smoothing_coefficient to give the desired range of
smoothing_coefficients. These limiting values must be greater than or equal to
zero and less than or equal to 0.5.
Note that the smoothing coefficients are returned on a grid that is one cell
smaller in the given dimension than the input orography, i.e. the smoothing
coefficients in the x-direction are returned on a grid that is one cell
smaller in x than the input. This is because the coefficients are used in
both forward and backward passes of the recursive filter, so they need to be
symmetric between cells in the original grid to help ensure conservation.
"""
def __init__(
self,
min_gradient_smoothing_coefficient: float = 0.5,
max_gradient_smoothing_coefficient: float = 0.0,
power: float = 1,
use_mask_boundary: bool = False,
invert_mask: bool = False,
) -> None:
"""
Initialise class.
Args:
min_gradient_smoothing_coefficient:
The value of recursive filter smoothing_coefficient to be used
where the orography gradient is a minimum. Generally this number
will be larger than the max_gradient_smoothing_coefficient as
quantities are likely to be smoothed more across flat terrain.
max_gradient_smoothing_coefficient:
The value of recursive filter smoothing_coefficient to be used
where the orography gradient is a maximum. Generally this number
will be smaller than the min_gradient_smoothing_coefficient as
quantities are likely to be smoothed less across complex terrain.
power:
The power to be used in the smoothing_coefficient equation
use_mask_boundary:
A mask can be provided to this plugin to define a region in which
smoothing coefficients are set to zero, i.e. no smoothing. If this
option is set to True then rather than the whole masked region
being set to zero, only the cells that mark the transition from
masked to unmasked will be set to zero. The primary purpose for
this is to prevent smoothing across land-sea boundaries.
invert_mask:
By default, if a mask is provided and use_mask_boundary is False,
all the smoothing coefficients corresponding to a mask value of 1
will be zeroed. Setting invert_mask to True reverses this behaviour
such that mask values of 0 set the points to be zeroed in the
smoothing coefficients. If use_mask_boundary is True this option
has no effect.
"""
for limit in [
min_gradient_smoothing_coefficient,
max_gradient_smoothing_coefficient,
]:
if limit < 0 or limit > 0.5:
msg = (
"min_gradient_smoothing_coefficient and max_gradient_smoothing_coefficient "
"must be 0 <= value <=0.5 to help ensure better conservation across the "
"whole field to which the recursive filter is applied. The values provided "
"are {} and {} respectively".format(
min_gradient_smoothing_coefficient,
max_gradient_smoothing_coefficient,
)
)
raise ValueError(msg)
self.max_gradient_smoothing_coefficient = max_gradient_smoothing_coefficient
self.min_gradient_smoothing_coefficient = min_gradient_smoothing_coefficient
self.power = power
self.use_mask_boundary = use_mask_boundary
self.mask_comparison = operator.ge
if invert_mask:
self.mask_comparison = operator.le
def scale_smoothing_coefficients(self, cubes: CubeList) -> CubeList:
"""
This scales a set of smoothing_coefficients from input cubes to range
between the min_gradient_smoothing_coefficient and the
max_gradient_smoothing_coefficient.
Args:
cubes:
A list of smoothing_coefficient cubes that we need to take the
minimum and maximum values from.
Returns:
A list of smoothing_coefficient cubes scaled to within the
range specified.
"""
cube_min = min([abs(cube.data).min() for cube in cubes])
cube_max = max([abs(cube.data).max() for cube in cubes])
scaled_cubes = iris.cube.CubeList()
for cube in cubes:
scaled_data = (abs(cube.data) - cube_min) / (cube_max - cube_min)
scaled_data = (
scaled_data
* (
self.max_gradient_smoothing_coefficient
- self.min_gradient_smoothing_coefficient
)
+ self.min_gradient_smoothing_coefficient
)
scaled_cube = cube.copy(data=scaled_data)
scaled_cube.units = "1"
scaled_cubes.append(scaled_cube)
return scaled_cubes
def unnormalised_smoothing_coefficients(self, gradient_cube: Cube) -> ndarray:
"""
This generates initial smoothing_coefficient values from gradients
using a simple power law, for which the power is set at initialisation.
Using a power of 1 gives an output smoothing_coefficients_cube with
values equal to the input gradient_cube.
Args:
gradient_cube:
A cube of the normalised gradient
Returns:
An array containing the unscaled smoothing_coefficients.
"""
return np.abs(gradient_cube.data) ** self.power
def create_coefficient_cube(
self, data: ndarray, template: Cube, cube_name: str, attributes: Dict
) -> Cube:
"""
Update metadata in smoothing_coefficients cube. Remove any time
coordinates and rename.
Args:
data:
The smoothing coefficient data to store in the cube.
template:
A gradient cube, the dimensions of which are used as a template
for the coefficient cube.
cube_name:
A name for the resultant cube
attributes:
A dictionary of attributes for the new cube.
Returns:
A new cube of smoothing_coefficients
"""
for coord in template.coords(dim_coords=False):
for coord_name in ["time", "period", "realization"]:
if coord_name in coord.name():
template.remove_coord(coord)
attributes["title"] = "Recursive filter smoothing coefficients"
attributes.pop("history", None)
attributes["power"] = self.power
return create_new_diagnostic_cube(
cube_name,
"1",
template,
MANDATORY_ATTRIBUTE_DEFAULTS.copy(),
optional_attributes=attributes,
data=data,
)
def zero_masked(
self, smoothing_coefficient_x: Cube, smoothing_coefficient_y: Cube, mask: Cube
) -> None:
"""
Zero smoothing coefficients in regions or at boundaries defined by the
provided mask. The changes are made in place to the input cubes. The
behaviour is as follows:
use_mask_boundary = True:
The edges of the mask region are used to define where smoothing
coefficients should be zeroed. The zeroed smoothing coefficients
are between the masked and unmasked cells of the grid on which the
mask is defined.
invert_mask = False:
All smoothing coefficients within regions for which the mask has
value 1 are set to 0. The boundary cells between masked and
unmasked are also set to 0. Has no effect if use_mask_boundary=True.
invert_mask = True:
All smoothing coefficients within regions for which the mask has
value 0 are set to 0. The boundary cells between masked and
unmasked are also set to 0. Has no effect if use_mask_boundary=True.
Args:
smoothing_coefficient_x:
Smoothing coefficients calculated along the x-dimension.
smoothing_coefficient_y:
Smoothing coefficients calculated along the y-dimension.
mask:
The mask defining areas in which smoothing coefficients should
be zeroed.
"""
if self.use_mask_boundary:
zero_points_x = np.diff(mask.data, axis=1) != 0
zero_points_y = np.diff(mask.data, axis=0) != 0
| |
# support/parse_data.py
import support.classes as classes
import os
import pickle
import numpy as np
import csv
import math
DATA_PATH = os.path.join(os.path.expanduser('~'), "learnml/data")
def prep(s, skipread=False):
    '''
    Takes a string with data set name, and runs the proper setup.

    Args:
        s: name of the data set ("toyReg", "toyClass", "MNIST", "quantum",
           "NoisyOpt_isoBig", "NoisyOpt_isoSmall").
        skipread: if True, load a previously pickled DataInfo from
           data/<s>/info.dat instead of regenerating the data.

    Returns:
        A classes.DataInfo instance, or None if the name is unknown
        (mirrors the fall-through behaviour of the original if-chain).
    '''
    # Dispatch table replaces six copy-pasted if-blocks that differed only
    # in the preparation function called.
    builders = {
        "toyReg": toyReg,
        "toyClass": toyClass,
        "MNIST": MNIST,
        "quantum": quantum,
        "NoisyOpt_isoBig": NoisyOpt_isoBig,
        "NoisyOpt_isoSmall": NoisyOpt_isoSmall,
    }
    print("Preparing data (s =", s, ")...")
    if s not in builders:
        # Unknown data set name: keep the original behaviour (return None).
        return None
    if skipread:
        # Re-use the DataInfo pickled by a previous preparation run.
        toread = os.path.join("data", s, "info.dat")
        with open(toread, mode="br") as f:
            return pickle.load(f)
    return builders[s]()
def load(dinfo):
    '''
    Given the info (path to binary, shape) about a particular data set,
    load relevant training and testing data sets.

    Args:
        dinfo: a classes.DataInfo instance as returned by prep(), holding
           path/shape/dtype entries for the X/y train/test binaries.

    Returns:
        A classes.Data object — presumably it reads the binary files
        described by dinfo; confirm against support.classes.Data.
    '''
    print("Reading data...")
    return classes.Data(dinfo)
def quantum():
    '''
    Data preparation function, specific to the "quantum physics" dataset.
    URL: http://osmot.cs.cornell.edu/kddcup/datasets.html

    Reads the tab-separated phy_train.dat file, splits it 50/50 into
    train/test halves, writes each array as a raw binary under
    data/quantum/, and pickles the resulting DataInfo for later reuse.

    Returns:
        A classes.DataInfo instance describing the written files.
    '''
    dataset = "quantum"
    dinfo = classes.DataInfo()
    dinfo.mname = "LgstReg" # hard-coded model name.
    print("Preparation (", dataset, ")...")
    # Source file lives under the user's data directory (DATA_PATH), while
    # the outputs below are written to the relative "data" directory.
    toread = os.path.join(DATA_PATH,
                          dataset,
                          "phy_train.dat")
    # NOTE: only "train" has labels, so we split this dataset into
    # train/test subsets for a supervised learning routine.
    n = 50000  # total rows consumed: first half train, second half test
    d = 78     # number of feature columns per row
    X_tr = np.zeros( (n//2,d), dtype=np.float64 )
    y_tr = np.zeros( (n//2,1), dtype=np.uint8 )
    X_te = np.zeros( (n//2,d), dtype=np.float64 )
    y_te = np.zeros( (n//2,1), dtype=np.uint8 )
    with open(toread, newline="") as f_table:
        f_reader = csv.reader(f_table, delimiter="\t")
        idx = 0
        switcher = True  # True while still filling the training half
        for line in f_reader:
            # Arbitrarily let first half be training, second half testing.
            # Row layout: line[0] = example id, line[1] = label, rest features.
            # NOTE(review): line[2:-1] drops the final field -- presumably a
            # trailing empty field produced by a trailing tab; confirm this
            # yields exactly d=78 features per row.
            if switcher:
                y_tr[idx,0] = np.uint8(line[1])
                X_tr[idx,:] = np.array(line[2:-1], dtype=np.float64)
                idx += 1
            else:
                y_te[idx,0] = np.uint8(line[1])
                X_te[idx,:] = np.array(line[2:-1], dtype=np.float64)
                idx += 1
            # Once we've covered half the data, start on test data.
            # NOTE(review): no stop condition after the test half is full --
            # assumes the file holds exactly n rows; verify.
            if idx == n//2:
                switcher = False
                idx = 0
    print("Writing inputs...")
    # For each array: dump raw bytes to disk and record shape/path/dtype in
    # the DataInfo so load() can reconstruct it.
    towrite = os.path.join("data", dataset, ("X_tr" + ".dat"))
    with open(towrite, mode="bw") as g_bin:
        X_tr.tofile(g_bin)
    dinfo.X_tr["shape"] = (n//2,d)
    dinfo.X_tr["path"] = towrite
    dinfo.X_tr["dtype"] = np.float64
    towrite = os.path.join("data", dataset, ("X_te" + ".dat"))
    with open(towrite, mode="bw") as g_bin:
        X_te.tofile(g_bin)
    dinfo.X_te["shape"] = (n//2,d)
    dinfo.X_te["path"] = towrite
    dinfo.X_te["dtype"] = np.float64
    print("Writing outputs...")
    towrite = os.path.join("data", dataset, ("y_tr" + ".dat"))
    with open(towrite, mode="bw") as g_bin:
        y_tr.tofile(g_bin)
    dinfo.y_tr["shape"] = (n//2,1)
    dinfo.y_tr["path"] = towrite
    dinfo.y_tr["dtype"] = np.uint8
    towrite = os.path.join("data", dataset, ("y_te" + ".dat"))
    with open(towrite, mode="bw") as g_bin:
        y_te.tofile(g_bin)
    dinfo.y_te["shape"] = (n//2,1)
    dinfo.y_te["path"] = towrite
    dinfo.y_te["dtype"] = np.uint8
    # Save the dinfo dictionary for future use (so we don't have to read
    # the original data every time).
    towrite = os.path.join("data", dataset, "info.dat")
    with open(towrite, mode="wb") as f:
        pickle.dump(dinfo, f)
    # Finally, return the dinfo dict.
    return dinfo
def toyReg():
    '''
    Data preparation function, for a small toy set of data,
    to be solved using a linear regression model.

    Targets are a noisy linear map of Gaussian inputs under hand-fixed
    "true" weights. All arrays are written as raw binaries under
    data/toyReg/ and the DataInfo is pickled for reuse by prep(skipread).

    Returns:
        A classes.DataInfo instance describing the written files.
    '''
    dataset = "toyReg"
    dinfo = classes.DataInfo()
    dinfo.mname = "LinReg" # hard-coded model name.
    print("Preparation (", dataset, ")...")
    n = 15 # training set size
    m = 10 # testing set size
    d = 3  # number of inputs

    def _save(slot, fname, data_arr):
        # Record shape/path/dtype in the given DataInfo slot and write the
        # raw array bytes to disk. Replaces four copy-pasted write blocks.
        towrite = os.path.join("data", dataset, fname + ".dat")
        slot["shape"] = data_arr.shape
        slot["path"] = towrite
        slot["dtype"] = data_arr.dtype
        with open(towrite, mode="bw") as g_bin:
            data_arr.tofile(g_bin)

    # Hand-prepared "true" weights; draw order of the random arrays is kept
    # identical to the original implementation so the RNG stream (and thus
    # any seeded output) is unchanged.
    w_true = np.array([3.1415, 1.414214, 2.718282]).reshape((d,1))
    X_tr = np.random.normal(loc=0.0, scale=0.5, size=n*d).reshape((n,d))
    noise_tr = np.random.normal(loc=0.0, scale=1.0, size=n).reshape((n,1))
    X_te = np.random.normal(loc=0.0, scale=0.5, size=m*d).reshape((m,d))
    noise_te = np.random.normal(loc=0.0, scale=1.0, size=m).reshape((m,1))
    # Inputs and noisy linear outputs, training then testing.
    _save(dinfo.X_tr, "X_tr", X_tr)
    _save(dinfo.X_te, "X_te", X_te)
    _save(dinfo.y_tr, "y_tr",
          (np.dot(X_tr,w_true)+noise_tr).reshape((X_tr.shape[0],1)))
    _save(dinfo.y_te, "y_te",
          (np.dot(X_te,w_true)+noise_te).reshape((X_te.shape[0],1)))
    # Save the dinfo dictionary for future use (so we don't have to read
    # the original data every time).
    towrite = os.path.join("data", dataset, "info.dat")
    with open(towrite, mode="wb") as f:
        pickle.dump(dinfo, f)
    # Finally, return the dinfo dict.
    return dinfo
def toyClass():
    '''
    Data preparation function, for a small toy set of data,
    designed for classification using a multi-class logistic
    regression model.

    Labels are sampled from a softmax over hand-fixed weights, so the data
    follows a known multinomial logistic model. All arrays are written as
    raw binaries under data/toyClass/ and the DataInfo is pickled for
    reuse by prep(skipread).

    Returns:
        A classes.DataInfo instance describing the written files.
    '''
    dataset = "toyClass"
    dinfo = classes.DataInfo()
    dinfo.mname = "LgstReg" # hard-coded model name.
    print("Preparation (", dataset, ")...")
    n = 25  # training set size
    m = 20  # testing set size
    d = 3   # number of inputs
    nc = 4  # number of classes.

    def _save(slot, fname, data_arr):
        # Record shape/path/dtype in the given DataInfo slot and write the
        # raw array bytes to disk. Replaces four copy-pasted write blocks.
        towrite = os.path.join("data", dataset, fname + ".dat")
        slot["shape"] = data_arr.shape
        slot["path"] = towrite
        slot["dtype"] = data_arr.dtype
        with open(towrite, mode="bw") as g_bin:
            data_arr.tofile(g_bin)

    def _sample_labels(X, count):
        # Softmax probabilities under the true model; the last class logit
        # is pinned at zero (reference class), then one label is drawn per
        # example. Replaces the duplicated train/test sampling blocks.
        A = np.zeros(count*nc).reshape((nc,count))
        A[:-1,:] = np.dot(W_true, np.transpose(X)) # leave last row as zeros.
        P = np.exp(A) / np.sum(np.exp(A), axis=0)  # (nc x count)
        y = np.zeros(count, dtype=np.uint8).reshape((count,1))
        for i in range(count):
            y[i,0] = np.random.choice(nc, size=1, replace=True, p=P[:,i])
        return y

    # Hand-prepared weights (assuming 4 classes).
    w_0 = np.array( [3.1415, 1.4142, 2.7182] ).reshape((d,1))
    w_1 = np.array( [3.1415, -1.4142, 2.7182] ).reshape((d,1))
    w_2 = np.array( [-3.1415, 1.4142, -2.7182] ).reshape((d,1))
    # Put the weights into a (d x nc-1) matrix (note the transpose).
    W_true = np.transpose(np.concatenate( (w_0,w_1,w_2) ).reshape((nc-1,d)))
    # Randomly generated inputs; draw order (X_tr, X_te, y_tr, y_te) matches
    # the original implementation so the RNG stream is unchanged.
    X_tr = np.random.normal(loc=0, scale=1.0, size=n*d).reshape((n,d))
    X_te = np.random.normal(loc=0, scale=1.0, size=m*d).reshape((m,d))
    y_tr = _sample_labels(X_tr, n)
    y_te = _sample_labels(X_te, m)
    _save(dinfo.X_tr, "X_tr", X_tr)
    _save(dinfo.X_te, "X_te", X_te)
    _save(dinfo.y_tr, "y_tr", y_tr)
    _save(dinfo.y_te, "y_te", y_te)
    # Save the dinfo dictionary for future use (so we don't have to read
    # the original data every time).
    towrite = os.path.join("data", dataset, "info.dat")
    with open(towrite, mode="wb") as f:
        pickle.dump(dinfo, f)
    # Finally, return the dinfo dict.
    return dinfo
def MNIST():
'''
Data preparation function, specific to the MNIST handwritten
digits data set.
URL: http://yann.lecun.com/exdb/mnist/
'''
dataset = "MNIST"
dinfo = classes.DataInfo()
dinfo.mname = "LgstReg" # hard-coded model name.
print("Preparation (", dataset, ")...")
print("Inputs (training)...")
toread = os.path.join(DATA_PATH,
dataset,
"train-images-idx3-ubyte")
towrite = os.path.join("data", dataset, ("X_tr" + ".dat"))
with open(toread, mode="rb") as f_bin:
f_bin.seek(4)
b = f_bin.read(4)
n = int.from_bytes(b, byteorder="big")
b = f_bin.read(4)
d_rows = int.from_bytes(b, byteorder="big")
b = f_bin.read(4)
d_cols = int.from_bytes(b, byteorder="big")
d = d_rows * d_cols
with open(towrite, mode="bw") as g_bin:
bytes_left = n * d
idx = 0
data_arr = np.empty( (n*d), dtype=np.uint8 )
while bytes_left > 0:
b = f_bin.read(1)
data_arr[idx] = np.uint8(int.from_bytes(b, byteorder="big"))
bytes_left -= 1
idx += 1
data_arr.tofile(g_bin)
dinfo.X_tr["shape"] = (n,d)
dinfo.X_tr["path"] = towrite
dinfo.X_tr["dtype"] = np.uint8
# --------------------------- #
print("Inputs (testing)...")
toread = os.path.join(DATA_PATH,
dataset,
"t10k-images-idx3-ubyte")
towrite = os.path.join("data", dataset, ("X_te" + ".dat"))
with open(toread, mode="rb") | |
# tests/transports/test_urllib3.py
# BSD 3-Clause License
#
# Copyright (c) 2019, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import certifi
import mock
import pytest
import urllib3.poolmanager
from urllib3.exceptions import MaxRetryError, TimeoutError
from elasticapm.conf import constants
from elasticapm.transport.exceptions import TransportException
from elasticapm.transport.http import Transport, version_string_to_tuple
from elasticapm.utils import compat
from tests.utils import assert_any_record_contains
try:
import urlparse
except ImportError:
from urllib import parse as urlparse
@pytest.mark.flaky(reruns=3) # test is flaky on Windows
def test_send(waiting_httpserver, elasticapm_client):
    # Happy path: a 202 response with a Location header makes send() return
    # that URL.
    waiting_httpserver.serve_content(code=202, content="", headers={"Location": "http://example.com/foo"})
    transport = Transport(waiting_httpserver.url, client=elasticapm_client)
    transport.start_thread()
    try:
        url = transport.send(compat.b("x"))
        assert url == "http://example.com/foo"
    finally:
        # Always stop the background worker thread.
        transport.close()
@mock.patch("urllib3.poolmanager.PoolManager.urlopen")
def test_timeout(mock_urlopen, elasticapm_client):
    # A MaxRetryError whose reason is a TimeoutError must surface as a
    # TransportException mentioning "timeout".
    elasticapm_client.server_version = (8, 0) # avoid making server_info request
    transport = Transport("http://localhost", timeout=5, client=elasticapm_client)
    transport.start_thread()
    mock_urlopen.side_effect = MaxRetryError(None, None, reason=TimeoutError())
    try:
        with pytest.raises(TransportException) as exc_info:
            transport.send("x")
        assert "timeout" in str(exc_info.value)
    finally:
        transport.close()
@pytest.mark.flaky(reruns=3) # test is flaky on Windows
def test_http_error(waiting_httpserver, elasticapm_client):
    # A non-2xx response raises a TransportException carrying both the
    # status code and the response body.
    elasticapm_client.server_version = (8, 0) # avoid making server_info request
    waiting_httpserver.serve_content(code=418, content="I'm a teapot")
    transport = Transport(waiting_httpserver.url, client=elasticapm_client)
    transport.start_thread()
    try:
        with pytest.raises(TransportException) as exc_info:
            transport.send("x")
        for val in (418, "I'm a teapot"):
            assert str(val) in str(exc_info.value)
    finally:
        transport.close()
@mock.patch("urllib3.poolmanager.PoolManager.urlopen")
def test_generic_error(mock_urlopen, elasticapm_client):
    """Any unexpected exception from urlopen is wrapped in a TransportException."""
    # The original unpacked an unused (status, message, body) tuple; only the
    # URL was ever read, so the dead locals are dropped.
    url = "http://localhost:9999"
    elasticapm_client.server_version = (8, 0) # avoid making server_info request
    transport = Transport(url, client=elasticapm_client)
    transport.start_thread()
    mock_urlopen.side_effect = Exception("Oopsie")
    try:
        with pytest.raises(TransportException) as exc_info:
            transport.send("x")
        assert "Oopsie" in str(exc_info.value)
    finally:
        transport.close()
def test_http_proxy_environment_variable(elasticapm_client):
    """An HTTP_PROXY environment variable routes traffic through a ProxyManager."""
    env = {"HTTP_PROXY": "http://example.com"}
    with mock.patch.dict("os.environ", env):
        t = Transport("http://localhost:9999", client=elasticapm_client)
        assert isinstance(t.http, urllib3.ProxyManager)
def test_https_proxy_environment_variable(elasticapm_client):
    """An HTTPS_PROXY environment variable routes traffic through a ProxyManager."""
    env = {"HTTPS_PROXY": "https://example.com"}
    with mock.patch.dict("os.environ", env):
        t = Transport("http://localhost:9999", client=elasticapm_client)
        assert isinstance(t.http, urllib3.poolmanager.ProxyManager)
def test_https_proxy_environment_variable_is_preferred(elasticapm_client):
    """When both proxy variables are set, the HTTPS proxy wins."""
    env = {"https_proxy": "https://example.com", "HTTP_PROXY": "http://example.com"}
    with mock.patch.dict("os.environ", env):
        t = Transport("http://localhost:9999", client=elasticapm_client)
        assert isinstance(t.http, urllib3.poolmanager.ProxyManager)
        assert t.http.proxy.scheme == "https"
def test_no_proxy_star(elasticapm_client):
    """NO_PROXY="*" disables proxying even when HTTPS_PROXY is set."""
    env = {"HTTPS_PROXY": "https://example.com", "NO_PROXY": "*"}
    with mock.patch.dict("os.environ", env):
        t = Transport("http://localhost:9999", client=elasticapm_client)
        assert not isinstance(t.http, urllib3.poolmanager.ProxyManager)
def test_no_proxy_host(elasticapm_client):
    """A NO_PROXY entry matching the target host disables the proxy."""
    env = {"HTTPS_PROXY": "https://example.com", "NO_PROXY": "localhost"}
    with mock.patch.dict("os.environ", env):
        t = Transport("http://localhost:9999", client=elasticapm_client)
        assert not isinstance(t.http, urllib3.poolmanager.ProxyManager)
def test_no_proxy_all(elasticapm_client):
    # NOTE(review): this is an exact duplicate of test_no_proxy_star
    # (same NO_PROXY="*" environment); it was presumably meant to exercise a
    # different NO_PROXY value -- confirm against upstream history.
    with mock.patch.dict("os.environ", {"HTTPS_PROXY": "https://example.com", "NO_PROXY": "*"}):
        transport = Transport("http://localhost:9999", client=elasticapm_client)
        assert not isinstance(transport.http, urllib3.poolmanager.ProxyManager)
def test_header_encodings(elasticapm_client):
    """
    Tests that headers are encoded as bytestrings. If they aren't,
    urllib assumes it needs to encode the data as well, which is already a zlib
    encoded bytestring, and explodes.
    """
    headers = {compat.text_type("X"): compat.text_type("V")}
    elasticapm_client.server_version = (8, 0) # avoid making server_info request
    transport = Transport("http://localhost:9999", headers=headers, client=elasticapm_client)
    transport.start_thread()
    try:
        with mock.patch("elasticapm.transport.http.urllib3.PoolManager.urlopen") as mock_urlopen:
            mock_urlopen.return_value = mock.Mock(status=202)
            transport.send("")
            # Inspect the first urlopen call to check what was actually sent.
            _, args, kwargs = mock_urlopen.mock_calls[0]
            if compat.PY2:
                # On Python 2 the URL argument must also be a bytestring.
                assert isinstance(args[1], compat.binary_type)
            for k, v in kwargs["headers"].items():
                assert isinstance(k, compat.binary_type)
                assert isinstance(v, compat.binary_type)
    finally:
        transport.close()
@pytest.mark.flaky(reruns=3) # test is flaky on Windows
def test_ssl_verify_fails(waiting_httpsserver, elasticapm_client):
    """Sending to an HTTPS server with an untrusted cert raises a TransportException."""
    waiting_httpsserver.serve_content(code=202, content="", headers={"Location": "http://example.com/foo"})
    transport = Transport(waiting_httpsserver.url, client=elasticapm_client)
    transport.start_thread()
    try:
        with pytest.raises(TransportException) as exc_info:
            # The original bound the result to an unused `url` variable;
            # send() raises here, so nothing is returned.
            transport.send(compat.b("x"))
        # Assert on the exception itself: since pytest 5, str() of the
        # ExceptionInfo wrapper is its repr, not the exception message.
        assert "certificate verify failed" in str(exc_info.value)
    finally:
        transport.close()
@pytest.mark.flaky(reruns=3) # test is flaky on Windows
@pytest.mark.filterwarnings("ignore:Unverified HTTPS")
def test_ssl_verify_disable(waiting_httpsserver, elasticapm_client):
    # With verify_server_cert=False the untrusted test cert is accepted and
    # the Location header is returned.
    waiting_httpsserver.serve_content(code=202, content="", headers={"Location": "https://example.com/foo"})
    transport = Transport(waiting_httpsserver.url, verify_server_cert=False, client=elasticapm_client)
    transport.start_thread()
    try:
        url = transport.send(compat.b("x"))
        assert url == "https://example.com/foo"
    finally:
        transport.close()
@pytest.mark.flaky(reruns=3) # test is flaky on Windows
def test_ssl_verify_disable_http(waiting_httpserver, elasticapm_client):
    """
    Make sure that ``assert_hostname`` isn't passed in for http requests, even
    with verify_server_cert=False
    """
    # Plain-HTTP request must still succeed with verification disabled.
    waiting_httpserver.serve_content(code=202, content="", headers={"Location": "http://example.com/foo"})
    transport = Transport(waiting_httpserver.url, verify_server_cert=False, client=elasticapm_client)
    transport.start_thread()
    try:
        url = transport.send(compat.b("x"))
        assert url == "http://example.com/foo"
    finally:
        transport.close()
@pytest.mark.flaky(reruns=3) # test is flaky on Windows
def test_ssl_cert_pinning_http(waiting_httpserver, elasticapm_client):
    """
    Won't fail, as with the other cert pinning test, since certs aren't relevant
    for http, only https.
    """
    waiting_httpserver.serve_content(code=202, content="", headers={"Location": "http://example.com/foo"})
    # A deliberately wrong pinned cert: ignored because the URL is http.
    transport = Transport(
        waiting_httpserver.url,
        server_cert=os.path.join(os.path.dirname(__file__), "wrong_cert.pem"),
        verify_server_cert=True,
        client=elasticapm_client,
    )
    transport.start_thread()
    try:
        url = transport.send(compat.b("x"))
        assert url == "http://example.com/foo"
    finally:
        transport.close()
@pytest.mark.flaky(reruns=3) # test is flaky on Windows
def test_ssl_cert_pinning(waiting_httpsserver, elasticapm_client):
    # Pinning the correct server certificate lets the HTTPS request succeed.
    waiting_httpsserver.serve_content(code=202, content="", headers={"Location": "https://example.com/foo"})
    cur_dir = os.path.dirname(os.path.realpath(__file__))
    transport = Transport(
        waiting_httpsserver.url,
        # The test server's own certificate, shipped with the test suite.
        server_cert=os.path.join(cur_dir, "..", "ca/server.pem"),
        verify_server_cert=True,
        client=elasticapm_client,
    )
    transport.start_thread()
    try:
        url = transport.send(compat.b("x"))
        assert url == "https://example.com/foo"
    finally:
        transport.close()
@pytest.mark.flaky(reruns=3) # test is flaky on Windows
def test_ssl_cert_pinning_fails(waiting_httpsserver, elasticapm_client):
    # Pinning a wrong certificate must make the request fail with a
    # fingerprint mismatch.
    if compat.PY3:
        waiting_httpsserver.serve_content(code=202, content="", headers={"Location": "https://example.com/foo"})
        url = waiting_httpsserver.url
    else:
        # if we use the local test server here, execution blocks somewhere deep in OpenSSL on Python 2.7, presumably
        # due to a threading issue that has been fixed in later versions. To avoid that, we have to commit a minor
        # cardinal sin here and do an outside request to https://example.com (which will also fail the fingerprint
        # assertion).
        #
        # May the Testing Goat have mercy on our souls.
        url = "https://example.com"
    transport = Transport(
        url,
        server_cert=os.path.join(os.path.dirname(__file__), "wrong_cert.pem"),
        verify_server_cert=True,
        client=elasticapm_client,
    )
    transport.start_thread()
    try:
        with pytest.raises(TransportException) as exc_info:
            transport.send(compat.b("x"))
    finally:
        transport.close()
    # Checked after cleanup so the transport is closed even on failure.
    assert "Fingerprints did not match" in exc_info.value.args[0]
def test_config_url(elasticapm_client):
    """The agent-config endpoint is derived from the events endpoint."""
    events_url = "http://example.com/" + constants.EVENTS_API_PATH
    transport = Transport(events_url, client=elasticapm_client)
    assert transport._config_url == "http://example.com/" + constants.AGENT_CONFIG_PATH
def test_get_config(waiting_httpserver, elasticapm_client):
    # get_config() returns (etag, parsed JSON body, max-age seconds) and must
    # send a plain-JSON request (no ndjson/gzip event headers).
    waiting_httpserver.serve_content(
        code=200, content=b'{"x": "y"}', headers={"Cache-Control": "max-age=5", "Etag": "2"}
    )
    url = waiting_httpserver.url
    transport = Transport(
        url + "/" + constants.EVENTS_API_PATH,
        client=elasticapm_client,
        headers={"Content-Type": "application/x-ndjson", "Content-Encoding": "gzip"},
    )
    version, data, max_age = transport.get_config("1", {})
    assert version == "2"
    assert data == {"x": "y"}
    assert max_age == 5
    # The event-stream headers must not leak into the config request.
    assert "Content-Encoding" not in waiting_httpserver.requests[0].headers
    assert waiting_httpserver.requests[0].headers["Content-Type"] == "application/json"
@mock.patch("urllib3.poolmanager.PoolManager.urlopen")
def test_get_config_handle_exception(mock_urlopen, caplog, elasticapm_client):
    # On a transport error, get_config() keeps the previous version, falls
    # back to the default 300s max-age, and logs the HTTP error.
    transport = Transport("http://example.com/" + constants.EVENTS_API_PATH, client=elasticapm_client)
    mock_urlopen.side_effect = urllib3.exceptions.RequestError(transport.http, "http://example.com/", "boom")
    with caplog.at_level("DEBUG", "elasticapm.transport.http"):
        version, data, max_age = transport.get_config("1", {})
    assert version == "1"
    assert max_age == 300
    record = caplog.records[-1]
    assert "HTTP error" in record.msg
def test_get_config_cache_headers_304(waiting_httpserver, caplog, elasticapm_client):
    # A 304 Not Modified keeps the current version (sent via If-None-Match),
    # returns no data, and honours the server's max-age.
    waiting_httpserver.serve_content(code=304, content=b"", headers={"Cache-Control": "max-age=5"})
    url = waiting_httpserver.url
    transport = Transport(url + "/" + constants.EVENTS_API_PATH, client=elasticapm_client)
    with caplog.at_level("DEBUG", "elasticapm.transport.http"):
        version, data, max_age = transport.get_config("1", {})
    assert waiting_httpserver.requests[0].headers["If-None-Match"] == "1"
    assert version == "1"
    assert data is None
    assert max_age == 5
    record = caplog.records[-1]
    assert "Configuration unchanged" in record.msg
def test_get_config_bad_cache_control_header(waiting_httpserver, caplog, elasticapm_client):
    # An unparseable max-age falls back to the 300s default and is logged,
    # while the config payload itself is still applied.
    waiting_httpserver.serve_content(
        code=200, content=b'{"x": "y"}', headers={"Cache-Control": "max-age=fifty", "Etag": "2"}
    )
    url = waiting_httpserver.url
    transport = Transport(url + "/" + constants.EVENTS_API_PATH, client=elasticapm_client)
    with caplog.at_level("DEBUG", "elasticapm.transport.http"):
        version, data, max_age = transport.get_config("1", {})
    assert version == "2"
    assert data == {"x": "y"}
    assert max_age == 300
    record = caplog.records[-1]
    assert record.message == "Could not parse Cache-Control header: max-age=fifty"
def test_get_config_empty_response(waiting_httpserver, caplog, elasticapm_client):
    # A 200 with an empty body yields no new config (previous version kept)
    # and logs the anomaly.
    waiting_httpserver.serve_content(code=200, content=b"", headers={"Cache-Control": "max-age=5"})
    url = waiting_httpserver.url
    transport = Transport(url + "/" + constants.EVENTS_API_PATH, client=elasticapm_client)
    with caplog.at_level("DEBUG", "elasticapm.transport.http"):
        version, data, max_age = transport.get_config("1", {})
    assert version == "1"
    assert data is None
    assert max_age == 5
    record = caplog.records[-1]
    assert record.message == "APM Server answered with empty body and status code 200"
def test_use_certifi(elasticapm_client):
    # By default the transport trusts certifi's CA bundle.
    transport = Transport("/" + constants.EVENTS_API_PATH, client=elasticapm_client)
    assert transport.ca_certs == certifi.where()
    # Disabling use_certifi via a dynamic config update clears ca_certs.
    elasticapm_client.config.update("2", use_certifi=False)
    assert not transport.ca_certs
@pytest.mark.parametrize(
    "version,expected",
    [
        ("1.2.3", (1, 2, 3)),
        ("1.2.3-alpha1", (1, 2, 3, "alpha1")),
        ("1.2.3alpha1", (1, 2, "3alpha1")),
        ("", ()),
    ],
)
def test_server_version_to_tuple(version, expected):
    # Dotted numeric parts become ints; a "-suffix" becomes a trailing string
    # element; non-numeric tails stay attached to their dotted part.
    assert version_string_to_tuple(version) == expected
def test_fetch_server_info(waiting_httpserver, elasticapm_client):
    # The reported version string is parsed into a tuple on the client.
    waiting_httpserver.serve_content(
        code=200,
        content=b'{"version": "8.0.0-alpha1"}',
    )
    url = waiting_httpserver.url
    transport = Transport(url + "/" + constants.EVENTS_API_PATH, client=elasticapm_client)
    transport.fetch_server_info()
    assert elasticapm_client.server_version == (8, 0, 0, "alpha1")
def test_fetch_server_info_no_json(waiting_httpserver, caplog, elasticapm_client):
    # A malformed (non-JSON) body leaves server_version unset and logs a
    # warning rather than raising.
    waiting_httpserver.serve_content(
        code=200,
        content=b'"version": "8.0.0-alpha1"',
    )
    url = waiting_httpserver.url
    transport = Transport(url + "/" + constants.EVENTS_API_PATH, client=elasticapm_client)
    with caplog.at_level("WARNING"):
        transport.fetch_server_info()
    assert elasticapm_client.server_version is None
    assert_any_record_contains(caplog.records, "JSON decoding error while fetching server information")
def test_fetch_server_info_no_version(waiting_httpserver, caplog, elasticapm_client):
waiting_httpserver.serve_content(
code=200,
content=b"{}",
)
url = waiting_httpserver.url
transport = | |
<filename>frameworks/pycellchem-2.0/src/RD/ReactionDiffusion.py
#---------------------------------------------------------------------------
#
# ReactionDiffusion.py: implementation of reaction-diffusion chemical systems
#
# originally based on the breve Hypercycle.[tz/py] demo by <NAME>
# <<EMAIL>>, www.spiderland.org
#
# by <NAME>, Univ. Basel, Switzerland, January 2010
# 20150910: removed breve dependencies to run within PyCellChemistry
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copyright (C) 2015 <NAME>
# Contact: http://www.artificial-chemistries.org/
#
# This file is part of PyCellChemistry.
#
# PyCellChemistry is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 3, as published by the Free Software Foundation.
#
# PyCellChemistry is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyCellChemistry, see file COPYING. If not, see
# http://www.gnu.org/licenses/
#
import sys
sys.path.insert(0, '..')
from artchem.ReactionVessel import *
import WritePNG as png
class ReactionDiffusionSystem( ODESystem ):
""" a PDE integrator for a reaction-diffusion system """
def __init__( self, x, y, dx=1.0, periodic=True, nneighs=4 ):
ODESystem.__init__( self )
self.sizex = x # grid size (x, y)
self.sizey = y
self.dx = dx # square grid cells of size (dx, dx)
self.periodic = periodic # boundary conditions
self.nneighs = self.set_neighbors(nneighs) # neighborhood config
self.conc = None # concentrations of all species on grid
self.dcdt = None # spatial dcdt for PDE integration
self.prod = None
self.rate = None
self.diff = None
self.diffcoef = None # diffusion coefficients
self.color = {} # color of a molecule
def close( self ):
""" closes the PDE system such that it can be integrated """
if self.me != None: return # already closed
ODESystem.close(self)
ns = self.nspecies()
x = self.sizex
y = self.sizey
self.conc = np.zeros((ns, x, y))
self.dcdt = np.zeros((x, y))
self.prod = np.zeros((x, y))
self.rate = np.zeros((x, y))
self.diff = np.zeros((x, y))
self.diffcoef = np.zeros(ns)
def set_diffcoef( self, mol, value ):
""" set the diffusion coefficient of molecule mol """
if (mol == '' or self.species.count(mol) < 1):
return
i = self.species.index(mol)
self.diffcoef[i] = value
def get_diffcoef( self, mol ):
""" returns the diffusion coefficient of molecule mol """
if (mol == '' or self.species.count(mol) < 1):
return 0.0
i = self.species.index(mol)
return self.diffcoef[i]
    def set_color( self, mol, color):
        """ set color of molecule to color=(red, green, blue) """
        # no validation here: a color may be assigned even before the
        # molecule is declared as a species
        self.color[mol] = color
def get_color( self, mol ):
""" returns the color that has been assigned to molecule mol """
if mol == '' or mol not in self.species or mol not in self.color:
return (0, 0, 0)
return self.color[mol]
    def set_periodic( self, flag ):
        """ periodic (toroidal) vs. non-periodic boundary conditions """
        # True selects toroidal wrap-around; False clamps at the grid edges
        self.periodic = flag
    def get_periodic( self ):
        """ True if the current setup is periodic boundary """
        return self.periodic
def set_neighbors( self, nn ):
""" set neighborhood configuration: nn =
2: simple (north and right neighbors)
4: <NAME>
6: hexagonal lattice
8: Moore
"""
if nn not in [ 2, 4, 6, 8 ]:
print >> sys.stderr, "ns =", self.ns
exit -1
self.nneighs = nn
    def get_neighbors( self ):
        """ returns the current neighborhood configuration """
        # one of 2 (simple), 4 (von Neumann), 6 (hexagonal) or 8 (Moore)
        return self.nneighs
def get_pos( self, x, y ):
""" make sure coordinates always fall within boundaries """
if (self.periodic):
x = (x + self.sizex) % self.sizex
y = (y + self.sizey) % self.sizey
else:
x = min(max(x, 0), self.sizex)
y = min(max(y, 0), self.sizey)
return (x, y)
def get_conc_by_index( self, idx, x, y ):
""" get concentration of molecule by index, respecting boundaries """
if (self.periodic):
(x, y) = self.get_pos(x, y)
return self.conc[idx, x, y]
elif (x >= 0 and x < self.sizex and y >= 0 and y < self.sizey):
return self.conc[idx, x, y]
else:
return 0.0
def get_conc( self, mol, x, y ):
""" get concentration of molecule by name, respecting boundaries """
if (mol == '' or self.species.count(mol) < 1):
return 0.0
i = self.species.index(mol)
return self.get_conc_by_index(i, x, y)
def set_conc( self, mol, conc, x, y ):
""" set the concentration of a molecule at position x,y to a
given value, respecting boundary conditions
"""
if (mol == '' or self.species.count(mol) < 1):
return
i = self.species.index(mol)
if (conc < 0):
conc = 0.0
if (self.periodic):
(x, y) = self.get_pos(x, y)
self.conc[i, x, y] = conc
elif (x >= 0 and x < self.sizex and y >= 0 and y < self.sizey):
self.conc[i, x, y] = conc
def deposit( self, mol, conc, x, y ):
""" deposit a given amount of a molecule at a precise location """
c = self.get_conc(mol, x, y)
self.set_conc(mol, c + conc, x, y)
def rnd_deposit( self, npoints, mol, conc, ampl=0.0):
""" deposit a random amount of a molecule at random locations """
if (mol == '' or self.species.count(mol) < 1):
return
c = conc
for i in range(npoints):
x = np.random.randint(self.sizex)
y = np.random.randint(self.sizey)
if (ampl > 0.0):
c = conc + ampl * np.random.random() - ampl / 2
self.deposit(mol, c, x, y)
    def reset( self, mol, x, y ):
        """ reset the concentration of a given molecule to zero """
        # delegates boundary handling and name validation to set_conc
        self.set_conc(mol, 0.0, x, y)
def resetAll( self, mol='', conc=0.0 ):
""" reset the concentration of a given molecule to a given
value, overall on the grid; if no molecule is specified,
reset the concentrations of all molecules
"""
if (conc < 0):
conc = 0.0
if (mol == ''):
self.conc.fill(conc)
return
if (self.species.count(mol) < 1):
return
i = self.species.index(mol)
self.conc[i].fill(conc)
def set_patch_at( self, mol, conc, x, y ):
""" create a patch of chemical at a given location """
self.set_conc( mol, conc, x, y )
self.set_conc( mol, conc, x, ( y + 1 ) )
self.set_conc( mol, conc, x, ( y - 1 ) )
self.set_conc( mol, conc, ( x + 1 ), y )
self.set_conc( mol, conc, ( x - 1 ), y )
self.set_conc( mol, conc, (x - 1), ( y - 1 ) )
self.set_conc( mol, conc, (x - 1), ( y + 1 ) )
self.set_conc( mol, conc, ( x + 1 ), (y - 1) )
self.set_conc( mol, conc, ( x + 1 ), (y + 1) )
def set_patch( self, mol, conc ):
""" create a patch of chemical at a random location """
if (self.sizex < 3 or self.sizey < 3):
return
x = 1 + np.random.randint( self.sizex - 2 )
y = 1 + np.random.randint( self.sizey - 2 )
self.set_patch_at( mol, conc, x, y )
def set_patches( self, npatches, mol, initconc ):
""" create some initial random patches of chemicals """
m = mol
for i in range(npatches):
if (mol == ''):
c = np.random.randint( self.ns )
m = self.species[c]
self.set_patch(m, initconc)
def add_patch_at( self, mol, conc, x, y ):
""" add some concentration to a patch of chemical at a given
location
"""
self.deposit( mol, conc, x, y )
self.deposit( mol, conc, x, ( y + 1 ) )
self.deposit( mol, conc, x, ( y - 1 ) )
self.deposit( mol, conc, ( x + 1 ), y )
self.deposit( mol, conc, ( x - 1 ), y )
self.deposit( mol, conc, (x - 1), ( y - 1 ) )
self.deposit( mol, conc, (x - 1), ( y + 1 ) )
self.deposit( mol, conc, ( x + 1 ), (y - 1) )
self.deposit( mol, conc, ( x + 1 ), (y + 1) )
def add_patch( self, mol, conc ):
""" add a patch of chemical at a random location """
if (self.sizex < 3 or self.sizey < 3):
return
x = 1 + np.random.randint( self.sizex - 2 )
y = 1 + np.random.randint( self.sizey - 2 )
self.add_patch_at( mol, conc, x, y )
def add_patches( self, | |
as described
above. Indices correspond to the indices of `tensor` that aren't in
`row_labels`. Has one index labelled `svd_label`+"out" which connects
to S.
S : Tensor
Tensor with data consisting of a diagonal matrix of singular values.
Has two indices labelled `svd_label`+"out" and `svd_label`+"in" which
are contracted with with the `svd_label`+"in" label of U and the
`svd_label`+"out" of V respectively.
Examples
--------
>>> a=random_tensor(2,3,4, labels = ["i0", "i1", "i2"])
>>> U,S,V = tensor_svd(a, ["i0", "i2"])
>>> print(U)
Tensor object: shape = (2, 4, 3), labels = ['i0', 'i2', 'svd_in']
>>> print(V)
Tensor object: shape = (3, 3), labels = ['svd_out', 'i1']
>>> print(S)
Tensor object: shape = (3, 3), labels = ['svd_out', 'svd_in']
Recombining the three tensors obtained from SVD, yeilds a tensor very close
to the original.
>>> temp=tn.contract(S, V, "svd_in", "svd_out")
>>> b=tn.contract(U, temp, "svd_in", "svd_out")
>>> tn.distance(a,b)
1.922161284937472e-15
"""
t = tensor.copy()
# Move labels in row_labels to the beginning of list, and reshape data
# accordingly
total_input_dimension = 1
for i, label in enumerate(row_labels):
t.move_index(label, i)
total_input_dimension *= t.data.shape[i]
column_labels = [x for x in t.labels if x not in row_labels]
old_shape = t.data.shape
total_output_dimension = int(np.product(t.data.shape) / total_input_dimension)
data_matrix = np.reshape(t.data, (total_input_dimension,
total_output_dimension))
try:
u, s, v = np.linalg.svd(data_matrix, full_matrices=False)
except (np.linalg.LinAlgError, ValueError):
# Try with different lapack driver
warnings.warn(('numpy.linalg.svd failed, trying scipy.linalg.svd with' +
' lapack_driver="gesvd"'))
try:
u, s, v = sp.linalg.svd(data_matrix, full_matrices=False,
lapack_driver='gesvd')
except ValueError:
# Check for inf's and nan's:
print("tensor_svd failed. Matrix contains inf's: "
+ str(np.isinf(data_matrix).any())
+ ". Matrix contains nan's: "
+ str(np.isnan(data_matrix).any()))
raise # re-raise the exception
# New shape original index labels as well as svd index
U_shape = list(old_shape[0:len(row_labels)])
U_shape.append(u.shape[1])
U = Tensor(data=np.reshape(u, U_shape), labels=row_labels + [svd_label + "in"])
V_shape = list(old_shape)[len(row_labels):]
V_shape.insert(0, v.shape[0])
V = Tensor(data=np.reshape(v, V_shape),
labels=[svd_label + "out"] + column_labels)
S = Tensor(data=np.diag(s), labels=[svd_label + "out", svd_label + "in"])
# Absorb singular values S into either V or U
# or take the square root of S and absorb into both
if absorb_singular_values == "left":
U_new = contract(U, S, ["svd_in"], ["svd_out"])
V_new = V
return U_new, V_new
elif absorb_singular_values == "right":
V_new = contract(S, V, ["svd_in"], ["svd_out"])
U_new = U
return U_new, V_new
elif absorb_singular_values == "both":
sqrtS = S.copy()
sqrtS.data = np.sqrt(sqrtS.data)
U_new = contract(U, sqrtS, ["svd_in"], ["svd_out"])
V_new = contract(sqrtS, V, ["svd_in"], ["svd_out"])
return U_new, V_new
else:
return U, S, V
def tensor_qr(tensor, row_labels, qr_label="qr_"):
    """
    Compute the QR decomposition of `tensor` after reshaping it into a matrix.
    Indices with labels in `row_labels` are fused to form a single index
    corresponding to the rows of the matrix (typically the left index of a
    matrix). The remaining indices are fused to form the column index. A QR
    decomposition is performed on this matrix, yielding two matrices q,r, where
    q is a rectangular matrix with orthonormal columns and r is upper
    triangular. These two matrices are then reshaped into tensors Q and R.
    Contracting Q and R along the indices labelled `qr_label` will yield the
    original input tensor `tensor`.
    Parameters
    ----------
    tensor : Tensor
        The tensor on which the QR decomposition will be performed.
    row_labels : list
        List of labels specifying the indices of `tensor` which will form the
        rows of the matrix on which the QR will be performed.
    qr_label : str
        Base label for the indices that are contracted between `Q` and `R`.
    Returns
    -------
    Q : Tensor
        Tensor obtained by reshaping the matrix q obtained from QR
        decomposition. Has indices labelled by `row_labels` corresponding to
        the indices labelled `row_labels` of `tensor` and has one index
        labelled `qr_label`+"in" which connects to `R`.
    R : Tensor
        Tensor obtained by reshaping the matrix r obtained by QR decomposition.
        Indices correspond to the indices of `tensor` that aren't in
        `row_labels`. Has one index labelled `qr_label`+"out" which connects
        to `Q`.
    Examples
    --------
    >>> from tncontract.tensor import *
    >>> t=random_tensor(2,3,4)
    >>> print(t)
    Tensor object: shape = (2, 3, 4), labels = ['i0', 'i1', 'i2']
    >>> Q,R = tensor_qr(t, ["i0", "i2"])
    >>> print(Q)
    Tensor object: shape = (2, 4, 3), labels = ['i0', 'i2', 'qr_in']
    >>> print(R)
    Tensor object: shape = (3, 3), labels = ['qr_out', 'i1']
    Recombining the two tensors obtained from `tensor_qr`, yields a tensor very
    close to the original
    >>> x = contract(Q, R, "qr_in", "qr_out")
    >>> print(x)
    Tensor object: shape = (2, 4, 3), labels = ['i0', 'i2', 'i1']
    >>> distance(x,t)
    9.7619164946377426e-16
    """
    t = tensor.copy()
    if not isinstance(row_labels, list):
        # If row_labels is not a list, convert to list with a single entry
        # "row_labels"
        row_labels = [row_labels]
    # Move labels in row_labels to the beginning of list, and reshape data
    # accordingly
    t.move_indices(row_labels, 0)
    # Compute the combined dimension of the row indices (they now come first)
    row_dimension = 1
    for i, label in enumerate(t.labels):
        if label not in row_labels:
            break
        row_dimension *= t.data.shape[i]
    column_labels = [x for x in t.labels if x not in row_labels]
    old_shape = t.data.shape
    # BUGFIX: np.prod replaces np.product, which was deprecated and removed
    # in NumPy 2.0
    total_output_dimension = int(np.prod(t.data.shape) / row_dimension)
    data_matrix = np.reshape(t.data, (row_dimension,
                                      total_output_dimension))
    q, r = np.linalg.qr(data_matrix, mode="reduced")
    # Reshape q and r back into tensors carrying the original index labels
    # plus the new qr index connecting them
    Q_shape = list(old_shape[0:len(row_labels)])
    Q_shape.append(q.shape[1])
    Q = Tensor(data=np.reshape(q, Q_shape), labels=row_labels + [qr_label + "in"])
    R_shape = list(old_shape)[len(row_labels):]
    R_shape.insert(0, r.shape[0])
    R = Tensor(data=np.reshape(r, R_shape), labels=[qr_label + "out"] +
               column_labels)
    return Q, R
def tensor_lq(tensor, row_labels, lq_label="lq_"):
    """
    Compute the LQ decomposition of `tensor` after reshaping it into a matrix.
    Indices with labels in `row_labels` are fused to form a single index
    corresponding to the rows of the matrix (typically the left index of a
    matrix). The remaining indices are fused to form the column index. An LQ
    decomposition is performed on this matrix, yielding two matrices l,q, where
    q is a rectangular matrix with orthonormal rows and l is lower
    triangular. These two matrices are then reshaped into tensors L and Q.
    Contracting L and Q along the indices labelled `lq_label` will yield the
    original input `tensor`. Note that the LQ decomposition is actually
    identical to the QR decomposition after a relabelling of indices.
    Parameters
    ----------
    tensor : Tensor
        The tensor on which the LQ decomposition will be performed.
    row_labels : list
        List of labels specifying the indices of `tensor` which will form the
        rows of the matrix on which the LQ decomposition will be performed.
    lq_label : str
        Base label for the indices that are contracted between `L` and `Q`.
    Returns
    -------
    Q : Tensor
        Tensor obtained by reshaping the matrix q obtained by LQ decomposition.
        Indices correspond to the indices of `tensor` that aren't in
        `row_labels`. Has one index labelled `lq_label`+"out" which connects
        to `L`.
    L : Tensor
        Tensor obtained by reshaping the matrix l obtained from LQ
        decomposition. Has indices labelled by `row_labels` corresponding to
        the indices labelled `row_labels` of `tensor` and has one index
        labelled `lq_label`+"in" which connects to `Q`.
    See Also
    --------
    tensor_qr
    """
    col_labels = [x for x in tensor.labels if x not in row_labels]
    temp_label = lbl.unique_label()
    # Note the LQ is essentially equivalent to a QR decomposition, only labels
    # are renamed: QR on the column indices, then swap the roles of the
    # connecting indices
    Q, L = tensor_qr(tensor, col_labels, qr_label=temp_label)
    Q.replace_label(temp_label + "in", lq_label + "out")
    L.replace_label(temp_label + "out", lq_label + "in")
    return L, Q
def truncated_svd(tensor, row_labels, chi=0, threshold=1e-15,
absorb_singular_values="right", absolute = True):
"""
Will perform svd of a tensor, as in tensor_svd, and provide approximate
decomposition by truncating all but the largest k singular values then
absorbing S into U, V or both. Truncation is performedby specifying the
parameter chi (number of singular values to keep).
Parameters
----------
chi : int, optional
Maximum number of singular values of each tensor to keep after
performing singular-value decomposition.
threshold : float
Threshold for the magnitude of singular values to keep.
If absolute then singular values which are less than threshold will be truncated.
If relative then singular values which are less than | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2013 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from functools import partial
import logging
from lxml import etree
from lxml.builder import E
import openerp
from openerp import SUPERUSER_ID
from openerp import pooler, tools
import openerp.exceptions
from openerp.osv import fields,osv, expression
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class groups(osv.osv):
    """Access groups: sets of users sharing access rights, rules and menus."""
    _name = "res.groups"
    _description = "Access Groups"
    _rec_name = 'full_name'

    def _get_full_name(self, cr, uid, ids, field, arg, context=None):
        """Getter for ``full_name``: "Application / Group" when the group
        belongs to an application category, otherwise just the group name."""
        res = {}
        for g in self.browse(cr, uid, ids, context):
            if g.category_id:
                res[g.id] = '%s / %s' % (g.category_id.name, g.name)
            else:
                res[g.id] = g.name
        return res

    def _search_group(self, cr, uid, obj, name, args, context=None):
        """Search support for ``full_name``; accepts plain group names as
        well as "Category / Group" paths, and lists thereof."""
        operand = args[0][2]
        operator = args[0][1]
        lst = True
        if isinstance(operand, bool):
            domains = [[('name', operator, operand)], [('category_id.name', operator, operand)]]
            # BUGFIX: the original condition read
            #   operator in expression.NEGATIVE_TERM_OPERATORS == (not operand)
            # which Python chains into
            #   (operator in NEG) and (NEG == (not operand))
            # and is therefore always False (a tuple never equals a bool).
            # Parenthesize to compare the two boolean conditions, mirroring
            # the equivalent test on the string branch below.
            if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not operand):
                return expression.AND(domains)
            else:
                return expression.OR(domains)
        if isinstance(operand, basestring):
            lst = False
            operand = [operand]
        where = []
        for group in operand:
            values = filter(bool, group.split('/'))
            group_name = values.pop().strip()
            category_name = values and '/'.join(values).strip() or group_name
            group_domain = [('name', operator, lst and [group_name] or group_name)]
            category_domain = [('category_id.name', operator, lst and [category_name] or category_name)]
            if operator in expression.NEGATIVE_TERM_OPERATORS and not values:
                # a negative search with no category part must also match
                # groups without any category
                category_domain = expression.OR([category_domain, [('category_id', '=', False)]])
            if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not values):
                sub_where = expression.AND([group_domain, category_domain])
            else:
                sub_where = expression.OR([group_domain, category_domain])
            if operator in expression.NEGATIVE_TERM_OPERATORS:
                where = expression.AND([where, sub_where])
            else:
                where = expression.OR([where, sub_where])
        return where

    _columns = {
        'name': fields.char('Name', size=64, required=True, translate=True),
        'users': fields.many2many('res.users', 'res_groups_users_rel', 'gid', 'uid', 'Users'),
        'model_access': fields.one2many('ir.model.access', 'group_id', 'Access Controls'),
        'rule_groups': fields.many2many('ir.rule', 'rule_group_rel',
            'group_id', 'rule_group_id', 'Rules', domain=[('global', '=', False)]),
        'menu_access': fields.many2many('ir.ui.menu', 'ir_ui_menu_group_rel', 'gid', 'menu_id', 'Access Menu'),
        'view_access': fields.many2many('ir.ui.view', 'ir_ui_view_group_rel', 'group_id', 'view_id', 'Views'),
        'comment' : fields.text('Comment', size=250, translate=True),
        'category_id': fields.many2one('ir.module.category', 'Application', select=True),
        'full_name': fields.function(_get_full_name, type='char', string='Group Name', fnct_search=_search_group),
    }
    _sql_constraints = [
        ('name_uniq', 'unique (category_id, name)', 'The name of the group must be unique within an application!')
    ]

    def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
        """Override: sort in Python when ordering on the computed
        ``full_name`` function field, which the database cannot order on."""
        # add explicit ordering if search is sorted on full_name
        if order and order.startswith('full_name'):
            ids = super(groups, self).search(cr, uid, args, context=context)
            gs = self.browse(cr, uid, ids, context)
            gs.sort(key=lambda g: g.full_name, reverse=order.endswith('DESC'))
            gs = gs[offset:offset+limit] if limit else gs[offset:]
            return map(int, gs)
        return super(groups, self).search(cr, uid, args, offset, limit, order, context, count)

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a group, suffixing its name with "(copy)"."""
        group_name = self.read(cr, uid, [id], ['name'])[0]['name']
        # BUGFIX: ``default`` defaults to None, on which .update() raised an
        # AttributeError; also avoid mutating the caller's dict
        default = dict(default or {})
        default.update({'name': _('%s (copy)')%group_name})
        return super(groups, self).copy(cr, uid, id, default, context)

    def write(self, cr, uid, ids, vals, context=None):
        """Override: reject names starting with "-" and flush the access
        rights cache after any change."""
        if 'name' in vals:
            if vals['name'].startswith('-'):
                raise osv.except_osv(_('Error'),
                        _('The name of the group can not start with "-"'))
        res = super(groups, self).write(cr, uid, ids, vals, context=context)
        self.pool.get('ir.model.access').call_cache_clearing_methods(cr)
        return res
groups()
class res_users(osv.osv):
""" User class. A res.users record models an OpenERP user and is different
from an employee.
res.users class now inherits from res.partner. The partner model is
used to store the data related to the partner: lang, name, address,
avatar, ... The user model is now dedicated to technical data.
"""
__admin_ids = {}
_uid_cache = {}
_inherits = {
'res.partner': 'partner_id',
}
_name = "res.users"
_description = 'Users'
    def _set_new_password(self, cr, uid, id, name, value, args, context=None):
        """Inverse method of the ``new_password`` function field: write the
        submitted value into the real ``password`` field."""
        if value is False:
            # Do not update the password if no value is provided, ignore silently.
            # For example web client submits False values for all empty fields.
            return
        if uid == id:
            # To change their own password users must use the client-specific change password wizard,
            # so that the new password is immediately used for further RPC requests, otherwise the user
            # will face unexpected 'Access Denied' exceptions.
            raise osv.except_osv(_('Operation Canceled'), _('Please use the change password wizard (in User Preferences or User menu) to change your own password.'))
        self.write(cr, uid, id, {'password': value})
def _get_password(self, cr, uid, ids, arg, karg, context=None):
return dict.fromkeys(ids, '')
_columns = {
'id': fields.integer('ID'),
'login_date': fields.date('Latest connection', select=1),
'partner_id': fields.many2one('res.partner', required=True,
string='Related Partner', ondelete='restrict',
help='Partner-related data of the user'),
'login': fields.char('Login', size=64, required=True,
help="Used to log into the system"),
'password': fields.char('Password', size=64, invisible=True,
help="Keep empty if you don't want the user to be able to connect on the system."),
'new_password': fields.function(_get_password, type='char', size=64,
fnct_inv=_set_new_password, string='Set Password',
help="Specify a value only when creating a user or if you're "\
"changing the user's password, otherwise leave empty. After "\
"a change of password, the user has to login again."),
'signature': fields.text('Signature'),
'active': fields.boolean('Active'),
'action_id': fields.many2one('ir.actions.actions', 'Home Action', help="If specified, this action will be opened at logon for this user, in addition to the standard menu."),
'menu_id': fields.many2one('ir.actions.actions', 'Menu Action', help="If specified, the action will replace the standard menu for this user."),
'groups_id': fields.many2many('res.groups', 'res_groups_users_rel', 'uid', 'gid', 'Groups'),
# Special behavior for this field: res.company.search() will only return the companies
# available to the current user (should be the user's companies?), when the user_preference
# context is set.
'company_id': fields.many2one('res.company', 'Company', required=True,
help='The company this user is currently working for.', context={'user_preference': True}),
'company_ids':fields.many2many('res.company','res_company_users_rel','user_id','cid','Companies'),
# backward compatibility fields
'user_email': fields.related('email', type='char',
deprecated='Use the email field instead of user_email. This field will be removed with OpenERP 7.1.'),
}
    def on_change_company_id(self, cr, uid, ids, company_id):
        """Return a client-side warning whenever the user switches company."""
        return {'warning' : {
                    'title': _("Company Switch Warning"),
                    'message': _("Please keep in mind that documents currently displayed may not be relevant after switching to another company. If you have unsaved changes, please make sure to save and close all forms before switching to a different company. (You can click on Cancel in the User Preferences now)"),
                }
        }
def onchange_state(self, cr, uid, ids, state_id, context=None):
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool.get('res.partner').onchange_state(cr, uid, partner_ids, state_id, context=context)
def onchange_type(self, cr, uid, ids, is_company, context=None):
""" Wrapper on the user.partner onchange_type, because some calls to the
partner form view applied to the user may trigger the
partner.onchange_type method, but applied to the user object.
"""
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool.get('res.partner').onchange_type(cr, uid, partner_ids, is_company, context=context)
def onchange_address(self, cr, uid, ids, use_parent_address, parent_id, context=None):
""" Wrapper on the user.partner onchange_address, because some calls to the
partner form view applied to the user may trigger the
partner.onchange_type method, but applied to the user object.
"""
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool.get('res.partner').onchange_address(cr, uid, partner_ids, use_parent_address, parent_id, context=context)
def _check_company(self, cr, uid, ids, context=None):
return all(((this.company_id in this.company_ids) or not this.company_ids) for this in self.browse(cr, uid, ids, context))
_constraints = [
(_check_company, 'The chosen company is not in the allowed companies for this user', ['company_id', 'company_ids']),
]
_sql_constraints = [
('login_key', 'UNIQUE (login)', 'You can not have two users with the same login !')
]
def _get_company(self,cr, uid, context=None, uid2=False):
if not uid2:
uid2 = uid
user = self.pool.get('res.users').read(cr, uid, uid2, ['company_id'], context)
company_id = user.get('company_id', False)
return company_id and company_id[0] or False
def _get_companies(self, cr, uid, context=None):
c = self._get_company(cr, uid, context)
if c:
return [c]
return False
def _get_menu(self,cr, uid, context=None):
dataobj = self.pool.get('ir.model.data')
try:
model, res_id = dataobj.get_object_reference(cr, uid, 'base', 'action_menu_admin')
if model != 'ir.actions.act_window':
return False
return res_id
except ValueError:
return False
def _get_group(self,cr, uid, context=None):
dataobj = self.pool.get('ir.model.data')
result = []
try:
dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_user')
result.append(group_id)
dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_partner_manager')
result.append(group_id)
except ValueError:
# If these groups does not exists anymore
pass
return result
_defaults = | |
Import a headless ResNet50
resnet = ResNet50(input_shape = (None, None, 3), include_top=False)
# Attached U-net from second last layer - activation_49
res_out = resnet.layers[-2].output
# Standard U-Net upsampling 512 -> 256 -> 128 -> 64
# Upsampling 1
up1 = UpSampling2D(size=(2,2))(res_out)
up1_conv = Conv2D(512, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up1)
prev_layer = resnet.get_layer("activation_40").output
merge1 = concatenate([prev_layer,up1_conv], axis = 3)
merge1_conv1 = Conv2D(512, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge1)
merge1_conv2 = Conv2D(512, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge1_conv1)
# Upsampling 2
up2 = UpSampling2D(size = (2,2))(merge1_conv2)
up2_conv = Conv2D(256, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up2)
prev_layer = resnet.get_layer("activation_22").output
merge2 = concatenate([prev_layer,up2_conv], axis = 3)
merge2_conv1 = Conv2D(256, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge2)
merge2_conv2 = Conv2D(256, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge2_conv1)
# Upsampling 3 & 4
up3 = UpSampling2D(size = (2,2))(merge2_conv2)
up3_conv1 = Conv2D(128, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up3)
up3_conv2 = Conv2D(128, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up3_conv1)
up4 = UpSampling2D(size = (2,2))(up3_conv2)
up4_conv = Conv2D(128, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up4)
prev_layer = resnet.get_layer("activation_1").output
merge3 = concatenate([prev_layer,up4_conv], axis = 3)
merge3_conv1 = Conv2D(128, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge3)
merge3_conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge3_conv1)
# Upsample 5
up5 = UpSampling2D(size = (2,2))(merge3_conv2)
up5_conv = Conv2D(64, 2, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up5)
merge5_conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(up5_conv)
merge5_conv2 = Conv2D(64, 3, activation = 'relu', padding = 'same',
kernel_initializer = 'he_normal')(merge5_conv1)
# Activation and reshape for training
activation = Conv2D(num_classes, 1, activation = "softmax")(merge5_conv2)
# Smoothing
smooth_conv1 = Conv2D(12, 7, activation='relu', padding='same',
kernel_initializer='he_normal')(activation)
smooth_conv2 = Conv2D(12, 7, activation='relu', padding='same',
kernel_initializer='he_normal')(smooth_conv1)
# Final classification
classification = Conv2D(num_classes, 1, activation = "softmax")(smooth_conv2)
output = Reshape((dim*dim, num_classes))(classification)
# Build model
model = Model(inputs=[resnet.input], outputs=[output])
return model
def ResNet_UNet_More_Params(dim=512, num_classes=6):
    """
    Build a ResNet50 encoder with a U-Net style decoder that keeps 256
    filters in every decoder stage (hence "more params").

    Skip connections are taken from the ResNet50 layers named
    "activation_40", "activation_22" and "activation_1".

    Input:
        dim - side length of the (square) training image. Should be a
              power of 2 so downsampling and upsampling sizes always
              match, e.g. 128 -> 64 -> 32 -> 64 -> 128. Only used to
              shape the flattened output for training.
        num_classes - number of classes in the whole problem; sets the
              channel count of the softmax output map, i.e.
              model.predict() returns an array that can be reshaped to
              (dim, dim, num_classes).
    Output:
        model - an uncompiled Keras model. Check output shape before use.
    """
    from keras.models import Model
    from keras.layers import Conv2D
    from keras.layers import UpSampling2D, Reshape, concatenate
    from keras.applications.resnet50 import ResNet50

    def _upsample(tensor, filters):
        # 2x upsample followed by a 2x2 "up-conv", as in the U-Net paper.
        x = UpSampling2D(size=(2, 2))(tensor)
        return Conv2D(filters, 2, activation='relu', padding='same',
                      kernel_initializer='he_normal')(x)

    def _conv_block(tensor, filters):
        # Two successive 3x3 convolutions applied after each merge.
        x = Conv2D(filters, 3, activation='relu', padding='same',
                   kernel_initializer='he_normal')(tensor)
        return Conv2D(filters, 3, activation='relu', padding='same',
                      kernel_initializer='he_normal')(x)

    # Headless ResNet50 encoder; decode from its second-last layer
    # (activation_49).
    resnet = ResNet50(input_shape=(None, None, 3), include_top=False)
    x = resnet.layers[-2].output
    # Decoder stage 1: skip connection from "activation_40".
    x = _upsample(x, 512)
    x = concatenate([resnet.get_layer("activation_40").output, x], axis=3)
    x = _conv_block(x, 512)
    # Decoder stage 2: skip connection from "activation_22".
    x = _upsample(x, 256)
    x = concatenate([resnet.get_layer("activation_22").output, x], axis=3)
    x = _conv_block(x, 256)
    # Decoder stages 3 & 4: two upsamples with an extra 2x2 conv between
    # them, then a skip connection from "activation_1".
    x = _upsample(x, 256)
    x = Conv2D(256, 2, activation='relu', padding='same',
               kernel_initializer='he_normal')(x)
    x = _upsample(x, 256)
    x = concatenate([resnet.get_layer("activation_1").output, x], axis=3)
    x = _conv_block(x, 256)
    # Decoder stage 5: final upsample back to input resolution (no skip).
    x = _upsample(x, 256)
    x = _conv_block(x, 256)
    # Per-pixel softmax over classes, flattened for training.
    x = Conv2D(num_classes, 1, activation="softmax")(x)
    output = Reshape((dim*dim, num_classes))(x)
    return Model(inputs=[resnet.input], outputs=[output])
def ResNet_UNet_BN(dim=512, num_classes=6):
    """
    Build a ResNet50 encoder with a U-Net style decoder in which every
    decoder convolution is followed by batch normalization.

    Skip connections are taken from the ResNet50 layers named
    "activation_40", "activation_22" and "activation_1".

    Input:
        dim - side length of the (square) training image. Should be a
              power of 2 so downsampling and upsampling sizes always
              match, e.g. 128 -> 64 -> 32 -> 64 -> 128. Only used to
              shape the flattened output for training.
        num_classes - number of classes in the whole problem; sets the
              channel count of the softmax output map, i.e.
              model.predict() returns an array that can be reshaped to
              (dim, dim, num_classes).
    Output:
        model - an uncompiled Keras model. Check output shape before use.
    """
    from keras.models import Model
    from keras.layers import Conv2D, BatchNormalization
    from keras.layers import UpSampling2D, Reshape, concatenate
    from keras.applications.resnet50 import ResNet50

    def _conv_bn(tensor, filters, size):
        # ReLU convolution followed by batch normalization.
        x = Conv2D(filters, size, activation='relu', padding='same',
                   kernel_initializer='he_normal')(tensor)
        return BatchNormalization()(x)

    # Headless ResNet50 encoder; decode from its second-last layer
    # (activation_49).
    resnet = ResNet50(input_shape=(None, None, 3), include_top=False)
    x = resnet.layers[-2].output
    # Decoder stage 1: skip connection from "activation_40".
    x = UpSampling2D(size=(2, 2))(x)
    x = _conv_bn(x, 512, 2)
    x = concatenate([resnet.get_layer("activation_40").output, x], axis=3)
    x = _conv_bn(x, 512, 3)
    x = _conv_bn(x, 512, 3)
    # Decoder stage 2: skip connection from "activation_22".
    x = UpSampling2D(size=(2, 2))(x)
    x = _conv_bn(x, 256, 2)
    x = concatenate([resnet.get_layer("activation_22").output, x], axis=3)
    x = _conv_bn(x, 256, 3)
    x = _conv_bn(x, 256, 3)
    # Decoder stage 3: two 2x2 convs, no skip connection.
    x = UpSampling2D(size=(2, 2))(x)
    x = _conv_bn(x, 128, 2)
    x = _conv_bn(x, 128, 2)
    # Decoder stage 4: skip connection from "activation_1".
    x = UpSampling2D(size=(2, 2))(x)
    x = _conv_bn(x, 128, 2)
    x = concatenate([resnet.get_layer("activation_1").output, x], axis=3)
    x = _conv_bn(x, 128, 3)
    x = _conv_bn(x, 128, 3)
    # Decoder stage 5: final upsample back to input resolution (no skip).
    x = UpSampling2D(size=(2, 2))(x)
    x = _conv_bn(x, 64, 2)
    x = _conv_bn(x, 64, 3)
    x = _conv_bn(x, 64, 3)
    # Per-pixel softmax over classes, flattened for training.
    x = Conv2D(num_classes, 1, activation="softmax")(x)
    output = Reshape((dim*dim, num_classes))(x)
    return Model(inputs=[resnet.input], outputs=[output])
def ResNet_UNet_Dropout(dim=512, num_classes=6, dropout=0.5, final_activation=True):
"""
Returns a ResNet50 Nework with a U-Net
like upsampling stage. Inlcudes skip connections
from previous ResNet50 layers.
Uses a SpatialDrop on the final layer as introduced
in https://arxiv.org/pdf/1411.4280.pdf, 2015.
Input:
dim - the size of the input image. Note that is should be
a square of 2 so that downsampling and upsampling
always match. ie. 128 -> 64 -> 32 -> 64 -> 128
This is only needed for training.
num_classes - the number of classes in the whole problem. Used | |
at DLPy example folder.
Returns
-------
:class:`MomentumSolver`
'''
    def __init__(self, momentum=0.9, learning_rate=0.001, learning_rate_policy='fixed', gamma=0.1, step_size=10,
                 power=0.75, use_locking=True, clip_grad_max=None, clip_grad_min=None, steps=None,
                 fcmp_learning_rate=None, lr_scheduler=None):
        """Initialize the shared Solver options and register the momentum method."""
        # Delegate every common optimizer option to the Solver base class.
        Solver.__init__(self, learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
                        clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
        # Identify the training method and expose the extra 'momentum' knob.
        self.set_method('momentum')
        self.add_parameter('momentum', momentum)
class AdamSolver(Solver):
    '''
    Adam solver object

    Parameters
    ----------
    beta1 : double, optional
        Exponential decay rate for the first moment in the Adam
        learning algorithm.
    beta2 : double, optional
        Exponential decay rate for the second moment in the Adam
        learning algorithm.
    learning_rate : double, optional
        Learning rate for the deep learning algorithm.
    learning_rate_policy : string, optional
        Learning rate policy for the deep learning algorithm.
        Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
        Default: FIXED
    gamma : double, optional
        Gamma for the learning rate policy.
    step_size : int, optional
        Step size when the learning rate policy is set to STEP.
    power : double, optional
        Power for the learning rate policy.
    use_locking : bool, optional
        When it is false, the gradients are computed asynchronously
        with multiple threads.
    clip_grad_max : double, optional
        Maximum gradient value; all gradients greater than the
        specified value are set to it.
    clip_grad_min : double, optional
        Minimum gradient value; all gradients less than the
        specified value are set to it.
    steps : list-of-ints, optional
        Epoch counts at which the learning rate is multiplied by
        gamma. For example, with {5, 9, 13} the learning rate is
        multiplied by gamma after the fifth, ninth, and thirteenth
        epochs.
    fcmp_learning_rate : string, optional
        FCMP learning rate function.
    lr_scheduler : LRScheduler, optional
        Learning rate policy object. DLPy provides FixedLR, StepLR,
        MultiStepLR, PolynomialLR, ReduceLROnPlateau and CyclicLR,
        and custom policies can also be defined; see the DLPy
        example folder for more examples.

    Returns
    -------
    :class:`AdamSolver`
    '''

    def __init__(self, beta1=0.9, beta2=0.999, learning_rate=0.001, learning_rate_policy='fixed', gamma=0.1,
                 step_size=10, power=0.75, use_locking=True, clip_grad_max=None, clip_grad_min=None, steps=None,
                 fcmp_learning_rate=None, lr_scheduler=None):
        # Options shared by every Solver subclass, in the order the base
        # initializer expects them.
        common = (learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
                  clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
        Solver.__init__(self, *common)
        self.set_method('adam')
        # Register the Adam-specific moment decay rates.
        for name, value in (('beta1', beta1), ('beta2', beta2)):
            self.add_parameter(name, value)
class LBFGSolver(Solver):
    '''
    LBFG solver object

    Parameters
    ----------
    m : int
        Specifies the number of corrections used in the L-BFGS update.
    max_line_search_iters : int
        Specifies the maximum number of line search iterations for
        L-BFGS solver.
    max_iters : int
        Specifies the maximum number of iterations for the L-BFGS solver.
        When the miniBatchSize option is not specified, each iteration
        goes through at least one epoch. When the miniBatchSize option is
        specified, each L-BFGS iteration processes one mini-batch.
        The L-BFGS solver stops when the iteration number reaches the value
        of the maxIters= option or the epoch number reaches the value of
        the maxEpochs= option.
    backtrack_ratio : double
        Specifies the backtrack ratio of line search iterations for L-BFGS solver.
    learning_rate : double, optional
        Specifies the learning rate for the deep learning algorithm.
    learning_rate_policy : string, optional
        Specifies the learning rate policy for the deep learning algorithm.
        Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
        Default: FIXED
    gamma : double, optional
        Specifies the gamma for the learning rate policy.
    step_size : int, optional
        Specifies the step size when the learning rate policy is set to STEP.
    power : double, optional
        Specifies the power for the learning rate policy.
    use_locking : bool, optional
        When it is false, the gradients are computed asynchronously with
        multiple threads.
    clip_grad_max : double, optional
        Specifies the maximum gradient value. All gradients that are greater
        than the specified value are set to the specified value.
    clip_grad_min : double, optional
        Specifies the minimum gradient value. All gradients that are less
        than the specified value are set to the specified value.
    steps : list-of-ints, optional
        specifies a list of epoch counts. When the current epoch matches one
        of the specified steps, the learning rate is multiplied by the value
        of the gamma parameter. For example, if you specify {5, 9, 13}, then
        the learning rate is multiplied by gamma after the fifth, ninth, and
        thirteenth epochs.
    fcmp_learning_rate : string, optional
        specifies the FCMP learning rate function.
    lr_scheduler : LRScheduler, optional
        Specifies learning rate policy. DLPy provides FixedLR, StepLR,
        MultiStepLR, PolynomialLR, ReduceLROnPlateau and CyclicLR,
        and custom policies can also be defined; see the DLPy example
        folder for more examples.

    Returns
    -------
    :class:`LBFGSolver`
    '''

    def __init__(self, m, max_line_search_iters, max_iters, backtrack_ratio, learning_rate=0.001,
                 learning_rate_policy='fixed', gamma=0.1, step_size=10, power=0.75, use_locking=True,
                 clip_grad_max=None, clip_grad_min=None, steps=None, fcmp_learning_rate=None, lr_scheduler=None):
        # Delegate every common optimizer option to the Solver base class.
        Solver.__init__(self, learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
                        clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
        self.set_method('lbfg')
        # Register the L-BFGS-specific options.
        # NOTE(review): the original code called self.add_parameters(...),
        # which does not match the add_parameter(...) API used by every
        # sibling solver (MomentumSolver, AdamSolver, NatGradSolver);
        # use the same method here for consistency.
        self.add_parameter('m', m)
        self.add_parameter('maxlinesearchiters', max_line_search_iters)
        self.add_parameter('maxiters', max_iters)
        self.add_parameter('backtrackratio', backtrack_ratio)
class NatGradSolver(Solver):
    '''
    Natural gradient solver object

    Parameters
    ----------
    approximation_type : int, optional
        Approximate natural gradient type.
    learning_rate : double, optional
        Learning rate for the deep learning algorithm.
    learning_rate_policy : string, optional
        Learning rate policy for the deep learning algorithm.
        Valid Values: FIXED, STEP, POLY, INV, MULTISTEP
        Default: FIXED
    gamma : double, optional
        Gamma for the learning rate policy.
    step_size : int, optional
        Step size when the learning rate policy is set to STEP.
    power : double, optional
        Power for the learning rate policy.
    use_locking : bool, optional
        When it is false, the gradients are computed asynchronously
        with multiple threads.
    clip_grad_max : double, optional
        Maximum gradient value; all gradients greater than the
        specified value are set to it.
    clip_grad_min : double, optional
        Minimum gradient value; all gradients less than the
        specified value are set to it.
    steps : list-of-ints, optional
        Epoch counts at which the learning rate is multiplied by
        gamma. For example, with {5, 9, 13} the learning rate is
        multiplied by gamma after the fifth, ninth, and thirteenth
        epochs.
    fcmp_learning_rate : string, optional
        FCMP learning rate function.
    lr_scheduler : LRScheduler, optional
        Learning rate policy object. DLPy provides FixedLR, StepLR,
        MultiStepLR, PolynomialLR, ReduceLROnPlateau and CyclicLR,
        and custom policies can also be defined; see the DLPy
        example folder for more examples.

    Returns
    -------
    :class:`NatGradSolver`
    '''

    def __init__(self, approximation_type=1, learning_rate=0.001, learning_rate_policy='fixed', gamma=0.1,
                 step_size=10, power=0.75, use_locking=True, clip_grad_max=None, clip_grad_min=None, steps=None,
                 fcmp_learning_rate=None, lr_scheduler=None):
        # Options shared by every Solver subclass, in the order the base
        # initializer expects them.
        common = (learning_rate, learning_rate_policy, gamma, step_size, power, use_locking,
                  clip_grad_max, clip_grad_min, steps, fcmp_learning_rate, lr_scheduler)
        Solver.__init__(self, *common)
        self.set_method('natgrad')
        # Register the natural-gradient-specific approximation type.
        self.add_parameter('approximationtype', approximation_type)
class Optimizer(DLPyDict):
'''
Optimizer object
Parameters
----------
algorithm : Algorithm, optional
Specifies the deep learning algorithm.
mini_batch_size : int, optional
Specifies the number of observations per thread in a mini-batch.
You can use this parameter to control the number of observations
that the action uses on each worker for each thread to compute
the gradient prior to updating the weights. Larger values use more
memory. When synchronous SGD is used (the default), the total
mini-batch size is equal to miniBatchSize * number of threads *
number of workers. When asynchronous SGD is used (by specifying
the elasticSyncFreq parameter), each worker trains its own local
model. In this case, the total mini-batch size for each worker is
miniBatchSize * number of threads.
seed : double, optional
Specifies the random number seed for the random number generator
in SGD. The default value, 0, and negative values indicate to use
random number streams based on the computer clock. Specify a value
that is greater than 0 for a reproducible random number sequence.
max_epochs : int, optional
Specifies the maximum number of epochs. For SGD with a single-machine
server or a session that uses one worker on a distributed server,
one | |
+ e + d)
data.write('\n')
data.write(f + e + e)
data.write('\n')
data.write(f + e + f)
data.write('\n')
data.write(f + e + g)
data.write('\n')
data.write(f + e + h)
data.write('\n')
data.write(f + e + i)
data.write('\n')
data.write(f + e + j)
data.write('\n')
data.write(f + f + a)
data.write('\n')
data.write(f + f + b)
data.write('\n')
data.write(f + f + c)
data.write('\n')
data.write(f + f + d)
data.write('\n')
data.write(f + f + e)
data.write('\n')
data.write(f + f + f)
data.write('\n')
data.write(f + f + g)
data.write('\n')
data.write(f + f + h)
data.write('\n')
data.write(f + f + i)
data.write('\n')
data.write(f + f + j)
data.write('\n')
data.write(f + g + a)
data.write('\n')
data.write(f + g + b)
data.write('\n')
data.write(f + g + c)
data.write('\n')
data.write(f + g + d)
data.write('\n')
data.write(f + g + e)
data.write('\n')
data.write(f + g + f)
data.write('\n')
data.write(f + g + g)
data.write('\n')
data.write(f + g + h)
data.write('\n')
data.write(f + g + i)
data.write('\n')
data.write(f + g + j)
data.write('\n')
data.write(f + h + a)
data.write('\n')
data.write(f + h + b)
data.write('\n')
data.write(f + h + c)
data.write('\n')
data.write(f + h + d)
data.write('\n')
data.write(f + h + e)
data.write('\n')
data.write(f + h + f)
data.write('\n')
data.write(f + h + g)
data.write('\n')
data.write(f + h + h)
data.write('\n')
data.write(f + h + i)
data.write('\n')
data.write(f + h + j)
data.write('\n')
data.write(f + i + a)
data.write('\n')
data.write(f + i + b)
data.write('\n')
data.write(f + i + c)
data.write('\n')
data.write(f + i + d)
data.write('\n')
data.write(f + i + e)
data.write('\n')
data.write(f + i + f)
data.write('\n')
data.write(f + i + g)
data.write('\n')
data.write(f + i + h)
data.write('\n')
data.write(f + i + i)
data.write('\n')
data.write(f + i + j)
data.write('\n')
data.write(f + j + a)
data.write('\n')
data.write(f + j + b)
data.write('\n')
data.write(f + j + c)
data.write('\n')
data.write(f + j + d)
data.write('\n')
data.write(f + j + e)
data.write('\n')
data.write(f + j + f)
data.write('\n')
data.write(f + j + g)
data.write('\n')
data.write(f + j + h)
data.write('\n')
data.write(f + j + i)
data.write('\n')
data.write(f + j + j)
data.write('\n')
data.write(g + a + b)
data.write('\n')
data.write(g + a + c)
data.write('\n')
data.write(g + a + d)
data.write('\n')
data.write(g + a + e)
data.write('\n')
data.write(g + a + f)
data.write('\n')
data.write(g + a + g)
data.write('\n')
data.write(g + a + h)
data.write('\n')
data.write(g + a + i)
data.write('\n')
data.write(g + a + j)
data.write('\n')
data.write(g + b + a)
data.write('\n')
data.write(g + b + b)
data.write('\n')
data.write(g + b + c)
data.write('\n')
data.write(g + b + d)
data.write('\n')
data.write(g + b + e)
data.write('\n')
data.write(g + b + f)
data.write('\n')
data.write(g + b + g)
data.write('\n')
data.write(g + b + h)
data.write('\n')
data.write(g + b + i)
data.write('\n')
data.write(g + b + j)
data.write('\n')
data.write(g + c + a)
data.write('\n')
data.write(g + c + b)
data.write('\n')
data.write(g + c + c)
data.write('\n')
data.write(g + c + d)
data.write('\n')
data.write(g + c + e)
data.write('\n')
data.write(g + c + f)
data.write('\n')
data.write(g + c + g)
data.write('\n')
data.write(g + b + g)
data.write('\n')
data.write(g + b + h + birthday)
data.write('\n')
data.write(g + b + i + birthday)
data.write('\n')
data.write(g + b + j + birthday)
data.write('\n')
data.write(g + c + a + birthday)
data.write('\n')
data.write(g + c + b + birthday)
data.write('\n')
data.write(g + c + c + birthday)
data.write('\n')
data.write(g + c + d + birthday)
data.write('\n')
data.write(g + c + e + birthday)
data.write('\n')
data.write(g + c + f + birthday)
data.write('\n')
data.write(g + c + g + birthday)
data.write('\n')
data.write(g + c + h)
data.write('\n')
data.write(g + c + i)
data.write('\n')
data.write(g + c + j)
data.write('\n')
data.write(g + d + a)
data.write('\n')
data.write(g + d + b)
data.write('\n')
data.write(g + d + c)
data.write('\n')
data.write(g + d + d)
data.write('\n')
data.write(g + d + e)
data.write('\n')
data.write(g + d + f)
data.write('\n')
data.write(g + d + g)
data.write('\n')
data.write(g + d + h)
data.write('\n')
data.write(g + d + i)
data.write('\n')
data.write(g + d + j)
data.write('\n')
data.write(g + e + a)
data.write('\n')
data.write(g + e + b)
data.write('\n')
data.write(g + e + c)
data.write('\n')
data.write(g + e + d)
data.write('\n')
data.write(g + e + e)
data.write('\n')
data.write(g + e + f)
data.write('\n')
data.write(g + e + g)
data.write('\n')
data.write(g + e + h)
data.write('\n')
data.write(g + e + i)
data.write('\n')
data.write(g + e + j)
data.write('\n')
data.write(g + f + a)
data.write('\n')
data.write(g + f + b)
data.write('\n')
data.write(g + f + c)
data.write('\n')
data.write(g + f + d)
data.write('\n')
data.write(g + f + e)
data.write('\n')
data.write(g + f + f)
data.write('\n')
data.write(g + f + g)
data.write('\n')
data.write(g + f + h)
data.write('\n')
data.write(g + f + i)
data.write('\n')
data.write(g + f + j)
data.write('\n')
data.write(g + g + a)
data.write('\n')
data.write(g + g + b)
data.write('\n')
data.write(g + g + c)
data.write('\n')
data.write(g + g + d)
data.write('\n')
data.write(g + g + e)
data.write('\n')
data.write(g + g + f)
data.write('\n')
data.write(g + g + g)
data.write('\n')
data.write(g + g + h)
data.write('\n')
data.write(g + g + i)
data.write('\n')
data.write(g + g + j)
data.write('\n')
data.write(g + h + a)
data.write('\n')
data.write(g + h + b)
data.write('\n')
data.write(g + h + c)
data.write('\n')
data.write(g + h + d)
data.write('\n')
data.write(g + h + e)
data.write('\n')
data.write(g + h + f)
data.write('\n')
data.write(g + h + g)
data.write('\n')
data.write(g + h + h)
data.write('\n')
data.write(g + h + i)
data.write('\n')
data.write(g + h + j)
data.write('\n')
data.write(g + i + a)
data.write('\n')
data.write(g + i + b)
data.write('\n')
data.write(g + i + c)
data.write('\n')
data.write(g + i + d)
data.write('\n')
data.write(g + i + e)
data.write('\n')
data.write(g + i + f)
data.write('\n')
data.write(g + i + g)
data.write('\n')
data.write(g + i + h)
data.write('\n')
data.write(g + i + i)
data.write('\n')
data.write(g + i + j)
data.write('\n')
data.write(g + j + a)
data.write('\n')
data.write(g + j + b)
data.write('\n')
data.write(g + j + c)
data.write('\n')
data.write(g + j + d)
data.write('\n')
data.write(g + j + e)
data.write('\n')
data.write(g + j + f)
data.write('\n')
data.write(g + j + g)
data.write('\n')
data.write(g + j + h)
data.write('\n')
data.write(g + j + i)
data.write('\n')
data.write(g + j + j)
data.write('\n')
data.write(h + a + b)
data.write('\n')
data.write(h + a + c)
data.write('\n')
data.write(h + a + d)
data.write('\n')
data.write(h + a + e)
data.write('\n')
data.write(h + a + f)
data.write('\n')
data.write(h + a + g)
data.write('\n')
data.write(h + a + h)
data.write('\n')
data.write(h + a + i)
data.write('\n')
data.write(h + a + j)
data.write('\n')
data.write(h + b + a)
data.write('\n')
data.write(h + b + b)
data.write('\n')
data.write(h + b + c)
data.write('\n')
data.write(h + b + d)
data.write('\n')
data.write(h + b + e)
data.write('\n')
data.write(h + b + f)
data.write('\n')
data.write(h + b + g)
data.write('\n')
data.write(h + b + h)
data.write('\n')
data.write(h + b + i)
data.write('\n')
data.write(h + b + j)
data.write('\n')
data.write(h + c + a)
data.write('\n')
data.write(h + c + b)
data.write('\n')
data.write(h + c + c)
data.write('\n')
data.write(h + c + d)
data.write('\n')
data.write(h + c + e)
data.write('\n')
data.write(h + c + f)
data.write('\n')
data.write(h + c + g)
data.write('\n')
data.write(h + c + h)
data.write('\n')
data.write(h + c + i)
data.write('\n')
data.write(h + c + j)
data.write('\n')
data.write(h + d + a)
data.write('\n')
data.write(h + d + b)
data.write('\n')
data.write(h + d + c)
data.write('\n')
data.write(h + d + d)
data.write('\n')
data.write(h + d + e)
data.write('\n')
data.write(h + d + f)
data.write('\n')
data.write(h + d + g)
data.write('\n')
data.write(h + d + h)
data.write('\n')
data.write(h + d + i)
data.write('\n')
data.write(h + d + j)
data.write('\n')
data.write(h + e + a)
data.write('\n')
data.write(h + e + b)
data.write('\n')
data.write(h + e + c)
data.write('\n')
data.write(h + e + d)
data.write('\n')
data.write(h | |
*= pi * radius / tex_size_main[1]
v_start = v
else:
v = 0.
for i in range(segs_h + 1):
angle_h = delta_angle_h * i + (0. if inverted else slice_radians)
x = radius_h * cos(angle_h)
y = radius_h * sin(angle_h) * (-1. if inverted else 1.)
if smooth:
normal = Vec3(x, y, z).normalized() * (-1. if inverted else 1.)
if has_uvs:
u = i / segs_h
if tex_size_main:
u *= arc / tex_size_main[0]
if mat_main:
u, v = mat_main.xform_point(Point2(u, v_start))
else:
u = 0.
vert = {
"pos": (x, y, z),
"normal": normal if smooth else None,
"uv": (u, v)
}
verts.append(vert)
if not smooth and 0 < i < segs_h:
verts.append(vert.copy())
else:
main_start_index = 0
# Define the bottom pole triangle vertices
if smooth:
normal = (0., 0., 1. if inverted else -1.)
for i in range(segs_h):
if has_uvs:
u = i / segs_h
v = 0.
if tex_size_main:
u *= arc / tex_size_main[0]
if mat_main:
u, v = mat_main.xform_point(Point2(u, v))
else:
u = v = 0.
vert = {
"pos": (0., 0., -radius),
"normal": normal if smooth else None,
"uv": (u, v)
}
verts.append(vert)
vertex_count = len(verts)
angle_v = bottom_angle + delta_angle_v
z = radius * -cos(angle_v)
# Define the vertices along the bottom pole or cap
radius_h = radius * sin(angle_v)
if has_uvs:
v = angle_v / pi
if tex_size_main:
v *= pi * radius / tex_size_main[1]
v_start = v
else:
v = 0.
for i in range(segs_h + 1):
angle_h = delta_angle_h * i + (0. if inverted else slice_radians)
x = radius_h * cos(angle_h)
y = radius_h * sin(angle_h) * (-1. if inverted else 1.)
if smooth:
normal = Vec3(x, y, z).normalized() * (-1. if inverted else 1.)
if has_uvs:
u = i / segs_h
if tex_size_main:
u *= arc / tex_size_main[0]
if mat_main:
u, v = mat_main.xform_point(Point2(u, v_start))
else:
u = 0.
vert = {
"pos": (x, y, z),
"normal": normal if smooth else None,
"uv": (u, v)
}
verts.append(vert)
if not smooth and 0 < i < segs_h:
verts.append(vert.copy())
# Define the vertex order of the polygons along the bottom pole or cap
if bottom_clip > -1.:
n = segs_h if smooth else segs_h * 2 - 1
f = 1 if smooth else 2
for i in range(0, segs_h * f, f):
vi1 = i + index_offset
vi2 = vi1 + 1
vi3 = vi2 + n
vi4 = vi3 + 1
indices.extend((vi1, vi4, vi3) if inverted else (vi1, vi2, vi3))
indices.extend((vi1, vi2, vi4) if inverted else (vi2, vi4, vi3))
if not smooth:
self._make_flat_shaded((vi1, vi2, vi3, vi4), verts)
if smooth:
index_offset += segs_h + 1
else:
for i in range(segs_h):
j = i + index_offset
n = 0 if smooth else i
inds = (j, j + segs_h + 1 + n, j + segs_h + n)
indices.extend(inds)
if not smooth:
self._make_flat_shaded(inds, verts)
if smooth:
index_offset += segs_h
# Define the main quad vertices
if not smooth:
index_offset = len(verts)
vert_index = len(verts) + segs_h + 1
n = segs_h + 1 if smooth else segs_h * 2
f = 1 if smooth else 2
for i in range(1 if smooth else 0, segs_v - 1):
angle_v = bottom_angle + delta_angle_v * (i + 1)
z = radius * -cos(angle_v)
radius_h = radius * sin(angle_v)
if has_uvs:
v = angle_v / pi
if tex_size_main:
v *= pi * radius / tex_size_main[1]
v_start = v
else:
v = 0.
for j in range(segs_h + 1):
angle_h = delta_angle_h * j + (0. if inverted else slice_radians)
x = radius_h * cos(angle_h)
y = radius_h * sin(angle_h) * (-1. if inverted else 1.)
if smooth:
normal = Vec3(x, y, z).normalized() * (-1. if inverted else 1.)
if has_uvs:
u = j / segs_h
if tex_size_main:
u *= arc / tex_size_main[0]
if mat_main:
u, v = mat_main.xform_point(Point2(u, v_start))
else:
u = 0.
vert = {
"pos": (x, y, z),
"normal": normal if smooth else None,
"uv": (u, v)
}
verts.append(vert)
if not smooth and 0 < j < segs_h:
verts.append(vert.copy())
# Define the vertex order of the main quads
if i > 0:
for j in range(0, segs_h * f, f):
vi1 = i * n + j + index_offset
vi2 = vi1 - n
vi3 = vi2 + 1
vi4 = vi1 + 1
indices.extend((vi1, vi2, vi4) if inverted else (vi1, vi2, vi3))
indices.extend((vi2, vi3, vi4) if inverted else (vi1, vi3, vi4))
if not smooth:
self._make_flat_shaded((vi1, vi2, vi3, vi4), verts)
if not smooth and i > 0:
# duplicate latest added vertices
verts.extend(v.copy() for v in verts[-segs_h * 2:])
index_offset += segs_h * 2
if top_clip < 1.:
# Define the top edge vertices
z = top_height
radius_h = sqrt(radius * radius - z * z)
if has_uvs:
v = (pi - acos(z / radius)) / pi
if tex_size_main:
v *= pi * radius / tex_size_main[1]
v_start = v
else:
v = 0.
for i in range(segs_h + 1):
angle_h = delta_angle_h * i + (0. if inverted else slice_radians)
x = radius_h * cos(angle_h)
y = radius_h * sin(angle_h) * (-1. if inverted else 1.)
if smooth:
normal = Vec3(x, y, z).normalized() * (-1. if inverted else 1.)
if has_uvs:
u = i / segs_h
if tex_size_main:
u *= arc / tex_size_main[0]
if mat_main:
u, v = mat_main.xform_point(Point2(u, v_start))
else:
u = 0.
vert = {
"pos": (x, y, z),
"normal": normal if smooth else None,
"uv": (u, v)
}
verts.append(vert)
if not smooth and 0 < i < segs_h:
verts.append(vert.copy())
else:
# Define the top pole triangle vertices
if smooth:
normal = (0., 0., -1. if inverted else 1.)
for i in range(segs_h):
if has_uvs:
u = i / segs_h
v = 1.
if tex_size_main:
u *= arc / tex_size_main[0]
v *= pi * radius / tex_size_main[1]
if mat_main:
u, v = mat_main.xform_point(Point2(u, v))
else:
u = v = 0.
vert = {
"pos": (0., 0., radius),
"normal": normal if smooth else None,
"uv": (u, v)
}
verts.append(vert)
index_offset = len(verts) - 1
# Define the vertex order of the polygons along the top pole or cap
if top_clip < 1.:
n = segs_h if smooth else segs_h * 2 - 1
f = 1 if smooth else 2
index_offset -= (segs_h - 1) * f + n + 2
for i in range(0, segs_h * f, f):
vi1 = i + index_offset
vi2 = vi1 + 1
vi3 = vi2 + n
vi4 = vi3 + 1
indices.extend((vi1, vi2, vi4) if inverted else (vi1, vi2, vi3))
indices.extend((vi1, vi4, vi3) if inverted else (vi2, vi4, vi3))
if not smooth:
self._make_flat_shaded((vi1, vi2, vi3, vi4), verts)
if smooth:
index_offset += segs_h + 1
else:
# Define the vertex order of the top pole triangles
for i in range(segs_h):
j = index_offset - i
n = 0 if smooth else i
inds = (j, j - segs_h - 1 - n, j - segs_h - n)
indices.extend(inds)
if not smooth:
self._make_flat_shaded(inds, verts)
vert_ranges["main"] = (main_start_index, len(verts))
if top_clip < 1.:
index_offset = len(verts)
if segs_tc and -radius < z < radius:
# Define the top cap triangle vertices
top_cap_start_index = index_offset
normal = (0., 0., -1. if inverted else 1.)
if has_uvs:
if tex_units and "top_cap" in tex_units:
tex_size = tex_units["top_cap"]
else:
tex_size = None
mat = self._get_tex_xform("top_cap")
u = v = .5
if has_uvs and mat:
u, v = mat.xform_point(Point2(u, v))
vert = {
"pos": (0., 0., z),
"normal": normal,
"uv": (u, v)
}
verts.append(vert)
r = radius_h / segs_tc
for i in range(segs_h + 1):
angle_h = delta_angle_h * i + (0. if inverted else slice_radians)
c = cos(angle_h)
s = sin(angle_h) * (-1. if inverted else 1.)
x = r * c
y = r * s
if has_uvs:
u = .5 | |
# repository: hashnfv/hashnfv-sdnvpn
#!/usr/bin/python
#
# Copyright (c) 2017 All rights reserved
# This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
import logging
import os
import sys
import time
import requests
import re
import subprocess
import functest.utils.openstack_utils as os_utils
from opnfv.deployment.factory import Factory as DeploymentFactory
from sdnvpn.lib import config as sdnvpn_config
# Module-level logger shared by every helper in this file.
logger = logging.getLogger('sdnvpn_test_utils')
# Common test configuration (flavor names/sizes, defaults) loaded once at import.
common_config = sdnvpn_config.CommonConfig()
# OpenDaylight RESTCONF credentials used by check_odl_fib().
ODL_USER = 'admin'
ODL_PASS = '<PASSWORD>'  # NOTE(review): redacted placeholder — confirm the real value is injected at deploy time
def create_custom_flavor():
    """Get or create the custom Nova flavor described by the common config.

    Returns whatever ``os_utils.get_or_create_flavor`` returns.
    """
    cfg = common_config
    return os_utils.get_or_create_flavor(cfg.custom_flavor_name,
                                         cfg.custom_flavor_ram,
                                         cfg.custom_flavor_disk,
                                         cfg.custom_flavor_vcpus)
def create_net(neutron_client, name):
    """Create a neutron network named *name* and return its id.

    Exits the process on failure.
    """
    logger.debug("Creating network %s", name)
    net_id = os_utils.create_neutron_net(neutron_client, name)
    if net_id:
        return net_id
    logger.error(
        "There has been a problem when creating the neutron network")
    sys.exit(-1)
def create_subnet(neutron_client, name, cidr, net_id):
    """Create a neutron subnet inside network *net_id* and return its id.

    Exits the process on failure.
    """
    logger.debug("Creating subnet %s in network %s with cidr %s",
                 name, net_id, cidr)
    subnet_id = os_utils.create_neutron_subnet(neutron_client,
                                               name,
                                               cidr,
                                               net_id)
    if subnet_id:
        return subnet_id
    logger.error(
        "There has been a problem when creating the neutron subnet")
    sys.exit(-1)
def create_network(neutron_client, net, subnet1, cidr1,
                   router, subnet2=None, cidr2=None):
    """Create a network with a router and one or two subnets.

    Returns ``(net_id, subnet_id, router_id)``; when *subnet2* is given,
    ``subnet_id`` refers to the second subnet. Exits on any failure.

    Network assoc won't work for networks/subnets created by this function.
    It is an ODL limitation due to it handling routers as vpns.
    See https://bugs.opendaylight.org/show_bug.cgi?id=6962
    """
    network_dic = os_utils.create_network_full(neutron_client,
                                               net,
                                               subnet1,
                                               router,
                                               cidr1)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        sys.exit(-1)
    net_id = network_dic["net_id"]
    subnet_id = network_dic["subnet_id"]
    router_id = network_dic["router_id"]
    if subnet2 is None:
        return net_id, subnet_id, router_id
    # Optionally hang a second subnet off the same network.
    logger.debug("Creating and attaching a second subnet...")
    subnet_id = os_utils.create_neutron_subnet(
        neutron_client, subnet2, cidr2, net_id)
    if not subnet_id:
        logger.error(
            "There has been a problem when creating the second subnet")
        sys.exit(-1)
    logger.debug("Subnet '%s' created successfully" % subnet_id)
    return net_id, subnet_id, router_id
def create_instance(nova_client,
                    name,
                    image_id,
                    network_id,
                    sg_id,
                    secgroup_name=None,
                    fixed_ip=None,
                    compute_node='',
                    userdata=None,
                    files=None,
                    **kwargs
                    ):
    """Boot a Nova instance, wait for it to become active, then attach it
    to security group *sg_id*.

    ``kwargs['flavor']`` overrides the default flavor. Exits the process
    when booting fails; otherwise returns the instance object.
    """
    kwargs.setdefault('flavor', common_config.default_flavor)
    logger.info("Creating instance '%s'..." % name)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n"
        " network=%s\n secgroup=%s \n hypervisor=%s \n"
        " fixed_ip=%s\n files=%s\n userdata=\n%s\n"
        % (name, kwargs['flavor'], image_id, network_id, sg_id,
           compute_node, fixed_ip, files, userdata))
    instance = os_utils.create_instance_and_wait_for_active(
        kwargs['flavor'],
        image_id,
        network_id,
        name,
        config_drive=True,
        userdata=userdata,
        av_zone=compute_node,
        fixed_ip=fixed_ip,
        files=files)
    if instance is None:
        logger.error("Error while booting instance.")
        sys.exit(-1)
    # Python 2 dict API: log the first IP of the first attached network.
    logger.debug("Instance '%s' booted successfully. IP='%s'." %
                 (name, instance.networks.itervalues().next()[0]))
    # Retrieve IP of INSTANCE
    # instance_ip = instance.networks.get(network_id)[0]
    if secgroup_name:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, secgroup_name))
    else:
        logger.debug("Adding '%s' to security group '%s'..."
                     % (name, sg_id))
    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)
    return instance
def generate_ping_userdata(ips_array, ping_count=10):
    """Build a cloud-init shell script that endlessly pings every address
    in *ips_array*, printing ``ping <ip> OK`` / ``ping <ip> KO`` per try.

    :param ips_array: iterable of IP address strings to ping.
    :param ping_count: number of echo requests per ping invocation.
    :return: the userdata script as a single string.
    """
    # Each address is prefixed with a space so the script reads "set a b c".
    ips = "".join(" %s" % ip for ip in ips_array)
    script = ("#!/bin/sh\n"
              "set%s\n"
              "while true; do\n"
              " for i do\n"
              "  ip=$i\n"
              "  ping -c %s $ip 2>&1 >/dev/null\n"
              "  RES=$?\n"
              "  if [ \"Z$RES\" = \"Z0\" ] ; then\n"
              "   echo ping $ip OK\n"
              "  else echo ping $ip KO\n"
              "  fi\n"
              " done\n"
              " sleep 1\n"
              "done\n")
    return script % (ips, ping_count)
def generate_userdata_common():
    """Return the shared cloud-init preamble that installs the test SSH
    key material for the cirros user (key, authorized_keys, permissions).
    """
    lines = (
        "#!/bin/sh\n",
        "sudo mkdir -p /home/cirros/.ssh/\n",
        "sudo chown cirros:cirros /home/cirros/.ssh/\n",
        "sudo chown cirros:cirros /home/cirros/id_rsa\n",
        "mv /home/cirros/id_rsa /home/cirros/.ssh/\n",
        # Public key written on a single script line.
        "sudo echo ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgnWtSS98Am516e"
        "stBsq0jbyOB4eLMUYDdgzsUHsnxFQCtACwwAg9/2uq3FoGUBUWeHZNsT6jcK9"
        "sCMEYiS479CUCzbrxcd8XaIlK38HECcDVglgBNwNzX/WDfMejXpKzZG61s98rU"
        "ElNvZ0YDqhaqZGqxIV4ejalqLjYrQkoly3R+2k= "
        "cirros@test1>/home/cirros/.ssh/authorized_keys\n",
        "sudo chown cirros:cirros /home/cirros/.ssh/authorized_keys\n",
        "chmod 700 /home/cirros/.ssh\n",
        "chmod 644 /home/cirros/.ssh/authorized_keys\n",
        "chmod 600 /home/cirros/.ssh/id_rsa\n",
    )
    return "".join(lines)
def generate_userdata_with_ssh(ips_array):
    """Return a cloud-init script that installs the shared SSH key setup
    and then loops forever ssh'ing each address in *ips_array*, printing
    the remote hostname or ``not reachable``.
    """
    header = generate_userdata_common()
    # Each address prefixed with a space so the loop reads "set a b c".
    ips = "".join(" %s" % ip for ip in ips_array)
    body = ("#!/bin/sh\n"
            "set%s\n"
            "while true; do\n"
            " for i do\n"
            "  ip=$i\n"
            "  hostname=$(ssh -y -i /home/cirros/.ssh/id_rsa "
            "cirros@$ip 'hostname' </dev/zero 2>/dev/null)\n"
            "  RES=$?\n"
            "  if [ \"Z$RES\" = \"Z0\" ]; then echo $ip $hostname;\n"
            "  else echo $ip 'not reachable';fi;\n"
            " done\n"
            " sleep 1\n"
            "done\n"
            % ips)
    return header + body
def get_installerHandler():
    """Return a deployment handler for the installer named by the
    INSTALLER_TYPE environment variable, or None for unsupported types.

    Only "fuel" and "apex" are supported; apex authenticates with an SSH
    key, fuel with a fixed root password.
    """
    installer_type = str(os.environ['INSTALLER_TYPE'].lower())
    installer_ip = get_installer_ip()
    if installer_type not in ["fuel", "apex"]:
        logger.warn("installer type %s is neither fuel nor apex."
                    "returning None for installer handler" % installer_type)
        return None
    if installer_type == "apex":
        return DeploymentFactory.get_handler(
            installer_type,
            installer_ip,
            'root',
            pkey_file="/root/.ssh/id_rsa")
    return DeploymentFactory.get_handler(
        installer_type,
        installer_ip,
        'root',
        'r00tme')
def get_nodes():
    """Return the node list reported by the detected installer handler."""
    handler = get_installerHandler()
    return handler.get_nodes()
def get_installer_ip():
    """Return the INSTALLER_IP environment variable as a string.

    Raises ``KeyError`` when the variable is not set.
    """
    installer_ip = os.environ['INSTALLER_IP']
    return str(installer_ip)
def get_instance_ip(instance):
    """Return the first IP of the instance's first network.

    Uses the Python 2 dict iterator API (``itervalues().next()``).
    """
    first_network_ips = instance.networks.itervalues().next()
    return first_network_ips[0]
def wait_for_instance(instance):
    """Poll an instance's console until a login prompt appears.

    Polls up to 40 times with 2 s pauses (replacing the fixed 80 s sleep
    this function superseded). Returns True when the prompt shows up,
    False otherwise.
    """
    logger.info("Waiting for instance %s to get a DHCP lease and "
                "prompt for login..." % instance.id)
    expected_regex = re.compile(".* login:")
    console_log = ""
    for _ in range(40):
        if expected_regex.search(console_log):
            break
        console_log = instance.get_console_output()
        time.sleep(2)
    if expected_regex.search(console_log):
        return True
    logger.error("Instance %s seems not to boot up properly."
                 % instance.id)
    return False
def wait_for_instances_up(*args):
    """Return True iff every given instance reaches a login prompt.

    All instances are polled (no short-circuit), matching the original
    behavior of building the full result list first.
    """
    results = [wait_for_instance(vm) for vm in args]
    return all(results)
def wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id):
    """Poll until network *net_id* is associated with BGPVPN *bgpvpn_id*.

    Tries up to 30 times, one second apart. Returns True on success,
    False when the association never appears.
    """
    tries = 30
    sleep_time = 1
    nets = []
    # Fixed: the original formatted (bgpvpn_id, net_id) into
    # "network %s ... BGPVPN %s", logging the ids swapped.
    logger.debug("Waiting for network %s to associate with BGPVPN %s "
                 % (net_id, bgpvpn_id))
    while tries > 0 and net_id not in nets:
        nets = get_bgpvpn_networks(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if net_id not in nets:
        logger.error("Association of network %s with BGPVPN %s failed" %
                     (net_id, bgpvpn_id))
        return False
    return True
def wait_for_bgp_net_assocs(neutron_client, bgpvpn_id, *args):
    """Return True iff every given network id associates with the BGPVPN.

    All networks are polled (no short-circuit).
    """
    results = [wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, net_id)
               for net_id in args]
    return all(results)
def wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id):
    """Poll until router *router_id* is associated with BGPVPN *bgpvpn_id*.

    Tries up to 30 times, one second apart. Returns True on success,
    False when the association never appears.
    """
    tries = 30
    sleep_time = 1
    routers = []
    # Fixed: the original formatted (bgpvpn_id, router_id) into
    # "router %s ... BGPVPN %s", logging the ids swapped.
    logger.debug("Waiting for router %s to associate with BGPVPN %s "
                 % (router_id, bgpvpn_id))
    while tries > 0 and router_id not in routers:
        routers = get_bgpvpn_routers(neutron_client, bgpvpn_id)
        time.sleep(sleep_time)
        tries -= 1
    if router_id not in routers:
        logger.error("Association of router %s with BGPVPN %s failed" %
                     (router_id, bgpvpn_id))
        return False
    return True
def wait_for_bgp_router_assocs(neutron_client, bgpvpn_id, *args):
    """Return True iff every given router id associates with the BGPVPN.

    All routers are polled (no short-circuit).
    """
    results = [wait_for_bgp_router_assoc(neutron_client, bgpvpn_id, router_id)
               for router_id in args]
    return all(results)
def wait_before_subtest(*args, **kwargs):
    """Placeholder delay between sub-tests.

    TODO: Replace delay with polling logic.
    """
    delay_seconds = 30
    time.sleep(delay_seconds)
def assert_and_get_compute_nodes(nova_client, required_node_number=2):
    """Get the compute nodes in the deployment.

    Exits the process if the deployment has fewer than
    *required_node_number* compute nodes; otherwise returns the list of
    hypervisors.
    """
    compute_nodes = os_utils.get_hypervisors(nova_client)
    num_compute_nodes = len(compute_nodes)
    # Fixed: the original compared against a hard-coded 2, silently
    # ignoring the required_node_number parameter.
    if num_compute_nodes < required_node_number:
        logger.error("There are %s compute nodes in the deployment. "
                     "Minimum number of nodes to complete the test is %s."
                     % (num_compute_nodes, required_node_number))
        sys.exit(-1)
    logger.debug("Compute nodes: %s" % compute_nodes)
    return compute_nodes
def open_icmp(neutron_client, security_group_id):
    """Ensure an ingress ICMP rule exists on *security_group_id*."""
    # NOTE(review): check_security_group_rules() returning True appears to
    # mean the rule can be added — confirm against os_utils.
    check_ok = os_utils.check_security_group_rules(neutron_client,
                                                   security_group_id,
                                                   'ingress',
                                                   'icmp')
    if not check_ok:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
        return
    created = os_utils.create_secgroup_rule(neutron_client,
                                            security_group_id,
                                            'ingress',
                                            'icmp')
    if not created:
        logger.error("Failed to create icmp security group rule...")
def open_http_port(neutron_client, security_group_id):
    """Ensure an ingress TCP/80 rule exists on *security_group_id*."""
    # NOTE(review): check_security_group_rules() returning True appears to
    # mean the rule can be added — confirm against os_utils.
    check_ok = os_utils.check_security_group_rules(neutron_client,
                                                   security_group_id,
                                                   'ingress',
                                                   'tcp',
                                                   80, 80)
    if not check_ok:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
        return
    created = os_utils.create_secgroup_rule(neutron_client,
                                            security_group_id,
                                            'ingress',
                                            'tcp',
                                            80, 80)
    if not created:
        logger.error("Failed to create http security group rule...")
def open_bgp_port(neutron_client, security_group_id):
    """Ensure an ingress TCP/179 (BGP) rule exists on *security_group_id*."""
    # NOTE(review): check_security_group_rules() returning True appears to
    # mean the rule can be added — confirm against os_utils.
    check_ok = os_utils.check_security_group_rules(neutron_client,
                                                   security_group_id,
                                                   'ingress',
                                                   'tcp',
                                                   179, 179)
    if not check_ok:
        logger.info("This rule exists for security group: %s"
                    % security_group_id)
        return
    created = os_utils.create_secgroup_rule(neutron_client,
                                            security_group_id,
                                            'ingress',
                                            'tcp',
                                            179, 179)
    if not created:
        logger.error("Failed to create bgp security group rule...")
def exec_cmd(cmd, verbose):
    """Run *cmd* through a shell and capture combined stdout/stderr.

    :param cmd: shell command line to execute.
    :param verbose: when truthy, log the cumulative output per line read.
    :return: tuple ``(output, success)``; `success` is False on a
        non-zero exit status.
    """
    logger.debug("Executing '%s'" % cmd)
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    # NOTE(review): readline() yields bytes on Python 3, which would break
    # this concatenation — the module targets Python 2 (cf. itervalues()).
    output = ""
    for line in iter(proc.stdout.readline, b''):
        output += line
        if verbose:
            logger.debug(output)
    proc.stdout.close()
    success = proc.wait() == 0
    if not success:
        logger.error("Command %s failed to execute." % cmd)
    return output, success
def check_odl_fib(ip, controller_ip):
    """Check that there is an entry in the ODL FIB for `ip`.

    Queries the OpenDaylight RESTCONF FIB endpoint on *controller_ip* and
    returns True iff *ip* appears in the response body. Returns False on
    a non-200 response.
    """
    url = "http://" + controller_ip + \
          ":8181/restconf/config/odl-fib:fibEntries/"
    # Fixed typo: "Querring" -> "Querying".
    logger.debug("Querying '%s' for FIB entries", url)
    res = requests.get(url, auth=(ODL_USER, ODL_PASS))
    if res.status_code != 200:
        logger.error("OpenDaylight response status code: %s", res.status_code)
        return False
    # Fixed: the original logged controller_ip here although the value
    # actually checked against the FIB is `ip`.
    logger.debug("Checking whether '%s' is in the OpenDaylight FIB"
                 % ip)
    logger.debug("OpenDaylight FIB: \n%s" % res.text)
    return ip in res.text
def run_odl_cmd(odl_node, cmd):
    '''Run a command in the OpenDaylight Karaf shell

    This is a bit flimsy because of shell quote escaping, make sure that
    the cmd passed does not have any top level double quotes or this
    function will break.
    The /dev/null is used because client works, but outputs something
    that contains "ERROR" and run_cmd doesn't like that.
    '''
    karaf_template = ('/opt/opendaylight/bin/client -h 127.0.0.1 "%s"'
                      ' 2>/dev/null')
    return odl_node.run_cmd(karaf_template % cmd)
def wait_for_cloud_init(instance):
success = True
# ubuntu images take a long time to start
tries = 20
sleep_time = 30
logger.info("Waiting for cloud init of instance: {}"
"".format(instance.name))
while tries > 0:
instance_log = instance.get_console_output()
if "Failed to run module" in instance_log:
success = False
logger.error("Cloud init failed to run. Reason: %s",
instance_log)
break
if re.search(r"Cloud-init v. .+ | |
then everything that applies to Term B also applies to Term
A.
"""
__slots__ = ()
names = ["negatively_regulates"]
class Ontology(object):
    """Abstract interface that every ontology implementation must satisfy."""

    def add_term(self, term):
        """Add `term` to this ontology.

        :Parameters:
        - `term`: the term to be added; an instance of `GOTerm`.
        """
        raise NotImplementedError

    def ensure_term(self, term_or_id):
        """Return a `GOTerm` given either the term itself or its GO ID.

        Lets APIs that expect a `GOTerm` transparently accept GO term
        IDs as well.
        """
        if isinstance(term_or_id, GOTerm):
            return term_or_id
        return self.get_term_by_id(term_or_id)

    def get_number_of_relationships(self):
        """Return the number of relationships in this ontology."""
        raise NotImplementedError

    def get_number_of_terms(self):
        """Return the number of terms in this ontology."""
        raise NotImplementedError

    def get_term_by_id(self, term_id):
        """Look up a term by its primary or any alternative GO ID.

        Terms carry one primary ID and possibly several alternative IDs;
        both kinds resolve here.

        Raises `NoSuchTermError` when no term carries `term_id`.

        :Parameters:
        - `term_id`: the primary or an alternative ID of a term we are
          looking for.

        :Returns:
        the `GOTerm` corresponding to the given `term_id`.
        """
        raise NotImplementedError

    def has_term(self, term):
        """Return whether `term` belongs to this ontology.

        :Parameters:
        - `term`: the term to look for; an instance of `GOTerm`.
        """
        raise NotImplementedError

    def remove_term(self, term):
        """Remove `term` from this ontology.

        Raises `NoSuchTermError` when the term is absent.

        :Parameters:
        - `term`: the term to remove; an instance of `GOTerm`.
        """
        raise NotImplementedError

    def add_relationship(self, term1, term2, relationship):
        """Record the triple ``term1 relationship term2``.

        Ontology triples have the shape `<SUBJECT> <PREDICATE> <OBJECT>`,
        e.g. ``"mitochondrion is_a organelle"``.

        :Parameters:
        - `term1`: the subject term; an instance of `GOTerm`.
        - `term2`: the object term; an instance of `GOTerm`.
        - `relationship`: the predicate term (relationship type)
        """
        raise NotImplementedError

    def get_relationships(self, subject_term=None, object_term=None):
        """Return relationships filtered by subject and/or object.

        ``None`` on either side acts as a wildcard: with both sides
        ``None`` every relationship is returned; with only one side
        given, all relationships where that term plays the corresponding
        role are returned.

        :Parameters:
        - `subject_term`: the subject term (`GOTerm`), or ``None`` for any.
        - `object_term`: the object term (`GOTerm`), or ``None`` for any.

        :Returns:
        a (possibly empty) list of matching relationships.
        """
        raise NotImplementedError

    def has_relationship(self, subject_term, object_term,
                         relationship=GORelationship):
        """Return whether a relationship of type `relationship` links
        `subject_term` (subject) to `object_term` (object).

        :Parameters:
        - `subject_term`: the subject term; an instance of `GOTerm`.
        - `object_term`: the object term; an instance of `GOTerm`.
        - `relationship`: the type of relationship we are interested in.
        """
        rels = self.get_relationships(subject_term, object_term)
        return any(isinstance(rel, relationship) for rel in rels)

    def remove_relationship(self, subject_term, object_term, relationship):
        """Remove the given relationship type between the two terms.

        See `add_relationship()` for the triple structure.

        :Parameters:
        - `subject_term`: the subject term; an instance of `GOTerm`.
        - `object_term`: the object term; an instance of `GOTerm`.
        - `relationship`: the type of the relationship to remove.
        """
        raise NotImplementedError

    def terms(self):
        """Return an iterable of every term in the ontology."""
        raise NotImplementedError

    def __contains__(self, term):
        """Support ``term in ontology`` expressions via `has_term`.

        :Parameters:
        - `term`: the term to look for; an instance of `GOTerm`.
        """
        return self.has_term(term)
class GeneOntologyNX(Ontology):
"""This class represents a gene ontology using NetworkX as the
underlying graph framework.
"""
def __init__(self, name=None, authority=None, identifier=None):
"""
:Parameters:
- `name`: name for the ontology
- `authority`: the name of the authority for this ontology
- `identifier`: an identifier for the ontology
"""
super(GeneOntologyNX, self).__init__()
self.name = name
self.authority = authority
self.identifier = identifier
# Store a reference to the NetworkX module here so we don't
# have to import it all the time. We cannot import NetworkX
# at the module level as the user might not have it.
try:
import networkx
# Check whether networkx is new enough to support dicts
# for nx.Graph.degree()
if isinstance(networkx.Graph().degree(), list):
raise ImportError
self._nx = networkx
except ImportError:
raise ImportError("networkx >= 1.4 is required to use %s" % \
(self.__class__.__name__, ))
# The NetworkX directed graph will serve as the backbone for
# operations.
self._internal_dag = self._nx.DiGraph()
# We'll use this so we can retrieve terms by their GO ID
# strings, too.
self._goid_dict = {}
def __repr__(self):
outstr = "<%s: %s>" % (self.__class__.__name__, self.name)
return outstr
# pylint: disable-msg=C0103
# C0103: invalid name
def _test_existence_in_internal_storage(self, term):
"""Check on the state of storage of a given term within all the
internal storage structures.
Returns a tuple of storage states, where `True` represents that
a term is stored by a storage structure, and `False` represents
it is not stored.
:Parameters:
- `term`: a `GOTerm` instance
"""
storage_states = (
term.id in self._internal_dag,
term.id in self._goid_dict
)
return storage_states
def has_term(self, term):
"""Checks whether the given term is in this ontology or not.
Raises `InternalStorageInconsistentError` in the event that
internal storage shows inconsistent states of storage for the
given term.
:Parameters:
- `term`: a `GOTerm` instance
"""
storage_states = self._test_existence_in_internal_storage(term)
# if all storage structures report existence, we're in a sane
# state; return True
if all(storage_states):
return True
# if all storage structures report no existence, we're in a sane
# state; return False
elif not any(storage_states):
return False
# if neither of those are true, something went horribly awry;
# raise an error
else:
raise InternalStorageInconsistentError("Term %s has"
" inconsistent states of storage." % term)
def add_term(self, term):
"""Add a term to the ontology.
:Parameters:
- `term`: a `GOTerm` instance
"""
if term.id in self._goid_dict:
raise ValueError("Term %s already exists in ontology." %
term.id)
if term.ontology is not None:
raise ValueError("Term %s is already added to another ontology." %
term.id)
# Add the term to this ontology
term.ontology = self
self._goid_dict[term.id] = term
self._internal_dag.add_node(term.id)
# Register all the alternative IDs of this term in the
# internal dict
for alt_id in term.aliases:
self._goid_dict[alt_id] = term
def get_number_of_terms(self):
"""Returns the number of terms in this ontology."""
return self._internal_dag.number_of_nodes()
def get_number_of_relationships(self):
"""Returns the number of relationships in this ontology."""
return self._internal_dag.number_of_edges()
def get_term_by_id(self, term_id):
"""Retrieve a term from the ontology by its GO ID.
This method also supports alternative IDs.
Raises `NoSuchTermError` if the given term is not in this
ontology.
:Parameters:
- `term_id`: a GO identifier (e.g., "GO:1234567")
"""
try:
return self._goid_dict[term_id]
except KeyError:
raise NoSuchTermError(term_id)
def remove_term(self, term):
"""Removes the given term from this ontology.
Raises `NoSuchTermError` if the given term is not in this
ontology.
:Parameters:
- `term`: the term to remove; an instance of `GOTerm`.
"""
try:
del self._goid_dict[term.id]
self._internal_dag.remove_node(term.id)
term.ontology = None
except KeyError:
raise NoSuchTermError(term.id)
def add_relationship(self, subject_term, object_term, relationship):
"""Add a relationship between two terms to the ontology.
Ontologies are composed of triples in the following form:
`<SUBJECT> <PREDICATE> <OBJECT>`
e.g., "mitochondrion is_a organelle"
We represent this as `term1 relationship term2`.
:Parameters:
- `subject_term`: the subject term; an instance of `GOTerm`.
- `object_term`: the object term; an instance of `GOTerm`.
- `relationship`: the predicate term (relationship type)
"""
# add the terms to the internal storage if they're not already
# there
for term in (subject_term, object_term):
if term not in self:
self.add_term(term)
self._internal_dag.add_edge(subject_term.id, object_term.id, \
relationship=relationship)
| |
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/MFD37/03-CTDBPD000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP03ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP04OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP01CNSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP03ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CP04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = | |
<gh_stars>1-10
import torch.nn as nn
import torch
import torch.functional as F
from graphs.models.attention_models.seq_base_models.mLSTM import LSTM
from graphs.models.attention_models.seq_base_models.mTransformerEncoder import Transformer_Encoder, myTransformerEncoderLayer
from graphs.models.attention_models.utils.positionalEncoders import *
import copy
from graphs.models.attention_models.stand_alone_att_vision import *
from graphs.models.attention_models.dynamic_cov import *
from graphs.models.custom_layers.attention import *
from graphs.models.custom_layers.MulilogueNet import *
from graphs.models.custom_layers.LSTHM import *
import einops
from graphs.models.attention_models.ViLBERT import MyViLBERT
class EEG_Encoder_E_3(nn.Module):
    """2-D convolutional EEG feature encoder.

    Input is expected as ``(batch, 1, channels, time)``; the batch-norm is
    applied to the raw input, then three conv stages with reflection padding
    and max-pooling progressively collapse the channel axis and downsample
    time.  ``dec`` scales the channel widths of every conv layer.
    """

    def __init__(self, dec):
        super().__init__()
        # Layer creation order is kept stable so seeded initialisation and
        # existing checkpoints remain compatible.
        self.pad_1 = nn.ReflectionPad2d((5, 5, 1, 1))
        self.conv1 = nn.Conv2d(1, 10 * dec, kernel_size=(2, 10), stride=(1, 1))
        self.pad_2 = nn.ReflectionPad2d((2, 2, 0, 0))
        self.conv2 = nn.Conv2d(10 * dec, 20 * dec, kernel_size=(1, 5), stride=(1, 1))
        self.conv3 = nn.Conv2d(20 * dec, 20 * dec, kernel_size=(4, 1), stride=(1, 1))
        self.maxpool = nn.MaxPool2d(kernel_size=(2, 3))
        self.maxpool_time = nn.MaxPool2d(kernel_size=(1, 3))
        self.relu = nn.ReLU()
        self.conv1_bn = nn.BatchNorm2d(1)

    def forward(self, x):
        out = self.conv1_bn(x)
        out = self.relu(self.conv1(self.pad_1(out)))
        out = self.maxpool(out)
        out = self.relu(self.conv2(self.pad_2(out)))
        out = self.relu(self.conv3(out))
        return self.maxpool_time(out)
class EEG_Encoder_Ch(nn.Module):
    """Per-channel 1-D EEG encoder.

    Three bottleneck-style conv stages (wide kernel -> 1x1 projection), the
    first two followed by a stride-4 max-pool over time.  Input is
    ``(batch, 1, time)``; ``dec`` scales all channel widths.
    """

    def __init__(self, dec):
        super().__init__()
        self.pad_1 = nn.ReflectionPad1d(2)
        self.conv1 = nn.Conv1d(1, 20 * dec, kernel_size=5, stride=1)
        self.conv2 = nn.Conv1d(20 * dec, 80 * dec, kernel_size=1, stride=1)
        self.conv3 = nn.Conv1d(80 * dec, 160 * dec, kernel_size=3, stride=1)
        self.conv4 = nn.Conv1d(160 * dec, 80 * dec, kernel_size=1, stride=1)
        self.conv5 = nn.Conv1d(80 * dec, 40 * dec, kernel_size=3, stride=1)
        self.conv6 = nn.Conv1d(40 * dec, 20 * dec, kernel_size=1, stride=1)
        self.pad_2 = nn.ReflectionPad1d(1)
        self.maxpool_time = nn.MaxPool1d(4)
        # `mypool` and the two batch-norms are not used by forward(); they are
        # kept (in this exact order) so state-dict keys and seeded parameter
        # initialisation stay unchanged.
        self.mypool = nn.Conv1d(10 * dec, 10 * dec, kernel_size=4, stride=4)
        self.relu = nn.ReLU()
        self.conv1_bn = nn.BatchNorm1d(1)
        self.conv2_bn = nn.BatchNorm1d(10 * dec)

    def forward(self, x):
        act = self.relu
        # Stage 1: 5-tap conv + 1x1 expansion, then /4 time pooling.
        x = self.maxpool_time(act(self.conv2(act(self.conv1(self.pad_1(x))))))
        # Stage 2: 3-tap conv + 1x1 reduction, then /4 time pooling.
        x = self.maxpool_time(act(self.conv4(act(self.conv3(self.pad_2(x))))))
        # Stage 3: final 3-tap conv + 1x1 reduction (no pooling).
        return act(self.conv6(act(self.conv5(self.pad_2(x)))))
class EEG_Encoder_TFN(nn.Module):
    """Two-branch EEG encoder (global + per-channel).

    ``forward`` expects ``x`` of shape ``(batch, 1, n_channels, time)``.
    The global branch (`conv_all`) convolves across all channels at once;
    the per-channel branch runs a shared single-channel encoder (`conv_ch`)
    on every EEG channel independently.  Both branches end with an average
    pool of width 56, so the time axis must reduce to exactly 56..111
    samples at that point (e.g. a 224-sample input yields width 1).

    Args:
        dec: channel-width multiplier for every conv layer.
        d: accepted for signature compatibility with the other encoders in
           this file; unused here.
    """

    def __init__(self, dec, d):
        super().__init__()
        # Shared encoder applied to one EEG channel at a time.
        self.conv_ch = nn.Sequential(
            nn.ReflectionPad2d((2, 2, 0, 0)),
            nn.Conv2d(1, 64*dec, kernel_size=(1, 5), bias=False),
            nn.ReLU(),
            nn.MaxPool2d((1,2)),
            nn.ReflectionPad2d((2, 2, 0, 0)),
            nn.Conv2d(64 * dec, 128 * dec, kernel_size=(1, 5), bias=False),
            nn.ReLU(),
            nn.MaxPool2d((1,2)),
            nn.ReflectionPad2d((2, 2, 0, 0)),
            nn.Conv2d(128*dec, 2 * dec, kernel_size=(1, 5), bias=False),
            nn.ReLU(),
            nn.AvgPool2d((1,56))
        )
        # Cross-channel encoder applied to the full montage at once; the
        # height-2 kernels in the later layers mix adjacent EEG channels.
        self.conv_all = nn.Sequential(
            nn.ReflectionPad2d((2, 2, 0, 0)),
            nn.Conv2d(1, 64*dec, kernel_size=(1, 5), bias=False),
            nn.ReLU(),
            nn.MaxPool2d((1,2)),
            nn.ReflectionPad2d((2, 2, 1, 1)),
            nn.Conv2d(64 * dec, 128 * dec, kernel_size=(2, 5), bias=False),
            nn.ReLU(),
            nn.MaxPool2d((1,2)),
            nn.ReflectionPad2d((2, 2, 0, 0)),
            nn.Conv2d(128*dec, 14 * dec, kernel_size=(2, 5), bias=False),
            nn.ReLU(),
            nn.AvgPool2d((1,56))
        )

    def forward(self, x):
        # Global branch: fold (feature, eeg-channel) dims into one feature
        # axis -> (batch, 14*dec*n_channels, 1, 1).
        x_all = self.conv_all(x)
        x_all = x_all.flatten(start_dim=1, end_dim=2).unsqueeze(dim=2)
        # Per-channel branch.  Generalized from the original hard-coded
        # range(8): iterate over however many channels the input carries so
        # the module works for any montage size (identical result for the
        # original 8-channel inputs).
        per_ch = [self.conv_ch(x[:, :, i, :].unsqueeze(dim=2))
                  for i in range(x.size(2))]
        per_ch = torch.cat(per_ch, dim=1)
        return torch.cat([x_all, per_ch], dim=1)
class EEG_Encoder_MultiToken(nn.Module):
    """Two-stream (EEG / EOG) conv+attention encoder with cross-stream tokens.

    ``forward`` takes ``x`` as a sequence of two tensors: ``x[0]`` the EEG
    input and ``x[1]`` the EOG input, each ``(batch, 1, channels, time)``
    (an extra leading dimension, if present, is flattened into batch).
    Each stream is convolved, positionally encoded and run through three
    attention stages; after stages 1 and 2 a width-averaged summary token
    from the *other* stream is appended so the streams exchange
    information, and a learned CLS token is added before stage 3.

    NOTE(review): the pooling widths (1500/750/375) imply a fixed time
    width of 3000 after the first stride-2 conv (i.e. 6000-sample input) --
    confirm against the data loader.  The EEG conv kernel height of 2
    likewise implies a 2-channel EEG input.
    """
    def __init__(self, dec, transformer_type):
        super().__init__()
        # self.maxpool = nn.MaxPool2d(kernel_size=(1, 4))
        # self.pos1 = PositionalEncoder(d_model=dmodel*8, same_time_step=7)
        dmodel = 32 * dec
        # Stage-0 stems: stride-2 convs lifting each stream to dmodel features.
        self.conv_eeg = nn.Sequential(
            nn.ReflectionPad2d((2, 2, 0, 0)),
            nn.Conv2d(1, dmodel, kernel_size=(2, 5), stride=(1, 2), bias=False),
            # nn.ReLU(),
        )
        self.conv_eog = nn.Sequential(
            nn.ReflectionPad2d((2, 2, 0, 0)),
            nn.Conv2d(1, dmodel, kernel_size=(1, 5), stride=(1, 2), bias=False),
            # nn.ReLU(),
        )
        # Intermediate stride-2 convs between attention stages.
        self.interm_eeg_conv_0 = nn.Sequential(
            nn.ReflectionPad2d((2, 2, 0, 0)),
            nn.Conv2d(dmodel, dmodel, kernel_size=(1, 5), stride=(1, 2), bias=False),
            # nn.ReLU(),
        )
        self.interm_eog_conv_0 = nn.Sequential(
            nn.ReflectionPad2d((2, 2, 0, 0)),
            nn.Conv2d(dmodel, dmodel, kernel_size=(1, 5), stride=(1, 2), bias=False),
            # nn.ReLU(),
        )
        self.interm_eeg_conv_1 = nn.Sequential(
            nn.ReflectionPad2d((1, 2, 0, 0)),
            nn.Conv2d(dmodel, dmodel, kernel_size=(1, 5), stride=(1, 2), bias=False),
            # nn.ReLU(),
        )
        self.interm_eog_conv_1 = nn.Sequential(
            nn.ReflectionPad2d((1, 2, 0, 0)),
            nn.Conv2d(dmodel, dmodel, kernel_size=(1, 5), stride=(1, 2), bias=False),
            # nn.ReLU(),
        )
        # self.latent = nn.Parameter(torch.randn(16, 8, dmodel))
        # Learned classification token, shared by both streams.
        self.cls_token = nn.Parameter(torch.randn(1, dmodel,1, 1))
        # self.cls_token = torch.cat([self.cls_token]*22, dim=0)
        self.pos = PositionalEncoder(d_model=dmodel, same_time_step=8)
        # Resolve the attention-layer class by name from module globals.
        transformer_type = globals()[transformer_type]
        self.att1_eeg = transformer_type(dmodel, 1)
        self.att1_eog = transformer_type(dmodel, 1)
        # self.att1_1 = My_Transformer_Layer(dmodel*450)
        self.att2_eeg = transformer_type(dmodel, 1)
        self.att2_eog = transformer_type(dmodel, 1)
        # self.att2_1 = My_Transformer_Layer(dmodel*2*225)
        self.att3_eeg = transformer_type(dmodel, 1)
        self.att3_eog = transformer_type(dmodel, 1)
        # self.att3_1 = My_Transformer_Layer(16 * dec*112)
        # Width-matched average pools used to squeeze a stage's output into a
        # single summary token (widths assume the fixed input size above).
        self.avg_1 = nn.AvgPool2d(kernel_size=(1, 1500))
        self.avg_2 = nn.AvgPool2d(kernel_size=(1, 750))
        self.avg_3 = nn.AvgPool2d(kernel_size=(1, 375))
        # self.avg = nn.AvgPool2d(kernel_size=(1, 23))
    def forward(self,x):
        x_shape = x[0].shape
        if len(x_shape)>4:
            # Merge the extra leading dimension into batch (mutates the
            # caller-supplied list in place).
            for i in range(len(x)):
                x[i] = x[i].flatten(start_dim=0, end_dim=1)
        xeeg = self.conv_eeg(x[0])
        xeog = self.conv_eog(x[1])
        xeeg_inshape = xeeg.shape
        xeog_inshape = xeog.shape
        # Positional encoding is applied on a flattened (batch, seq, feat)
        # view, then reshaped back to the conv layout.
        xeeg = self.pos(xeeg.flatten(start_dim=2).permute(0,2,1)).permute(0,2,1).view(xeeg_inshape)
        xeog = self.pos(xeog.flatten(start_dim=2).permute(0,2,1)).permute(0,2,1).view(xeog_inshape)
        # print(x.shape)
        # Stage 1: per-stream attention, then swap width-averaged summary
        # tokens between streams.
        xeeg = self.att1_eeg(xeeg)
        xeog = self.att1_eog(xeog)
        eeg_token = self.avg_1(xeeg)
        eog_token = self.avg_1(xeog)
        xeeg = self.interm_eeg_conv_0(xeeg)
        xeog = self.interm_eog_conv_0(xeog)
        xeeg = torch.cat([xeeg, eog_token], dim=3)
        xeog = torch.cat([xeog, eeg_token], dim=3)
        # Stage 2: attention over features + cross token; carry forward both
        # the previous token (last position) and a fresh summary token.
        xeeg = self.att2_eeg(xeeg)
        xeog = self.att2_eog(xeog)
        eeg_token = self.avg_2(xeeg[:,:,:,:-1])
        eog_token = self.avg_2(xeog[:,:,:,:-1])
        eeg_p_token = xeeg[:,:,:,-1].unsqueeze(dim=3)
        eog_p_token = xeog[:,:,:,-1].unsqueeze(dim=3)
        xeeg = self.interm_eeg_conv_1(xeeg)
        xeog = self.interm_eog_conv_1(xeog)
        # Stage 3 input: features + other-stream tokens + shared CLS token.
        xeeg = torch.cat([xeeg, eog_p_token, eog_token, self.cls_token.repeat(xeeg_inshape[0],1,1,1)], dim=3)
        xeog = torch.cat([xeog, eeg_p_token, eeg_token, self.cls_token.repeat(xeeg_inshape[0],1,1,1)], dim=3)
        xeeg = self.att3_eeg(xeeg)
        xeog = self.att3_eog(xeog)
        # Split the last three positions (two cross tokens + CLS) back off
        # and concatenate everything along the feature axis.
        xm_eeg = self.avg_3(xeeg[:,:,:,:-3])
        xm_eog = self.avg_3(xeog[:,:,:,:-3])
        multi1_eeg = xeeg[:,:,:,-3].unsqueeze(dim=3)
        multi2_eeg = xeeg[:,:,:,-2].unsqueeze(dim=3)
        cls_eeg = xeeg[:,:,:,-1].unsqueeze(dim=3)
        multi1_eog = xeog[:,:,:,-3].unsqueeze(dim=3)
        multi2_eog = xeog[:,:,:,-2].unsqueeze(dim=3)
        cls_eog = xeog[:,:,:,-1].unsqueeze(dim=3)
        out = torch.cat([xm_eeg, multi1_eeg, multi2_eeg, cls_eeg, xm_eog, multi1_eog, multi2_eog, cls_eog],dim=1)
        return out
class EEG_Encoder_Single(nn.Module):
    """Single-stream conv + attention EEG encoder.

    ``forward`` takes ``x`` as a sequence of tensors and encodes ``x[0]``
    only.  Three conv/attention stages operate on the time axis; the result
    is then reshaped so three further attention stages (att4-att6) operate
    across what was the second input dimension.  The attention-layer class
    is resolved by name from module globals via ``transformer_type``.
    """

    def __init__(self, dec, transformer_type):
        super().__init__()
        # Module creation order is preserved so seeded initialisation and
        # checkpoints stay compatible.
        dmodel = 32 * dec
        self.conv = nn.Sequential(
            nn.ReflectionPad2d((2, 2, 0, 0)),
            nn.Conv2d(1, dmodel, kernel_size=(2, 5), stride=(1, 2), bias=False),
        )
        self.interm_conv_0 = nn.Sequential(
            nn.ReflectionPad2d((2, 2, 0, 0)),
            nn.Conv2d(dmodel, dmodel, kernel_size=(1, 5), stride=(1, 2), bias=False),
        )
        self.interm_conv_1 = nn.Sequential(
            nn.ReflectionPad2d((1, 2, 0, 0)),
            nn.Conv2d(dmodel, dmodel, kernel_size=(1, 5), stride=(1, 2), bias=False),
        )
        self.pos = PositionalEncoder(d_model=dmodel, same_time_step=8)
        transformer_type = globals()[transformer_type]
        self.att1 = transformer_type(dmodel, 1)
        self.att2 = transformer_type(dmodel, 1)
        self.att3 = transformer_type(dmodel, 1)
        self.att4 = transformer_type(64 * 5, 1)
        self.att5 = transformer_type(64 * 5, 1)
        self.att6 = transformer_type(64 * 5, 1)
        self.avg = nn.AvgPool2d(kernel_size=(1, 75))

    def forward(self, x):
        outer_shape = x[0].shape
        if len(outer_shape) > 4:
            # Fold the extra leading dimension into batch (in place, as the
            # caller-supplied list is mutated).
            for i in range(len(x)):
                x[i] = x[i].flatten(start_dim=0, end_dim=1)
        feats = self.conv(x[0])
        conv_shape = feats.shape
        # Positional encoding on a (batch, seq, feat) view, then restore.
        feats = self.pos(feats.flatten(start_dim=2).permute(0, 2, 1)).permute(0, 2, 1).view(conv_shape)
        feats = self.att1(feats)
        feats = self.att2(self.interm_conv_0(feats))
        feats = self.att3(self.interm_conv_1(feats))
        feats = self.avg(feats)
        # Re-arrange so attention now runs across the original dim-1 axis.
        feats = feats.view(outer_shape[0], feats.shape[1] * feats.shape[2] * feats.shape[3], 1, outer_shape[1])
        feats = self.att5(self.att4(feats))
        feats = self.att6(feats).permute(0, 3, 2, 1)
        return feats.flatten(start_dim=0, end_dim=1)
class EEG_Encoder_Ch_all(nn.Module):
    """1-D EEG encoder producing a fixed-width summary per input.

    Same bottleneck layout as EEG_Encoder_Ch but with wider channels and a
    final average pool of width 14.  Input is ``(batch, 1, time)``.
    """

    def __init__(self, dec):
        super().__init__()
        self.pad_1 = nn.ReflectionPad1d(2)
        self.conv1 = nn.Conv1d(1, 64 * dec, kernel_size=5, stride=1)
        self.conv2 = nn.Conv1d(64 * dec, 128 * dec, kernel_size=1, stride=1)
        self.conv3 = nn.Conv1d(128 * dec, 128 * dec, kernel_size=3, stride=1)
        self.conv4 = nn.Conv1d(128 * dec, 64 * dec, kernel_size=1, stride=1)
        self.conv5 = nn.Conv1d(64 * dec, 32 * dec, kernel_size=3, stride=1)
        self.conv6 = nn.Conv1d(32 * dec, 16 * dec, kernel_size=1, stride=1)
        self.pad_2 = nn.ReflectionPad1d(1)
        self.maxpool_time = nn.MaxPool1d(4)
        self.avg_pool = nn.AvgPool1d(14)
        self.relu = nn.ReLU()
        # The batch-norms below are not used by forward(); kept (in this
        # order) so state-dict keys and seeded initialisation are unchanged.
        self.conv1_bn = nn.BatchNorm1d(1)
        self.conv2_bn = nn.BatchNorm1d(10 * dec)

    def forward(self, x):
        act = self.relu
        # Stage 1: 5-tap conv + 1x1 expansion, /4 time pooling.
        x = self.maxpool_time(act(self.conv2(act(self.conv1(self.pad_1(x))))))
        # Stage 2: 3-tap conv + 1x1 reduction, /4 time pooling.
        x = self.maxpool_time(act(self.conv4(act(self.conv3(self.pad_2(x))))))
        # Stage 3: final convs, then width-14 average pool.
        x = act(self.conv6(act(self.conv5(self.pad_2(x)))))
        return self.avg_pool(x)
class EEG_Shuffle_channels(nn.Module):
def __init__(self, dec):
super().__init__()
# self.maxpool = nn.MaxPool2d(kernel_size=(1, 4))
# self.pos1 = PositionalEncoder(d_model=dmodel*8, same_time_step=7)
dmodel = 64 * dec
self.conv = nn.Sequential(
nn.ReflectionPad2d((2, 2, 0, 0)),
nn.Conv2d(1, 64 * dec, kernel_size=(1, 5), stride=(1,2), bias=False),
# nn.ReflectionPad2d((2, 2, 1, 0)),
# nn.Conv2d(64 * dec, 128 * dec, kernel_size=(1, 5), stride=(1, 2)),
# nn.ReLU(),
# # nn.MaxPool2d(kernel_size=(1, 2)),
# nn.ReflectionPad2d((2, 2, 0, 0)),
# nn.Conv2d(128 * dec, 16 * dec, kernel_size=(1, 5)),
# nn.ReLU(),
)
# self.interm_conv_0 = nn.Sequential(
# nn.ReflectionPad2d((2, 2, 1, 1)),
# nn.Conv2d(64 * dec, 128 * dec, kernel_size=(2, 5), stride=(1,2)),
# # nn.ReLU(),
# )
# self.interm_conv_1 = nn.Sequential(
# nn.ReflectionPad2d((2, 2, 0, 0)),
# nn.Conv2d(128 * dec, 16 * dec, kernel_size=(2, 5)),
# # nn.ReLU(),
# )
# self.latent = nn.Parameter(torch.randn(16, 8, dmodel))
# self.cls_token = nn.Parameter(torch.randn(1, 8, dmodel))
# self.pos = PositionalEncoder(d_model=dmodel*8)
self.att1 = My_Transformer_Layer_Ch_SmallFF(dmodel)
# self.att1_1 = My_Transformer_Layer(dmodel*450)
self.att2 = My_Transformer_Layer_Ch_SmallFF(dmodel*2)
# self.att2_1 = My_Transformer_Layer(dmodel*2*225)
self.att3 = My_Transformer_Layer_Ch_SmallFF(16 * dec)
# self.att3_1 = My_Transformer_Layer(16 * dec*112)
self.avg = nn.AvgPool2d(kernel_size=(1, 56))
import random
self.rands = random.sample(range(8), 8)
# self.rands = [7,0,1,2,3,4,5,6,7,1]
print("Our random shuffle is:")
print(self.rands)
def _shuffle_channels(self,x):
return x[:,:,self.rands,:]
def cross_attention(self,src):
print(self.latent_space.shape)
print(src.shape)
src2 = self.self_attn(self.latent_space, self.latent_space, src)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def s_att(self,src):
src2 = self.s_self_attn(src, src, src)[0]
src = src + self.dropout1(src2)
src = self.s_norm1(src)
src2 = self.s_linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.s_norm2(src)
return src
def forward(self, x):
x = self.conv(x)
x_shape = x.shape
# x_shape = x.shape
# x = x.permute(0,3,2,1)
# mm= []
# for i in range(8):
# m = []
# for j | |
-> int"""
return _libsedml.SedCurve_setXDataReference(self, *args)
    def unsetXDataReference(self):
        """unsetXDataReference(SedCurve self) -> int"""
        # SWIG-generated wrapper; delegates to the native libsedml library
        # and returns an integer status code.
        return _libsedml.SedCurve_unsetXDataReference(self)
    def getYDataReference(self):
        """getYDataReference(SedCurve self) -> string"""
        # SWIG-generated wrapper; returns the curve's yDataReference value.
        return _libsedml.SedCurve_getYDataReference(self)
    def isSetYDataReference(self):
        """isSetYDataReference(SedCurve self) -> bool"""
        # SWIG-generated wrapper; True when yDataReference has been set.
        return _libsedml.SedCurve_isSetYDataReference(self)
    def setYDataReference(self, *args):
        """setYDataReference(SedCurve self, string yDataReference) -> int"""
        # SWIG-generated wrapper; returns an integer status code.
        return _libsedml.SedCurve_setYDataReference(self, *args)
    def unsetYDataReference(self):
        """unsetYDataReference(SedCurve self) -> int"""
        # SWIG-generated wrapper; returns an integer status code.
        return _libsedml.SedCurve_unsetYDataReference(self)
    def getElementName(self):
        """getElementName(SedCurve self) -> string"""
        # SWIG-generated wrapper; XML element name of this object.
        return _libsedml.SedCurve_getElementName(self)
    def getTypeCode(self):
        """getTypeCode(SedCurve self) -> int"""
        # SWIG-generated wrapper; libsedml type-code constant for SedCurve.
        return _libsedml.SedCurve_getTypeCode(self)
    def hasRequiredAttributes(self):
        """hasRequiredAttributes(SedCurve self) -> bool"""
        # SWIG-generated wrapper; True when all required XML attributes are set.
        return _libsedml.SedCurve_hasRequiredAttributes(self)
    def setSedDocument(self, *args):
        """setSedDocument(SedCurve self, SedDocument d)"""
        # SWIG-generated wrapper; attaches this curve to its parent document.
        return _libsedml.SedCurve_setSedDocument(self, *args)
# Register the proxy with the SWIG runtime so native SedCurve objects
# returned from C++ are wrapped as Python SedCurve instances.
SedCurve_swigregister = _libsedml.SedCurve_swigregister
SedCurve_swigregister(SedCurve)
# Auto-generated SWIG proxy for the C++ SedListOfCurves container; edit the
# SWIG interface definition, not this file.
class SedListOfCurves(SedListOf):
    """Proxy of C++ SedListOfCurves class"""
    __swig_setmethods__ = {}
    for _s in [SedListOf]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SedListOfCurves, name, value)
    __swig_getmethods__ = {}
    for _s in [SedListOf]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, SedListOfCurves, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(SedListOfCurves self, unsigned int level=SEDML_DEFAULT_LEVEL, unsigned int version=SEDML_DEFAULT_VERSION) -> SedListOfCurves
        __init__(SedListOfCurves self, unsigned int level=SEDML_DEFAULT_LEVEL) -> SedListOfCurves
        __init__(SedListOfCurves self) -> SedListOfCurves
        __init__(SedListOfCurves self, SedNamespaces sedns) -> SedListOfCurves
        """
        this = _libsedml.new_SedListOfCurves(*args)
        # Standard SWIG boilerplate: bind the native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def clone(self):
        """clone(SedListOfCurves self) -> SedListOfCurves"""
        return _libsedml.SedListOfCurves_clone(self)
    def get(self, *args):
        """
        get(SedListOfCurves self, unsigned int n) -> SedCurve
        get(SedListOfCurves self, unsigned int n) -> SedCurve
        get(SedListOfCurves self, string sid) -> SedCurve
        get(SedListOfCurves self, string sid) -> SedCurve
        """
        return _libsedml.SedListOfCurves_get(self, *args)
    def addCurve(self, *args):
        """addCurve(SedListOfCurves self, SedCurve c) -> int"""
        return _libsedml.SedListOfCurves_addCurve(self, *args)
    def getNumCurves(self):
        """getNumCurves(SedListOfCurves self) -> unsigned int"""
        return _libsedml.SedListOfCurves_getNumCurves(self)
    def createCurve(self):
        """createCurve(SedListOfCurves self) -> SedCurve"""
        return _libsedml.SedListOfCurves_createCurve(self)
    def remove(self, *args):
        """
        remove(SedListOfCurves self, unsigned int n) -> SedCurve
        remove(SedListOfCurves self, string sid) -> SedCurve
        """
        return _libsedml.SedListOfCurves_remove(self, *args)
    def getElementName(self):
        """getElementName(SedListOfCurves self) -> string"""
        return _libsedml.SedListOfCurves_getElementName(self)
    def getTypeCode(self):
        """getTypeCode(SedListOfCurves self) -> int"""
        return _libsedml.SedListOfCurves_getTypeCode(self)
    def getItemTypeCode(self):
        """getItemTypeCode(SedListOfCurves self) -> int"""
        return _libsedml.SedListOfCurves_getItemTypeCode(self)
    __swig_destroy__ = _libsedml.delete_SedListOfCurves
    __del__ = lambda self : None;
SedListOfCurves_swigregister = _libsedml.SedListOfCurves_swigregister
SedListOfCurves_swigregister(SedListOfCurves)
# Auto-generated SWIG proxy for the C++ SedSurface class (a SedCurve with an
# additional Z dimension); edit the SWIG interface definition, not this file.
class SedSurface(SedCurve):
    """Proxy of C++ SedSurface class"""
    __swig_setmethods__ = {}
    for _s in [SedCurve]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SedSurface, name, value)
    __swig_getmethods__ = {}
    for _s in [SedCurve]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, SedSurface, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(SedSurface self, unsigned int level=SEDML_DEFAULT_LEVEL, unsigned int version=SEDML_DEFAULT_VERSION) -> SedSurface
        __init__(SedSurface self, unsigned int level=SEDML_DEFAULT_LEVEL) -> SedSurface
        __init__(SedSurface self) -> SedSurface
        __init__(SedSurface self, SedNamespaces sedns) -> SedSurface
        __init__(SedSurface self, SedSurface orig) -> SedSurface
        """
        this = _libsedml.new_SedSurface(*args)
        # Standard SWIG boilerplate: bind the native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def clone(self):
        """clone(SedSurface self) -> SedSurface"""
        return _libsedml.SedSurface_clone(self)
    __swig_destroy__ = _libsedml.delete_SedSurface
    __del__ = lambda self : None;
    def getLogZ(self):
        """getLogZ(SedSurface self) -> bool const"""
        return _libsedml.SedSurface_getLogZ(self)
    def isSetLogZ(self):
        """isSetLogZ(SedSurface self) -> bool"""
        return _libsedml.SedSurface_isSetLogZ(self)
    def setLogZ(self, *args):
        """setLogZ(SedSurface self, bool logZ) -> int"""
        return _libsedml.SedSurface_setLogZ(self, *args)
    def unsetLogZ(self):
        """unsetLogZ(SedSurface self) -> int"""
        return _libsedml.SedSurface_unsetLogZ(self)
    def getZDataReference(self):
        """getZDataReference(SedSurface self) -> string"""
        return _libsedml.SedSurface_getZDataReference(self)
    def isSetZDataReference(self):
        """isSetZDataReference(SedSurface self) -> bool"""
        return _libsedml.SedSurface_isSetZDataReference(self)
    def setZDataReference(self, *args):
        """setZDataReference(SedSurface self, string zDataReference) -> int"""
        return _libsedml.SedSurface_setZDataReference(self, *args)
    def unsetZDataReference(self):
        """unsetZDataReference(SedSurface self) -> int"""
        return _libsedml.SedSurface_unsetZDataReference(self)
    def getElementName(self):
        """getElementName(SedSurface self) -> string"""
        return _libsedml.SedSurface_getElementName(self)
    def getTypeCode(self):
        """getTypeCode(SedSurface self) -> int"""
        return _libsedml.SedSurface_getTypeCode(self)
    def hasRequiredAttributes(self):
        """hasRequiredAttributes(SedSurface self) -> bool"""
        return _libsedml.SedSurface_hasRequiredAttributes(self)
    def setSedDocument(self, *args):
        """setSedDocument(SedSurface self, SedDocument d)"""
        return _libsedml.SedSurface_setSedDocument(self, *args)
SedSurface_swigregister = _libsedml.SedSurface_swigregister
SedSurface_swigregister(SedSurface)
# Auto-generated SWIG proxy for the C++ SedListOfSurfaces container; edit the
# SWIG interface definition, not this file.
class SedListOfSurfaces(SedListOf):
    """Proxy of C++ SedListOfSurfaces class"""
    __swig_setmethods__ = {}
    for _s in [SedListOf]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SedListOfSurfaces, name, value)
    __swig_getmethods__ = {}
    for _s in [SedListOf]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, SedListOfSurfaces, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(SedListOfSurfaces self, unsigned int level=SEDML_DEFAULT_LEVEL, unsigned int version=SEDML_DEFAULT_VERSION) -> SedListOfSurfaces
        __init__(SedListOfSurfaces self, unsigned int level=SEDML_DEFAULT_LEVEL) -> SedListOfSurfaces
        __init__(SedListOfSurfaces self) -> SedListOfSurfaces
        __init__(SedListOfSurfaces self, SedNamespaces sedns) -> SedListOfSurfaces
        """
        this = _libsedml.new_SedListOfSurfaces(*args)
        # Standard SWIG boilerplate: bind the native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def clone(self):
        """clone(SedListOfSurfaces self) -> SedListOfSurfaces"""
        return _libsedml.SedListOfSurfaces_clone(self)
    def get(self, *args):
        """
        get(SedListOfSurfaces self, unsigned int n) -> SedSurface
        get(SedListOfSurfaces self, unsigned int n) -> SedSurface
        get(SedListOfSurfaces self, string sid) -> SedSurface
        get(SedListOfSurfaces self, string sid) -> SedSurface
        """
        return _libsedml.SedListOfSurfaces_get(self, *args)
    def addSurface(self, *args):
        """addSurface(SedListOfSurfaces self, SedSurface s) -> int"""
        return _libsedml.SedListOfSurfaces_addSurface(self, *args)
    def getNumSurfaces(self):
        """getNumSurfaces(SedListOfSurfaces self) -> unsigned int"""
        return _libsedml.SedListOfSurfaces_getNumSurfaces(self)
    def createSurface(self):
        """createSurface(SedListOfSurfaces self) -> SedSurface"""
        return _libsedml.SedListOfSurfaces_createSurface(self)
    def remove(self, *args):
        """
        remove(SedListOfSurfaces self, unsigned int n) -> SedSurface
        remove(SedListOfSurfaces self, string sid) -> SedSurface
        """
        return _libsedml.SedListOfSurfaces_remove(self, *args)
    def getElementName(self):
        """getElementName(SedListOfSurfaces self) -> string"""
        return _libsedml.SedListOfSurfaces_getElementName(self)
    def getTypeCode(self):
        """getTypeCode(SedListOfSurfaces self) -> int"""
        return _libsedml.SedListOfSurfaces_getTypeCode(self)
    def getItemTypeCode(self):
        """getItemTypeCode(SedListOfSurfaces self) -> int"""
        return _libsedml.SedListOfSurfaces_getItemTypeCode(self)
    __swig_destroy__ = _libsedml.delete_SedListOfSurfaces
    __del__ = lambda self : None;
SedListOfSurfaces_swigregister = _libsedml.SedListOfSurfaces_swigregister
SedListOfSurfaces_swigregister(SedListOfSurfaces)
# Auto-generated SWIG proxy for the C++ SedOutput base class (parent of
# SedReport / SedPlot2D / SedPlot3D); edit the SWIG interface, not this file.
class SedOutput(SedBase):
    """Proxy of C++ SedOutput class"""
    __swig_setmethods__ = {}
    for _s in [SedBase]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SedOutput, name, value)
    __swig_getmethods__ = {}
    for _s in [SedBase]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, SedOutput, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(SedOutput self, unsigned int level=SEDML_DEFAULT_LEVEL, unsigned int version=SEDML_DEFAULT_VERSION) -> SedOutput
        __init__(SedOutput self, unsigned int level=SEDML_DEFAULT_LEVEL) -> SedOutput
        __init__(SedOutput self) -> SedOutput
        __init__(SedOutput self, SedNamespaces sedns) -> SedOutput
        __init__(SedOutput self, SedOutput orig) -> SedOutput
        """
        this = _libsedml.new_SedOutput(*args)
        # Standard SWIG boilerplate: bind the native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def clone(self):
        """clone(SedOutput self) -> SedOutput"""
        return _libsedml.SedOutput_clone(self)
    __swig_destroy__ = _libsedml.delete_SedOutput
    __del__ = lambda self : None;
    def getId(self):
        """getId(SedOutput self) -> string"""
        return _libsedml.SedOutput_getId(self)
    def isSetId(self):
        """isSetId(SedOutput self) -> bool"""
        return _libsedml.SedOutput_isSetId(self)
    def setId(self, *args):
        """setId(SedOutput self, string id) -> int"""
        return _libsedml.SedOutput_setId(self, *args)
    def unsetId(self):
        """unsetId(SedOutput self) -> int"""
        return _libsedml.SedOutput_unsetId(self)
    def getName(self):
        """getName(SedOutput self) -> string"""
        return _libsedml.SedOutput_getName(self)
    def isSetName(self):
        """isSetName(SedOutput self) -> bool"""
        return _libsedml.SedOutput_isSetName(self)
    def setName(self, *args):
        """setName(SedOutput self, string name) -> int"""
        return _libsedml.SedOutput_setName(self, *args)
    def unsetName(self):
        """unsetName(SedOutput self) -> int"""
        return _libsedml.SedOutput_unsetName(self)
    def getElementName(self):
        """getElementName(SedOutput self) -> string"""
        return _libsedml.SedOutput_getElementName(self)
    def getTypeCode(self):
        """getTypeCode(SedOutput self) -> int"""
        return _libsedml.SedOutput_getTypeCode(self)
    def hasRequiredAttributes(self):
        """hasRequiredAttributes(SedOutput self) -> bool"""
        return _libsedml.SedOutput_hasRequiredAttributes(self)
    def hasRequiredElements(self):
        """hasRequiredElements(SedOutput self) -> bool"""
        return _libsedml.SedOutput_hasRequiredElements(self)
    def setSedDocument(self, *args):
        """setSedDocument(SedOutput self, SedDocument d)"""
        return _libsedml.SedOutput_setSedDocument(self, *args)
    def connectToChild(self):
        """connectToChild(SedOutput self)"""
        return _libsedml.SedOutput_connectToChild(self)
SedOutput_swigregister = _libsedml.SedOutput_swigregister
SedOutput_swigregister(SedOutput)
# Auto-generated SWIG proxy for the C++ SedListOfOutputs container; edit the
# SWIG interface definition, not this file.
class SedListOfOutputs(SedListOf):
    """Proxy of C++ SedListOfOutputs class"""
    __swig_setmethods__ = {}
    for _s in [SedListOf]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
    __setattr__ = lambda self, name, value: _swig_setattr(self, SedListOfOutputs, name, value)
    __swig_getmethods__ = {}
    for _s in [SedListOf]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
    __getattr__ = lambda self, name: _swig_getattr(self, SedListOfOutputs, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        """
        __init__(SedListOfOutputs self, unsigned int level=SEDML_DEFAULT_LEVEL, unsigned int version=SEDML_DEFAULT_VERSION) -> SedListOfOutputs
        __init__(SedListOfOutputs self, unsigned int level=SEDML_DEFAULT_LEVEL) -> SedListOfOutputs
        __init__(SedListOfOutputs self) -> SedListOfOutputs
        __init__(SedListOfOutputs self, SedNamespaces sedns) -> SedListOfOutputs
        """
        this = _libsedml.new_SedListOfOutputs(*args)
        # Standard SWIG boilerplate: bind the native object to this proxy.
        try: self.this.append(this)
        except: self.this = this
    def clone(self):
        """clone(SedListOfOutputs self) -> SedListOfOutputs"""
        return _libsedml.SedListOfOutputs_clone(self)
    def get(self, *args):
        """
        get(SedListOfOutputs self, unsigned int n) -> SedOutput
        get(SedListOfOutputs self, unsigned int n) -> SedOutput
        get(SedListOfOutputs self, string sid) -> SedOutput
        get(SedListOfOutputs self, string sid) -> SedOutput
        """
        return _libsedml.SedListOfOutputs_get(self, *args)
    def addOutput(self, *args):
        """addOutput(SedListOfOutputs self, SedOutput o) -> int"""
        return _libsedml.SedListOfOutputs_addOutput(self, *args)
    def getNumOutputs(self):
        """getNumOutputs(SedListOfOutputs self) -> unsigned int"""
        return _libsedml.SedListOfOutputs_getNumOutputs(self)
    def createReport(self):
        """createReport(SedListOfOutputs self) -> SedReport"""
        return _libsedml.SedListOfOutputs_createReport(self)
    def createPlot2D(self):
        """createPlot2D(SedListOfOutputs self) -> SedPlot2D"""
        return _libsedml.SedListOfOutputs_createPlot2D(self)
    def createPlot3D(self):
        """createPlot3D(SedListOfOutputs self) -> SedPlot3D"""
        return _libsedml.SedListOfOutputs_createPlot3D(self)
    def remove(self, *args):
        """
        remove(SedListOfOutputs self, unsigned int n) -> SedOutput
        remove(SedListOfOutputs self, string sid) -> SedOutput
        """
        return _libsedml.SedListOfOutputs_remove(self, *args)
    def getElementName(self):
        """getElementName(SedListOfOutputs self) -> string"""
        return _libsedml.SedListOfOutputs_getElementName(self)
    def getTypeCode(self):
        """getTypeCode(SedListOfOutputs self) -> int"""
        return _libsedml.SedListOfOutputs_getTypeCode(self)
    def getItemTypeCode(self):
        """getItemTypeCode(SedListOfOutputs self) -> int"""
        return _libsedml.SedListOfOutputs_getItemTypeCode(self)
    __swig_destroy__ = _libsedml.delete_SedListOfOutputs
    __del__ = lambda self : None;
SedListOfOutputs_swigregister = _libsedml.SedListOfOutputs_swigregister
SedListOfOutputs_swigregister(SedListOfOutputs)
class SedReport(SedOutput):
"""Proxy of C++ SedReport class"""
__swig_setmethods__ = {}
for _s in [SedOutput]: __swig_setmethods__.update(getattr(_s,'__swig_setmethods__',{}))
__setattr__ = lambda self, name, value: _swig_setattr(self, SedReport, name, value)
__swig_getmethods__ = {}
for _s in [SedOutput]: __swig_getmethods__.update(getattr(_s,'__swig_getmethods__',{}))
__getattr__ = lambda self, name: _swig_getattr(self, SedReport, name)
__repr__ = _swig_repr
def __init__(self, *args):
"""
__init__(SedReport self, unsigned int level=SEDML_DEFAULT_LEVEL, unsigned int version=SEDML_DEFAULT_VERSION) -> SedReport
__init__(SedReport self, unsigned int level=SEDML_DEFAULT_LEVEL) -> SedReport
__init__(SedReport self) -> SedReport
__init__(SedReport self, SedNamespaces sedns) -> SedReport
__init__(SedReport self, SedReport orig) -> SedReport
"""
this = _libsedml.new_SedReport(*args)
try: self.this.append(this)
except: self.this = this
def clone(self):
"""clone(SedReport self) -> | |
this raises, don't use 'vFvvvvvv' as a signature...
self.redirected = FunctionType(string, filespec)
assert(not self.redirected.redirect and not self.redirected.usestruct)
    def getchar(self, c: str) -> int:
        # Delegate to the bare (un-redirected) view of this signature;
        # `_bare` is presumably assigned in __init__ -- confirm there.
        return self._bare.getchar(c)
    def getcharidx(self, i: int) -> int:
        # Numeric index of the i-th character of the signature string.
        return self._bare.getcharidx(i)
    def splitchar(self) -> List[int]:
        # Per-character numeric encoding of the signature (see
        # _BareFunctionType.splitchar for the layout).
        return self._bare.splitchar()
    def __hash__(self) -> int:
        # Hash by the original signature string so instances can be used as
        # dict/set keys; consistent with __eq__, which compares .orig.
        return str.__hash__(self.orig)
    def __eq__(self, o: object):
        # Equal when both wrap the same signature string; additionally, a
        # StructFunctionType only compares equal to the very same object,
        # never to a distinct instance sharing the string.
        return isinstance(o, FunctionType) and ((self.orig == o.orig) and (o is self or not isinstance(self, StructFunctionType)))
class StructFunctionType(FunctionType):
    """A signature that passes or returns structures by value.

    The struct characters are rewritten into an equivalent pointer-based
    signature, stored as ``self.redirected``.
    """
    def __init__(self, string: str, filespec: FileSpec) -> None:
        super().__init__(string, filespec)
        assert(self.usestruct)
        self.filespec = filespec
        # Register this signature so the generator can emit struct handling.
        self.filespec.structsuses.append(self)
        # The return slot is a struct iff its character is a declared struct.
        self.returnsstruct = string[0] in self.filespec.structs
        if self.returnsstruct:
            # Rewrite the struct return as a pointer return plus an extra
            # leading 'p' argument after 'F' (after 'FE' when there is an emu
            # argument) -- presumably the hidden struct-return pointer.
            if self.hasemu:
                string = "pFEp" + string[3:]
            else:
                string = "pFp" + string[2:]
        # Replace every remaining struct character with its declared
        # replacement string before building the redirected signature.
        for struct in self.filespec.structs:
            string = string.replace(struct, self.filespec.structs[struct].repl)
        self.redirected = FunctionType(string, self.filespec)
class _BareFunctionType(FunctionType):  # Fake derived
    """Lightweight view of a raw signature string.

    Borrows FunctionType's interface but none of its construction or
    redirection machinery (hence the __new__ override); maps each signature
    character to a numeric index.
    """

    def __new__(cls, *largs, **kwargs):
        # Bypass FunctionType's construction entirely.
        return object.__new__(cls)

    def __init__(self, string: str, filespec: FileSpec, isstruct: bool) -> None:
        self.orig = string
        self.filespec = filespec
        self.isstruct = isstruct

    def getchar(self, c: str) -> int:
        """Map one signature character to its numeric index.

        Plain value characters index into FileSpec.rvalues; struct characters
        (only legal when ``isstruct``) index into the spec's struct table,
        offset past the rvalues.
        """
        if c in FileSpec.rvalues:
            return FileSpec.rvalues.index(c)
        else:
            assert(self.isstruct)
            # BUGFIX: dicts have no `__keys__` attribute -- the original
            # `self.filespec.structs.__keys__.index(c)` raised AttributeError
            # on any struct-character lookup.  Iterate the mapping's keys in
            # insertion order instead.
            return list(self.filespec.structs).index(c) + len(FileSpec.rvalues)

    def getcharidx(self, i: int) -> int:
        # Index of the i-th character of the signature.
        return self.getchar(self.orig[i])

    def splitchar(self) -> List[int]:
        """Return [length, return-char index, argument-char indices...].

        Position 1 of the signature (the 'F' separator) is skipped.
        Raises ValueError naming the signature on any bad character.
        """
        try:
            return [
                len(self.orig), self.getcharidx(0),
                *map(self.getcharidx, range(2, len(self.orig)))
            ]
        except (ValueError, AssertionError) as e:
            # Re-raise with the offending signature for easier debugging.
            raise ValueError("Value is " + self.orig) from e
# Allowed GOs: GO,GOM,GO2,GOS,GOW,GOWM,GOW2,GO2S
class Function:
    """One wrapped function parsed from a GO...(...) line.

    Decodes the GO variant (see the "Allowed GOs" comment above: GO, GOM,
    GO2, GOS and their W-weak forms) plus the optional `//%` inline
    metadata, validates the combination against the signature's properties,
    and registers the function in the file's typedefs when a custom or
    aliased wrapper is involved.  Suspicious combinations print a colored
    warning and set self.invalid instead of raising.
    """
    def __init__(self, name: str, funtype: FunctionType, gotype: str, filespec: FileSpec, filename: Filename, line: str) -> None:
        self._noE = False
        self.no_dlsym: bool = False
        # Optional inline metadata after '//%': 'noE' (skip the emulated
        # variant) and/or '%' (do not dlsym the symbol).
        if "//%" in line:
            additional_meta = line.split("//%")[1].split(" ")[0].strip()
            if additional_meta.endswith(",noE"):
                self._noE = True
                additional_meta = additional_meta[:-4]
            if additional_meta == 'noE':
                assert not self._noE, "Duplicated 'noE'"
                self._noE = True
            elif additional_meta == '%':
                self.no_dlsym = True
            else:
                raise NotImplementedError("Changing the function type 'on the fly' is not supported")
        funtypeerr = ValueError("Invalid function type " + gotype)
        if not gotype.startswith("GO"):
            raise funtypeerr
        # Strip "GO", then the optional flags in order: W (weak binding),
        # then exactly one of M (my_ handler) / 2 (aliased) / S (returns
        # struct, which implies M).
        gotype = gotype[2:]
        self.isweak = (len(gotype) > 0) and (gotype[0] == "W")
        if self.isweak:
            gotype = gotype[1:]
        self.ismy = (len(gotype) > 0) and (gotype[0] == "M")
        self.is2 = (len(gotype) > 0) and (gotype[0] == "2")
        self.retS = (len(gotype) > 0) and (gotype[0] == "S")
        if self.ismy or self.is2 or self.retS:
            gotype = gotype[1:]
        if self.retS:
            self.ismy = True
            # NOTE(review): indentation was reconstructed — this assert and
            # the _noE update are placed inside the retS branch, where they
            # make semantic sense (a GOS must either be a no-dlsym pFp/pFEp
            # redirect or a genuinely struct-returning signature). Confirm
            # against the original file.
            assert((self.no_dlsym and (funtype.orig.startswith("pFp") or funtype.orig.startswith("pFEp")))
                   or (isinstance(funtype, StructFunctionType) and funtype.returnsstruct))
            self._noE = self._noE or self.no_dlsym
        if isinstance(funtype, StructFunctionType) and funtype.returnsstruct and not self.retS:
            # Rebuild the GO type string purely for the error message.
            gotype = "GO" + \
                ("W" if self.isweak else "") + \
                ("M" if self.ismy else "") + ("2" if self.is2 else "")
            raise ValueError("Function type " + funtype.orig + " needs to return a structure, but doesn't (currently " + gotype + ")")
        if gotype != "":
            # Leftover characters mean an unrecognized GO variant.
            raise funtypeerr
        self.name = name
        self.type = funtype
        self.filespec = filespec
        assert(not isinstance(funtype, StructFunctionType) or filespec is funtype.filespec) # No reason why not, so assert()
        if self.is2:
            # GO2: the target symbol name is the 3rd argument of the macro.
            self.fun2 = line.split(',')[2].split(')')[0].strip()
            if self.type.hasemu != self.fun2.startswith("my_") and not self._noE:
                # If this raises because of a different prefix, open a pull request
                print("\033[91mThis is probably not what you meant!\033[m ({0}:{1})".format(filename, line[:-1]), file=sys.stderr)
                self.invalid = True
        if (self.ismy and not self.type.hasemu and not self.is2) and not self._noE:
            # Probably invalid on box86; if not so, remove/comment this whole 'if' (and also open an issue)
            print("\033[94mAre you sure of this?\033[m ({0}:{1})".format(filename, line[:-1]), file=sys.stderr)
            self.invalid = True
            return
        if self.type.hasemu and not self.ismy and not self.is2:
            # Certified invalid
            print("\033[91mThis is probably not what you meant!\033[m ({0}:{1})".format(filename, line[:-1]), file=sys.stderr)
            self.invalid = True
            return
        if self._noE and not self.ismy and not self.is2:
            raise ValueError("Invalid meta: 'no E' provided but function is not a GOM")
        if self.ismy or self.is2:
            # Add this to the typedefs
            self.filespec.typedefs[self.type.withoutE].append(self)
DefineType = NewType('DefineType', str)
@final
class Define:
    """One `defined(NAME)` / `!defined(NAME)` atom of a preprocessor clause.

    Only names registered in the class-level `defines` list may be used.
    """
    name: DefineType
    inverted_: bool
    defines: List[DefineType] = []  # class-level registry of known names
    def __init__(self, name: DefineType, inverted_: bool) -> None:
        # Reject any name that was never registered (throw otherwise).
        if name not in Define.defines:
            raise KeyError(name)
        self.name = name
        self.inverted_ = inverted_
    def copy(self) -> "Define":
        """Return an independent duplicate of this atom."""
        return Define(self.name, self.inverted_)
    def value(self) -> int:
        """Sorting key: registry rank, inverted atoms ranked just after
        their non-inverted counterpart."""
        rank = Define.defines.index(self.name)
        return rank * 2 + (1 if self.inverted_ else 0)
    def invert(self) -> "Define":
        """
        invert -- Transform a `defined()` into a `!defined()` and vice-versa, in place.
        """
        self.inverted_ = not self.inverted_
        return self
    def inverted(self) -> "Define":
        """
        inverted -- Transform a `defined()` into a `!defined()` and vice-versa, out-of-place.
        """
        return Define(self.name, not self.inverted_)
    def __str__(self) -> str:
        bang = "!" if self.inverted_ else ""
        return bang + "defined(" + self.name + ")"
    def __eq__(self, o) -> bool:
        if not isinstance(o, Define):
            return False
        return (self.name, self.inverted_) == (o.name, o.inverted_)
@final
class Clause:
    """A conjunction (`&&`) of Define atoms — one parenthesized group of an
    #if condition."""
    defines: List[Define]
    def __init__(self, defines: Union[List[Define], str, None] = None) -> None:
        """Build from a list of Define (each copied, so the clause owns its
        atoms) or from a serialized string like
        "defined(A) && !defined(B)".  None/empty yields the always-true
        clause.

        The default used to be a shared mutable `[]` (a classic Python
        pitfall); a None sentinel keeps every existing call site working.
        """
        if defines is None:
            defines = []
        if isinstance(defines, str):
            if defines == "":
                self.defines = []
            else:
                # "defined(X)"  -> strip 8 leading chars and the ')' ;
                # "!defined(X)" -> strip 9 leading chars, mark inverted.
                self.defines = list(
                    map(
                        lambda x:
                            Define(DefineType(x[9:-1] if x[0] == '!' else x[8:-1]), x[0] == '!')
                        , defines.split(" && ")
                    )
                )
        else:
            self.defines = [d.copy() for d in defines]
    def copy(self) -> "Clause":
        """Return an independent duplicate (atoms are copied too)."""
        return Clause(self.defines)
    def append(self, define: Define) -> "Clause":
        """Append an atom; raises ValueError when the clause already holds
        its negation (the conjunction would be unsatisfiable)."""
        if any((define2.name == define.name) and (define2.inverted_ != define.inverted_) for define2 in self.defines):
            raise ValueError("Tried to append an incompatible clause")
        self.defines.append(define)
        return self
    def invert_last(self) -> "Clause":
        """Negate the most recently appended atom, in place."""
        self.defines[-1].invert()
        return self
    def pop_last(self) -> "Clause":
        """Drop the most recently appended atom (no-op when empty)."""
        if len(self.defines) > 0: self.defines.pop()
        return self
    def empty(self) -> bool:
        """True for the always-true (condition-free) clause."""
        return self.defines == []
    def __str__(self) -> str:
        return " && ".join(map(str, self.defines))
    def __hash__(self):
        return hash(str(self))
    def __eq__(self, o) -> bool:
        return isinstance(o, Clause) and (self.defines == o.defines)
ClausesStr = str  # alias for the serialized "(A) || (B)" string form
@final
class Clauses:
    """
    Represent a list of clauses, aka a list of or-ed together and-ed "defined()"
    conditions
    """
    clauses: List[Clause]
    def __init__(self, clauses: Union[List[Clause], str] = []) -> None:
        # Accept either the serialized "(A) || (B)" string form or a list
        # of Clause objects (shallow-copied).  NOTE(review): the mutable []
        # default is safe here only because it is never mutated.
        if isinstance(clauses, str):
            if clauses == "()":
                self.clauses = []
            elif ") || (" in clauses:
                self.clauses = list(map(Clause, clauses[1:-1].split(") || (")))
            else:
                self.clauses = [Clause(clauses)]
        else:
            self.clauses = clauses[:]
    def copy(self) -> "Clauses":
        # Shallow copy: Clause objects are shared with the original.
        return Clauses(self.clauses[:])
    def add(self, defines: Clause) -> "Clauses":
        # Append one more or-ed clause; returns self for chaining.
        self.clauses.append(defines)
        return self
    def empty(self) -> bool:
        # Empty disjunction == unconditional (no #if needed).
        return self.clauses == []
    def splitdef(self) -> Sequence[int]:
        """
        splitdef -- Sorting key function for #ifdefs
        All #if defined(...) are sorted first by the length of its string
        representation, then by the number of clauses, then by the number of
        '&&' in each clause and then by the "key" of the tested names (left to
        right, inverted placed after non-inverted).
        """
        ret = [len(str(self)), len(self.clauses)] if len(self.clauses) > 0 else [-1]
        for cunj in self.clauses:
            ret.append(len(cunj.defines))
        for cunj in self.clauses:
            for d in cunj.defines:
                ret.append(d.value())
        return ret
    def reduce(self) -> None:
        """
        reduce -- Reduces the number of clauses in-place
        Removes the most possible number of conditions, both by removing
        conditions and by removing entire clauses.
        As a side effect, sorts itself.
        """
        # Early breaks
        # An empty clause is always true, so the whole disjunction is.
        if any(c.empty() for c in self.clauses):
            self.clauses = []
            return
        if len(self.clauses) == 0:
            return
        elif len(self.clauses) == 1:
            # Single clause: drop duplicate atoms; an atom plus its
            # negation should be impossible here.
            clause = Clause()
            for define in self.clauses[0].defines:
                if define in clause.defines:
                    continue
                elif define.inverted() in clause.defines:
                    clause = Clause(',') # This should never happen (and never happens without breaking encapsulation)
                else:
                    clause.append(define)
            clause.defines.sort(key=lambda d: Define.defines.index(d.name))
            self.clauses = [clause]
            return
        elif len(self.clauses) == 2:
            # Special case: A || !A is always true.
            if len(self.clauses[0].defines) == len(self.clauses[1].defines) == 1:
                if self.clauses[0].defines[0].inverted() == self.clauses[1].defines[0]:
                    self.clauses = []
                    return
        # Quine-McCluskey algorithm
        # matches: list of (matches, inverted_mask)
        # Step 1: enumerate every minterm (bit i set <=> define i true)
        # satisfied by at least one clause.
        needed: List[Tuple[int, int]] = [
            (i, 0)
            for i in range(1<<len(Define.defines))
            if any( # i matches any clause
                all( # i matches all conditions in the clause
                    (i & (1<<Define.defines.index(define.name)) == 0) == define.inverted_
                    for define in clause.defines)
                for clause in self.clauses)
        ]
        # Step 2: repeatedly merge implicant pairs that differ in exactly
        # one non-masked bit; whatever never merges is a prime implicant.
        last_combined = needed[:]
        uncombinable: List[Tuple[int, int]] = []
        while len(last_combined) > 0:
            combined: List[Tuple[int, int]] = []
            combinable: List[bool] = [False] * len(last_combined)
            while len(last_combined) > 0:
                attempt = last_combined[-1]
                for idx, (i, m) in enumerate(last_combined):
                    if idx == len(last_combined) - 1:
                        # attempt compared against itself: record it as a
                        # prime implicant if nothing merged with it.
                        if not combinable[idx]:
                            uncombinable.append(attempt)
                    elif m == attempt[1]:
                        if (i ^ attempt[0]) & ((i ^ attempt[0]) - 1) != 0:
                            continue # More than 1 bit of difference
                        combinable[idx] = True
                        combinable[len(last_combined) - 1] = True
                        add = (i | attempt[0], m | (i ^ attempt[0]))
                        if add in combined:
                            continue # Aleady added
                        combined.append(add)
                last_combined.pop()
            last_combined = combined
        # Step 3: for each needed minterm, collect the prime implicants
        # covering it, then greedily pick implicants that are the unique
        # (then least-shared) cover, smallest cover-count first.
        matches: Dict[int, List[Tuple[int, int]]] = {
            i: [combination for combination in uncombinable if (i | combination[1]) == combination[0]] for i, _ in needed
        }
        self.clauses = []
        matches_size: int = 1
        while len(matches) != 0:
            match_found = True
            while match_found:
                match_found = False
                for i in matches:
                    if len(matches[i]) < matches_size:
                        raise NotImplementedError("There seems to be an error in the algorithm")
                    elif len(matches[i]) == matches_size:
                        match_found = True
                        # Turn the chosen implicant back into a Clause:
                        # masked bits are dropped, zero bits are inverted.
                        self.clauses.append(
                            Clause([
                                Define(
                                    n,
                                    matches[i][0][0] & (1 << j) == 0
                                ) for j, n in enumerate(Define.defines) if matches[i][0][1] & (1 << j) == 0
                            ]))
                        self.clauses[-1].defines.sort(key=lambda d: Define.defines.index(d.name))
                        # Remove every minterm this implicant covers.
                        to_erase: List[int] = []
                        for j in matches:
                            if matches[i][0] in matches[j]:
                                to_erase.append(j)
                        for j in to_erase:
                            del matches[j]
                        break
            matches_size = matches_size + 1
        self.clauses.sort(key=lambda c: (len(c.defines), [Define.defines.index(d.name) for d in c.defines]))
    def __str__(self) -> ClausesStr:
        if len(self.clauses) == 1:
            return str(self.clauses[0])
        else:
            return "(" + ") || (".join(map(str, self.clauses)) + ")"
    def __hash__(self):
        return hash(str(self))
    def __eq__(self, o) -> bool:
        return isinstance(o, Clauses) and (self.clauses == o.clauses)
# Aggregate type aliases used by the parsing and output phases.
# CustOrderedDictList is a project container defined elsewhere in this file.
JumbledFunctions = CustOrderedDictList[Clause, Function]    # functions grouped by #if clause
FilesSpecific = Dict[Filename, FileSpec]                    # per-file parse results
SortedGlobals = CustOrderedDictList[Clauses, FunctionType]  # signatures grouped by reduced clauses
SortedRedirects = CustOrderedDictList[Clauses, FunctionType]
def readFiles(files: Iterable[str]) -> Tuple[JumbledFunctions, JumbledFunctions, FilesSpecific]:
"""
readFiles
This function is the one that parses the files.
"""
gbls: JumbledFunctions = CustOrderedDictList()
redirects: JumbledFunctions = CustOrderedDictList()
filespecs: FilesSpecific = {}
symbols: Dict[str, Filename] = {}
need_halt: bool = False
for filepath in files:
filename: Filename = filepath.split("/")[-1]
dependants: Clause = Clause()
filespec = FileSpec()
filespecs[filename[:-10]] = filespec
def add_symbol_name(symname: Optional[str], weak: bool = False, symsname: Dict[str, List[Tuple[str, bool]]] = {"": []}):
# Optional arguments are evaluated only once!
nonlocal need_halt
if symname is None:
for c in symsname:
if (c != "") and (len(symsname[c]) != 0):
# Note: if this condition ever raises, check the wrapper pointed by it.
# If you find no problem, comment the error below, add a "pass" below (so python is happy)
# and open a ticket so I can fix this.
raise NotImplementedError("Some symbols are only implemented under one condition '{0}' (probably) ({1}/{2})"
.format(c, symsname[c][0][0], filename) | |
New Algorithms for Simulating Dynamical Friction
<NAME>, <NAME>, <NAME> — RadiaSoft, LLC
This notebook describes—and documents in code—algorithms for simulating
the dynamical friction experienced by ions in the presence of magnetized electrons.
The $\LaTeX$ preamble is here. $$ %% math text \newcommand{\hmhsp}{\mspace{1mu}}% math hair space \newcommand{\mhsp}{\mspace{2mu}}% math hair space \newcommand{\ud}{\mathop{}\!\mathrm{d}}% upright d for differential \newcommand{\ui}{\mathrm{i}}% upright i for imaginary unit \newcommand{\ue}{\mathrm{e}}% upright e for Euler number %% \newcommand{\Mion}{m_\text{ion}} \newcommand{\Me}{m_\text{e}} %% \newcommand{\vQion}{\vec{q}_\text{ion}} \newcommand{\vPion}{\vec{p}_\text{ion}} \newcommand{\Qion}[1]{#1_\text{ion}} \newcommand{\Pion}[1]{p_{\text{ion},\hmhsp#1}} %% \newcommand{\vQe}{\vec{q}_\text{e}} \newcommand{\vPe}{\vec{p}_\text{e}} \newcommand{\Qe}[1]{#1_\text{e}} \newcommand{\Pe}[1]{p_{\text{e},\hmhsp#1}} %% \newcommand{\Map}[2][]{\mathcal{#2}^{#1}} %% \newcommand{\pgc}{p_\text{gc}} \newcommand{\xgc}{x_\text{gc}} \newcommand{\ygc}{y_\text{gc}} $$
In [3]:
""" Python preamble """
%matplotlib inline
In [4]:
print mp
NameErrorTraceback (most recent call last)
<ipython-input-4-f80345107578> in <module>()
----> 1 print mp
NameError: name 'mp' is not defined
In [5]:
""" Python preamble (cont.) """
from __future__ import division
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LogNorm
import matplotlib as mpl
from scipy.constants import pi
from scipy.constants import speed_of_light as clight
from scipy.constants import epsilon_0 as eps0
from scipy.constants import mu_0 as mu0
from scipy.constants import elementary_charge as qe
from scipy.constants import electron_mass as me
from scipy.constants import proton_mass as mp
from scipy.constants import Boltzmann as kB
fourPiEps0 = 4 * pi * eps0      # Coulomb-constant denominator, 4*pi*eps0
invFourPiEps0 = 1 / fourPiEps0  # Coulomb constant, 1/(4*pi*eps0)
""" reset some default options """
np.set_printoptions(linewidth=96)
""" indexing """
# Phase-space component indices, in the order (x, px, y, py, z, pz).
(Ix, Ipx, Iy, Ipy, Iz, Ipz) = range(6)
""" prefixes """
# SI prefixes as powers of ten: 1e-15, 1e-12, ..., 1e15 (step 1e3).
(femto, pico, nano, micro, milli, one, kilo, mega, giga, tera, peta) = \
    10. ** np.asarray(range(-15, 15+1, 3))
We define the ion charge and mass here as global parameters.
We do the same for the magnetic field strength $B$ and the
thermal velocity $v_\text{th}$.
Then we compute various related derived quantities.
In [6]:
"""
angular frequency of Larmor rotations
NB: This is a *signed* quantity, which means
that for electrons, say, you must set Z = -1.
"""
def omega_Larmor(mass, B, Z = 1):
    """Signed angular Larmor (cyclotron) frequency Z*qe*B/mass.

    NB: this is signed — for electrons pass Z = -1.
    """
    charge = Z * qe
    return charge * B / mass
# Global physical parameters for the simulation.
Z_ion = 1       # ion charge state
M_ion = mp      # ion mass: proton
B_mag = 1. # Tesla
e_temp = 300. # Kelvin
N_gyro = 100 # a somewhat arbitrary choice, range [100, 160]
""" derived quantities """
V_th = math.sqrt(2 * kB * e_temp / me)     # electron thermal speed
rho_gc = me * V_th / (qe * B_mag)          # thermal Larmor radius
Omega_e = omega_Larmor(me, B_mag, Z = -1)  # signed electron cyclotron frequency
T_e = (2 * pi) / abs(Omega_e)              # cyclotron period
T_intxn = N_gyro * T_e                     # total ion-electron interaction time
# NOTE(review): Python 2 print statements — this notebook cell was written
# for Python 2 and will not run under Python 3 as-is.
print "V_th = ", V_th
print "rho_gc / µm = ", rho_gc / micro
print "Omega_e / s^(-1) = ", Omega_e
print "frequency / GHz = ", Omega_e / (2 * pi) / giga
print "T_e / ns = ", T_e / nano
print "T_intxn / ns = ", T_intxn / nano
V_th = 95361.4171888
rho_gc / µm = 0.542189740332
Omega_e / s^(-1) = -1.7588200236e+11
frequency / GHz = -27.9924900765
T_e / ns = 0.0357238672682
T_intxn / ns = 3.57238672682
Two-body Magnetized Collisions
The Hamiltonian for a two-body interaction between an ion and a magnetized electron is $$ \vphantom{\Big]} H(\vQion, \vPion, \vQe, \vPe) = H_0(\vPion, \Qe{y}, \vPe)
+ H_\text{C}(\vQion, \vQe)
$$ where $$$$\begin{align} H_0(\vPion, \Qe{y}, \vPe) &= \frac{1}{2\Mion}\bigl(\Pion{x}^2 + \Pion{y}^2 + \Pion{z}^2\bigr) + \frac{1}{2\Me}\bigl((\Pe{x} + e B \Qe{y})^2 + \Pe{y}^2 + \Pe{z}^2\bigr),\\[1ex] H_\text{C}(\vQion, \vQe) &= -\frac{Ze^2}{4\pi\varepsilon_0} \big/ {\sqrt{(\Qion{x}-\Qe{x})^2 + (\Qion{y}-\Qe{y})^2 + (\Qion{z}-\Qe{z})^2}}, \end{align}$$
and $e$ denotes the elementary quantum of charge.
The simplest second-order scheme for integrating this system uses
a split-operator approach: We approximate the total map $\Map{M}$ for a
time step of size $h$ by the symmetric form $$ \vphantom{\Big]} \Map{M}(h) \approx \Map{M}_0(h/2) \Map{M}_C(h) \Map{M}_0(h/2) $$ where $\Map{M}_0$ and $\Map{M}_C$ are the exact maps for the Hamiltonians $H_0$
and $H_C$ respectively. The map $\Map{M}_0$ is a simple linear map. The map
$\Map{M}_C$ generates a nonlinear kick of both ion and electron momenta.
Hamiltonians for Two-body Magnetized Collisions
In [5]:
"""
Hamiltonian for free ion and electron in a magnetic field, under
the assuption that the ion is unaffected by that magnetic field.
Arguments:
z_i (ndArray): 6 x N array of canonical coördinates
and conjugate momenta for the ions
z_e (ndArray): 6 x N array of canonical coördinates
and conjugate momenta for the electrons
In both of the above arrays, the six phase-space variables
are given in the order(x, px, y, py, z, pz)
Return:
the total 'free' energy of each ion-electron pair
"""
def H_twobody_0(z_i, z_e):
    """Free (non-interacting) part of the two-body Hamiltonian.

    z_i, z_e: 6 x N arrays of canonical coordinates/momenta in the order
    (x, px, y, py, z, pz); returns the 'free' energy of each pair.
    """
    # Ion: plain kinetic energy (unaffected by the magnetic field).
    p2_ion = z_i[Ipx,:] ** 2 + z_i[Ipy,:] ** 2 + z_i[Ipz,:] ** 2
    ham_i = p2_ion / (2 * M_ion)
    # Electron: kinetic energy with the solenoidal field folded into the
    # canonical px (charge is -qe).
    px_kin = z_e[Ipx,:] + (-qe) * B_mag * z_e[Iy,:]
    ham_e = (px_kin ** 2 + z_e[Ipy,:] ** 2 + z_e[Ipz,:] ** 2) / (2 * me)
    return ham_i + ham_e
"""
Hamiltonian for the interaction of each ion-electron pair.
"""
def H_twobody_C(z_i, z_e):
    """Coulomb interaction energy of each ion-electron pair (negative for
    an attractive ion-electron pair)."""
    g_ie = -(Z_ion * qe ** 2) / (4 * pi * eps0)
    dx = z_i[Ix,:] - z_e[Ix,:]
    dy = z_i[Iy,:] - z_e[Iy,:]
    dz = z_i[Iz,:] - z_e[Iz,:]
    return g_ie / np.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
"""
Total Hamiltonian for each ion-electron pair.
"""
def H_twobody(z_i, z_e):
    """Total Hamiltonian (free + Coulomb) for each ion-electron pair."""
    return H_twobody_0(z_i, z_e) + H_twobody_C(z_i, z_e)
Maps for Two-body Magnetized Collisions
In [6]:
"""
define transfer maps for ions and electrons
There are three maps to define here: one each
for ions and electrons under H_0, and another for the ion-electron Coulomb kick under H_C
"""
""" matrix for a linear drift """
def MatD(mass, h):
    """6x6 linear map for a field-free drift of duration h: each
    coordinate gains (h/mass) times its conjugate momentum."""
    drift = np.identity(6)
    for q in (Ix, Iy, Iz):
        drift[q, q + 1] = h / mass
    return drift
""" matrix for linear electron dynamics in a solenoidal field """
def MatK0_e(h):
    """6x6 linear map for electron motion in a solenoidal field over a
    time h (free longitudinal drift plus transverse Larmor rotation)."""
    mw = me * Omega_e            # m * Omega, signed
    wh = Omega_e * h             # rotation angle over the step
    cwh, swh = math.cos(wh), math.sin(wh)
    cwh1m = 2 * math.sin(wh / 2) ** 2 # 1 - cos(a) = 2 sin^2(a / 2)
    MK0 = np.identity(6)
    # (y, py) rotation block
    MK0[Iy, Iy ] = cwh
    MK0[Ipy, Ipy] = cwh
    MK0[Iy, Ipy] = swh / mw
    MK0[Ipy, Iy ] = -mw * swh
    # longitudinal drift
    MK0[Iz, Ipz] = h / me
    # coupling into (x, px)
    MK0[Ix, Ipx] = swh / mw
    MK0[Ix, Iy ] = swh
    MK0[Ix, Ipy] = cwh1m / mw
    MK0[Iy, Ipx] = -cwh1m / mw
    MK0[Ipy, Ipx] = -swh
    return MK0
"""
map phase-space coördinates forward in time by amount h
based on the Hamiltonian H_0, which describes the free
motion of ions and the motion of electrons in a solenoidal
magnetic field
"""
def MapZ_0(h, z_i, z_e):
    """Advance phase-space coordinates by time h under H_0: ions drift
    freely, electrons rotate in the solenoidal field."""
    zf_i = MatD(M_ion, h).dot(z_i)
    zf_e = MatK0_e(h).dot(z_e)
    return zf_i, zf_e
"""
map phase-space coördinates forward in time by amount h
based on the Hamiltonian H_C, which describes the collision
between a single ion-electron pair
"""
def MapZ_C(h, z_i, z_e):
    """Momentum kick of duration h generated by the Coulomb Hamiltonian
    H_C for each ion-electron pair.

    Relies on the index convention Ip? - 1 == I? to pair each momentum
    with its coordinate difference.
    """
    g = h * Z_ion * qe ** 2 / (4 * pi * eps0)
    dz = z_i - z_e
    denom = (dz[Ix,:] ** 2 + dz[Iy,:] ** 2 + dz[Iz,:] ** 2) ** (3/2)
    zf_i, zf_e = z_i.copy(), z_e.copy()
    for ip in (Ipx, Ipy, Ipz):
        kick = g * dz[ip - 1] / denom
        zf_i[ip,:] = z_i[ip,:] - kick
        zf_e[ip,:] = z_e[ip,:] + kick
    return zf_i, zf_e
def apply_MapZ_0(h, n, z_i, z_e):
    """Apply the linear free-motion map n times with step h; returns the
    full trajectories (initial state included) as (n+1)-long arrays."""
    mat_i = MatD(M_ion, h)
    mat_e = MatK0_e(h)
    traj_i, traj_e = [z_i], [z_e]
    for _ in range(n):
        z_i = mat_i.dot(z_i)
        z_e = mat_e.dot(z_e)
        traj_i.append(z_i)
        traj_e.append(z_e)
    return np.asarray(traj_i), np.asarray(traj_e)
""" second-order split-operator integration for the total Hamiltonian """
def apply_MapZ(h, n, z_i, z_e):
    """Integrate n steps of size h with the symmetric second-order
    split-operator scheme M_0(h/2) M_C(h) M_0(h/2); returns the full
    trajectories (initial state included)."""
    hh = 0.5 * h
    mat_i = MatD(M_ion, hh)
    mat_e = MatK0_e(hh)
    traj_i, traj_e = [z_i], [z_e]
    for _ in range(n):
        # first half-step of free motion
        z_i = mat_i.dot(z_i)
        z_e = mat_e.dot(z_e)
        # full-step Coulomb kick
        z_i, z_e = MapZ_C(h, z_i, z_e)
        # second half-step of free motion
        z_e = mat_e.dot(z_e)
        z_i = mat_i.dot(z_i)
        traj_i.append(z_i)
        traj_e.append(z_e)
    return np.asarray(traj_i), np.asarray(traj_e)
Guiding-center Coördinates and $\Theta$-J Coördinates
Transformations To and From Guiding-center Coördinates and $\Theta$-J Coördinates
We transform the electron's transverse phase-space coördinates
using the type-1 generating function $$ F_1(x,y;\, \phi,\ygc) = m\Omega\Bigl[\frac{1}{2}(y - \ygc)^2\cot\phi - y \ygc\Bigr]. $$ This yields the following transformation rules:
to guiding-center coördinates $$ \begin{align} m\Omega &= qB_0, \quad\text{(this is a signed quantity)}\\[1ex] \phi &= \arctan\Bigl(\frac{p_x + e B y}{p_y}\Bigr),\\[1ex] p_\phi &= \frac{1}{2m\Omega}\bigl[(p_x + m\Omega y)^2 + p_y^2\bigr],\\[1ex] \ygc &= -\frac{p_x}{m\Omega},\\[1ex] \pgc &= p_y + m\Omega x. \end{align} $$ from guiding-center coördinates $$ \begin{align} r_L &= \frac{1}{m\Omega}\sqrt{2m\Omega\,p_\phi}, \quad\text{(this is a signed quantity)}\\[1ex] x &= \frac{\pgc}{m\Omega} - r_L\cos\phi,\\[1ex] p_x &= -m\Omega\,\ygc,\\[1ex] y &= \ygc + r_L\sin\phi,\\[1ex] p_y &= m\Omega\,r_L\cos\phi. \end{align} $$
We also require the transformation to and from the coördinates $\Theta$-J: $$ \begin{align} \Theta &= \dotsb, \\ J &= p_\phi + \frac{Ze^2}{4\pi\varepsilon_0} \frac{r_L}{\Omega} \frac{(\Qion{x}-\xgc)\cos\phi - (\Qion{y}-\ygc)\sin\phi}{% \bigl[(\Qion{x}-\Qe{x})^2 + (\Qion{y}-\Qe{y})^2 + (\Qion{z}-\Qe{z})^2
+ r_L^2\bigr]^{3/2}}.
\end{align} $$
$$ \begin{align} \phi &= \dotsb, \\ p_\phi &= \dotsb. \end{align} $$
In [7]:
""" convert to guiding-center coordinates """
def toGuidingCenter(z_e):
mOmega = me * Omega_e
| |
#AUTOGENERATED! DO NOT EDIT! File to edit: dev/09_full_svd_surrogate_script.ipynb (unless otherwise specified).
# Public API of this nbdev-exported module.
__all__ = ['main', 'invPreprocess', 'test_model_rel_err', 'reconFrameOnly', 'predictLatentVectors',
           'get_lin_and_svd_rel_err', 'MLP', 'trainEpoch', 'validEpoch', 'build_latents', 'LatentVectors', 'arg_parser']
#Cell
# --- Must haves ---
import os, sys, argparse
sys.path.append('..')
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.cuda as cuda
import torch.nn as nn
import torchvision
import torch.nn.functional as F
from .mantaflowDatasets import MantaFlowDataset, getSingleSim, createMantaFlowTrainTest
from .utils import create_opt, create_one_cycle, find_lr, printNumModelParams, \
rmse, writeMessage, plotSampleWprediction, plotSampleWpredictionByChannel, \
plotSample, curl, jacobian, stream2uv, create_movie, convertSimToImage, \
pkl_save, pkl_load, reconFrame, rel_err
#from surrogates4sims.models import Generator, Encoder, AE_no_P, AE_xhat_z, AE_xhat_zV2
from .train import trainEpoch, validEpoch
from .svd import MantaFlowSVDDataset
import numpy as np
from tqdm import tqdm
from copy import deepcopy
import matplotlib.pyplot as plt
import pickle
#Cell
# Detect whether we are running inside a notebook; fall back to False when
# nbdev is unavailable.  Narrowed from a bare `except:` so that unrelated
# errors (e.g. KeyboardInterrupt, SystemExit) are not swallowed.
try: from nbdev.imports import IN_NOTEBOOK
except ImportError: IN_NOTEBOOK=False
print("Running in notebook" if IN_NOTEBOOK else "Not running in notebook")
#Cell
def main(args):
    """Train the latent-integration network (LIN) on SVD latent vectors and
    report reconstruction errors for the held-out simulations.

    Reads module-level configuration (versionName, hd, bz, epochs, cps,
    hiddenLayers, activation, patience, tensorboard_rate, tensorboard_direc,
    dataDirec, simLen, testSplit, seed, SVDFn, svd_vec_file,
    numSamplesToKeep, reverseXY — all defined elsewhere in this file) and
    publishes the globals (p, x_mx, x_mn, device, test_data, test_sims)
    consumed by the helper functions below.  When run inside a notebook,
    returns the raw (train_data, test_data) instead of training.
    """
    global p # to do: make this just p_shape and use p.shape because other function seem to just use its shape
    global x_mx, x_mn
    global device
    global test_data, test_sims
    # set the GPU
    os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
    if args.gpu_ids:
        gpu_str_list = ','.join([str(i) for i in args.gpu_ids])
        os.environ["CUDA_VISIBLE_DEVICES"]= gpu_str_list
        versionName_with_args = versionName + '_GPUs{}'.format(gpu_str_list.replace(',',''))
    else:
        versionName_with_args = versionName
    print('='*82 + '\n\nRunning LIN experiments with command line arguments!\n\n'
          + '-'*82 + '\n'*2)
    # Encode the hyper-parameters into the run name used for checkpoints
    # and tensorboard directories.
    versionName_with_args += '_w{}_latentDim{}_hd{}_bz{}_epochs{}'.format(args.window,
                                                                         args.numComponents,
                                                                         hd,
                                                                         bz,
                                                                         epochs)
    print(versionName_with_args)
    print('\n' + '='*82 + '\n')
    device = torch.device('cuda' if torch.cuda.is_available() and args.gpu_ids else 'cpu')
    print('Using device:', device)
    train_data, test_data = build_latents(svd_vec_file, args)
    # instead of training
    # returning these unprocessed datasets for exploration in the interactive notebook
    if IN_NOTEBOOK:
        return train_data, test_data
    # reduce the dimensions of z down to the numComponents
    for idx,d in enumerate(train_data):
        X = d[0][:,:args.numComponents]
        p = d[1]
        train_data[idx] = (X,p)
    for idx,d in enumerate(test_data):
        X = d[0][:,:args.numComponents]
        p = d[1]
        test_data[idx] = (X,p)
    # get max/min along each latent vec dimension
    D = []
    for d in train_data:
        D.append(np.hstack(d))
    D = np.vstack(D)
    x_mx = np.max(D,axis=0)
    x_mn = np.min(D,axis=0)
    # build dataset of latent vectors
    trainDataset = LatentVectors(train_data,mx=x_mx,mn=x_mn,doPreprocess=True,w=args.window,simLen=200)
    testDataset = LatentVectors(test_data,mx=x_mx,mn=x_mn,doPreprocess=True,w=args.window,simLen=200)
    # dataloaders
    trainDataLoader = DataLoader(dataset=trainDataset, batch_size=bz, shuffle=True, drop_last=True)
    testDataLoader = DataLoader(dataset=testDataset, batch_size=bz)
    # build model
    X,y = next(iter(trainDataLoader))
    model = MLP(X, hiddenLayerSizes=hiddenLayers, activation=activation)
    printNumModelParams(model)
    if len(args.gpu_ids) > 1:
        model = nn.DataParallel(model)
    # training loop
    L = nn.MSELoss()
    max_lr = .001
    opt = torch.optim.Adam(model.parameters(), lr=max_lr)
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt,patience=patience)
    versionName_with_args += '_lr{}'.format(str(max_lr))
    try:
        os.mkdir(cps)
    except:
        print("checkpoints directory already exists :)")
    # create a summary writer.
    train_writer = SummaryWriter(os.path.join(tensorboard_direc, versionName_with_args,'train'))
    test_writer = SummaryWriter(os.path.join(tensorboard_direc, versionName_with_args,'valid'))
    tensorboard_recorder_step = 0
    total_steps = 0
    model = model.to(device)
    writeMessage('---------- Started Training ----------', versionName_with_args)
    bestLoss = np.infty
    # loop over the dataset multiple times
    for epoch in tqdm(range(1, epochs+1)):
        writeMessage("\n--- Epoch {0}/{1} ---".format(epoch, epochs), versionName_with_args)
        model.train()
        trainLoss, tensorboard_recorder_step, total_steps = trainEpoch(trainDataLoader,
                                                                       train_writer, model, opt, L,
                                                                       rmse, lr_scheduler,
                                                                       tensorboard_rate, device,
                                                                       tensorboard_recorder_step, total_steps)
        writeMessage("trainLoss: {:.4e}".format(trainLoss),versionName_with_args)
        writeMessage("LR: {:.4e}".format(opt.param_groups[0]['lr']),versionName_with_args)
        # if trainLoss < bestLoss:
        #     bestLoss = trainLoss
        #     writeMessage("Better trainLoss: {:.4e}, Saving models...".format(bestLoss),versionName)
        #     torch.save(model.state_dict(), os.path.join(cps,versionName))
        model.eval()
        valLoss = validEpoch(testDataLoader, test_writer, model, L, rmse, device, tensorboard_recorder_step)
        writeMessage("valLoss: {:.4e}".format(valLoss),versionName_with_args)
        #checkpoint progress
        if valLoss < bestLoss:
            bestLoss = valLoss
            writeMessage("\nBetter valLoss: {:.4e}, Saving models...".format(bestLoss),versionName_with_args)
            torch.save(model.state_dict(), os.path.join(cps,versionName_with_args))
        lr_scheduler.step(trainLoss)
        #lr_scheduler.step(valLoss)
        if opt.param_groups[0]['lr'] < 5e-8:
            # Learning rate has bottomed out; further epochs won't help.
            break
    writeMessage('---------- Finished Training ----------', versionName_with_args)
    # best val loss model
    model.load_state_dict(torch.load(os.path.join(cps,versionName_with_args)))
    model = model.to(device)
    model.eval()
    # relative errors of latent vectors
    Err = []
    for idx in range(len(test_data)):
        e = test_model_rel_err(model,idx,test_data,True)
        Err.append(e)
    plt.savefig(os.path.join(tensorboard_direc,versionName_with_args,'LIN_rel_errors.pdf'))
    print('\nMean latent vector relative error: {}'.format(np.mean(Err)))
    # load dataset of simulation frames
    trainData, testData = createMantaFlowTrainTest(dataDirec,simLen,testSplit,seed)
    testDataset = MantaFlowDataset(testData, reverseXY=reverseXY,numToKeep=numSamplesToKeep, AE=False)
    testDataLoader = DataLoader(dataset=testDataset, batch_size=len(testDataset))
    X_test,p_test = next(iter(testDataLoader))
    c,h,width = X_test.shape[1:]
    test_sims = []
    # Regroup flat frames into per-simulation stacks of simLen frames.
    for i in range(len(test_data[:int(np.nan_to_num(numSamplesToKeep, posinf=1e10)/200)])):
        A = X_test[simLen*i:simLen*(i+1),:]
        test_sims.append(A)
    test_sims = torch.stack(test_sims)
    # load svd vectors
    svd_data = pkl_load(SVDFn)
    svd_vecs = svd_data['spatialVecs'][:,:args.numComponents]
    # relative errors of simulation frames
    svd_rel_err = []
    lin_rel_err = []
    for test_ind in range(len(test_data[:int(np.nan_to_num(numSamplesToKeep, posinf=1e10)/200)])):
        Xhat_LIN, Xhat, ground_truth = get_lin_and_svd_rel_err(test_ind, model, svd_vecs)
        rel_err_svd_only = rel_err(ground_truth,Xhat)
        rel_err_lin = rel_err(ground_truth,Xhat_LIN)
        print('\n\nRelative Errors with/without Reconstructed Simulation Frames\n\n')
        print('Test Ind {} SVD: {} LIN: {}'.format(test_ind,rel_err_svd_only, rel_err_lin))
        svd_rel_err.append(rel_err_svd_only)
        lin_rel_err.append(rel_err_lin)
    results = {"LIN_z_mean_rel_error": Err,
               "svd_rel_err": svd_rel_err,
               "lin_rel_err": lin_rel_err}
    pickle.dump(results, open(os.path.join(tensorboard_direc,versionName_with_args,"results.p"), "wb"))
def invPreprocess(xnew):
    """Undo the LatentVectors scaling: map values from [-1, 1] back to the
    original latent range [x_mn, x_mx] (module globals set in main())."""
    unit = xnew / 2 + .5            # [-1, 1] -> [0, 1]
    return unit * (x_mx - x_mn) + x_mn
def test_model_rel_err(model,test_ind,test_data,doPlot=False):
    """Roll the model forward over one held-out simulation and return the
    per-frame relative error of the predicted latent vectors.

    Uses the module globals simLen, x_mx, x_mn, device and p (set by
    main()).  When doPlot is True the error curve is added to the current
    matplotlib figure.
    """
    # last model
    idx = test_ind # choose one of the test samples
    testDataset = LatentVectors(test_data[idx:idx+1],doPreprocess=True,w=simLen-1,mx=x_mx,mn=x_mn)
    testDataLoader = DataLoader(dataset=testDataset, batch_size=1)
    X,y = next(iter(testDataLoader))
    X.shape, y.shape
    xhat = X.to(device).clone()
    out = []
    for idx in range(y.shape[1]):
        # Autoregressive rollout: feed the prediction back in, pinning the
        # control-parameter channels (last p.shape[1]) to ground truth.
        xhat = model(xhat).clone()
        xhat[:,:,-p.shape[1]:] = y[:,idx:idx+1,-p.shape[1]:]
        out.append(xhat)
    out = torch.stack(out).squeeze()
    yy = y.squeeze().to(device)
    err = []
    for i in range(out.shape[0]):
        # Relative L2 error in the original (un-preprocessed) latent space.
        label = invPreprocess(yy[i].detach().cpu().numpy())
        e = label - invPreprocess(out[i].detach().cpu().numpy())
        err.append(np.linalg.norm(e)/np.linalg.norm(label))
    if doPlot:
        plt.plot(err)
        # NOTE(review): the format() call has no '{}' placeholder, so
        # test_ind never appears in the title — likely unintended.
        plt.title('Test Samples Relative Errors'.format(test_ind))
    return err
def reconFrameOnly(svd_vecs, coeffs):
    """Reconstruct one (2, 128, 96) simulation frame from its SVD basis.

    Args:
        svd_vecs: (N, K) array of spatial SVD vectors with N == 2*128*96.
        coeffs: length-k (k <= K) sequence of latent coefficients.
    Returns:
        (2, 128, 96) ndarray equal to sum_i coeffs[i] * svd_vecs[:, i].
    """
    coeffs = np.asarray(coeffs)
    # One vectorized matrix-vector product replaces the original
    # per-component accumulation loop; starting from float zeros preserves
    # the original output dtype even for integer inputs.
    R = np.zeros(svd_vecs.shape[0],)
    if coeffs.size:
        R = R + svd_vecs[:, :coeffs.shape[0]].dot(coeffs)
    R = R.reshape(2,128,96)
    return R
def predictLatentVectors(model,test_ind):
    """Autoregressively predict the latent-vector sequence for one test
    simulation; returns the stacked (still preprocessed) predictions.

    Uses the module globals test_data, simLen, x_mx, x_mn, device and p
    (set by main()).
    """
    # last model
    idx = test_ind # choose one of the test samples
    testDataset = LatentVectors(test_data[idx:idx+1],doPreprocess=True,w=simLen-1,mx=x_mx,mn=x_mn)
    testDataLoader = DataLoader(dataset=testDataset, batch_size=1)
    X,y = next(iter(testDataLoader))
    xhat = X.to(device).clone()
    out = []
    for idx in range(y.shape[1]):
        # Feed predictions back in, pinning the control-parameter channels
        # (last p.shape[1]) to ground truth.
        xhat = model(xhat).clone()
        xhat[:,:,-p.shape[1]:] = y[:,idx:idx+1,-p.shape[1]:]
        out.append(xhat)
    out = torch.stack(out).squeeze()
    return out
## turn the above into a function:
def get_lin_and_svd_rel_err(test_ind, model, svd_vecs):
    """Reconstruct one test simulation and return
    (Xhat_LIN, Xhat, ground_truth): frames rebuilt from predicted latent
    vectors, frames rebuilt from ground-truth latent vectors, and the raw
    frames.

    NOTE(review): reads the module-level `args` plus the globals test_data
    and test_sims, so it is only usable after argument parsing and main()
    have run.
    """
    z = predictLatentVectors(model,test_ind)
    invPreProcZ = []
    for zz in z:
        invPreProcZ.append(invPreprocess(zz.detach().cpu().numpy()))
    invPreProcZ = np.array(invPreProcZ)
    # reconstruct the sim from the LIN latent vectors
    Xhat_LIN = []
    for zz in invPreProcZ:
        coeffs = zz[:args.numComponents]
        R = reconFrameOnly(svd_vecs,coeffs)
        Xhat_LIN.append(R)
    Xhat_LIN = np.array(Xhat_LIN)
    # reconstruct the sim from the ground truth latent vectors
    # (frame 0 is skipped to align with the one-step-ahead predictions)
    Xhat = []
    for zz in test_data[test_ind][0][1:]:
        coeffs = zz[:args.numComponents]
        R = reconFrameOnly(svd_vecs,coeffs)
        Xhat.append(R)
    Xhat = np.array(Xhat)
    ground_truth = np.array(test_sims[test_ind,1:,:,:,:])
    return Xhat_LIN, Xhat, ground_truth
class MLP(nn.Module):
    """Fully connected network mapping a flattened input back to the same
    dimensionality; used as the latent-integration network (LIN).

    Args:
        X: example batch; only X.shape[1:] is used to size the layers.
        hiddenLayerSizes: widths of the hidden Linear layers.
        activation: activation module inserted after every hidden layer
            (NOTE: one shared module instance, as in the original).
    """
    def __init__(self, X, hiddenLayerSizes = [1024], activation=nn.ELU()):
        super(MLP,self).__init__()
        self.activation = activation
        self.inputSize = X.shape[1:]
        in_features = int(np.prod(self.inputSize))
        # BUGFIX: the original stored this list in `self.modules`, which
        # shadows nn.Module.modules() and breaks any model.modules() call
        # (e.g. by optimizers/utilities).  A local list avoids that.
        layer_list = [nn.Linear(in_features, hiddenLayerSizes[0]), self.activation]
        for idx in range(len(hiddenLayerSizes) - 1):
            layer_list.append(nn.Linear(hiddenLayerSizes[idx], hiddenLayerSizes[idx + 1]))
            layer_list.append(self.activation)
        layer_list.append(nn.Linear(hiddenLayerSizes[-1], in_features))
        self.layers = nn.Sequential(*layer_list)
    def forward(self,x):
        """Apply the MLP; output has the same trailing shape as the input."""
        x = self.layers(x)
        return x
def trainEpoch(myDataLoader, tensorboard_writer, model, opt, loss,
               metric, lr_scheduler, tensorboard_rate, device,
               tensorboard_recorder_step, total_steps):
    """Run one training epoch of the autoregressive LIN rollout.

    Returns (mean epoch loss, tensorboard_recorder_step, total_steps).
    NOTE(review): this definition shadows the trainEpoch imported from
    .train above, and reads the module-level `args` and `p` globals
    (set by main()).
    """
    running_loss = 0.0
    running_rmse = 0.0
    total_loss = 0.0
    for i, sampleBatch in enumerate(myDataLoader, start=1):
        # --- Main Training ---
        combined_loss = 0.
        # gpu
        X,y = sampleBatch[0],sampleBatch[1]
        X = X.to(device)
        y = y.to(device)
        # zero the parameter gradients
        opt.zero_grad()
        y_hat = X.clone()
        predictions = []
        for w_idx in range(args.window):
            # Feed the prediction back in; overwrite the control-parameter
            # channels (last p.shape[1]) with ground truth, and accumulate
            # the loss over the whole window before one backward pass.
            y_hat = model(y_hat).clone()
            y_hat[:,:,-p.shape[1]:] = y[:,w_idx:w_idx+1,-p.shape[1]:]
            predictions.append(y_hat)
            combined_loss += loss(y_hat,y[:,w_idx:w_idx+1,:])
        combined_loss.backward()
        opt.step()
        # loss
        batch_loss = combined_loss.item()
        running_loss += batch_loss
        total_loss += batch_loss
        # --- Metrics Recording ---
        # metrics
        predictions = torch.stack(predictions)
        r = metric(y_hat, y)
        running_rmse += r
        # record lr change
        total_steps += 1
        tensorboard_writer.add_scalar(tag="LR", scalar_value=opt.param_groups[0]['lr'], global_step=total_steps)
        # tensorboard writes
        if (i % tensorboard_rate == 0):
            tensorboard_recorder_step += 1
            avg_running_loss = running_loss/tensorboard_rate
            avg_running_rmse = running_rmse/tensorboard_rate
            tensorboard_writer.add_scalar(tag="Loss", scalar_value=avg_running_loss, global_step=tensorboard_recorder_step)
            tensorboard_writer.add_scalar(tag=metric.__name__, scalar_value=avg_running_rmse, global_step=tensorboard_recorder_step)
            # reset running_loss for the next set of batches. (tensorboard_rate number of batches)
            running_loss = 0.0
            running_rmse = 0.0
    return total_loss/len(myDataLoader), tensorboard_recorder_step, total_steps
def validEpoch(myDataLoader, tensorboard_writer, model, loss, metric,
               device, tensorboard_recorder_step):
    """
    Run one validation epoch (no gradient updates) with the same
    autoregressive rollout as trainEpoch, and log epoch-average loss
    and metric to tensorboard.

    NOTE(review): relies on module-level globals ``args`` (``args.window``)
    and ``p`` (``p.shape[1]``) -- confirm they are defined before calling.

    FIX: removed the dead ``predictions`` list and ``torch.stack`` call
    (the stacked tensor was never used).

    Returns:
        float: mean validation loss over the loader
    """
    running_loss = 0.0
    running_rmse = 0.0
    for i, sampleBatch in enumerate(myDataLoader, start=1):
        combined_loss = 0.
        # Move the batch to the target device.
        X, y = sampleBatch[0], sampleBatch[1]
        X = X.to(device)
        y = y.to(device)
        # Forward passes only -- no gradient bookkeeping.
        with torch.no_grad():
            y_hat = X.clone()
            for w_idx in range(args.window):
                y_hat = model(y_hat).clone()
                # Teacher-force the trailing channels with ground truth.
                y_hat[:, :, -p.shape[1]:] = y[:, w_idx:w_idx + 1, -p.shape[1]:]
                combined_loss += loss(y_hat, y[:, w_idx:w_idx + 1, :])
        running_loss += combined_loss.item()
        # Metric is computed on the final rollout step only.
        r = metric(y_hat, y)
        running_rmse += r
    avg_running_loss = running_loss / len(myDataLoader)
    avg_running_rmse = running_rmse / len(myDataLoader)
    tensorboard_writer.add_scalar(tag="Loss", scalar_value=avg_running_loss, global_step=tensorboard_recorder_step)
    tensorboard_writer.add_scalar(tag=metric.__name__, scalar_value=avg_running_rmse, global_step=tensorboard_recorder_step)
    return avg_running_loss
def build_latents(svd_vec_file, args):
'''
Build Latent Vectors (Warning... The computation of building the latent vectors takes a loooong time.)
If svd_vec_file exists already, the latent vectors will get loaded and this will run quickly
'''
if os.path.exists(svd_vec_file):
data = pkl_load(svd_vec_file)
train_data = data['train_data']
test_data = data['test_data']
else:
user_desire = input("File path doesn't exist, enter 'y' to build latents from scratch? (Takes ~1 hour.)")
if user_desire != 'y':
sys.exit('specify a loadable latent vec file'+
' or type y to create a new one at your specified location')
svd_data = pkl_load(SVDFn)
print(svd_data.keys())
svd_vecs = svd_data['spatialVecs'][:,:args.numComponents]
print(svd_vecs.shape)
trainData, testData = createMantaFlowTrainTest(dataDirec,simLen,testSplit,seed)
print((len(trainData),len(testData)))
def createSVDdataset(trainData):
# datasets may be smaller because: numSamplesToKeep
# Be careful the default is for the data to be preprocessed. Therefore, we have to invPrecprocess if
| |
# Copyright (c) 2018 Sony Pictures Imageworks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base classes for all outline modules."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
from builtins import object
import future.types
from future.utils import with_metaclass
import os
import sys
import logging
import tempfile
import six
import FileSequence
from .config import config
from . import constants
from .depend import Depend
from .depend import DependType
from . import event
from .exception import LayerException
from .exception import SessionException
from . import io
from .loader import current_outline
from . import util
__all__ = ["Layer",
"Frame",
"LayerPreProcess",
"LayerPostProcess",
"OutlinePostCommand"]
logger = logging.getLogger("outline.layer")
DEFAULT_FRAME_RANGE = "1000-1000"
class LayerType(type):
    """
    A meta-class that wraps the creation of layer objects so they can be
    registered with the outline currently being loaded and handed to any
    loaded plugins for initialization.
    """
    def __call__(cls, *args, **kwargs):
        layer = super(LayerType, cls).__call__(*args, **kwargs)
        if current_outline() and layer.get_arg("register"):
            current_outline().add_layer(layer)
        # Imported here (not at module scope) to get past a circular
        # dependency between this module and the plugin system.
        from outline.plugins import PluginManager
        for plugin in PluginManager.get_plugins():
            try:
                plugin.init(layer)
            except AttributeError:
                # Plugins that do not define init() are skipped.
                # (FIX: dropped the unused `as e` binding.)
                pass
        return layer
class Layer(with_metaclass(LayerType, object)):
"""The base class for all outline modules."""
def __init__(self, name, **args):
    """
    Create a new Layer.

    @type name: str
    @param name: The layer name.
    @param args: Arbitrary keyword arguments merged over the defaults
        from get_default_args(); recognized keys visible here include
        "type", "env", "service" and "register".
    """
    object.__init__(self)
    self.__name = name
    # Contains the args hash.
    self.__args = self.get_default_args(args)
    # Default the layer type to the Render type as
    # defined in the constants module
    self.__type = None
    self.set_type(args.get("type", constants.LAYER_TYPES[0]))
    # A set of arguments that is required before
    # the Layer can be launched.
    self.__req_args = set()
    # A list to store what this layer depends on.
    self.__depends = []
    # If this layer is embedded within another layer
    # the parent value will point to that layer.
    self.__parent = None
    # Contains IO objects that are considered input.
    self.__input = { }
    # Contains IO objects that are considered output.
    self.__output = { }
    # A dictionary of environment variables to apply before execute.
    self.__env = { }
    self.__env.update(args.get("env", { }))
    # Children are unregistered layers that are executed
    # after the parent layer.
    self.__children = []
    # The default name of the service.
    self.__service = self.__args.get("service", "shell")
    # The current frame number.
    self.__frame = None
    # Initialize the outline instance.
    self.__outline = None
    # Register an event handler.
    self.__evh = event.EventHandler(self)
    # Keep an array of all pre-process frames.
    self.__preprocess_layers = []
    logger.debug("module %s loaded from %s" % (self.__class__.__name__,
                                               os.path.realpath(__file__)))
def _after_init(self, ol):
    """
    This method should be implemented by a subclass. Executed
    after a layer has been initialized and added to an outline.

    @param ol: The outline the layer was added to.
    """
    pass
def after_init(self, ol):
    """
    Executed after a layer has been initialized and added to an outline.
    Runs the subclass _after_init() hook, then emits an
    event.AFTER_INIT signal.

    @param ol: The outline the layer was added to.
    """
    self._after_init(ol)
    self.__evh.emit(event.LayerEvent(event.AFTER_INIT, self))
def _after_parented(self, parent):
    """
    This method should be implemented by a subclass. Executed after a
    layer has been initialized and added as a child to another layer.

    @param parent: The layer this layer was parented to.
    """
    pass
def after_parented(self, parent):
    """
    Automatically called when this Layer instance is parented to another
    layer instance. Runs the subclass _after_parented() hook, then emits
    an event.AFTER_PARENTED signal.

    @param parent: The layer this layer was parented to.
    """
    self._after_parented(parent)
    self.__evh.emit(event.LayerEvent(event.AFTER_PARENTED, self))
def _before_execute(self):
    """
    This method should be implemented by a subclass. Executed before
    all execute checks are started.
    """
    pass
def before_execute(self):
    """
    Executed before all execute checks are started. Runs the subclass
    _before_execute() hook, then emits an event.BEFORE_EXECUTE signal.
    """
    self._before_execute()
    self.__evh.emit(event.LayerEvent(event.BEFORE_EXECUTE, self))
def _after_execute(self):
    """
    This method should be implemented by a subclass. Executed after
    the execute() method has been run even if the frame failed.
    Used for doing cleanup operations that should run even
    after a frame failure.
    """
    pass
def after_execute(self):
    """
    Executed after the execute() method has been run even if the
    frame failed. Used for doing cleanup operations that should
    run even after a frame failure. Emits an event.AFTER_EXECUTE
    signal carrying the local frame set for the current frame.
    """
    self._after_execute()
    # Resolve the frames covered by this run from the current frame.
    frames = self.get_local_frame_set(self.__frame)
    self.__evh.emit(event.LayerEvent(event.AFTER_EXECUTE, self,
                                     frames=frames))
def system(self, cmd, ignore_error=False, frame=None):
    """
    A convenience method for calling io.system(). Shell out
    to the given command and wait for it to finish.
    @see: L{io.system}
    @type cmd: list<str>
    @param cmd: The command to execute.
    @type ignore_error: boolean
    @param ignore_error: Ignore any L{OSError} or shell command failures.
    @type frame: int
    @param frame: Optional frame number, passed through to io.system.
    """
    io.system(cmd, ignore_error, frame)
def get_default_args(self, merge=None):
    """
    Build and return the default argument hash for this layer class.
    Precedence, lowest to highest: procedural defaults, the class
    section of the configuration file, and finally the optional
    `merge` dictionary of user supplied arguments.
    """
    # No backend specific defaults should be here; those values
    # belong in the relevant backend module or in the outline
    # configuration file.
    defaults = {
        # Registered layers show up as discrete layers; unregistered
        # layers are generally embedded in registered layers.
        "register": True,
        # The default chunk size.
        "chunk": 1,
        # A null frame range defaults to the overall frame range
        # defined in the parent outline.
        "range": None,
    }
    # Configuration-file settings override the procedural defaults.
    if config.has_section(self.__class__.__name__):
        defaults.update(config.items(self.__class__.__name__))
    # User supplied arguments override both defaults and config.
    if merge:
        defaults.update(merge)
    return defaults
def get_parent(self):
    """Return the parent Layer, or None when this layer is not parented."""
    return self.__parent
def set_parent(self, layer):
    """
    Set the parent layer. Raises LayerException when the given
    object is not a Layer.
    """
    if not isinstance(layer, Layer):
        raise LayerException("Parent instance must derive from Layer.")
    self.__parent = layer
def add_child(self, layer):
    """
    Add a child layer to this layer. Child layers are executed after
    the parent layer. The child inherits this layer's outline and is
    notified via its after_parented() hook.
    """
    if not isinstance(layer, Layer):
        raise LayerException("Child instances must derive from Layer.")
    # Order matters: outline and parent are wired up before the
    # child is stored and notified.
    layer.set_outline(self.get_outline())
    layer.set_parent(self)
    self.__children.append(layer)
    layer.after_parented(self)
def add_event_listener(self, event_type, callback):
    """Register a callback for the given event type on this layer's event handler."""
    self.__evh.add_event_listener(event_type, callback)
def get_event_handler(self):
    """
    Return the layer's internal EventHandler.
    """
    return self.__evh
def get_children(self):
    """Return a copy of the list of this layer's child layers."""
    return list(self.__children)
def set_env(self, key, value):
    """
    Set an environment variable to be applied before execute.
    Both key and value are coerced to str. Overwriting an existing
    entry is allowed but logged as a warning.
    """
    if key in self.__env:
        # FIX: Logger.warn is a deprecated alias; use warning().
        logger.warning("Overwriting outline env var: %s, from %s to %s",
                       key, self.__env[key], value)
    self.__env[str(key)] = str(value)
def get_env(self, key, default=None):
    """
    Return the value of the env var that will be set before execute,
    or `default` when the key is not set.
    """
    # FIX: the lookup result was previously discarded (missing
    # `return`), so this method always returned None.
    return self.__env.get(key, default)
def get_name(self):
    """
    Return the layer name, fully qualified as "parent.child" when
    this layer is parented.
    """
    if not self.__parent:
        return self.__name
    return "%s.%s" % (self.__parent.get_name(), self.__name)
def set_name(self, name):
    """
    Set the layer's name. Only allowed while the outline is in init
    mode (mode <= 1); otherwise a LayerException is raised.
    @type name: str
    @param name: A name for the layer.
    """
    if self.__outline and self.__outline.get_mode() > 1:
        msg = "Layer names may only be changed in outline init mode."
        raise LayerException(msg)
    self.__name = name
def get_type(self):
    """
    Returns the general scope or purpose of the Layer. Allowed
    types are:
    - Render: a general purpose rendering layer, has inputs and outputs.
    - Util: a setup/cleanup frame or trivial shell command.
    - Post: a post layer which is kicked off after all other layers have completed.
    """
    return self.__type
def set_type(self, t):
    """
    Set the general scope/purpose of this layer. The value must be
    one of constants.LAYER_TYPES, otherwise LayerException is raised.
    """
    if t in constants.LAYER_TYPES:
        self.__type = t
    else:
        raise LayerException("%s is not a valid layer type: %s" % (
            t, constants.LAYER_TYPES))
def get_outline(self):
    """
    Return the parent outline object. Parented layers delegate to
    their parent so the whole tree shares one outline.
    """
    if not self.__parent:
        return self.__outline
    return self.__parent.get_outline()
def set_outline(self, outline):
    """Set this layer's parent outline to the given outline object."""
    self.__outline = outline
def setup(self):
    """Setup is run once before the job is launched
    to the render farm. This method would be used for
    any pre-launch operations that may be required.
    Verifies required args, runs the subclass _setup() hook,
    recursively sets up child layers, and emits event.SETUP.
    """
    self.check_required_args()
    self._setup()
    for child in self.__children:
        child.setup()
    # Emit the setup event.
    self.__evh.emit(event.LayerEvent(event.SETUP, self))
def _setup(self):
    """This method should be implemented by a subclass to perform
    pre-launch work; the base implementation does nothing."""
    pass
def _execute(self, frame_set):
    """This method should be implemented by a subclass to do the
    layer's actual per-frame work; the base implementation does nothing."""
    pass
def execute(self, frame):
"""
Executes the local frame set. This typically happens
on a cluster node.
@type frame: int
@param frame: The | |
will be ignored.
- In kwargs, tau value cannot be included.
"""
if self.TAU in kwargs:
raise ValueError(
"@tau must be specified when scenario = Scenario(), and cannot be specified here.")
self.tau, self[name] = self._tracker(name).estimate(
model=model, phases=phases, n_jobs=n_jobs, **kwargs)
def phase_estimator(self, phase, name="Main"):
    """
    Return the estimator of the phase.
    Args:
        phase (str): phase name, like 1st, 2nd...
        name (str): phase series name
    Return:
        covsirphy.Estimator: estimator of the phase
    Raises:
        UnExecutedError: estimation has not been run for the phase
    """
    unit = self._tracker_dict[name].series.unit(phase)
    if unit.estimator is None:
        raise UnExecutedError(f'Scenario.estimate(model, phases=["{phase}"], name={name})')
    return unit.estimator
def estimate_history(self, phase, name="Main", **kwargs):
    """
    Show the history of optimization.
    Args:
        phase (str): phase name, like 1st, 2nd...
        name (str): phase series name
        kwargs: keyword arguments of covsirphy.Estimator.history()
    Note:
        If 'Main' was used as @name, main PhaseSeries will be used.
    """
    self.phase_estimator(phase=phase, name=name).history(**kwargs)
def estimate_accuracy(self, phase, name="Main", **kwargs):
    """
    Show the accuracy as a figure.
    Args:
        phase (str): phase name, like 1st, 2nd...
        name (str): phase series name
        kwargs: keyword arguments of covsirphy.Estimator.accuracy()
    Note:
        If 'Main' was used as @name, main PhaseSeries will be used.
    """
    self.phase_estimator(phase=phase, name=name).accuracy(**kwargs)
def simulate(self, variables=None, phases=None, name="Main", y0_dict=None, **kwargs):
    """
    Simulate ODE models with set/estimated parameter values and show it as a figure.
    Args:
        variables (list[str] or str or None): variable names or abbreviated names (as the same as Scenario.records())
        phases (list[str] or None): phases to show or None (all phases)
        name (str): phase series name. If 'Main', main PhaseSeries will be used
        y0_dict(dict[str, float] or None): dictionary of initial values of variables
        kwargs: the other keyword arguments of Scenario.line_plot()
    Returns:
        pandas.DataFrame
            Index
                reset index
            Columns
                - Date (pd.Timestamp): Observation date
                - Country (str): country/region name
                - Province (str): province/prefecture/state name
                - Variables of the main dataset (int): Confirmed etc.
    """
    # Work on a copy so enabling/disabling phases does not mutate
    # the registered tracker.
    tracker = copy.deepcopy(self._tracker(name))
    # Select phases
    if phases is not None:
        tracker.disable(phases=None)
        tracker.enable(phases=phases)
    # Simulation
    try:
        sim_df = tracker.simulate(y0_dict=y0_dict)
    except UnExecutedError:
        raise UnExecutedError("Scenario.trend() or Scenario.add(), and Scenario.estimate(model)") from None
    # Variables to show
    df = sim_df.set_index(self.DATE)
    variables = self._convert_variables(variables, candidates=self.VALUE_COLUMNS)
    # Show figure
    title = f"{self.area}: Simulated number of cases ({name} scenario)"
    self.line_plot(
        df=df.loc[:, variables], title=title, y_integer=True, v=tracker.change_dates(), **kwargs)
    return sim_df
def get(self, param, phase="last", name="Main"):
    """
    Get the parameter value of the phase.
    Args:
        param (str): parameter name (columns in self.summary())
        phase (str): phase name or 'last'
            - if 'last', the value of the last phase will be returned
        name (str): phase series name
    Returns:
        str or int or float
    Note:
        If 'Main' was used as @name, main PhaseSeries will be used.
    """
    summary_df = self.summary(name=name)
    if param not in summary_df.columns:
        raise KeyError(f"@param must be in {', '.join(summary_df.columns)}.")
    target_phase = summary_df.index[-1] if phase == "last" else phase
    return summary_df.loc[target_phase, param]
def _param_history(self, targets, name):
    """
    Return the subset of summary dataframe to select the target of parameter history.
    Args:
        targets (list[str] or str or None): parameters to show (Rt etc.); None selects all selectable columns
        name (str): phase series name
    Returns:
        pandas.DataFrame: selected summary dataframe
    Raises:
        KeyError: targets are not in the columns of summary dataframe
        UnExecutedError: parameter estimation has not been performed for all phases
    """
    series = self._tracker_dict[name].series
    # Models actually used by the phases (None means not estimated yet).
    model_set = {unit.model for unit in series}
    model_set = model_set - set([None])
    parameters = self.flatten([m.PARAMETERS for m in model_set])
    day_params = self.flatten([m.DAY_PARAMETERS for m in model_set])
    selectable_cols = [self.N, *parameters, self.RT, *day_params]
    selectable_set = set(selectable_cols)
    df = series.summary().replace(self.UNKNOWN, None)
    if not selectable_set.issubset(df.columns):
        raise UnExecutedError(
            f'Scenario.estimate(model, phases=None, name="{name}")')
    # Accept a single column name as well as a list; None -> everything.
    targets = [targets] if isinstance(targets, str) else targets
    targets = targets or selectable_cols
    if not set(targets).issubset(selectable_set):
        raise KeyError(
            f"@targets must be selected from {', '.join(selectable_cols)}."
        )
    # Drop phases with any missing value so the numeric cast succeeds.
    df = df.loc[:, targets].dropna(how="any", axis=0)
    return df.astype(np.float64)
@deprecate(
    old="Scenario.param_history(targets: list)",
    new="Scenario.history(target: str)",
    version="2.7.3-alpha")
def param_history(self, targets=None, name="Main", divide_by_first=True,
                  show_figure=True, filename=None, show_box_plot=True, **kwargs):
    """
    Return subset of summary and show a figure to show the history.
    Args:
        targets (list[str] or str or None): parameters to show (Rt etc.); None selects all selectable columns
        name (str): phase series name
        divide_by_first (bool): if True, divide the values by 1st phase's values
        show_box_plot (bool): if True, box plot. if False, line plot
        show_figure (bool): If True, show the result as a figure
        filename (str): filename of the figure, or None (show figure)
        kwargs: keyword arguments of pd.DataFrame.plot or line_plot()
    Returns:
        pandas.DataFrame
    Note:
        If 'Main' was used as @name, main PhaseSeries will be used.
    """
    self._tracker(name)
    # Select target to show (resolves targets=None to all columns).
    df = self._param_history(targets, name)
    # Divide by the first phase parameters
    if divide_by_first:
        df = df / df.iloc[0, :]
        title = f"{self.area}: Ratio to 1st phase parameters ({name} scenario)"
    else:
        title = f"{self.area}: History of parameter values ({name} scenario)"
    if not show_figure:
        return df
    if show_box_plot:
        # FIX: `targets` may still be None here (meaning "all targets");
        # test membership on the resolved dataframe columns instead of
        # `targets` to avoid `TypeError: argument of type 'NoneType' is
        # not iterable` when divide_by_first=False.
        h_values = [1.0] if divide_by_first or self.RT in df.columns else None
        bar_plot(df, title=title, h=h_values, filename=filename, ylabel=None)
        return df
    return self.history_rate(params=targets, name=name, **kwargs)
def adjust_end(self):
    """
    Adjust the last end dates of the registered scenarios, if necessary.
    Every scenario is extended (via Scenario.add) up to the latest
    last-end-date among all registered scenarios.
    Returns:
        covsirphy.Scenario: self
    """
    # The current last end dates
    current_dict = {
        name: self.date_obj(tracker.last_end_date())
        for (name, tracker) in self._tracker_dict.items()}
    # Adjusted end date
    adjusted_str = max(current_dict.values()).strftime(self.DATE_FORMAT)
    for (name, _) in self._tracker_dict.items():
        try:
            self.add(end_date=adjusted_str, name=name)
        except ValueError:
            # Scenario already reaches the adjusted date: nothing to add.
            pass
    return self
def _describe(self, y0_dict=None):
    """
    Describe representative values.
    Args:
        y0_dict (dict or None): dictionary of initial values or None
            - key (str): variable name
            - value (float): initial value
    Returns:
        pandas.DataFrame
            Index
                (int): scenario name
            Columns
                - max(Infected): max value of Infected
                - argmax(Infected): the date when Infected shows max value
                - Confirmed({date}): Confirmed on the next date of the last phase
                - Infected({date}): Infected on the next date of the last phase
                - Fatal({date}): Fatal on the next date of the last phase
    """
    _dict = {}
    for (name, _) in self._tracker_dict.items():
        # Predict the number of cases (figure display suppressed).
        df = self.simulate(name=name, y0_dict=y0_dict, show_figure=False)
        df = df.set_index(self.DATE)
        cols = df.columns[:]
        last_date = df.index[-1]
        # Max value of Infected
        max_ci = df[self.CI].max()
        argmax_ci = df[self.CI].idxmax().strftime(self.DATE_FORMAT)
        # Confirmed on the next date of the last phase
        last_c = df.loc[last_date, self.C]
        # Infected on the next date of the last phase
        last_ci = df.loc[last_date, self.CI]
        # Fatal on the next date of the last phase (None when absent).
        last_f = df.loc[last_date, self.F] if self.F in cols else None
        # Save representative values
        last_date_str = last_date.strftime(self.DATE_FORMAT)
        _dict[name] = {
            f"max({self.CI})": max_ci,
            f"argmax({self.CI})": argmax_ci,
            f"{self.C} on {last_date_str}": last_c,
            f"{self.CI} on {last_date_str}": last_ci,
            f"{self.F} on {last_date_str}": last_f,
        }
    return pd.DataFrame.from_dict(_dict, orient="index")
def describe(self, y0_dict=None, with_rt=True):
    """
    Describe representative values.
    Args:
        y0_dict (dict or None): dictionary of initial values or None
            - key (str): variable name
            - value (float): initial value
        with_rt (bool): whether show the history of Rt values
    Returns:
        pandas.DataFrame:
            Index
                str: scenario name
            Columns
                - max(Infected): max value of Infected
                - argmax(Infected): the date when Infected shows max value
                - Confirmed({date}): Confirmed on the next date of the last phase
                - Infected({date}): Infected on the next date of the last phase
                - Fatal({date}): Fatal on the next date of the last phase
                - nth_Rt etc.: Rt value if the values are not the same values
    """
    df = self._describe(y0_dict=y0_dict)
    # Rt history is only meaningful when comparing multiple scenarios.
    if not with_rt or len(self._tracker_dict) == 1:
        return df
    # History of reproduction number
    rt_df = self.summary().reset_index()
    rt_df = rt_df.pivot_table(index=self.SERIES, columns=self.PHASE, values=self.RT)
    rt_df = rt_df.fillna(self.UNKNOWN)
    # Keep only phases whose Rt differs across scenarios.
    rt_df = rt_df.loc[:, rt_df.nunique() > 1]
    cols = sorted(rt_df, key=self.str2num)
    return df.join(rt_df[cols].add_suffix(f"_{self.RT}"), how="left")
def _track_param(self, name):
    """
    Get the history of parameters for the scenario.
    Args:
        name (str): phase series name
    Returns:
        pandas.DataFrame:
            Index Date (pandas.TimeStamp)
            Columns
                - Population (int)
                - Rt (float)
                - parameter values (float)
                - day parameter values (float)
    """
    df = self.summary(name=name).replace(self.UNKNOWN, None)
    # Date range to dates: expand each phase's [start, end] span to
    # one row per date.
    df[self.START] = pd.to_datetime(df[self.START])
    df[self.END] = pd.to_datetime(df[self.END])
    df[self.DATE] = df[[self.START, self.END]].apply(
        lambda x: pd.date_range(x[0], x[1]).tolist(), axis=1)
    df = df.reset_index(drop=True).explode(self.DATE)
    # Columns: drop metadata not needed for the per-date history.
    df = df.drop(
        [
            self.TENSE, self.START, self.END, self.ODE, self.TAU,
            *Evaluator.metrics(), self.TRIALS, self.RUNTIME
        ],
        axis=1, errors="ignore")
    df = df.set_index(self.DATE)
    # Force numeric dtypes; non-numeric entries become NaN.
    for col in df.columns:
        df[col] = pd.to_numeric(df[col], errors="coerce")
    df[self.N] = df[self.N].astype(np.int64)
    return df
def _track(self, phases=None, name="Main", y0_dict=None):
"""
Show values of parameters and variables in one dataframe for the scenario.
Args:
phases (list[str] or None): phases to shoe or None | |
# coding: utf-8
"""
MailMojo API
v1 of the MailMojo API # noqa: E501
OpenAPI spec version: 1.1.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mailmojo_sdk.api_client import ApiClient
class ListApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Bind this API to the given client; a default ApiClient is created when omitted."""
    self.api_client = api_client if api_client is not None else ApiClient()
def create_segment(self, list_id, segment, **kwargs):  # noqa: E501
    """Create a segment in the email list. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_segment(list_id, segment, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param object list_id: ID of the email list to create a segment in. (required)
    :param SegmentCreation segment: (required)
    :return: Segment
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The delegate returns a thread when async_req is set and the
    # deserialized data otherwise, so one return covers both cases.
    return self.create_segment_with_http_info(list_id, segment, **kwargs)  # noqa: E501
def create_segment_with_http_info(self, list_id, segment, **kwargs):  # noqa: E501
    """Create a segment in the email list. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_segment_with_http_info(list_id, segment, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param object list_id: ID of the email list to create a segment in. (required)
    :param SegmentCreation segment: (required)
    :return: Segment
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['list_id', 'segment']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Validate kwargs against the accepted parameter names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_segment" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `create_segment`")  # noqa: E501
    # verify the required parameter 'segment' is set
    if ('segment' not in params or
            params['segment'] is None):
        raise ValueError("Missing the required parameter `segment` when calling `create_segment`")  # noqa: E501
    collection_formats = {}
    # Build the request pieces: path, query, headers, form and body.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    if 'segment' in params:
        body_params = params['segment']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501
    return self.api_client.call_api(
        '/v1/lists/{list_id}/segments/', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Segment',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_list_by_id(self, list_id, **kwargs):  # noqa: E501
    """Retrieve an email list. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_list_by_id(list_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param int list_id: ID of the email list to retrieve. (required)
    :return: ListDetail
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The delegate returns a thread when async_req is set and the
    # deserialized data otherwise, so one return covers both cases.
    return self.get_list_by_id_with_http_info(list_id, **kwargs)  # noqa: E501
def get_list_by_id_with_http_info(self, list_id, **kwargs):  # noqa: E501
    """Retrieve an email list. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_list_by_id_with_http_info(list_id, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param int list_id: ID of the email list to retrieve. (required)
    :return: ListDetail
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['list_id']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Validate kwargs against the accepted parameter names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_list_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'list_id' is set
    if ('list_id' not in params or
            params['list_id'] is None):
        raise ValueError("Missing the required parameter `list_id` when calling `get_list_by_id`")  # noqa: E501
    collection_formats = {}
    # Build the request pieces: path, query, headers, form and body.
    path_params = {}
    if 'list_id' in params:
        path_params['list_id'] = params['list_id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501
    return self.api_client.call_api(
        '/v1/lists/{list_id}/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ListDetail',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_lists(self, **kwargs):  # noqa: E501
    """Retrieve all email lists. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_lists(async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :return: list[List]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The delegate returns a thread when async_req is set and the
    # deserialized data otherwise, so one return covers both cases.
    return self.get_lists_with_http_info(**kwargs)  # noqa: E501
def get_lists_with_http_info(self, **kwargs):  # noqa: E501
    """Retrieve all email lists. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_lists_with_http_info(async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :return: list[List]
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = []  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')
    # Validate kwargs against the accepted parameter names.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_lists" % key
            )
        params[key] = val
    del params['kwargs']
    collection_formats = {}
    # Build the request pieces: path, query, headers, form and body.
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501
    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501
    # Authentication setting
    auth_settings = ['mailmojo_auth']  # noqa: E501
    return self.api_client.call_api(
        '/v1/lists/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[List]',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_subscriber_on_list_by_email(self, list_id, email, **kwargs):  # noqa: E501
    """Retrieve a subscriber. # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_subscriber_on_list_by_email(list_id, email, async_req=True)
    >>> result = thread.get()
    :param async_req bool
    :param int list_id: ID of the email list to retrieve the subscriber from. (required)
    :param str email: Email address of the contact to retrieve. (required)
    :return: Subscriber
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # The delegate returns a thread when async_req is set and the
    # deserialized data otherwise, so one return covers both cases.
    return self.get_subscriber_on_list_by_email_with_http_info(list_id, email, **kwargs)  # noqa: E501
def get_subscriber_on_list_by_email_with_http_info(self, list_id, email, **kwargs): # noqa: E501
"""Retrieve a subscriber. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_subscriber_on_list_by_email_with_http_info(list_id, email, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int list_id: ID of the email list to retrieve the subscriber from. (required)
:param str email: Email address of the contact to retrieve. (required)
:return: Subscriber
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id', 'email'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_subscriber_on_list_by_email" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling `get_subscriber_on_list_by_email`") # noqa: E501
# verify | |
import aerosandbox.numpy as np
from aerosandbox.optimization.opti import Opti
from typing import Union, Dict, Callable, List
from aerosandbox.modeling.surrogate_model import SurrogateModel
import copy
import warnings
class FittedModel(SurrogateModel):
"""
A model that is fitted to data. Maps from R^N -> R^1.
You can evaluate this model at a given point by calling it just like a function, e.g.:
>>> my_fitted_model = FittedModel(...) # See FittedModel.__init__ docstring for syntax
>>> y = my_fitted_model(x)
The input to the model (`x` in the example above) is of the type:
* in the general N-dimensional case, a dictionary where: keys are variable names and values are float/array
* in the case of a 1-dimensional input (R^1 -> R^1), a float/array.
If you're not sure what the input type of `my_fitted_model` should be, just do:
>>> print(my_fitted_model) # Displays the valid input type to the model
The output of the model (`y` in the example above) is always a float or array.
See the docstring __init__ method of FittedModel for more details of how to instantiate and use FittedModel.
One might have expected a fitted model to be a literal Python function rather than a Python class - the
benefit of having FittedModel as a class rather than a function is that you can easily save (pickle) classes
including data (e.g. parameters, x_data, y_data), but you can't do that with functions. And, because the
FittedModel class has a __call__ method, you can basically still just think of it like a function.
"""
def __init__(self,
model: Callable[
[
Union[np.ndarray, Dict[str, np.ndarray]],
Dict[str, float]
],
np.ndarray
],
x_data: Union[np.ndarray, Dict[str, np.ndarray]],
y_data: np.ndarray,
parameter_guesses: Dict[str, float],
parameter_bounds: Dict[str, tuple] = None,
residual_norm_type: str = "L2",
fit_type: str = "best",
weights: np.ndarray = None,
put_residuals_in_logspace: bool = False,
verbose=True,
):
"""
Fits an analytical model to n-dimensional unstructured data using an automatic-differentiable optimization approach.
Args:
model: The model that you want to fit your dataset to. This is a callable with syntax f(x, p) where:
* x is a dict of dependent variables. Same format as x_data [dict of 1D ndarrays of length n].
* If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead interpret x
as a 1D ndarray. (If you do this, just give `x_data` as an array.)
* p is a dict of parameters. Same format as param_guesses [dict with syntax param_name:param_value].
Model should return a 1D ndarray of length n.
Basically, if you've done it right:
>>> model(x_data, parameter_guesses)
should evaluate to a 1D ndarray where each x_data is mapped to something analogous to y_data. (The fit
will likely be bad at this point, because we haven't yet optimized on param_guesses - but the types
should be happy.)
Model should use aerosandbox.numpy operators.
The model is not allowed to make any in-place changes to the input `x`. The most common way this
manifests itself is if someone writes something to the effect of `x += 3` or similar. Instead, write `x =
x + 3`.
x_data: Values of the dependent variable(s) in the dataset to be fitted. This is a dictionary; syntax is {
var_name:var_data}.
* If the model is one-dimensional (e.g. f(x1) instead of f(x1, x2, x3...)), you can instead supply x_data
as a 1D ndarray. (If you do this, just treat `x` as an array in your model, not a dict.)
y_data: Values of the independent variable in the dataset to be fitted. [1D ndarray of length n]
parameter_guesses: a dict of fit parameters. Syntax is {param_name:param_initial_guess}.
* Parameters will be initialized to the values set here; all parameters need an initial guess.
* param_initial_guess is a float; note that only scalar parameters are allowed.
parameter_bounds: Optional: a dict of bounds on fit parameters. Syntax is {"param_name":(min, max)}.
* May contain only a subset of param_guesses if desired.
* Use None to represent one-sided constraints (i.e. (None, 5)).
residual_norm_type: What error norm should we minimize to optimize the fit parameters? Options:
* "L1": minimize the L1 norm or sum(abs(error)). Less sensitive to outliers.
* "L2": minimize the L2 norm, also known as the Euclidian norm, or sqrt(sum(error ** 2)). The default.
* "Linf": minimize the L_infinty norm or max(abs(error)). More sensitive to outliers.
fit_type: Should we find the model of best fit (i.e. the model that minimizes the specified residual norm),
or should we look for a model that represents an upper/lower bound on the data (useful for robust surrogate
modeling, so that you can put bounds on modeling error):
* "best": finds the model of best fit. Usually, this is what you want.
* "upper bound": finds a model that represents an upper bound on the data (while still trying to minimize
the specified residual norm).
* "lower bound": finds a model that represents a lower bound on the data (while still trying to minimize
the specified residual norm).
weights: Optional: weights for data points. If not supplied, weights are assumed to be uniform.
* Weights are automatically normalized. [1D ndarray of length n]
put_residuals_in_logspace: Whether to optimize using the logarithmic error as opposed to the absolute error
(useful for minimizing percent error).
Note: If any model outputs or data are negative, this will raise an error!
verbose: Should the progress of the optimization solve that is part of the fitting be displayed? See
`aerosandbox.Opti.solve(verbose=)` syntax for more details.
Returns: A model in the form of a FittedModel object. Some things you can do:
>>> y = FittedModel(x) # evaluate the FittedModel at new x points
>>> FittedModel.parameters # directly examine the optimal values of the parameters that were found
>>> FittedModel.plot() # plot the fit
"""
super().__init__()
##### Prepare all inputs, check types/sizes.
### Flatten all inputs
def flatten(input):
return np.array(input).flatten()
try:
x_data = {
k: flatten(v)
for k, v in x_data.items()
}
x_data_is_dict = True
except AttributeError: # If it's not a dict or dict-like, assume it's a 1D ndarray dataset
x_data = flatten(x_data)
x_data_is_dict = False
y_data = flatten(y_data)
n_datapoints = np.length(y_data)
### Handle weighting
if weights is None:
weights = np.ones(n_datapoints)
else:
weights = flatten(weights)
sum_weights = np.sum(weights)
if sum_weights <= 0:
raise ValueError("The weights must sum to a positive number!")
if np.any(weights < 0):
raise ValueError("No entries of the weights vector are allowed to be negative!")
weights = weights / np.sum(weights) # Normalize weights so that they sum to 1.
### Check format of parameter_bounds input
if parameter_bounds is None:
parameter_bounds = {}
for param_name, v in parameter_bounds.items():
if param_name not in parameter_guesses.keys():
raise ValueError(
f"A parameter name (key = \"{param_name}\") in parameter_bounds was not found in parameter_guesses.")
if not np.length(v) == 2:
raise ValueError(
"Every value in parameter_bounds must be a tuple in the format (lower_bound, upper_bound). "
"For one-sided bounds, use None for the unbounded side.")
### If putting residuals in logspace, check positivity
if put_residuals_in_logspace:
if not np.all(y_data > 0):
raise ValueError("You can't fit a model with residuals in logspace if y_data is not entirely positive!")
### Check dimensionality of inputs to fitting algorithm
relevant_inputs = {
"y_data" : y_data,
"weights": weights,
}
try:
relevant_inputs.update(x_data)
except TypeError:
relevant_inputs.update({"x_data": x_data})
for key, value in relevant_inputs.items():
# Check that the length of the inputs are consistent
series_length = np.length(value)
if not series_length == n_datapoints:
raise ValueError(
f"The supplied data series \"{key}\" has length {series_length}, but y_data has length {n_datapoints}.")
##### Formulate and solve the fitting optimization problem
### Initialize an optimization environment
opti = Opti()
### Initialize the parameters as optimization variables
params = {}
for param_name, param_initial_guess in parameter_guesses.items():
if param_name in parameter_bounds:
params[param_name] = opti.variable(
init_guess=param_initial_guess,
lower_bound=parameter_bounds[param_name][0],
upper_bound=parameter_bounds[param_name][1],
)
else:
params[param_name] = opti.variable(
init_guess=param_initial_guess,
)
### Evaluate the model at the data points you're trying to fit
x_data_original = copy.deepcopy(
x_data) # Make a copy of x_data so that you can determine if the model did | |
# These requirements were auto generated
# from software requirements specification (SRS)
# document by TestFlows v1.6.201216.1172002.
# Do not edit by hand but re-generate instead
# using 'tfs requirements generate' command.
from testflows.core import Specification
from testflows.core import Requirement
Heading = Specification.Heading
# --- SRS-008 section 4.1: general requirements for the AES encryption functions ---
# NOTE(review): this module is generated by TestFlows ('tfs requirements generate');
# comments added here will be lost on regeneration — prefer fixing the SRS source.
RQ_SRS008_AES_Functions = Requirement(
    name='RQ.SRS008.AES.Functions',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support [AES] encryption functions to encrypt and decrypt data.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.1')
RQ_SRS008_AES_Functions_Compatability_MySQL = Requirement(
    name='RQ.SRS008.AES.Functions.Compatability.MySQL',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support [AES] encryption functions compatible with [MySQL 5.7].\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.2')
RQ_SRS008_AES_Functions_Compatability_Dictionaries = Requirement(
    name='RQ.SRS008.AES.Functions.Compatability.Dictionaries',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support encryption and decryption of data accessed on remote\n'
        '[MySQL] servers using [MySQL Dictionary].\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.3')
RQ_SRS008_AES_Functions_Compatability_Engine_Database_MySQL = Requirement(
    name='RQ.SRS008.AES.Functions.Compatability.Engine.Database.MySQL',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Database Engine],\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.4')
RQ_SRS008_AES_Functions_Compatability_Engine_Table_MySQL = Requirement(
    name='RQ.SRS008.AES.Functions.Compatability.Engine.Table.MySQL',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Table Engine].\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.5')
RQ_SRS008_AES_Functions_Compatability_TableFunction_MySQL = Requirement(
    name='RQ.SRS008.AES.Functions.Compatability.TableFunction.MySQL',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support encryption and decryption of data accessed using [MySQL Table Function].\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.6')
RQ_SRS008_AES_Functions_DifferentModes = Requirement(
    name='RQ.SRS008.AES.Functions.DifferentModes',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL allow different modes to be supported in a single SQL statement\n'
        'using explicit function parameters.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.7')
RQ_SRS008_AES_Functions_DataFromMultipleSources = Requirement(
    name='RQ.SRS008.AES.Functions.DataFromMultipleSources',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support handling encryption and decryption of data from multiple sources\n'
        'in the `SELECT` statement, including [ClickHouse] [MergeTree] table as well as [MySQL Dictionary],\n'
        '[MySQL Database Engine], [MySQL Table Engine], and [MySQL Table Function]\n'
        'with possibly different encryption schemes.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.8')
RQ_SRS008_AES_Functions_SuppressOutputOfSensitiveValues = Requirement(
    name='RQ.SRS008.AES.Functions.SuppressOutputOfSensitiveValues',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL suppress output of [AES] `string` and `key` parameters to the system log,\n'
        'error log, and `query_log` table to prevent leakage of sensitive values.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.9')
RQ_SRS008_AES_Functions_InvalidParameters = Requirement(
    name='RQ.SRS008.AES.Functions.InvalidParameters',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL return an error when parameters are invalid.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.10')
# Mismatch requirements (4.1.11-4.1.14): behavior when key/IV/AAD/mode differ
# between encryption and decryption.
RQ_SRS008_AES_Functions_Mismatched_Key = Requirement(
    name='RQ.SRS008.AES.Functions.Mismatched.Key',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL return garbage for mismatched keys.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.11')
RQ_SRS008_AES_Functions_Mismatched_IV = Requirement(
    name='RQ.SRS008.AES.Functions.Mismatched.IV',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL return garbage for mismatched initialization vector for the modes that use it.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.12')
RQ_SRS008_AES_Functions_Mismatched_AAD = Requirement(
    name='RQ.SRS008.AES.Functions.Mismatched.AAD',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL return garbage for mismatched additional authentication data for the modes that use it.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.13')
RQ_SRS008_AES_Functions_Mismatched_Mode = Requirement(
    name='RQ.SRS008.AES.Functions.Mismatched.Mode',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL return an error or garbage for mismatched mode.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.14')
# Performance and compression measurement requirements (4.1.15-4.1.19).
RQ_SRS008_AES_Functions_Check_Performance = Requirement(
    name='RQ.SRS008.AES.Functions.Check.Performance',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        'Performance of [AES] encryption functions SHALL be measured.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.15')
RQ_SRS008_AES_Function_Check_Performance_BestCase = Requirement(
    name='RQ.SRS008.AES.Function.Check.Performance.BestCase',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        'Performance of [AES] encryption functions SHALL be checked for the best case\n'
        'scenario where there is one key, one initialization vector, and one large stream of data.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.16')
RQ_SRS008_AES_Function_Check_Performance_WorstCase = Requirement(
    name='RQ.SRS008.AES.Function.Check.Performance.WorstCase',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        'Performance of [AES] encryption functions SHALL be checked for the worst case\n'
        'where there are `N` keys, `N` initialization vectors and `N` very small streams of data.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.17')
RQ_SRS008_AES_Functions_Check_Compression = Requirement(
    name='RQ.SRS008.AES.Functions.Check.Compression',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        'Effect of [AES] encryption on column compression SHALL be measured.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.18')
RQ_SRS008_AES_Functions_Check_Compression_LowCardinality = Requirement(
    name='RQ.SRS008.AES.Functions.Check.Compression.LowCardinality',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        'Effect of [AES] encryption on the compression of a column with [LowCardinality] data type\n'
        'SHALL be measured.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.1.19')
# --- SRS-008 section 4.2: requirements for the `encrypt` function and its parameters ---
# NOTE(review): generated file — comments here are lost on regeneration.
RQ_SRS008_AES_Encrypt_Function = Requirement(
    name='RQ.SRS008.AES.Encrypt.Function',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support `encrypt` function to encrypt data using [AES].\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.2.1')
RQ_SRS008_AES_Encrypt_Function_Syntax = Requirement(
    name='RQ.SRS008.AES.Encrypt.Function.Syntax',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support the following syntax for the `encrypt` function\n'
        '\n'
        '```sql\n'
        'encrypt(mode, plaintext, key, [iv, aad])\n'
        '```\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.2.2')
RQ_SRS008_AES_Encrypt_Function_NIST_TestVectors = Requirement(
    name='RQ.SRS008.AES.Encrypt.Function.NIST.TestVectors',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] `encrypt` function output SHALL produce output that matches [NIST test vectors].\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.2.3')
RQ_SRS008_AES_Encrypt_Function_Parameters_PlainText = Requirement(
    name='RQ.SRS008.AES.Encrypt.Function.Parameters.PlainText',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support `plaintext` accepting any data type as\n'
        'the first parameter to the `encrypt` function that SHALL specify the data to be encrypted.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.2.4')
RQ_SRS008_AES_Encrypt_Function_Parameters_Key = Requirement(
    name='RQ.SRS008.AES.Encrypt.Function.Parameters.Key',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support `key` with `String` or `FixedString` data types\n'
        'as the second parameter to the `encrypt` function that SHALL specify the encryption key.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.2.5')
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode = Requirement(
    name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support `mode` with `String` or `FixedString` data types as the third parameter\n'
        'to the `encrypt` function that SHALL specify encryption key length and block encryption mode.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.2.6')
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_ValuesFormat = Requirement(
    name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.ValuesFormat',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support values of the form `aes-[key length]-[mode]` for the `mode` parameter\n'
        'of the `encrypt` function where\n'
        'the `key_length` SHALL specifies the length of the key and SHALL accept\n'
        '`128`, `192`, or `256` as the values and the `mode` SHALL specify the block encryption\n'
        'mode and SHALL accept [ECB], [CBC], [CFB128], or [OFB] as well as\n'
        '[CTR] and [GCM] as the values. For example, `aes-256-ofb`.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.2.7')
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Value_Invalid = Requirement(
    name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Value.Invalid',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL return an error if the specified value for the `mode` parameter of the `encrypt`\n'
        'function is not valid with the exception where such a mode is supported by the underlying\n'
        '[OpenSSL] implementation.\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.2.8')
# BUGFIX(review): the generated list contained `aes-192-cbc` twice; the second
# occurrence describes a 256 bit key, so it must read `aes-256-cbc` (matching
# the 128/192/256 pattern of every other mode family below). Since this file
# is re-generated by 'tfs requirements generate', the same typo should also be
# corrected in the SRS source document.
RQ_SRS008_AES_Encrypt_Function_Parameters_Mode_Values = Requirement(
    name='RQ.SRS008.AES.Encrypt.Function.Parameters.Mode.Values',
    version='1.0',
    priority=None,
    group=None,
    type=None,
    uid=None,
    description=(
        '[ClickHouse] SHALL support the following [AES] block encryption modes as the value for the `mode` parameter\n'
        'of the `encrypt` function:\n'
        '\n'
        '* `aes-128-ecb` that SHALL use [ECB] block mode encryption with 128 bit key\n'
        '* `aes-192-ecb` that SHALL use [ECB] block mode encryption with 192 bit key\n'
        '* `aes-256-ecb` that SHALL use [ECB] block mode encryption with 256 bit key\n'
        '* `aes-128-cbc` that SHALL use [CBC] block mode encryption with 128 bit key\n'
        '* `aes-192-cbc` that SHALL use [CBC] block mode encryption with 192 bit key\n'
        '* `aes-256-cbc` that SHALL use [CBC] block mode encryption with 256 bit key\n'
        '* `aes-128-cfb128` that SHALL use [CFB128] block mode encryption with 128 bit key\n'
        '* `aes-192-cfb128` that SHALL use [CFB128] block mode encryption with 192 bit key\n'
        '* `aes-256-cfb128` that SHALL use [CFB128] block mode encryption with 256 bit key\n'
        '* `aes-128-ofb` that SHALL use [OFB] block mode encryption with 128 bit key\n'
        '* `aes-192-ofb` that SHALL use [OFB] block mode encryption with 192 bit key\n'
        '* `aes-256-ofb` that SHALL use [OFB] block mode encryption with 256 bit key\n'
        '* `aes-128-gcm` that SHALL use [GCM] block mode encryption with 128 bit key\n'
        '  and `AEAD` 16-byte tag is appended to the resulting ciphertext according to\n'
        '  the [RFC5116]\n'
        '* `aes-192-gcm` that SHALL use [GCM] block mode encryption with 192 bit key\n'
        '  and `AEAD` 16-byte tag is appended to the resulting ciphertext according to\n'
        '  the [RFC5116]\n'
        '* `aes-256-gcm` that SHALL use [GCM] block mode encryption with 256 bit key\n'
        '  and `AEAD` 16-byte tag is appended to the resulting ciphertext according to\n'
        '  the [RFC5116]\n'
        '* `aes-128-ctr` that SHALL use [CTR] block mode encryption with 128 bit key\n'
        '* `aes-192-ctr` that SHALL use [CTR] block mode encryption with 192 bit key\n'
        '* `aes-256-ctr` that SHALL use [CTR] block mode encryption with 256 bit key\n'
        '\n'
    ),
    link=None,
    level=3,
    num='4.2.9')
RQ_SRS008_AES_Encrypt_Function_Parameters_InitializationVector = Requirement(
name='RQ.SRS008.AES.Encrypt.Function.Parameters.InitializationVector',
version='1.0',
priority=None,
group=None,
type=None,
uid=None,
description=(
'[ClickHouse] SHALL support `iv` with `String` or `FixedString` data types as the optional fourth\n'
'parameter to the `encrypt` function that SHALL specify | |
# Repository: NaomiatLibrary/OpenNMT-kpg-release
import argparse
import datetime
import json
import os
from functools import partial
import tqdm
import numpy as np
import string
import sys
sys.path.append("/home/yingyi/Documents/OpenNMT-kpg")
print("python path",sys.path)
from onmt.utils.logging import init_logger
from onmt import opts
from onmt.newssum import docutils
from onmt.inputters.news_dataset import load_pretrained_tokenizer
import random
import re
from stanfordcorenlp import StanfordCoreNLP
import nltk
nltk.download('stopwords')  # NOTE(review): network download at import time — side effect on every run
# Tokenizer backends accepted by the --tokenizer CLI option.
TOKENIZER_NAMES = ['roberta-base', 'bert-base-uncased', 'word']
# Porter stemmer used for stemmed keyword/source matching throughout this script.
stemmer = nltk.stem.porter.PorterStemmer()
# CoreNLP client used for POS tagging; hard-coded local install path — TODO confirm on target machine.
nlp = StanfordCoreNLP(r'/home/yingyi/Documents/tool/stanford-corenlp-full-2016-10-31')
def init_opt(argv=None):
    """Build and parse the command-line options for kp20k preprocessing.

    Args:
        argv: optional list of argument strings. Defaults to None, in which
            case argparse reads sys.argv[1:] exactly as before — the optional
            parameter only makes the parser testable.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    # Input/output options
    parser.add_argument('--json_dir', '-json_dir', default='/home/yingyi/Documents/kp20k', help='Path to jsonl files.')
    parser.add_argument('--output_dir', '-output_dir', default='/home/yingyi/Documents/output/kp20k',
                        help='The path of the output json files, final path is like /export/share/rmeng/output/bert-base-cased/tokenized/'
                             'folder name will be dataset_tgt, like cnndm_summary, and insides are train.jsonl, valid.jsonl, test.jsonl.')
    parser.add_argument('--tokenizer', '-tokenizer', default='roberta-base', choices=TOKENIZER_NAMES, help='.')
    parser.add_argument('--partition', '-partition', default='kp20k_train', type=str,
                        choices=['kp20k_train', 'kp20k_valid', 'kp20k_test'],
                        help='Specify which partition of dataset to process: train/test/valid/all')
    parser.add_argument('--shard_filename', '-shard_filename', type=str, help='.')
    parser.add_argument('--verbose', '-verbose', action='store_true', help='.')
    # BUGFIX(review): the short alias was '-opt.special_vocab_path' (copy-paste of
    # the attribute access); renamed to '-special_vocab_path' to match the
    # '-name' convention of every other option above.
    # NOTE(review): store_true with default=None means this "path" option can only
    # ever be None or True — confirm whether it should instead take a str value.
    parser.add_argument('--special_vocab_path', '-special_vocab_path', default=None, action='store_true', help='.')
    opt = parser.parse_args(argv)
    return opt
def meng17_tokenize(text):
    '''
    The tokenizer used in Meng et al. ACL 2017.

    Replaces line breaks with spaces, pads spaces around the special
    punctuation class [_<>,\(\)\.\'%], then splits on any character outside
    [a-zA-Z0-9_<>,#&\+\*\(\)\.\'] and drops empty pieces.

    :param text: raw string
    :return: a list of tokens
    '''
    # Collapse line breakers/tabs into plain spaces.
    no_breaks = re.sub(r'[\r\n\t]', ' ', text)
    # Pad spaces to the left and right of special punctuations.
    padded = re.sub(r'[_<>,\(\)\.\'%]', ' \g<0> ', no_breaks)
    # Split by non-letters (# & + * are kept unpadded so they stay inside words).
    pieces = re.split(r'[^a-zA-Z0-9_<>,#&\+\*\(\)\.\']', padded)
    return [token for token in pieces if token]
def start_end_re(match_position_idxs_key, match_pos_ends_key, keywords_exist, posi_dict, poss, cate):
    """Convert word-level match spans to subword-level spans via `posi_dict`.

    `posi_dict[w]` is the list of subword indices for word `w`; each word-level
    span [start, end) becomes [first subword of start, last subword of end-1].
    When `cate == 'noun'` the aligned POS tags from `poss` are included.

    Returns a list of dicts with keys 'position', 'phrase' (and 'pos' for nouns).
    """
    def to_subword_spans(starts, ends):
        # Map each half-open word span onto inclusive subword boundaries.
        return [[posi_dict[s][0], posi_dict[e - 1][-1]] for s, e in zip(starts, ends)]

    entries = []
    if cate == 'noun':
        for starts, ends, words, pos in zip(match_position_idxs_key, match_pos_ends_key, keywords_exist, poss):
            entries.append({'position': to_subword_spans(starts, ends), 'phrase': words, 'pos': pos})
    else:
        for starts, ends, words in zip(match_position_idxs_key, match_pos_ends_key, keywords_exist):
            entries.append({'position': to_subword_spans(starts, ends), 'phrase': words})
    return entries
def start_end(match_position_idxs_key, match_pos_ends_key, keywords_exist, poss, cate):
    """Pack word-level match spans into per-phrase dicts.

    Unlike start_end_re, spans are kept at the word level (no subword mapping).
    When `cate == 'noun'` the aligned POS tags from `poss` are included.

    Returns a list of dicts with keys 'position', 'phrase' (and 'pos' for nouns).
    """
    entries = []
    if cate == 'noun':
        for starts, ends, words, pos in zip(match_position_idxs_key, match_pos_ends_key, keywords_exist, poss):
            spans = [[begin, stop] for begin, stop in zip(starts, ends)]
            entries.append({'position': spans, 'phrase': words, 'pos': pos})
    else:
        for starts, ends, words in zip(match_position_idxs_key, match_pos_ends_key, keywords_exist):
            spans = [[begin, stop] for begin, stop in zip(starts, ends)]
            entries.append({'position': spans, 'phrase': words})
    return entries
def prepend_space_to_words(words):
    """Prepend a space to every word except the first and single-character
    punctuation/whitespace tokens (so subword tokenizers see word boundaries)."""
    specials = string.punctuation + string.whitespace
    spaced = []
    for word in words:
        keep_bare = not spaced or (len(word) == 1 and word in specials)
        spaced.append(word if keep_bare else ' ' + word)
    return spaced
def position_dict(position, subwords, length):
    """Return the consecutive subword indices [length, length + len(subwords)).

    NOTE(review): `position` (the word index) is accepted but unused; it is
    kept only for call-site compatibility.
    """
    return list(range(length, length + len(subwords)))
def words_to_subwords(tokenizer, words, pos = None):
    """Tokenize words into subwords and align ids/POS tags with them.

    Args:
        tokenizer: tokenizer object exposing `tokenize` and
            `convert_tokens_to_ids` (HuggingFace-style).
        words: list of word strings; a space is prepended to non-head,
            non-punctuation words (see prepend_space_to_words) before
            tokenization.
        pos: optional list of POS tags aligned with `words`; when given, each
            word's tag is repeated once per subword.

    Returns:
        (subwords, codes, positions) when `pos` is None, otherwise
        (subwords, codes, subword_pos, positions), where `positions[w]` lists
        the subword indices of word `w`.

    NOTE(review): relies on the module-level `opt` (set in __main__) to decide
    whether to pass add_prefix_space — confirm `opt` is initialized before any
    call to this function.
    """
    all_subwords = []
    all_codes = []
    all_pos = []
    all_posi = []
    spaced_words = prepend_space_to_words(words)
    length = 0  # running count of subwords emitted so far
    if pos is None:  # BUGFIX(review): was `pos == None`
        for i, word in enumerate(spaced_words):
            if opt.tokenizer == "roberta-base":
                subwords = tokenizer.tokenize(word, add_prefix_space=True)
            else:
                subwords = tokenizer.tokenize(word)
            codes = tokenizer.convert_tokens_to_ids(subwords)
            posi_dict = position_dict(i, subwords, length)
            length = length + len(subwords)
            all_subwords.extend(subwords)
            all_codes.extend(codes)
            all_posi.append(posi_dict)
        return all_subwords, all_codes, all_posi
    else:
        # Same walk, but additionally repeat each word-level POS tag once per
        # subword so tags stay aligned with the subword sequence.
        for i, (word, po) in enumerate(zip(spaced_words, pos)):
            if opt.tokenizer == "roberta-base":
                subwords = tokenizer.tokenize(word, add_prefix_space=True)
            else:
                subwords = tokenizer.tokenize(word)
            codes = tokenizer.convert_tokens_to_ids(subwords)
            posi_dict = position_dict(i, subwords, length)
            length = length + len(subwords)
            all_subwords.extend(subwords)
            all_codes.extend(codes)
            all_pos.extend([po] * len(subwords))
            all_posi.append(posi_dict)
        return all_subwords, all_codes, all_pos, all_posi
def if_present_phrase(src_str_tokens, phrase_str_tokens):
    """Find every occurrence of a phrase inside a token sequence.

    :param src_str_tokens: a list of strings (words) of source text
    :param phrase_str_tokens: a list of strings (words) of a phrase
    :return: (flag, start_idxs, end_idxs, phrases) where end indices are
        exclusive and `phrases` repeats the phrase once per match.
        NOTE: `flag` reports whether the *last* scanned window matched, not
        whether any match was found — check `start_idxs` for presence (this
        mirrors the original behavior).
    """
    span = len(phrase_str_tokens)
    matched = False
    start_idxs, end_idxs, phrases = [], [], []
    for begin in range(len(src_str_tokens) - span + 1):
        window = src_str_tokens[begin:begin + span]
        matched = all(a == b for a, b in zip(window, phrase_str_tokens))
        if matched:
            start_idxs.append(begin)
            end_idxs.append(begin + span)
            phrases.append(phrase_str_tokens)
    return matched, start_idxs, end_idxs, phrases
def macth_word(sentence_stemmer, word_stemmer, word_origin, poss):
    """Split candidate phrases into present/absent w.r.t. a stemmed sentence.

    For each stemmed phrase, find all occurrences in `sentence_stemmer`; phrases
    with at least one occurrence are reported with their spans, original surface
    form, and POS tags, others are collected as absent.

    Returns:
        (flags, start_lists, end_lists, present_phrases, absent_phrases, present_pos)
    """
    match_flags, match_pos_idxs, match_pos_ends = [], [], []
    words_tokenize, absent_toeknize, exist_pos = [], [], []
    for stemmed, origin, pos in zip(word_stemmer, word_origin, poss):
        flag, starts, ends, _ = if_present_phrase(sentence_stemmer, stemmed)
        if starts:
            match_flags.append(flag)
            match_pos_idxs.append(starts)
            match_pos_ends.append(ends)
            words_tokenize.append(origin)
            exist_pos.append(pos)
        else:
            absent_toeknize.append(origin)
    return match_flags, match_pos_idxs, match_pos_ends, words_tokenize, absent_toeknize, exist_pos
def keyword_stemmer(keywords):
    """Lowercase, strip, and Porter-stem every word of every keyphrase."""
    return [
        [stemmer.stem(token.lower().strip()) for token in phrase]
        for phrase in keywords
    ]
def nounchunk_stemmer(nouns):
    """Lowercase, strip, and Porter-stem every word of every noun chunk."""
    return [
        [stemmer.stem(token.lower().strip()) for token in chunk]
        for chunk in nouns
    ]
def pos_judge(text, model="stanfordnlp", lowercase=False):
    """POS-tag `text` with the module-level StanfordCoreNLP client.

    Returns whatever `nlp.pos_tag` yields (per the stanfordcorenlp API,
    presumably a list of (word, tag) pairs — confirm against the installed
    version).

    NOTE(review): `model` and `lowercase` are accepted but never used here.
    """
    return nlp.pos_tag(text)
def listToStr(tokens):
    """Join tokens into a single space-separated string.

    Each token is passed through str(); the result carries no leading or
    trailing whitespace. Equivalent to the old append-and-strip loop (which
    built the string with quadratic `+=` concatenation), since that loop
    produced `' '.join(strs) + ' '` before the final strip().
    """
    return ' '.join(str(token) for token in tokens).strip()
def tokenize_doc(doc):
    """Tokenize one kp20k document dict with the Meng et al. 2017 tokenizer.

    Returns (title_tokens, abstract_tokens, all_tokens, keywords_tokens) where
    `all_tokens` is title + "." + abstract, and keywords that tokenize to an
    empty list are dropped from `keywords_tokens`.
    """
    title_tokens = meng17_tokenize(doc['title'])
    abstract_tokens = meng17_tokenize(doc['abstract'])
    # Join title and abstract with a sentence-final period in between.
    all_tokens = title_tokens + ["."] + abstract_tokens
    keywords_tokens = []
    for raw_keyword in doc['keywords']:
        tokens = meng17_tokenize(raw_keyword.strip())
        if tokens:
            keywords_tokens.append(tokens)
    return title_tokens, abstract_tokens, all_tokens, keywords_tokens
def label(sentence, starts, ends):
    """Build BIO-style integer tags for `sentence`: 1 = phrase begin,
    2 = phrase inside, 0 = outside; `starts`/`ends` are lists of per-phrase
    lists of word spans with exclusive ends."""
    tags = np.zeros(len(sentence))
    for start_group, end_group in zip(starts, ends):
        for begin, stop in zip(start_group, end_group):
            tags[begin] = 1
            tags[begin + 1:stop] = 2
    return [int(tag) for tag in tags]
def recognise_nounchunks(tagged):
    """Extract noun chunks from a list of (word, POS-tag) pairs.

    Regex-based chunker: repeatedly searches the space-joined tag sequence for
    the first noun-phrase pattern match, records the matching words/tags, then
    masks the matched positions out and rescans until no match remains.

    Returns:
        lookup: list of 'word/TAG word/TAG ...' strings, in match order
        noun_phrases: list of word lists (deduplicated by surface form)
        pos_phrases: list of tag lists aligned with noun_phrases

    NOTE(review): chunks with the same surface form but different tags
    overwrite each other in `words_poss`, so noun_phrases/pos_phrases can drop
    repeats that `lookup` keeps.
    """
    #from montylingua-2.1/ MontyREChunker.py
    lookup=[]
    words_poss = {}
    info_dict = tagged
    # Parallel arrays of words and tags, mutated in place as chunks are masked.
    file1 = list(map(lambda filename_dict: filename_dict[0], info_dict))
    _montylingua_arr = list(map(lambda filename_dict: filename_dict[1], info_dict))
    # filename_p = "((PDT )?(DT |PRP[$] |WDT |WP[$] )(VBG |VBD |VBN |JJ |JJR |JJS |, |CC |NN |NNS |NNP |NNPS |CD )*(NN |NNS |NNP |NNPS |CD )+)"
    # groupnames1 = "((PDT )?(JJ |JJR |JJS |, |CC |NN |NNS |NNP |NNPS |CD )*(NN |NNS |NNP |NNPS |CD )+)"
    # case1 = "(" + filename_p + "|" + groupnames1 + "|EX |PRP |WP |WDT )"
    # Noun-phrase tag pattern: optional predeterminer, modifiers, then nouns;
    # optionally two such groups joined by a possessive 'POS' tag.
    filename_p = "((PDT )?(VBG |VBD |VBN |JJ |JJR |JJS |CD )*(NN |NNS |NNP |NNPS |CD )+)"
    case1 = "(" + filename_p+ ")"
    case1 = "(" + case1 + 'POS )?' + case1
    case1 = ' ' + case1
    case1 = re.compile(case1)
    awk1 = 1
    # Loop until a full scan finds no further match.
    while awk1:
        awk1 = 0
        gawks = ' ' + ' '.join(_montylingua_arr) + ' '
        groupnames_str = case1.search(gawks)
        if groupnames_str:
            awk1 = 1
            # Convert character offsets of the match back to token indices.
            info_str = len(gawks[:groupnames_str.start()].split())
            cleaned_arr = len(gawks[groupnames_str.end():].split())
            tagged_str = (info_str, len(_montylingua_arr) - cleaned_arr)
            mores = file1[tagged_str[0]:tagged_str[1]]
            popd_arr = _montylingua_arr[tagged_str[0]:tagged_str[1]]
            cron_cleaned = ' '.join(
                list(map(lambda filename_dict: mores[filename_dict] + '/' + popd_arr[filename_dict], range(len(mores)))))
            only_word = ' '.join(
                list(map(lambda filename_dict: mores[filename_dict], range(len(mores)))))
            only_pos = ' '.join(
                list(map(lambda filename_dict: popd_arr[filename_dict], range(len(popd_arr)))))
            # Random 'NC_<n>' placeholder masks the consumed span: it matches no
            # tag in the pattern above, so the span is never re-chunked.
            stripped_str = 'NC_' + str(random.randint(0, 1000000000))
            for stripped_dict in range(len(file1)):
                if stripped_dict in range(tagged_str[0], tagged_str[1]):
                    file1[stripped_dict] = 'bar'
                    _montylingua_arr[stripped_dict] = stripped_str
            lookup.append(cron_cleaned)
            words_poss[only_word] = only_pos
    noun_phrases = [only_word.split() for only_word in words_poss.keys()]
    pos_phrases = [only_pos.split() for only_pos in words_poss.values()]
    return lookup, noun_phrases, pos_phrases
if __name__ == '__main__':
opt = init_opt()
current_time = datetime.datetime.now().strftime('%Y-%m-%d') # '%Y-%m-%d_%H:%M:%S'
logger = init_logger(opt.output_dir + '/tokenize.%s.log' % (current_time))
# determine whether to lowercase the text
if opt.tokenizer == 'word' or '-base' in opt.tokenizer:
lowercase = False
else:
lowercase = False
if opt.tokenizer == 'word':
# initialize tokenizer (for testset, only word tokenization should be applied)
#tokenizer_fn = partial(docutils.word_tokenize, model="spacy", lowercase=lowercase)
tokenizer_fn = nlp
else:
# Load pre-trained model tokenizer (vocabulary)
pretrained_tokenizer = load_pretrained_tokenizer(opt.tokenizer, None,
special_vocab_path=opt.special_vocab_path)
tokenizer_fn = pretrained_tokenizer.tokenize
if opt.shard_filename:
input_jsonl_path = os.path.join(opt.json_dir, opt.shard_filename)
logger.info('Tokenizing dataset. Loaded data from jsonl: %s ' % (input_jsonl_path))
output_dir = os.path.join(opt.output_dir, opt.tokenizer, 'sharded_1000')
output_jsonl_path = os.path.join(output_dir, opt.shard_filename)
logger.info('Exporting tokenized data to %s' % output_jsonl_path)
else:
input_jsonl_path = os.path.join(opt.json_dir, '%s.json' % (opt.partition))
logger.info(
'Tokenizing dataset [%s]. Loaded data from jsonl: %s ' % (opt.partition, input_jsonl_path))
output_dir = os.path.join(opt.output_dir, opt.tokenizer, 'tokenized')
output_jsonl_path = os.path.join(output_dir, opt.partition + '7.json')
logger.info('Exporting tokenized data to %s' % output_jsonl_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
with open(output_jsonl_path, 'w') as output_jsonl_writer:
counter = 0
src_lengths = []
tgt_lengths = []
keyphrase_num = 0
keyphrase_num_roberta = 0
noun_num = 0
noun_roberta_num = 0
fw_error = open("/home/yingyi/Documents/output/kp20k/error.txt","w")
for line in tqdm.tqdm(open(input_jsonl_path, 'r', encoding='utf-8', errors='ignore'),
desc="Processing %s" % (
opt.partition if opt.partition else opt.shard_filename)):
counter += 1
if counter>460000: #40000, 80000, 140000, 270000, 320000, 460000
doc = json.loads(line)
word_doc = {}
roberta_doc = {}
doc['word'] = word_doc
doc[opt.tokenizer] = roberta_doc
title_tokens, abstract_tokens, all_tokens, keywords_tokens = tokenize_doc(doc)
#print(all_tokens)
pos_sentence = pos_judge(listToStr(all_tokens))
nounchunks, nouns, poss = recognise_nounchunks(pos_sentence)
sentence_stemmer = [stemmer.stem(word.lower().strip()) for | |
rolled back on :meth:`.Transaction.close`::
with session.begin_transaction() as tx:
pass
"""
#: When set, the transaction will be committed on close, otherwise it
#: will be rolled back. This attribute can be set in user code
#: multiple times before a transaction completes, with only the final
#: value taking effect.
success = None
_closed = False
def __init__(self, session, on_close):
    # session: the Session that owns this transaction.
    self.session = session
    # on_close: zero-argument callback fired exactly once when the
    # transaction is closed (see close()).
    self.on_close = on_close
def __enter__(self):
    # Entering ``with session.begin_transaction() as tx:`` yields the
    # transaction object itself.
    return self
def __exit__(self, exc_type, exc_value, traceback):
    """Close the transaction on block exit.

    If user code has not set :attr:`.success` explicitly, the transaction
    succeeds exactly when no exception is propagating out of the ``with``
    block.
    """
    if not self._closed:
        if self.success is None:
            # exc_type is either None or an exception class (always
            # truthy), so this matches the original ``not bool(exc_type)``.
            self.success = exc_type is None
        self.close()
def run(self, statement, parameters=None, **kwparameters):
    """ Run a Cypher statement within the context of this transaction.
    The statement is sent to the server lazily, when its result is
    consumed. To force the statement to be sent to the server, use
    the :meth:`.Transaction.sync` method.
    Cypher is typically expressed as a statement template plus a
    set of named parameters. In Python, parameters may be expressed
    through a dictionary of parameters, through individual parameter
    arguments, or as a mixture of both. For example, the `run`
    statements below are all equivalent::
        >>> statement = "CREATE (a:Person {name:{name}, age:{age}})"
        >>> tx.run(statement, {"name": "Alice", "age": 33})
        >>> tx.run(statement, {"name": "Alice"}, age=33)
        >>> tx.run(statement, name="Alice", age=33)
    Parameter values can be of any type supported by the Neo4j type
    system. In Python, this includes :class:`bool`, :class:`int`,
    :class:`str`, :class:`list` and :class:`dict`. Note however that
    :class:`list` properties must be homogenous.
    :param statement: template Cypher statement
    :param parameters: dictionary of parameters
    :param kwparameters: additional keyword parameters
    :returns: :class:`.StatementResult` object
    :raise TransactionError: if the transaction is closed
    """
    self._assert_open()
    # Delegate to the owning session, which tracks the open transaction.
    return self.session.run(statement, parameters, **kwparameters)
def sync(self):
    """ Force any queued statements to be sent to the server and
    all related results to be fetched and buffered.
    :raise TransactionError: if the transaction is closed
    """
    self._assert_open()
    # The session performs the actual network round-trip.
    self.session.sync()
def commit(self):
    """ Mark this transaction as successful and close in order to
    trigger a COMMIT. This is functionally equivalent to::

        tx.success = True
        tx.close()

    :raise TransactionError: if already closed
    """
    self.success = True
    self.close()
def rollback(self):
    """ Mark this transaction as unsuccessful and close in order to
    trigger a ROLLBACK. This is functionally equivalent to::

        tx.success = False
        tx.close()

    :raise TransactionError: if already closed
    """
    self.success = False
    self.close()
def close(self):
    """ Close this transaction, triggering either a COMMIT or a ROLLBACK,
    depending on the value of :attr:`.success`.
    :raise TransactionError: if already closed
    """
    self._assert_open()
    try:
        # Flush any queued statements first; a Cypher failure here forces
        # the transaction onto the rollback path before re-raising.
        self.sync()
    except CypherError:
        self.success = False
        raise
    finally:
        # Always resolve the server-side transaction, mark this object as
        # closed and fire the on_close callback -- even if sync() raised.
        if self.session.has_transaction():
            if self.success:
                self.session.commit_transaction()
            else:
                self.session.rollback_transaction()
        self._closed = True
        self.on_close()
def closed(self):
    """ Indicator to show whether the transaction has been closed.
    :returns: :const:`True` if closed, :const:`False` otherwise.
    """
    # Read-only view of the internal _closed flag set by close().
    return self._closed
def _assert_open(self):
    # Guard helper: public operations call this first so that use after
    # close() fails fast with a TransactionError.
    if self._closed:
        raise TransactionError("Transaction closed")
class Statement:
    """A Cypher statement with optional transaction metadata and timeout.

    :param text: the Cypher statement text
    :param metadata: optional mapping of transaction metadata; coerced to
        a :class:`dict`
    :param timeout: optional transaction timeout in seconds; coerced to
        :class:`float`
    :raise TypeError: if *metadata* cannot be coerced to a dict, or
        *timeout* is not a number
    """

    def __init__(self, text, metadata=None, timeout=None):
        self.text = text
        # BUG FIX: the original wrapped *plain attribute assignments* in
        # try/except TypeError -- a bare assignment can never raise, so the
        # advertised errors were unreachable and no coercion happened.
        # Actually coerce the values so the documented contract holds.
        try:
            self.metadata = dict(metadata) if metadata is not None else None
        except (TypeError, ValueError):
            raise TypeError("Metadata must be coercible to a dict")
        try:
            self.timeout = float(timeout) if timeout is not None else None
        except (TypeError, ValueError):
            raise TypeError("Timeout must be specified as a number of seconds")

    def __str__(self):
        return str(self.text)
def fix_parameters(parameters):
    """Dehydrate a parameter mapping into wire-ready values.

    Falsy input (``None``, ``{}``) yields an empty dict. Unsupported value
    types are reported as a :class:`TypeError` naming the offending type.
    """
    if not parameters:
        return {}
    try:
        dehydrated, = DataDehydrator().dehydrate([parameters])
    except TypeError as error:
        unsupported = error.args[0]
        raise TypeError("Parameters of type {} are not supported".format(
            type(unsupported).__name__))
    return dehydrated
class StatementResult:
    """ A handler for the result of Cypher statement execution. Instances
    of this class are typically constructed and returned by
    :meth:`.Session.run` and :meth:`.Transaction.run`.
    """

    def __init__(self, session, hydrant, metadata):
        # Owning session; a result is "attached" while this session is open.
        self._session = session
        # Hydrator that converts raw wire values into driver types.
        self._hydrant = hydrant
        # Metadata dict, filled in incrementally as server messages arrive.
        self._metadata = metadata
        # Buffer of records fetched from the network but not yet consumed.
        self._records = deque()
        # Cached summary object, built lazily by summary().
        self._summary = None

    def __iter__(self):
        return self.records()

    @property
    def session(self):
        """ The :class:`.Session` to which this result is attached, if any.
        """
        return self._session

    def attached(self):
        """ Indicator for whether or not this result is still attached to
        an open :class:`.Session`.
        """
        return self._session and not self._session.closed()

    def detach(self, sync=True):
        """ Detach this result from its parent session by fetching the
        remainder of this result from the network into the buffer.
        :returns: number of records fetched
        """
        if self.attached():
            return self._session.detach(self, sync=sync)
        else:
            return 0

    def keys(self):
        """ The keys for the records in this result.
        :returns: tuple of key names
        """
        try:
            return self._metadata["fields"]
        except KeyError:
            # Field names arrive with the server's response: force the
            # exchange and poll until they appear or the result detaches.
            if self.attached():
                self._session.send()
            while self.attached() and "fields" not in self._metadata:
                self._session.fetch()
            return self._metadata.get("fields")

    def records(self):
        """ Generator for records obtained from this result.
        :yields: iterable of :class:`.Record` objects
        """
        records = self._records
        next_record = records.popleft
        # Drain anything already buffered first.
        while records:
            yield next_record()
        attached = self.attached
        if attached():
            self._session.send()
        # Then pull from the network, yielding records as they arrive.
        while attached():
            self._session.fetch()
            while records:
                yield next_record()

    def summary(self):
        """ Obtain the summary of this result, buffering any remaining records.
        :returns: The :class:`.ResultSummary` for this result
        """
        self.detach()
        if self._summary is None:
            self._summary = BoltStatementResultSummary(**self._metadata)
        return self._summary

    def consume(self):
        """ Consume the remainder of this result and return the summary.
        :returns: The :class:`.ResultSummary` for this result
        """
        if self.attached():
            for _ in self:
                pass
        return self.summary()

    def single(self):
        """ Obtain the next and only remaining record from this result.
        A warning is generated if more than one record is available but
        the first of these is still returned.
        :returns: the next :class:`.Record` or :const:`None` if none remain
        :warns: if more than one record is available
        """
        records = list(self)
        size = len(records)
        if size == 0:
            return None
        if size != 1:
            warn("Expected a result with a single record, but this result contains %d" % size)
        return records[0]

    def peek(self):
        """ Obtain the next record from this result without consuming it.
        This leaves the record in the buffer for further processing.
        :returns: the next :class:`.Record` or :const:`None` if none remain
        """
        records = self._records
        if records:
            return records[0]
        if not self.attached():
            return None
        # Force the exchange, then poll until a record lands in the buffer
        # or the result detaches.
        if self.attached():
            self._session.send()
        while self.attached() and not records:
            self._session.fetch()
        if records:
            return records[0]
        return None

    def graph(self):
        """ Return a Graph instance containing all the graph objects
        in the result. After calling this method, the result becomes
        detached, buffering all remaining records.
        :returns: result graph
        """
        self.detach()
        return self._hydrant.graph
class BoltStatementResult(StatementResult):
    """ A handler for the result of Cypher statement execution.

    Extends :class:`.StatementResult` with convenience projections of the
    remaining records into plain values, tuples and dictionaries.
    """

    # NOTE: the redundant __init__ that merely forwarded its arguments to
    # StatementResult.__init__ has been removed; construction is inherited.

    def value(self, item=0, default=None):
        """ Return the remainder of the result as a list of values.
        :param item: field to return for each remaining record
        :param default: default value, used if the index of key is unavailable
        :returns: list of individual values
        """
        return [record.value(item, default) for record in self.records()]

    def values(self, *items):
        """ Return the remainder of the result as a list of tuples.
        :param items: fields to return for each remaining record
        :returns: list of value tuples
        """
        return [record.values(*items) for record in self.records()]

    def data(self, *items):
        """ Return the remainder of the result as a list of dictionaries.
        :param items: fields to return for each remaining record
        :returns: list of dictionaries
        """
        return [record.data(*items) for record in self.records()]
class BoltStatementResultSummary:
""" A summary of execution returned with a :class:`.StatementResult` object.
"""
#: The version of Bolt protocol over which this result was obtained.
protocol_version = None
#: The server on which this result was generated.
server = None
#: The statement that was executed to produce this result.
statement = None
#: Dictionary of parameters passed with the statement.
parameters = None
#: The type of statement (``'r'`` = read-only, ``'rw'`` = read/write).
statement_type = None
#: A set of statistical information held in a :class:`.Counters` instance.
counters = None
#: A :class:`.Plan` instance
plan = None
#: A :class:`.ProfiledPlan` instance
profile = None
#: The time it took for the server to have the result available.
result_available_after = None
#: The time it took for the server to consume the result.
result_consumed_after = None
#: Notifications provide extra information for a user executing a statement.
#: They can be warnings about problematic queries or other valuable information that can be
#: presented in a client.
#: Unlike failures or errors, notifications do not affect the execution of a statement.
notifications = None
def __init__(self, **metadata):
self.metadata = metadata
self.protocol_version = metadata.get("protocol_version")
self.server | |
<gh_stars>0
"""
CANNR TM analytics container building tool build functions.
Module that generates Web service code from source files.
Copyright 2020 <NAME> <EMAIL>
All rights reserved
Maintainer <NAME> <EMAIL>
"""
import cannrcore as cc
import os
import json
from pathlib import Path
from datetime import datetime
import shutil
# Returns a file in the lib directory as text
def getLibFile(filename):
    """Return the contents of *filename* from this module's directory.

    :param filename: name of the file to read, relative to this module
    :returns: the file's text content

    Fixes: the original located the directory by slicing ``__file__`` at the
    last path separator; when ``__file__`` contained no separator it fell
    back to the filesystem root instead of the module's own directory.
    ``os.path.dirname`` of the absolute path handles all cases.
    """
    libPath = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(libPath, filename), 'r') as libFile:
        return libFile.read()
# Returns the Dockerfile template
def getDockerfile():
    """Return the text of the Dockerfile template shipped with this module."""
    return getLibFile('Dockerfile')
# Generate a line of code given the indent and list of terms
def buildCodeLine(indent, content):
    """Return one generated code line: *indent* tabs, the concatenated
    *content* terms, and a trailing newline.

    :param indent: number of leading tab characters
    :param content: iterable of string terms (a bare string also works --
        its characters re-join to the same string, matching the original
        loop's behavior)
    :returns: the assembled line ending in ``\\n``
    """
    return indent * '\t' + ''.join(content) + '\n'
# Builds the app.route line of the Flask handler.
def buildAppRoute(folderName, moduleName, serviceName, method, resourceNames):
    """Build the ``@app.route`` decorator line for a generated Flask handler.

    :param folderName: folder segment of the URL
    :param moduleName: module segment of the URL
    :param serviceName: service segment of the URL
    :param method: HTTP method for the route
    :param resourceNames: names rendered as ``/<name>`` path variables
    :returns: the complete decorator line as a string
    """
    path = '/services/{}/{}/{}'.format(folderName, moduleName, serviceName)
    path += ''.join('/<{}>'.format(name) for name in resourceNames)
    return '@app.route("{}", methods=["{}"])'.format(path, method)
# Generates the Python file for a Python folder.
def buildPyFolder(folderName, project):
    """Generate the source text of the Flask service script for one folder.

    The generated module imports every module in the folder, creates a Flask
    app, and emits one route handler per declared service, plus shutdown,
    refresh and credential-update endpoints.

    :param folderName: name of the folder within *project*
    :param project: project definition dict (see cannrcore)
    :returns: the generated Python module as a string, or None if the
        folder or its module list cannot be resolved
    """
    # TODO: ADD ERROR HANDLING. LOG?
    # Get the copyright/license notice for the project.
    projectNotice = project.get("notice", None)
    # Get the folder from the folder name.
    folder = cc.getFolder(folderName, project)
    if not folder:
        return None
    # Get all the module names.
    moduleNames = cc.getModuleNames(folder)
    if not moduleNames:
        return None
    # Start with empty module.
    moduleText = ''
    # Add the file header.
    moduleText += buildCodeLine(0, ['"""'])
    moduleText += buildCodeLine(0, ['CANNR TM analytics container building tool Python service script.'])
    moduleText += buildCodeLine(0, ['Module that calls other modules to provide Web services.'])
    moduleText += buildCodeLine(0, ['Copyright 2020 <NAME> <EMAIL>'])
    moduleText += buildCodeLine(0, ['All rights reserved'])
    moduleText += buildCodeLine(0, ['Maintainer <NAME> <EMAIL>'])
    moduleText += buildCodeLine(0, ['"""'])
    moduleText += buildCodeLine(0, [])
    # TODO: NEED TO HANDLE CASE THAT THERE ARE LINE BREAKS IN THE NOTICE.
    moduleText += buildCodeLine(0, ['"""'])
    #if projectNotice:
    #    moduleText += buildCodeLine(0, [projectNotice])
    moduleText += buildCodeLine(0, ['Generated ', datetime.now().isoformat(sep=' ', timespec='seconds')])
    moduleText += buildCodeLine(0, ['"""'])
    # Add basic imports that are always included.
    moduleText += buildCodeLine(0, ['import json'])
    moduleText += buildCodeLine(0, ['import os'])
    moduleText += buildCodeLine(0, ['import sys'])
    moduleText += buildCodeLine(0, ['import logging'])
    moduleText += buildCodeLine(0, ['import uuid'])
    moduleText += buildCodeLine(0, ['import pandas'])
    moduleText += buildCodeLine(0, ['from flask import Flask, render_template, request, Response'])
    # Import utilities.
    moduleText += buildCodeLine(0, ['import cannrcore as cc'])
    moduleText += buildCodeLine(0, ['import cannrio as ci'])
    # Change to the folder home.
    moduleText += '\n'
    # Add paths to search for dependent modules.
    # Loop through paths, add.
    folderPath = '/folders/' + folderName
    #relativePath = cc.getRelativePath(folder['sourcePath'].replace(os.path.sep, '/'))
    paths = folder.get('paths', None)
    if paths:
        for path in paths:
            moduleText += buildCodeLine(0, ['sys.path.append("', folderPath, '/', folderName, '/', path, '")'])
    # Change to the folder home.
    moduleText += '\n'
    #moduleText += buildCodeLine(0, ['os.chdir("', cc.getHome(folderName, folder), '")'])
    moduleText += buildCodeLine(0, ['os.chdir("', folderPath + '/' + folderName, '")'])
    # Build imports of modules.
    # Add the imports.
    # Loop through modules, add import for each one.
    moduleShortNames = {}
    moduleNum = 1
    for moduleName in moduleNames:
        module = cc.getModule(moduleName, folder)
        fileName = module.get('sourceFile', None)
        moduleFileName = folderPath + '/' + folderName + '/' + fileName
        # Each source module is bound to a short alias m_<n> in the
        # generated script.
        moduleShortName = 'm_' + str(moduleNum)
        moduleText += buildCodeLine(0, [moduleShortName, ' = ', 'cc.importPackage("', moduleShortName, '", "', moduleFileName, '")'])
        moduleShortNames[moduleName] = moduleShortName
        moduleNum += 1
    # TODO: If no source file, error.
    # TODO: CHECK FOR LEGAL MODULE NAME.
    # Create the Flask app object.
    moduleText += '\n'
    moduleText += buildCodeLine(0, ['app = Flask(__name__)'])
    moduleText += buildCodeLine(0, ['app.url_map.strict_slashes = False'])
    moduleText += buildCodeLine(0, ['cnr__workerID = str(uuid.uuid4())'])
    moduleText += buildCodeLine(0, ['cnr__credentials = None'])
    moduleText += buildCodeLine(0, ['cnr__lastUpdateID = None'])
    moduleText += '\n'
    # Dispatcher to shut down the worker.
    moduleText += buildCodeLine(0, ['# Shut down the worker'])
    moduleText += buildCodeLine(0, ['@app.route("/shutdown/', folderName, '", methods=["POST"])'])
    moduleText += buildCodeLine(0, ['def shutdown():'])
    moduleText += buildCodeLine(1, ['shutdown.shutdown()'])
    moduleText += buildCodeLine(1, ['return "Shutting down..."'])
    # Build the wrappers.
    functionNumber = 1
    moduleNumber = 1
    for moduleName in moduleNames:
        module = cc.getModule(moduleName, folder)
        serviceNames = cc.getServiceNames(module)
        moduleText += '\n'
        for serviceName in serviceNames:
            service = cc.getService(serviceName, module)
            capacity = service.get('capacity', 0)
            method = service.get('method', 'POST')
            resourceNames = service.get('resourceNames', [])
            # TODO: CHECK TO MAKE SURE functionName EXISTS!
            functionName = service.get('functionName', 'ERROR')
            moduleText += buildCodeLine(0, ['# Service ', serviceName, ' in module ', moduleName])
            #moduleText += buildCodeLine(0, ['@app.route("/services/', folderName, '/', moduleName, '/', serviceName, '", methods=["', method , '"])'])
            moduleText += buildAppRoute(folderName, moduleName, serviceName, method, resourceNames) + '\n'
            # TODO: IF resourceNames, ADD RESOURCE NAMES AS FUNCTION ARGUMENTS
            resourceArgList = ''
            resourceString = 'resources = {'
            for resourceName in resourceNames:
                resourceString += '"' + resourceName + '": ' + resourceName + ', '
                resourceArgList += resourceName + ', '
            resourceString += '}'
            moduleText += buildCodeLine(0, ['def s_', str(functionNumber), '(', resourceArgList, '):'])
            moduleText += buildCodeLine(1, ['try:'])
            functionNumber += 1
            # TODO: ADD METRICS.
            # TODO: ADD LOGGING.
            if resourceNames:
                # NOTE(review): resourceString is passed as a bare string;
                # buildCodeLine iterates it character-by-character, which
                # reproduces the string unchanged.
                moduleText += buildCodeLine(2, resourceString)
            # For POST, parse the body.
            includeBody = service.get('includeBody', True)
            if method == 'POST' and includeBody:
                moduleText += buildCodeLine(2, ['inputObject = ci.toInputType(request, inputParseType="', service.get('inputParseType', 'none'), '")'])
            # Add capacity check if appropriate
            if capacity:
                moduleText += buildCodeLine(2, ['if isinstance(inputObject, pandas.core.frame.DataFrame) and len(inputObject.index) > ', str(capacity), ':'])
                moduleText += buildCodeLine(3, ['return {"error": "Capacity exceeded"}'])
            functionText = moduleShortNames[moduleName] + '.' + functionName
            codeComponents = ['output = ', functionText, '(']
            functionArgs = []
            if resourceNames:
                functionArgs.append('resources, ')
            elif service.get('includeParams', False):
                functionArgs.append('request.args.to_dict(), ')
            if service.get('includeRequest', False):
                functionArgs.append('request, ')
            if method == 'POST' and includeBody:
                functionArgs.append('inputObject')
            codeComponents.extend(functionArgs)
            codeComponents.append(')')
            moduleText += buildCodeLine(2, codeComponents)
            moduleText += buildCodeLine(2, ['return Response(ci.serviceOutput(output, "', service.get('outputParseType', 'default'), '"), ',
                'content_type="application/json"', ')'])
            moduleText += buildCodeLine(1, ['except Exception as err:'])
            #moduleText += buildCodeLine(2, ['return(\'{"error": "\' + str(err) + \'"}\')'])
            moduleText += buildCodeLine(2, ['return {"error": str(err)}'])
            moduleText += '\n'
        # Stub for refreshing objects in the module
        # TODO: IMPLEMENT THIS
        moduleText += '\n'
        moduleText += buildCodeLine(0, ['# Refresh objects in module ', moduleName])
        moduleText += buildCodeLine(0, ['@app.route("/refreshObjects/', folderName, '/', moduleName, '", methods=["POST"])'])
        moduleText += buildCodeLine(0, ['def refresh_', str(moduleNumber), '():'])
        moduleText += buildCodeLine(1, ['# TODO: STUB - TO BE ADDED'])
        moduleText += buildCodeLine(1, ['# TODO: PASS BACK cnr__workerID IN THE RESPONSE'])
        moduleText += buildCodeLine(1, ['return({})'])
        # Update credentials (e.g., for object store)
        # TODO: IMPLEMENT THIS
        moduleText += '\n'
        moduleText += buildCodeLine(0, ['# Update credentials in module ', moduleName])
        moduleText += buildCodeLine(0, ['@app.route("/updateCredentials/', folderName, '/', moduleName, '", methods=["POST"])'])
        moduleText += buildCodeLine(0, ['def updateCred_', str(moduleNumber), '():'])
        moduleText += buildCodeLine(1, ['parsedBody = json.loads(request.get_json())'])
        moduleText += buildCodeLine(1, ['updateID = parsedBody.get("updateID", None)'])
        moduleText += buildCodeLine(1, ['if updateID and updateID != cnr__lastUpdateID:'])
        moduleText += buildCodeLine(2, ['cnr__lastUpdateID = updateID'])
        moduleText += buildCodeLine(2, [''])
        moduleText += buildCodeLine(1, ['return({"workerID": cnr__workerID})'])
        moduleText += '\n'
        moduleNumber += 1
    moduleText += '\n'
    moduleText += buildCodeLine(0, ['# Run the app.'])
    moduleText += buildCodeLine(0, ['if __name__ == "__main__":'])
    moduleText += buildCodeLine(1, ['app.run(host="0.0.0.0", port=int(sys.argv[1]))'])
    return moduleText
# Generates the R file for an R module.
def buildRModuleEpilogue(folderName, moduleName, project):
# TODO: ADD ERROR HANDLING. LOG?
# Get the copyright/license notice for the project.
projectNotice = project.get("notice", None)
# Get the folder from the folder name.
folder = cc.getFolder(folderName, project)
if not folder:
return None
# Get all the module names.
module = cc.getModule(moduleName, folder)
if not module:
return None
# Start with empty module.
moduleText = ''
# Add the file header.
# Add the file header.
moduleText += buildCodeLine(0, ['#'*80])
moduleText += buildCodeLine(0, ['# ', 'CANNR TM analytics container building tool R service script.'])
moduleText += buildCodeLine(0, ['# ', 'Wrapper module that provides Web services.'])
moduleText += buildCodeLine(0, ['# ', 'Copyright 2020 <NAME> <EMAIL>'])
moduleText += buildCodeLine(0, ['# ', 'All rights reserved'])
moduleText += buildCodeLine(0, ['# ', 'Maintainer <NAME> <EMAIL>'])
moduleText += buildCodeLine(0, ['#'*80])
moduleText += buildCodeLine(0, [])
moduleText += buildCodeLine(0, ['#'*80])
# TODO: NEED TO HANDLE CASE THAT THERE ARE LINE BREAKS IN THE NOTICE.
#if projectNotice:
# moduleText += buildCodeLine(0, ['# ', projectNotice])
| |
<gh_stars>0
r"""
Clifford Algebras
AUTHORS:
- <NAME> (2013-09-06): Initial version
"""
#*****************************************************************************
# Copyright (C) 2013 <NAME> <tscrim at ucdavis.edu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
from six import iteritems
from sage.misc.six import with_metaclass
from sage.misc.cachefunc import cached_method
from sage.structure.unique_representation import UniqueRepresentation
from copy import copy
from sage.categories.algebras_with_basis import AlgebrasWithBasis
from sage.categories.hopf_algebras_with_basis import HopfAlgebrasWithBasis
from sage.modules.with_basis.morphism import ModuleMorphismByLinearity
from sage.categories.poor_man_map import PoorManMap
from sage.rings.all import ZZ
from sage.modules.free_module import FreeModule, FreeModule_generic
from sage.matrix.constructor import Matrix
from sage.matrix.args import MatrixArgs
from sage.sets.family import Family
from sage.combinat.free_module import CombinatorialFreeModule
from sage.combinat.subset import SubsetsSorted
from sage.quadratic_forms.quadratic_form import QuadraticForm
from sage.algebras.weyl_algebra import repr_from_monomials
from sage.misc.inherit_comparison import InheritComparisonClasscallMetaclass
class CliffordAlgebraElement(CombinatorialFreeModule.Element):
"""
An element in a Clifford algebra.
TESTS::
sage: Q = QuadraticForm(ZZ, 3, [1, 2, 3, 4, 5, 6])
sage: Cl.<x,y,z> = CliffordAlgebra(Q)
sage: elt = ((x^3-z)*x + y)^2
sage: TestSuite(elt).run()
"""
def _repr_(self):
    """
    Return a string representation of ``self``.
    TESTS::
        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl.<x,y,z> = CliffordAlgebra(Q)
        sage: ((x^3-z)*x + y)^2
        -2*x*y*z - x*z + 5*x - 4*y + 2*z + 2
        sage: Cl.zero()
        0
    """
    # Delegate the monomial ordering/printing to the shared helper, using
    # the parent's per-term repr.
    return repr_from_monomials(self.list(), self.parent()._repr_term)
def _latex_(self):
    r"""
    Return a `\LaTeX` representation of ``self``.
    TESTS::
        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl.<x,y,z> = CliffordAlgebra(Q)
        sage: latex( ((x^3-z)*x + y)^2 )
        -2 x y z - x z + 5 x - 4 y + 2 z + 2
        sage: Cl.<x0,x1,x2> = CliffordAlgebra(Q)
        sage: latex( (x1 - x2)*x0 + 5*x0*x1*x2 )
        5 x_{0} x_{1} x_{2} - x_{0} x_{1} + x_{0} x_{2} - 1
    """
    # Same helper as _repr_, but with the latex term printer and
    # latex-mode spacing enabled.
    return repr_from_monomials(self.list(), self.parent()._latex_term, True)
def _mul_(self, other):
    """
    Return ``self`` multiplied by ``other``.
    INPUT:
    - ``other`` -- element of the same Clifford algebra as ``self``
    EXAMPLES::
        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl.<x,y,z> = CliffordAlgebra(Q)
        sage: (x^3 - z*y)*x*(y*z + x*y*z)
        x*y*z + y*z - 24*x + 12*y + 2*z - 24
        sage: y*x
        -x*y + 2
        sage: z*x
        -x*z + 3
        sage: z*z
        6
        sage: x*0
        0
        sage: 0*x
        0
    """
    Q = self.parent()._quadratic_form
    zero = self.parent().base_ring().zero()
    # Accumulator for the product's monomial -> coefficient map.
    d = {}
    for ml, cl in self:
        # Distribute the current term ``cl`` * ``ml`` over ``other``.
        cur = copy(other._monomial_coefficients)  # The current distribution of the term
        # Multiply the factors of ``ml`` onto ``cur`` one at a time, from
        # the right, using the Clifford relations to re-sort each monomial.
        for i in reversed(ml):
            # Distribute the current factor ``e[i]`` (the ``i``-th
            # element of the standard basis).
            next = {}
            # At the end of the following for-loop, ``next`` will be
            # the dictionary describing the element
            # ``e[i]`` * (the element described by the dictionary ``cur``)
            # (where ``e[i]`` is the ``i``-th standard basis vector).
            for mr, cr in iteritems(cur):
                # Commute the factor as necessary until we are in order
                pos = 0
                for j in mr:
                    if i <= j:
                        break
                    # Add the additional term from the commutation
                    t = list(mr)
                    t.pop(pos)
                    t = tuple(t)
                    next[t] = next.get(t, zero) + cr * Q[i, j]
                    # Note: ``Q[i,j] == Q(e[i]+e[j]) - Q(e[i]) - Q(e[j])`` for
                    # ``i != j``, where ``e[k]`` is the ``k``-th standard
                    # basis vector.
                    cr = -cr
                    if next[t] == zero:
                        del next[t]
                    pos += 1
                # Check to see if we have a squared term or not
                t = list(mr)
                if i in t:
                    t.remove(i)
                    cr *= Q[i, i]
                    # Note: ``Q[i,i] == Q(e[i])`` where ``e[i]`` is the
                    # ``i``-th standard basis vector.
                else:
                    t.insert(pos, i)
                    # Note that ``t`` is now sorted.
                t = tuple(t)
                next[t] = next.get(t, zero) + cr
                if next[t] == zero:
                    del next[t]
            cur = next
        # Add the distributed terms to the total
        for index, coeff in iteritems(cur):
            d[index] = d.get(index, zero) + cl * coeff
            if d[index] == zero:
                del d[index]
    return self.__class__(self.parent(), d)
def list(self):
    """
    Return the monomials of ``self`` with their coefficients, as a list
    of ``(monomial, coefficient)`` 2-tuples, ordered by decreasing
    monomial length and then lexicographically.
    EXAMPLES::
        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl.<x,y,z> = CliffordAlgebra(Q)
        sage: elt = 5*x + y
        sage: elt.list()
        [((0,), 5), ((1,), 1)]
    """
    def term_order(term):
        monomial, _coeff = term
        return (-len(monomial), monomial)
    return sorted(self._monomial_coefficients.items(), key=term_order)
def support(self):
    """
    Return the support of ``self``: the monomials appearing with a
    nonzero coefficient, ordered by decreasing length and then
    lexicographically.
    EXAMPLES::
        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl.<x,y,z> = CliffordAlgebra(Q)
        sage: elt = 5*x + y
        sage: elt.support()
        [(0,), (1,)]
    """
    def monomial_order(monomial):
        return (-len(monomial), monomial)
    return sorted(self._monomial_coefficients.keys(), key=monomial_order)
def reflection(self):
    r"""
    Return the image of ``self`` under the reflection automorphism.
    The *reflection automorphism* of a Clifford algebra is the algebra
    automorphism acting on basis monomials by
    .. MATH::
        x_1 \wedge x_2 \wedge \cdots \wedge x_m \mapsto
        (-1)^m x_1 \wedge x_2 \wedge \cdots \wedge x_m.
    :meth:`degree_negation` is an alias for :meth:`reflection`.
    EXAMPLES::
        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl.<x,y,z> = CliffordAlgebra(Q)
        sage: elt = 5*x + y + x*z
        sage: r = elt.reflection(); r
        x*z - 5*x - y
        sage: r.reflection() == elt
        True
    TESTS:
    We check that the reflection is an involution::
        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl.<x,y,z> = CliffordAlgebra(Q)
        sage: all(x.reflection().reflection() == x for x in Cl.basis())
        True
    """
    negated = {}
    for monomial, coeff in self:
        # Odd-length monomials change sign; even-length are fixed.
        negated[monomial] = -coeff if len(monomial) % 2 else coeff
    return self.__class__(self.parent(), negated)

degree_negation = reflection
def transpose(self):
    r"""
    Return the transpose of ``self``.
    The transpose is an anti-algebra involution of a Clifford algebra
    and is defined (using linearity) by
    .. MATH::
        x_1 \wedge x_2 \wedge \cdots \wedge x_m \mapsto
        x_m \wedge \cdots \wedge x_2 \wedge x_1.
    EXAMPLES::
        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl.<x,y,z> = CliffordAlgebra(Q)
        sage: elt = 5*x + y + x*z
        sage: t = elt.transpose(); t
        -x*z + 5*x + y + 3
        sage: t.transpose() == elt
        True
        sage: Cl.one().transpose()
        1
    TESTS:
    We check that the transpose is an involution::
        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl.<x,y,z> = CliffordAlgebra(Q)
        sage: all(x.transpose().transpose() == x for x in Cl.basis())
        True
    Zero is sent to zero::
        sage: Cl.zero().transpose() == Cl.zero()
        True
    """
    P = self.parent()
    if not self._monomial_coefficients:
        return P.zero()
    g = P.gens()
    # Re-multiplying each monomial's generators in reversed order realizes
    # the anti-automorphism (the product re-sorts via the Clifford
    # relations, producing the required lower-order terms).
    return P.sum(c * P.prod(g[i] for i in reversed(m)) for m, c in self)
def conjugate(self):
    r"""
    Return the Clifford conjugate of ``self``.

    For an element `x` of a Clifford algebra, the Clifford conjugate is

    .. MATH::

        \bar{x} := \alpha(x^t) = \alpha(x)^t

    where `\alpha` denotes the :meth:`reflection <reflection>`
    automorphism and `t` the :meth:`transposition <transpose>`.

    EXAMPLES::

        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl.<x,y,z> = CliffordAlgebra(Q)
        sage: elt = 5*x + y + x*z
        sage: c = elt.conjugate(); c
        -x*z - 5*x - y + 3
        sage: c.conjugate() == elt
        True

    TESTS:

    We check that the conjugate is an involution::

        sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
        sage: Cl.<x,y,z> = CliffordAlgebra(Q)
        sage: all(x.conjugate().conjugate() == x for x in Cl.basis())
        True
    """
    # Apply reflection first, then transpose (the two maps commute).
    reflected = self.reflection()
    return reflected.transpose()

clifford_conjugate = conjugate
# TODO: This is a general function which should be moved to a
# superalgebras category when one is implemented.
def supercommutator(self, x):
r"""
Return the supercommutator of ``self`` and ``x``.
Let `A` be a superalgebra. The *supercommutator* of homogeneous
elements `x, y \in A` is defined by
.. MATH::
[x, y\} = x y - (-1)^{|x| |y|} y x
and extended to all elements by linearity.
EXAMPLES::
sage: Q = QuadraticForm(ZZ, 3, [1,2,3,4,5,6])
sage: Cl.<x,y,z> = CliffordAlgebra(Q)
sage: a = x*y - z
sage: b = x - y + y*z
sage: a.supercommutator(b)
-5*x*y + 8*x*z - 2*y*z - 6*x + 12*y - 5*z
sage: a.supercommutator(Cl.one())
0
sage: Cl.one().supercommutator(a)
0
sage: Cl.zero().supercommutator(a)
0
sage: a.supercommutator(Cl.zero())
0
sage: Q = QuadraticForm(ZZ, 2, [-1,1,-3])
sage: Cl.<x,y> = CliffordAlgebra(Q)
sage: [a.supercommutator(b) for a in Cl.basis() for b in Cl.basis()]
[0, 0, 0, 0, 0, -2, 1, -x - 2*y, 0, 1,
-6, 6*x + y, 0, x + 2*y, -6*x - y, 0]
sage: [a*b-b*a for a in Cl.basis() for b in Cl.basis()]
[0, 0, 0, 0, 0, 0, 2*x*y - 1, -x | |
"""
@author: <NAME> <<EMAIL>>
Copyright 2013 IBM Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# This represents the low layer message framing portion of IPMI
import atexit
from collections import deque
from hashlib import md5
import os
from random import random
import select
import socket
from struct import pack, unpack
from time import time
from Crypto.Cipher import AES
from Crypto.Hash import HMAC, SHA
from ipmi.private.constants import payload_types, ipmi_completion_codes, command_completion_codes, payload_types, rmcp_codes
# Minimum timeout (seconds) before the first packet of any session is
# retried.  Each session adds a random component (see _initsession) so
# that retries stagger out rather than synchronizing under congestion.
initialtimeout = 0.5 #minimum timeout for first packet to retry in any given
                     #session.  This will be randomized to stagger out retries
                     #in case of congestion
def _aespad(data): # ipmi demands a certain pad scheme, per table 13-20 AES-CBC
# encrypted payload fields
newdata=list(data)
currlen=len(data)+1 #need to count the pad length field as well
neededpad=currlen%16
if neededpad: #if it happens to be zero, hurray, but otherwise invert the
#sense of the padding
neededpad = 16-neededpad
padval=1
while padval <= neededpad:
newdata.append(padval)
padval+=1
newdata.append(neededpad)
return newdata
'''
In order to simplify things, in a number of places there is a callback facility and optional arguments to pass in.
An OO oriented caller may find the additional argument needless. Allow them to ignore it by skipping the argument if None
'''
def call_with_optional_args(callback, *args):
    """Invoke ``callback`` with only the non-None positional arguments.

    Several callback hooks in this module carry optional caller-supplied
    arguments; OO callers that don't need them pass None, and this helper
    drops those so their callback signature can stay simple.
    """
    callback(*[arg for arg in args if arg is not None])
def get_ipmi_error(response, suffix=""):
    """Translate an IPMI response dict into an error string, or False.

    :param response: dict carrying either an 'error' key (transport /
        session level failure) or 'code', 'command' and 'netfn' keys
        from a parsed IPMI reply.
    :param suffix: text appended to any recognized error string.
    :returns: False when the completion code indicates success (0),
        otherwise a descriptive error string.
    """
    if 'error' in response:
        return response['error'] + suffix
    code = response['code']
    command = response['command']
    netfn = response['netfn']
    if code == 0:
        # Completion code 0 is IPMI "command completed normally".
        return False
    # Prefer a command-specific completion code string, then the generic
    # IPMI completion code table.
    if ((netfn, command) in command_completion_codes and
            code in command_completion_codes[(netfn, command)]):
        return command_completion_codes[(netfn, command)][code] + suffix
    elif code in ipmi_completion_codes:
        return ipmi_completion_codes[code] + suffix
    else:
        # BUGFIX: 'code' is an int; the original concatenated it directly
        # to a str, raising TypeError on this path.  Format it instead.
        return "Unknown code %s encountered" % code
class Session:
    """An IPMI LAN session to a single BMC.

    All instances multiplex traffic over one process-wide UDP socket
    (created lazily by ``_createsocket``) using the shared poller below.
    """
    # State shared by every Session in the process:
    poller = select.poll()       # polls the shared UDP socket
    bmc_handlers = {}            # BMC address -> Session
    waiting_sessions = {}        # sessions with packets awaiting a response
    peeraddr_to_nodes = {}       # resolved sockaddr -> node
    #Upon exit of python, make sure we play nice with BMCs by assuring closed
    #sessions for all that we tracked
@classmethod
def _cleanup(cls):
    """atexit hook: cleanly log out of every tracked BMC session.

    NOTE(review): uses dict.itervalues(), so this module is Python 2
    only as written.
    """
    for session in cls.bmc_handlers.itervalues():
        session.logout()
@classmethod
def _createsocket(cls):
    """Create the process-wide UDP socket shared by all sessions.

    Registers the atexit logout hook, tries (best-effort) to grow the
    socket receive buffer toward the system maximum, and derives
    ``cls.maxpending`` - a throttle on outstanding packets - from the
    buffer size actually granted, pessimistically assuming 1 KB per
    IPMI datagram (far larger than almost all of them).

    For faster performance, sysadmins may want to tune
    /proc/sys/net/core/rmem_max up; this only lets us request more, it
    does not affect applications that ask for less.
    """
    atexit.register(cls._cleanup)
    # An AF_INET6 datagram socket also handles IPv4 peers via mapped
    # addresses, so one socket covers both families.
    cls.socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    try:
        # Best effort: /proc may be absent (non-Linux) or unreadable.
        # BUGFIX: the file handle was never closed; use a context manager.
        with open("/proc/sys/net/core/rmem_max") as maxmf:
            rmemmax = int(maxmf.read())
        # '//' keeps integer semantics (identical to '/' under Python 2,
        # and still correct if this ever runs under Python 3).
        rmemmax = rmemmax // 2
        curmax = cls.socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
        curmax = curmax // 2
        if rmemmax > curmax:
            cls.socket.setsockopt(socket.SOL_SOCKET,
                                  socket.SO_RCVBUF,
                                  rmemmax)
    except Exception:
        # BUGFIX: narrowed from a bare 'except:' so KeyboardInterrupt /
        # SystemExit are no longer swallowed; buffer tuning stays
        # best-effort.
        pass
    curmax = cls.socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)
    cls.poller.register(cls.socket, select.POLLIN)
    curmax = curmax // 2
    # Throttle so that we never have more outstanding packets than the
    # receive buffer should be able to hold.
    cls.pending = 0
    cls.maxpending = curmax // 1000
    #TODO(jbjohnso): perhaps spread sessions across a socket pool when
    #rmem_max is small, still get ~65/socket, but avoid long queues that
    #might happen with low rmem_max and putting thousands of nodes in line
'''
This function handles the synchronous caller case in liue of a client
provided callback
'''
def _sync_login(self, response):
    """Default logon callback for synchronous callers (no user-provided
    onlogon): surface a failed login by raising, otherwise do nothing.
    """
    if 'error' not in response:
        return
    raise Exception(response['error'])
def __init__(self,
             bmc,
             userid,
             password,
             port=623,
             kg=None,
             onlogon=None,
             onlogonargs=None):
    """Create a session object and immediately begin logging in.

    :param bmc: hostname or IP address of the BMC
    :param userid: IPMI user name
    :param password: IPMI password
    :param port: UDP port of the BMC RMCP service (default 623)
    :param kg: RMCP+ 'Kg' BMC key; defaults to the password
    :param onlogon: optional callback fired when logon completes; when
        omitted the constructor blocks until the session is logged in
    :param onlogonargs: extra argument passed through to ``onlogon``

    NOTE(review): ``async`` is a reserved keyword in Python 3; as
    written this attribute makes the module Python 2 only.
    """
    self.bmc=bmc
    self.userid=userid
    self.password=password
    self.noretry=False
    self.nowait=False
    # The Kg key defaults to the password when not explicitly given.
    if kg is not None:
        self.kg=kg
    else:
        self.kg=password
    self.port=port
    self.onlogonargs=onlogonargs
    # No callback supplied -> synchronous mode using _sync_login.
    if (onlogon is None):
        self.async=False
        self.onlogon=self._sync_login
    else:
        self.async=True
        self.onlogon=onlogon
    # The UDP socket is shared by all sessions; create it on first use.
    if not hasattr(Session,'socket'):
        self._createsocket()
    self.login()
    # Synchronous callers pump the shared socket until logon completes.
    if not self.async:
        while not self.logged:
            Session.wait_for_rsp()
def _initsession(self):
    """Reset all per-session protocol state to pre-logon defaults."""
    self.localsid=2017673555 #this number can be whatever we want. I picked
                             #'xCAT' minus 1 so that a hexdump of packet
                             # would show xCAT
    self.privlevel=4 #for the moment, assume admin access
                     #TODO(jbjohnso): make flexible
    self.confalgo=0          # confidentiality algorithm (0 = none yet)
    self.aeskey=None         # AES key derived during RMCP+ negotiation
    self.integrityalgo=0     # integrity algorithm (0 = none yet)
    self.k1=None             # HMAC key derived during RMCP+ negotiation
    self.rmcptag=1
    self.ipmicallback=None
    self.ipmicallbackargs=None
    self.sessioncontext=None
    self.sequencenumber=0
    self.sessionid=0
    self.authtype=0
    self.ipmiversion=1.5     # start at 1.5 framing until 2.0 is negotiated
    # Randomized so that retries of many sessions stagger out rather
    # than synchronizing under congestion.
    self.timeout=initialtimeout+(0.5*random())
    self.seqlun=0
    self.rqaddr=0x81 #per IPMI table 5-4, software ids in the ipmi spec may
                     #be 0x81 through 0x8d. We'll stick with 0x81 for now,
                     #do not forsee a reason to adjust
    self.logged=0
    self.sockaddr=None #when we confirm a working sockaddr, put it here to
                       #skip getaddrinfo
    self.tabooseq={} #this tracks netfn,command,seqlun combinations that
                     #were retried so that we don't loop around and reuse
                     #the same request data and cause potential ambiguity
                     #in return
    self.ipmi15only=0 #default to supporting ipmi 2.0.  Strictly by spec,
                      #this should gracefully be backwards compat, but some
                      #1.5 implementations checked reserved bits
def _checksum(self, *data):
    """Return the two's-complement checksum of the byte values,
    truncated to 8 bits, so that a block plus its checksum sums to
    zero modulo 256 (as IPMI headers/bodies require).
    """
    return (-sum(data)) & 0xff
'''
This function generates the core ipmi payload that would be applicable
for any channel (including KCS)
'''
def _make_ipmi_payload(self,netfn,command,data=()):
    """Build the channel-independent IPMI message (figure 13-4):
    rsaddr, netfn, header checksum, then rqaddr, seq/lun, command,
    data and body checksum.  Returns the message as a list of ints.
    """
    self.expectedcmd=command
    self.expectednetfn=netfn+1 #in ipmi, the response netfn is always one
                               #higher than the request payload, we assume
                               #we are always the requestor for now
    seqincrement=7 #IPMI spec forbids gaps bigger then 7 in seq number.
                   #Risk the taboo rather than violate the rules
    # Skip seq/lun values marked "taboo" (previously retried) so a stale
    # response cannot be mistaken for a fresh one.
    # NOTE(review): the loop condition keys tabooseq on (netfn, ...) while
    # the decrement uses (self.expectednetfn, ...) - these are different
    # keys; looks inconsistent, confirm which is intended.
    while ((netfn,command,self.seqlun) in self.tabooseq and
           self.tabooseq[(netfn,command,self.seqlun)] and seqincrement):
        self.tabooseq[(self.expectednetfn,command,self.seqlun)]-=1
        #Allow taboo to eventually expire after a few rounds
        self.seqlun += 4 #the last two bits are lun, so add 4 to add 1
        self.seqlun &= 0xff #we only have one byte, wrap when exceeded
        seqincrement-=1
    header=[0x20,netfn<<2] #figure 13-4, first two bytes are rsaddr and
                           #netfn, rsaddr is always 0x20 since we are
                           #addressing BMC
    reqbody=[self.rqaddr,self.seqlun,command]+list(data)
    headsum=self._checksum(*header)
    bodysum=self._checksum(*reqbody)
    payload=header+[headsum]+reqbody+[bodysum]
    return payload
def _generic_callback(self, response):
    """Default raw_command callback: annotate the response with an
    'error' string when the completion code indicates failure, then
    stash it in self.lastresponse for the synchronous caller.
    """
    errstr = get_ipmi_error(response)
    if errstr:
        response['error'] = errstr
    self.lastresponse = response
def raw_command(self,
                netfn,
                command,
                data=(),
                callback=None,
                callback_args=None):
    """Send an arbitrary IPMI command over the session.

    :param netfn: network function of the request
    :param command: command number
    :param data: optional payload bytes (any iterable of ints).
        BUGFIX: the default was a mutable list literal ([]), a classic
        Python pitfall; an immutable tuple behaves identically for all
        callers and matches _make_ipmi_payload's default.
    :param callback: optional completion callback; when omitted the
        call blocks and returns the response dict.
    :param callback_args: extra argument forwarded to the callback via
        the call_with_optional_args convention.
    :returns: the response dict in synchronous mode, else None.
    """
    self.ipmicallbackargs = callback_args
    if callback is None:
        # Synchronous mode: capture the reply via the generic callback
        # and pump the shared socket until it arrives.
        self.lastresponse = None
        self.ipmicallback = self._generic_callback
    else:
        self.ipmicallback = callback
    self._send_ipmi_net_payload(netfn, command, data)
    if callback is None:
        while self.lastresponse is None:
            Session.wait_for_rsp()
        return self.lastresponse
def _send_ipmi_net_payload(self, netfn, command, data):
    """Build an IPMI message and hand it to the session-layer packer,
    setting the RMCP+ payload-type flag bits for integrity and/or
    encryption when the session negotiated those algorithms.
    """
    ipmi_message = self._make_ipmi_payload(netfn, command, data)
    ptype = payload_types['ipmi']
    if self.integrityalgo:
        ptype |= 0b01000000  # "authenticated" flag bit
    if self.confalgo:
        ptype |= 0b10000000  # "encrypted" flag bit
    self._pack_payload(payload=ipmi_message, payload_type=ptype)
def _pack_payload(self,payload=None,payload_type=None):
    """Frame ``payload`` in RMCP / RMCP+ session framing and transmit.

    Called with no arguments, retransmits the previous payload (the
    retry path).  Handles both IPMI 1.5 and 2.0 framing; for 2.0 it
    applies AES-CBC confidentiality and/or HMAC-SHA1-96 integrity per
    table 13-8 when those algorithms were negotiated.
    """
    # Default to the previous payload for retransmission.
    if payload is None:
        payload=self.lastpayload
    if payload_type is None:
        payload_type=self.last_payload_type
    message = [0x6,0,0xff,0x07] #constant RMCP header for IPMI
    # Strip the encryption/authentication flag bits to get the type.
    baretype = payload_type & 0b00111111
    # Remember for a possible retry.
    self.lastpayload=payload
    self.last_payload_type=payload_type
    message.append(self.authtype)
    if (self.ipmiversion == 2.0):
        message.append(payload_type)
        if (baretype == 2):
            raise Exception("TODO(jbjohnso): OEM Payloads")
        elif (baretype == 1):
            raise Exception("TODO(jbjohnso): SOL Payload")
        elif baretype not in payload_types.values():
            raise Exception("Unrecognized payload type %d"%baretype)
        # Session id / sequence number are little-endian on the wire.
        message += unpack("!4B",pack("<I",self.sessionid))
    message += unpack("!4B",pack("<I",self.sequencenumber))
    if (self.ipmiversion == 1.5):
        message += unpack("!4B",pack("<I",self.sessionid))
        if not self.authtype == 0:
            message += self._ipmi15authcode(payload)
        message.append(len(payload))
        message += payload
        totlen=34+len(message) #Guessing the ipmi spec means the whole
                               #packet and assume no tag in old 1.5 world
        if (totlen in (56,84,112,128,156)):
            message.append(0) #Legacy pad as mandated by ipmi spec
    elif self.ipmiversion == 2.0:
        psize = len(payload)
        if self.confalgo:
            # Compute the padded size up front so the two-byte length
            # field can precede the IV and ciphertext.
            pad = (psize+1)%16 #pad has to account for one byte field as in
                               #the _aespad function
            if pad: #if no pad needed, then we take no more action
                pad = 16-pad
            newpsize=psize+pad+17 #new payload size grew according to pad
                                  #size, plus pad length, plus 16 byte IV
                                  #(Table 13-20)
            message.append(newpsize&0xff)
            message.append(newpsize>>8);
            iv=os.urandom(16)
            message += list(unpack("16B",iv))
            payloadtocrypt=_aespad(payload)
            crypter = AES.new(self.aeskey,AES.MODE_CBC,iv)
            crypted = crypter.encrypt(pack("%dB"%len(payloadtocrypt),
                                           *payloadtocrypt))
            crypted = list(unpack("%dB"%len(crypted),crypted))
            message += crypted
        else: #no confidetiality algorithm
            message.append(psize&0xff)
            message.append(psize>>8);
            message += list(payload)
        if self.integrityalgo: #see table 13-8,
                               #RMCP+ packet format
                               #TODO(jbjohnso): SHA256 which is now allowed
            # Integrity pad brings the signed region to a 4-byte multiple.
            neededpad=(len(message)-2)%4
            if neededpad:
                neededpad = 4-neededpad
            message += [0xff]*neededpad
            message.append(neededpad)
            message.append(7) #reserved, 7 is the required value for the
                              #specification followed
            # Everything after the RMCP header is authenticated.
            integdata = message[4:]
            authcode = HMAC.new(self.k1,
                                pack("%dB"%len(integdata),
                                     *integdata),
                                SHA).digest()[:12] #SHA1-96
                                                   #per RFC2404 truncates to 96 bits
            message += unpack("12B",authcode)
    self.netpacket = pack("!%dB"%len(message),*message)
    self._xmit_packet()
def _ipmi15authcode(self,payload,checkremotecode=False):
if self.authtype == 0: #Only for things prior to auth in ipmi 1.5, not
#like 2.0 cipher suite 0
return ()
password = self.password
padneeded = 16 - len(password)
if padneeded < 0:
raise Exception("Password is too long for ipmi 1.5")
password += '\<PASSWORD>'*padneeded
passdata = unpack("16B",password)
if checkremotecode:
seqbytes = unpack("!4B",pack("<I",self.remsequencenumber))
else:
seqbytes = unpack("!4B",pack("<I",self.sequencenumber))
sessdata = unpack("!4B",pack("<I",self.sessionid))
bodydata = passdata + sessdata + tuple(payload) + seqbytes + passdata
dgst = md5(pack("%dB"%len(bodydata),*bodydata)).digest()
hashdata = unpack("!%dB"%len(dgst),dgst)
| |
#!/usr/bin/env python
#
# base.py - The Action and ToggleAction classes.
#
# Author: <NAME> <<EMAIL>>
#
"""This module provides the :class:`Action`, :class:`NeedOverlayAction`, and
:class:`ToggleAction` classes. See the :mod:`.actions` package documentation
for more details.
"""
import logging
import fsl.data.image as fslimage
import fsleyes_props as props
import fsleyes_widgets as fwidgets
log = logging.getLogger(__name__)
class ActionDisabledError(Exception):
    """Exception raised when an attempt is made to call a disabled
    :class:`Action` (i.e. one whose ``enabled`` property is ``False``).
    """
class BoundWidget(object):
    """Record of a single widget binding held by an :class:`Action`.

    Stores the parent window, event type and widget of one
    :meth:`Action.bindToWidget` call, plus (for menu items) a reference
    to the owning menu.
    """

    def __init__(self, parent, evType, widget):
        import wx

        self.parent = parent
        self.evType = evType
        self.widget = widget

        # Under OSX, calling GetMenu on a wx.MenuItem after its wx.Menu
        # has been destroyed segfaults, so grab the menu reference now,
        # while it is known to be valid.
        if isinstance(widget, wx.MenuItem):
            self.menu = widget.GetMenu()
        else:
            self.menu = None

    def isAlive(self):
        """Returns ``True`` if the widget contained by this ``BoundWidget``
        (and its parent) is still alive, ``False`` otherwise.  For menu
        items, the owning menu is checked instead of the item itself.
        """
        import wx

        if not fwidgets.isalive(self.parent):
            return False

        if isinstance(self.widget, wx.MenuItem):
            target = self.menu
        else:
            target = self.widget
        return fwidgets.isalive(target)
class Action(props.HasProperties):
    """Represents an action of some sort - a callable piece of behaviour
    which can be bound to :mod:`wx` widgets/menu items, and enabled or
    disabled as a group via the :attr:`enabled` property.
    """

    enabled = props.Boolean(default=True)
    """Controls whether the action is currently enabled or disabled.
    When this property is ``False`` calls to the action will
    result in a :exc:`ActionDisabledError`.
    """

    @staticmethod
    def title():
        """May be overridden by sub-classes. Returns a title to be used
        in menus.
        """
        return None

    @staticmethod
    def supportedViews():
        """May be overridden to declare that this Action should be associated
        with a specific :class:`.ViewPanel`. If overridden, must return a
        list containing all of the supported ``ViewPanel`` types.
        """
        return None

    @staticmethod
    def ignoreTool():
        """Used by the FSLeyes :mod:`.plugins` module for actions which are
        loaded as plugins. Can be used to tell the ``plugins`` module that
        a particular ``Action`` should not be added as an option to the
        FSLeyes Tools menu.

        Note that this method must be implemented on the class that is to
        be ignored - inherited implementations from base classes are not
        considered.
        """
        return False

    def __init__(self,
                 overlayList,
                 displayCtx,
                 func,
                 name=None):
        """Create an ``Action``.

        :arg overlayList: The :class:`.OverlayList`.

        :arg displayCtx:  The :class:`.DisplayContext` associated with this
                          ``Action``; note that this is not necessarily the
                          master :class:`.DisplayContext`.

        :arg func:        The action function.

        :arg name:        Action name. Defaults to ``func.__name__``.

        .. note:: If an ``Action`` encapsulates a method of an
                  :class:`.ActionProvider` instance, it is assumed that the
                  ``name`` is the name of the method on the instance.
        """
        if name is None:
            name = func.__name__

        self.__overlayList  = overlayList
        self.__displayCtx   = displayCtx
        self.__func         = func
        self.__name         = '{}_{}'.format(type(self).__name__, id(self))
        self.__actionName   = name
        self.__destroyed    = False
        self.__boundWidgets = []

        # Keep bound widget enabled-state in sync with this action.
        self.addListener('enabled',
                         'Action_{}_internal'.format(id(self)),
                         self.__enabledChanged)

    def __str__(self):
        """Returns a string representation of this ``Action``. """
        return '{}({})'.format(type(self).__name__, self.__name)

    def __repr__(self):
        """Returns a string representation of this ``Action``. """
        return self.__str__()

    @property
    def actionName(self):
        """Returns the name of this ``Action``, often the method name of the
        :class:`.ActionProvider` that implements the action. Not to be
        confused with :meth:`name`.
        """
        return self.__actionName

    @property
    def name(self):
        """Not to be confused with :meth:`actionName`.

        Returns a unique name for a specific ``Action`` instance, which
        can be used (e.g.) for registering property listeners.
        """
        return self.__name

    @property
    def overlayList(self):
        """Return a reference to the :class:`.OverlayList`. """
        return self.__overlayList

    @property
    def displayCtx(self):
        """Return a reference to the :class:`.DisplayContext`. """
        return self.__displayCtx

    def __call__(self, *args, **kwargs):
        """Calls this action. An :exc:`ActionDisabledError` will be raised
        if :attr:`enabled` is ``False``.
        """
        if not self.enabled:
            raise ActionDisabledError('Action {} is disabled'.format(
                self.__name))

        log.debug('Action %s called', self.__name)

        return self.__func(*args, **kwargs)

    @property
    def destroyed(self):
        """Returns ``True`` if :meth:`destroy` has been called, ``False``
        otherwise.
        """
        return self.__destroyed

    def destroy(self):
        """Must be called when this ``Action`` is no longer needed. """
        self.unbindAllWidgets()
        self.__destroyed   = True
        self.__overlayList = None
        self.__displayCtx  = None
        self.__func        = None

    def bindToWidget(self, parent, evType, widget, wrapper=None):
        """Binds this action to the given :mod:`wx` widget.

        :arg parent:  The :mod:`wx` object on which the event should be bound.

        :arg evType:  The :mod:`wx` event type.

        :arg widget:  The :mod:`wx` widget.

        :arg wrapper: Optional custom wrapper function used to execute the
                      action.
        """

        if wrapper is None:
            def wrapper(ev):
                self()

        parent.Bind(evType, wrapper, widget)
        widget.Enable(self.enabled)
        self.__boundWidgets.append(BoundWidget(parent, evType, widget))

    def unbindWidget(self, widget):
        """Unbinds the given widget from this ``Action``. """

        # Figure out the index into __boundWidgets,
        # as we need this to pass to __unbindWidget,
        # which does the real work.
        index = -1
        for i, bw in enumerate(self.__boundWidgets):
            if bw.widget == widget:
                index = i
                break

        if index == -1:
            raise ValueError('Widget {} [{}] is not bound'
                             .format(type(widget).__name__, id(widget)))

        self.__unbindWidget( index)
        self.__boundWidgets.pop(index)

    def __unbindWidget(self, index):
        """Unbinds the widget at the specified index into the
        ``__boundWidgets`` list. Does not remove it from the list.
        """
        bw = self.__boundWidgets[index]

        # Only attempt to unbind if the parent
        # and widget have not been destroyed
        if bw.isAlive():
            bw.parent.Unbind(bw.evType, source=bw.widget)

    def unbindAllWidgets(self):
        """Unbinds all widgets which have been bound via :meth:`bindToWidget`.
        """
        for i in range(len(self.__boundWidgets)):
            self.__unbindWidget(i)
        self.__boundWidgets = []

    def getBoundWidgets(self):
        """Returns a list of :class:`BoundWidget` instances, containing all
        widgets which have been bound to this ``Action``.
        """
        return list(self.__boundWidgets)

    def __enabledChanged(self, *args):
        """Internal method which is called when the :attr:`enabled` property
        changes. Enables/disables any bound widgets.
        """
        # BUGFIX: iterate over a copy - unbindWidget() pops entries from
        # __boundWidgets, and removing from the list while iterating it
        # silently skipped the element following each removed one.
        for bw in list(self.__boundWidgets):

            # The widget may have been destroyed,
            # so check before trying to access it
            if bw.isAlive(): bw.widget.Enable(self.enabled)
            else:            self.unbindWidget(bw.widget)
class ToggleAction(Action):
    """An ``Action`` which encapsulates behaviour that toggles some sort
    of state - for example, an action which opens and/or closes a dialog
    window.
    """

    toggled = props.Boolean(default=False)
    """Boolean which tracks the current state of the ``ToggleAction``. """

    def __init__(self, *args, **kwargs):
        """Create a ``ToggleAction``.

        :arg autoToggle: Must be specified as a keyword argument. When
                         ``True`` (the default) the :attr:`toggled` state
                         is inverted on every call; otherwise the caller
                         is responsible for updating ``toggled`` and the
                         state of any bound widgets/menu items.

        All other arguments are passed to :meth:`Action.__init__`.
        """
        autoToggle = kwargs.pop('autoToggle', True)

        Action.__init__(self, *args, **kwargs)

        self.__autoToggle = autoToggle

        # Keep bound widget checked/toggled state
        # in sync with the toggled property.
        self.addListener('toggled',
                         'ToggleAction_{}_internal'.format(id(self)),
                         self.__toggledChanged)

    def __call__(self, *args, **kwargs):
        """Call this ``ToggleAction``. The value of the :attr:`toggled` property
        is flipped (or, when ``autoToggle`` is disabled, bound widgets are
        synchronised with the current ``toggled`` value).
        """
        # Snapshot the toggled value before running the
        # action, in case it gets inadvertently changed.
        wasToggled = self.toggled
        result     = Action.__call__(self, *args, **kwargs)

        if self.__autoToggle:
            # Align self.toggled with the widget state.
            self.toggled = not wasToggled
        else:
            # Align the widget state with self.toggled.
            self.__toggledChanged()

        return result

    def bindToWidget(self, parent, evType, widget, wrapper=None):
        """Bind this ``ToggleAction`` to a widget. If the widget is a
        ``wx.MenuItem``, its ``Check`` is called whenever the :attr:`toggled`
        state changes.
        """
        Action.bindToWidget(self, parent, evType, widget, wrapper)
        self.__setState(widget)

    def __setState(self, widget):
        """Sets the toggled state of the given widget to the current value of
        :attr:`toggled`.
        """
        import wx
        import fsleyes_widgets.bitmaptoggle as bmptoggle

        checkable = (wx.CheckBox,
                     wx.ToggleButton,
                     bmptoggle.BitmapToggleButton)

        if isinstance(widget, wx.MenuItem):
            widget.Check(self.toggled)
        elif isinstance(widget, checkable):
            widget.SetValue(self.toggled)

    def __toggledChanged(self, *a):
        """Internal method called when :attr:`toggled` changes. Updates the
        state of any bound widgets.
        """
        # Widgets may have been destroyed - accessing them
        # raises, so any widget which is dead or errors out
        # is unbound instead of updated.
        for bw in list(self.getBoundWidgets()):
            try:
                ok = bw.isAlive()
                if ok:
                    self.__setState(bw.widget)
            except Exception:
                ok = False
            if not ok:
                self.unbindWidget(bw.widget)
class NeedOverlayAction(Action):
"""The ``NeedOverlayAction`` is a convenience base class for actions
which can only be executed when an overlay of a specific type is selected.
It enables/disables itself based on the type of the currently selected
overlay.
"""
def __init__(self,
             overlayList,
             displayCtx,
             func=None,
             overlayType=fslimage.Image):
    """Create a ``NeedOverlayAction``.

    :arg overlayList: The :class:`.OverlayList`.
    :arg displayCtx:  The :class:`.DisplayContext`.
    :arg func:        The action function.
                      NOTE(review): with the default ``None``,
                      ``Action.__init__`` would fail on
                      ``func.__name__``; presumably sub-classes always
                      supply a function - confirm.
    :arg overlayType: The required overlay type (defaults to :class:`.Image`)
    """
    Action.__init__(self, overlayList, displayCtx, func)

    self.__overlayType = overlayType
    # Name-mangled, so distinct from the Action-level __name; used only
    # for registering/removing the listeners below.
    self.__name        = 'NeedOverlayAction_{}_{}'.format(
        type(self).__name__, id(self))

    # Re-evaluate enabled state whenever the overlay
    # selection or the overlay list itself changes.
    displayCtx .addListener('selectedOverlay',
                            self.__name,
                            self.__selectedOverlayChanged)
    overlayList.addListener('overlays',
                            self.__name,
                            self.__selectedOverlayChanged)

    self.__selectedOverlayChanged()
def destroy(self):
    """Removes listeners from the :class:`.DisplayContext` and
    :class:`.OverlayList`, and calls :meth:`.Action.destroy`.
    """
    self.displayCtx .removeListener('selectedOverlay', self.__name)
    self.overlayList.removeListener('overlays',        self.__name)
    Action.destroy(self)
def __selectedOverlayChanged(self, | |
#!/bin/env python
#
# File: RDKitPerformMinimization.py
# Author: <NAME> <<EMAIL>>
#
# Copyright (C) 2020 <NAME>. All rights reserved.
#
# The functionality available in this script is implemented using RDKit, an
# open source toolkit for cheminformatics developed by <NAME>.
#
# This file is part of MayaChemTools.
#
# MayaChemTools is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 3 of the License, or (at your option) any
# later version.
#
# MayaChemTools is distributed in the hope that it will be useful, but without
# any warranty; without even the implied warranty of merchantability of fitness
# for a particular purpose. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MayaChemTools; if not, see <http://www.gnu.org/licenses/> or
# write to the Free Software Foundation Inc., 59 Temple Place, Suite 330,
# Boston, MA, 02111-1307, USA.
#
from __future__ import print_function
# Add local python path to the global path and import standard library modules...
import os
import sys; sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), "..", "lib", "Python"))
import time
import re
import multiprocessing as mp
# RDKit imports...
try:
from rdkit import rdBase
from rdkit import Chem
from rdkit.Chem import AllChem
except ImportError as ErrMsg:
sys.stderr.write("\nFailed to import RDKit module/package: %s\n" % ErrMsg)
sys.stderr.write("Check/update your RDKit environment and try again.\n\n")
sys.exit(1)
# MayaChemTools imports...
try:
from docopt import docopt
import MiscUtil
import RDKitUtil
except ImportError as ErrMsg:
sys.stderr.write("\nFailed to import MayaChemTools module/package: %s\n" % ErrMsg)
sys.stderr.write("Check/update your MayaChemTools environment and try again.\n\n")
sys.exit(1)
# Script name used in all status/error messages.
ScriptName = os.path.basename(sys.argv[0])
# Raw docopt options and their processed/validated form; populated by
# RetrieveOptions()/ProcessOptions() in the main process and re-created
# in each worker via InitializeWorkerProcess().
Options = {}
OptionsInfo = {}
def main():
    """Start execution of the script"""

    MiscUtil.PrintInfo("\n%s (RDK v%s; %s): Starting...\n" % (ScriptName, rdBase.rdkitVersion, time.asctime()))

    WallClockTime, ProcessorTime = MiscUtil.GetWallClockAndProcessorTime()

    # Retrieve, then validate and normalize, command line options...
    RetrieveOptions()
    ProcessOptions()

    # Do the real work...
    PerformMinimization()

    MiscUtil.PrintInfo("\n%s: Done...\n" % ScriptName)
    MiscUtil.PrintInfo("Total time: %s" % MiscUtil.GetFormattedElapsedTime(WallClockTime, ProcessorTime))
def PerformMinimization():
    """Read molecules, minimize them (optionally generating conformers
    first), write the results, and print summary counts."""

    # Setup a molecule reader...
    MiscUtil.PrintInfo("\nProcessing file %s..." % OptionsInfo["Infile"])
    Mols = RDKitUtil.ReadMolecules(OptionsInfo["Infile"], **OptionsInfo["InfileParams"])

    # Set up a molecule writer...
    Writer = RDKitUtil.MoleculesWriter(OptionsInfo["Outfile"], **OptionsInfo["OutfileParams"])
    if Writer is None:
        # BUGFIX: error message typo - said "output fie".
        MiscUtil.PrintError("Failed to setup a writer for output file %s " % OptionsInfo["Outfile"])
    MiscUtil.PrintInfo("Generating file %s..." % OptionsInfo["Outfile"])

    MolCount, ValidMolCount, MinimizationFailedCount, WriteFailedCount = ProcessMolecules(Mols, Writer)

    if Writer is not None:
        Writer.close()

    MiscUtil.PrintInfo("\nTotal number of molecules: %d" % MolCount)
    MiscUtil.PrintInfo("Number of valid molecules: %d" % ValidMolCount)
    MiscUtil.PrintInfo("Number of molecules failed during conformation generation or minimization: %d" % MinimizationFailedCount)
    MiscUtil.PrintInfo("Number of molecules failed during writing: %d" % WriteFailedCount)
    MiscUtil.PrintInfo("Number of ignored molecules: %d" % (MolCount - ValidMolCount + MinimizationFailedCount + WriteFailedCount))
def ProcessMolecules(Mols, Writer):
    """Process and minimize molecules, dispatching to the single- or
    multi-process implementation based on the --mp option."""

    if OptionsInfo["MPMode"]:
        ProcessFunc = ProcessMoleculesUsingMultipleProcesses
    else:
        ProcessFunc = ProcessMoleculesUsingSingleProcess
    return ProcessFunc(Mols, Writer)
def ProcessMoleculesUsingSingleProcess(Mols, Writer):
    """Process and minimize molecules using a single process.

    Returns a (MolCount, ValidMolCount, MinimizationFailedCount,
    WriteFailedCount) tuple of summary counters.
    """

    if OptionsInfo["SkipConformerGeneration"]:
        MiscUtil.PrintInfo("\nPerforming minimization without generation of conformers...")
    else:
        MiscUtil.PrintInfo("\nPerforming minimization with generation of conformers...")

    MolCount = ValidMolCount = MinimizationFailedCount = WriteFailedCount = 0

    for Mol in Mols:
        MolCount += 1

        # Reader yields None for unparsable records; count but skip.
        if Mol is None:
            continue

        if RDKitUtil.IsMolEmpty(Mol):
            if not OptionsInfo["QuietMode"]:
                MolName = RDKitUtil.GetMolName(Mol, MolCount)
                MiscUtil.PrintWarning("Ignoring empty molecule: %s" % MolName)
            continue

        ValidMolCount += 1

        Mol, CalcStatus, ConfID, Energy = MinimizeMoleculeOrConformers(Mol, MolCount)
        if not CalcStatus:
            MinimizationFailedCount += 1
            continue

        if not WriteMolecule(Writer, Mol, MolCount, ConfID, Energy):
            WriteFailedCount += 1

    return (MolCount, ValidMolCount, MinimizationFailedCount, WriteFailedCount)
def ProcessMoleculesUsingMultipleProcesses(Mols, Writer):
    """Process and minimize molecules using multiprocessing.

    Molecules are base64-encoded and farmed out to a mp.Pool; results
    are decoded and written in the parent.  Returns the same counter
    tuple as the single-process variant.
    """

    if OptionsInfo["SkipConformerGeneration"]:
        MiscUtil.PrintInfo("\nPerforming minimization without generation of conformers using multiprocessing...")
    else:
        MiscUtil.PrintInfo("\nPerforming minimization with generation of conformers using multiprocessing...")

    MPParams = OptionsInfo["MPParams"]

    # Setup data for initializing a worker process...
    # (Options/OptionsInfo are module globals, so each worker needs its
    # own copy; they are shipped as base64-encoded pickles.)
    InitializeWorkerProcessArgs = (MiscUtil.ObjectToBase64EncodedString(Options), MiscUtil.ObjectToBase64EncodedString(OptionsInfo))

    # Setup a encoded mols data iterable for a worker process...
    WorkerProcessDataIterable = RDKitUtil.GenerateBase64EncodedMolStrings(Mols)

    # Setup process pool along with data initialization for each process...
    MiscUtil.PrintInfo("\nConfiguring multiprocessing using %s method..." % ("mp.Pool.imap()" if re.match("^Lazy$", MPParams["InputDataMode"], re.I) else "mp.Pool.map()"))
    MiscUtil.PrintInfo("NumProcesses: %s; InputDataMode: %s; ChunkSize: %s\n" % (MPParams["NumProcesses"], MPParams["InputDataMode"], ("automatic" if MPParams["ChunkSize"] is None else MPParams["ChunkSize"])))

    ProcessPool = mp.Pool(MPParams["NumProcesses"], InitializeWorkerProcess, InitializeWorkerProcessArgs)

    # Start processing...
    # Lazy mode streams molecules (imap); InMemory materializes them (map).
    if re.match("^Lazy$", MPParams["InputDataMode"], re.I):
        Results = ProcessPool.imap(WorkerProcess, WorkerProcessDataIterable, MPParams["ChunkSize"])
    elif re.match("^InMemory$", MPParams["InputDataMode"], re.I):
        Results = ProcessPool.map(WorkerProcess, WorkerProcessDataIterable, MPParams["ChunkSize"])
    else:
        MiscUtil.PrintError("The value, %s, specified for \"--inputDataMode\" is not supported." % (MPParams["InputDataMode"]))

    (MolCount, ValidMolCount, MinimizationFailedCount, WriteFailedCount) = [0] * 4
    for Result in Results:
        MolCount += 1
        MolIndex, EncodedMol, CalcStatus, ConfID, Energy = Result

        if EncodedMol is None:
            continue
        ValidMolCount += 1

        if not CalcStatus:
            MinimizationFailedCount += 1
            continue

        Mol = RDKitUtil.MolFromBase64EncodedMolString(EncodedMol)
        WriteStatus = WriteMolecule(Writer, Mol, MolCount, ConfID, Energy)

        if not WriteStatus:
            WriteFailedCount += 1

    return (MolCount, ValidMolCount, MinimizationFailedCount, WriteFailedCount)
def InitializeWorkerProcess(*EncodedArgs):
    """Initialize data for a worker process by decoding the Options and
    OptionsInfo globals shipped from the parent process."""

    global Options, OptionsInfo

    MiscUtil.PrintInfo("Starting process (PID: %s)..." % os.getpid())

    # Decode Options and OptionInfo...
    EncodedOptions, EncodedOptionsInfo = EncodedArgs[0], EncodedArgs[1]
    Options = MiscUtil.ObjectFromBase64EncodedString(EncodedOptions)
    OptionsInfo = MiscUtil.ObjectFromBase64EncodedString(EncodedOptionsInfo)
def WorkerProcess(EncodedMolInfo):
    """Minimize one base64-encoded molecule inside a worker process.

    Returns [MolIndex, EncodedMol-or-None, CalcStatus, ConfID, Energy].
    """

    MolIndex, EncodedMol = EncodedMolInfo

    if EncodedMol is None:
        return [MolIndex, None, False, None, None]

    Mol = RDKitUtil.MolFromBase64EncodedMolString(EncodedMol)
    if RDKitUtil.IsMolEmpty(Mol):
        if not OptionsInfo["QuietMode"]:
            MolName = RDKitUtil.GetMolName(Mol, (MolIndex + 1))
            MiscUtil.PrintWarning("Ignoring empty molecule: %s" % MolName)
        return [MolIndex, None, False, None, None]

    Mol, CalcStatus, ConfID, Energy = MinimizeMoleculeOrConformers(Mol, (MolIndex + 1))

    # Re-encode with properties preserved so the parent can write them out.
    PickleFlags = Chem.PropertyPickleOptions.MolProps | Chem.PropertyPickleOptions.PrivateProps
    EncodedOut = RDKitUtil.MolToBase64EncodedMolString(Mol, PropertyPickleFlags = PickleFlags)
    return [MolIndex, EncodedOut, CalcStatus, ConfID, Energy]
def MinimizeMoleculeOrConformers(Mol, MolNum = None):
    """Minimize the molecule's existing geometry, or generate and minimize
    conformers, and return a (Mol, CalcStatus, ConfID, Energy) tuple."""

    if OptionsInfo["SkipConformerGeneration"]:
        # No embedding requested: minimize as-is; there is no conformer ID...
        MinimizedMol, CalcStatus, Energy = MinimizeMolecule(Mol, MolNum)
        return (MinimizedMol, CalcStatus, None, Energy)

    return MinimizeConformers(Mol, MolNum)
def MinimizeMolecule(Mol, MolNum = None):
    """Minimize a molecule using the configured forcefield (UFF or MMFF).

    Arguments:
        Mol (object): RDKit molecule object.
        MolNum (int): Molecule number used in warning messages.

    Returns:
        (Mol, CalcStatus, Energy) tuple: CalcStatus is False when the
        forcefield raised an error; Energy is a "%.2f" formatted string when
        energy output is requested and retrieved, otherwise None.
    """

    if OptionsInfo["AddHydrogens"]:
        Mol = Chem.AddHs(Mol, addCoords = True)

    Status = 0
    try:
        if OptionsInfo["UseUFF"]:
            Status = AllChem.UFFOptimizeMolecule(Mol, maxIters = OptionsInfo["MaxIters"])
        elif OptionsInfo["UseMMFF"]:
            Status = AllChem.MMFFOptimizeMolecule(Mol, maxIters = OptionsInfo["MaxIters"], mmffVariant = OptionsInfo["MMFFVariant"])
        else:
            MiscUtil.PrintError("Minimization couldn't be performed: Specified forcefield, %s, is not supported" % OptionsInfo["ForceField"])
    except (ValueError, RuntimeError, Chem.rdchem.KekulizeException) as ErrMsg:
        if not OptionsInfo["QuietMode"]:
            MolName = RDKitUtil.GetMolName(Mol, MolNum)
            MiscUtil.PrintWarning("Minimization couldn't be performed for molecule %s:\n%s\n" % (MolName, ErrMsg))
        return (Mol, False, None)

    if Status != 0:
        # Non-zero status: the optimizer hit maxIters without converging...
        if not OptionsInfo["QuietMode"]:
            MolName = RDKitUtil.GetMolName(Mol, MolNum)
            MiscUtil.PrintWarning("Minimization failed to converge for molecule %s in %d steps. Try using higher value for \"--maxIters\" option...\n" % (MolName, OptionsInfo["MaxIters"]))

    Energy = None
    if OptionsInfo["EnergyOut"]:
        EnergyStatus, Energy = GetEnergy(Mol)
        if EnergyStatus:
            Energy = "%.2f" % Energy
        else:
            # Don't propagate a partially-computed energy value on failure...
            Energy = None
            if not OptionsInfo["QuietMode"]:
                MolName = RDKitUtil.GetMolName(Mol, MolNum)
                MiscUtil.PrintWarning("Failed to retrieve calculated energy for molecule %s. Try again after removing any salts or cleaning up the molecule...\n" % (MolName))

    if OptionsInfo["RemoveHydrogens"]:
        Mol = Chem.RemoveHs(Mol)

    return (Mol, True, Energy)
def MinimizeConformers(Mol, MolNum = None):
"Generate and minimize conformers for a molecule to get the lowest energy conformer."
if OptionsInfo["AddHydrogens"]:
Mol = Chem.AddHs(Mol)
ConfIDs = EmbedMolecule(Mol, MolNum)
if not len(ConfIDs):
if not OptionsInfo["QuietMode"]:
MolName = RDKitUtil.GetMolName(Mol, MolNum)
MiscUtil.PrintWarning("Minimization couldn't be performed for molecule %s: Embedding failed...\n" % MolName)
return (Mol, False, None, None)
CalcEnergyMap = {}
for ConfID in ConfIDs:
try:
if OptionsInfo["UseUFF"]:
Status = AllChem.UFFOptimizeMolecule(Mol, confId = ConfID, maxIters = OptionsInfo["MaxIters"])
elif OptionsInfo["UseMMFF"]:
Status = AllChem.MMFFOptimizeMolecule(Mol, confId = ConfID, maxIters = OptionsInfo["MaxIters"], mmffVariant = OptionsInfo["MMFFVariant"])
else:
MiscUtil.PrintError("Minimization couldn't be performed: Specified forcefield, %s, is not supported" % OptionsInfo["ForceField"])
except (ValueError, RuntimeError, Chem.rdchem.KekulizeException) as ErrMsg:
if not OptionsInfo["QuietMode"]:
MolName = RDKitUtil.GetMolName(Mol, MolNum)
MiscUtil.PrintWarning("Minimization couldn't be performed for molecule %s:\n%s\n" % (MolName, ErrMsg))
return (Mol, False, None, None)
EnergyStatus, Energy = GetEnergy(Mol, ConfID)
if not EnergyStatus:
if not OptionsInfo["QuietMode"]:
MolName = RDKitUtil.GetMolName(Mol, MolNum)
MiscUtil.PrintWarning("Failed to retrieve calculated energy for conformation number %d of molecule %s. Try again after removing any salts or cleaing up the molecule...\n" % (ConfID, MolName))
return (Mol, False, None, None)
if Status != 0:
if not OptionsInfo["QuietMode"]:
MolName = RDKitUtil.GetMolName(Mol, MolNum)
MiscUtil.PrintWarning("Minimization failed to converge for conformation number %d of molecule %s in %d steps. Try using higher value for \"--maxIters\" option...\n" % (ConfID, MolName, OptionsInfo["MaxIters"]))
CalcEnergyMap[ConfID] = Energy
SortedConfIDs = sorted(ConfIDs, key = lambda ConfID: CalcEnergyMap[ConfID])
MinEnergyConfID = | |
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx.node.services.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class ClusterManager(VapiInterface):
    """Stub for the NSX node cluster boot manager service."""
    _VAPI_SERVICE_ID = 'com.vmware.nsx.node.services.cluster_manager'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _ClusterManagerStub)
        self._VAPI_OPERATION_IDS = {}

    def get(self):
        """
        Read cluster boot manager service properties.

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceProperties`
        :return: com.vmware.nsx.model.NodeServiceProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('get', None)

    def restart(self):
        """
        Restart the cluster boot manager service.

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceStatusProperties`
        :return: com.vmware.nsx.model.NodeServiceStatusProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('restart', None)

    def start(self):
        """
        Start the cluster boot manager service.

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceStatusProperties`
        :return: com.vmware.nsx.model.NodeServiceStatusProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('start', None)

    def stop(self):
        """
        Stop the cluster boot manager service.

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceStatusProperties`
        :return: com.vmware.nsx.model.NodeServiceStatusProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('stop', None)
class CmInventory(VapiInterface):
    """Stub for the NSX node cm inventory (manager) service."""
    _VAPI_SERVICE_ID = 'com.vmware.nsx.node.services.cm_inventory'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _CmInventoryStub)
        self._VAPI_OPERATION_IDS = {}

    def get(self):
        """
        Read cm inventory service properties.

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceProperties`
        :return: com.vmware.nsx.model.NodeServiceProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('get', None)

    def restart(self):
        """
        Restart the manager service.

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceStatusProperties`
        :return: com.vmware.nsx.model.NodeServiceStatusProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('restart', None)

    def start(self):
        """
        Start the manager service.

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceStatusProperties`
        :return: com.vmware.nsx.model.NodeServiceStatusProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('start', None)

    def stop(self):
        """
        Stop the manager service.

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceStatusProperties`
        :return: com.vmware.nsx.model.NodeServiceStatusProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('stop', None)
class Controller(VapiInterface):
    """Stub for the NSX node controller service."""
    _VAPI_SERVICE_ID = 'com.vmware.nsx.node.services.controller'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _ControllerStub)
        self._VAPI_OPERATION_IDS = {}

    def get(self):
        """
        Read controller service properties.

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceProperties`
        :return: com.vmware.nsx.model.NodeServiceProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('get', None)

    def restart(self):
        """
        Restart the controller service.

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceStatusProperties`
        :return: com.vmware.nsx.model.NodeServiceStatusProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('restart', None)

    def start(self):
        """
        Start the controller service.

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceStatusProperties`
        :return: com.vmware.nsx.model.NodeServiceStatusProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('start', None)

    def stop(self):
        """
        Stop the controller service.

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceStatusProperties`
        :return: com.vmware.nsx.model.NodeServiceStatusProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('stop', None)
class Http(VapiInterface):
    """Stub for the NSX node http (API) service."""
    _VAPI_SERVICE_ID = 'com.vmware.nsx.node.services.http'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type  config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _HttpStub)
        self._VAPI_OPERATION_IDS = {}

    def applycertificate(self,
                         certificate_id,
                         ):
        """
        Apply a security certificate to the http service. The CERTIFICATE_ID
        references a certificate created with the /api/v1/trust-management
        APIs. Issuing this request restarts the http service so it can begin
        using the new certificate; on success the request therefore times out
        instead of returning a valid response.

        :type  certificate_id: :class:`str`
        :param certificate_id: Certificate ID (required)
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('applycertificate',
                            {
                                'certificate_id': certificate_id,
                            })

    def get(self):
        """
        Deprecated: read the http service configuration via the
        GET /api/v1/cluster/api-service API instead.

        :rtype: :class:`com.vmware.nsx.model_client.NodeHttpServiceProperties`
        :return: com.vmware.nsx.model.NodeHttpServiceProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('get', None)

    def restart(self):
        """
        Restart the http service.

        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('restart', None)

    def start(self):
        """
        Start the http service.

        :rtype: :class:`com.vmware.nsx.model_client.NodeServiceStatusProperties`
        :return: com.vmware.nsx.model.NodeServiceStatusProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('start', None)

    def stop(self):
        """
        Stop the http service.

        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('stop', None)

    def update(self,
               node_http_service_properties,
               ):
        """
        Deprecated: change the http service configuration via the
        PUT /api/v1/cluster/api-service API instead.

        :type  node_http_service_properties: :class:`com.vmware.nsx.model_client.NodeHttpServiceProperties`
        :param node_http_service_properties: (required)
        :rtype: :class:`com.vmware.nsx.model_client.NodeHttpServiceProperties`
        :return: com.vmware.nsx.model.NodeHttpServiceProperties
        :raise: one of the com.vmware.vapi.std.errors_client errors:
            ServiceUnavailable, InvalidRequest, InternalServerError,
            Unauthorized or NotFound.
        """
        return self._invoke('update',
                            {
                                'node_http_service_properties': node_http_service_properties,
                            })
class InstallUpgrade(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.node.services.install_upgrade'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _InstallUpgradeStub)
self._VAPI_OPERATION_IDS = {}
def get(self):
"""
Read NSX install-upgrade service properties
:rtype: :class:`com.vmware.nsx.model_client.NodeInstallUpgradeServiceProperties`
:return: com.vmware.nsx.model.NodeInstallUpgradeServiceProperties
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get', None)
def restart(self):
"""
Restart, start or stop the NSX install-upgrade service
:rtype: :class:`com.vmware.nsx.model_client.NodeServiceStatusProperties`
:return: com.vmware.nsx.model.NodeServiceStatusProperties
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.ConcurrentChange`
Conflict
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('restart', None)
def start(self):
"""
Restart, start or stop the NSX install-upgrade service
:rtype: :class:`com.vmware.nsx.model_client.NodeServiceStatusProperties`
:return: com.vmware.nsx.model.NodeServiceStatusProperties
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server | |
import torch
import utility
import logging
import os
import torch.nn as nn
from torch import Tensor
from typing import Type, Any, Callable, Union, List, Optional
from torch.utils.data import DataLoader
from models import CoarseNet, RefineNet
from argparse import Namespace
from checkpoint import CheckPoint
import torch.nn.functional as F
from torch.optim.lr_scheduler import StepLR
from torchvision.utils import save_image
class Trainer:
    def __init__(self, model: Union[CoarseNet.CoarseNet, RefineNet.RefineNet],
                 opt: Namespace, optim_state: Optional[dict]) -> None:
        """Set up the model, warping/data modules, losses, optimizer and scheduler.

        Args:
            model: CoarseNet or RefineNet instance to train (moved to GPU here).
            opt: Parsed command-line options.
            optim_state: Optimizer state to resume from, or None for defaults.
        """
        print('\n\n --> Initializing Trainer')
        self.opt = opt
        self.model = model.cuda()  # training assumes a CUDA device is available
        self.warping_module = self.setup_warping_module()
        self.multi_scale_data = self.setup_ms_data_module()
        self.optim_state = self.setup_solver(optim_state)
        self.setup_criterions()
        # NOTE(review): weight_decay=0.01 is applied on top of whatever is in
        # optim_state -- confirm it is intended for resumed runs as well.
        self.optimizer = torch.optim.Adam(self.model.parameters(), **(self.optim_state), \
             weight_decay=0.01)
        self.scheduler = StepLR(self.optimizer, step_size=5, gamma=0.5)
        print('\n\n --> Total number of parameters in ETOM-Net: ' + str(sum(p.numel() for p in self.model.parameters())))
        # Per-batch tensors populated by setup_inputs() during train/test.
        self.input_image = None
        self.ref_images = None
        self.tar_images = None
        self.rhos = None
        self.masks = None
        self.flows = None
def setup_ms_data_module(self) -> utility.CreateMultiScaleData:
print('[Multi Scale] Setting up multi scale data module')
ms_data_module = utility.CreateMultiScaleData(self.opt.ms_num)
return ms_data_module
def setup_warping_module(self) -> utility.CreateMultiScaleWarping:
print('[Multi Scale] Setting up multi scale warping')
warping_module = utility.CreateMultiScaleWarping(self.opt.ms_num)
return warping_module
def setup_criterions(self) -> None:
print('\n\n --> Setting up criterion')
print('[Flow Loss] Setting up EPELoss for flow')
self.flow_criterion = utility.EPELoss
print('[Rec Loss] Setting up MSELoss for reconstructed image')
self.rec_criterion = nn.MSELoss
print('[Mask Loss] Setting up CrossENtropyLoss for mask')
self.mask_criterion = nn.CrossEntropyLoss
print('[Rho Loss] Setting up MSELoss for rho')
self.rho_criterion = nn.MSELoss
    def setup_solver(self, in_optim_state: Optional[dict]) -> Optional[dict]:
        """Build the optimizer keyword arguments.

        Args:
            in_optim_state: Optimizer state to reuse (e.g. from a checkpoint);
                falsy values are replaced by defaults derived from self.opt.

        Returns:
            Dict of Adam kwargs ('lr', 'betas'), or None for unknown solvers.
        """
        optim_state = None
        if self.opt.solver == 'ADAM':
            print('[Solver] Using Adam solver')
            # Refinement stage uses its own learning rate (lr_r).
            optim_state = in_optim_state or {
                'lr': self.opt.lr_r if self.opt.refine else self.opt.lr,
                'betas': (self.opt.beta_1, self.opt.beta_2)
            }
        else:
            # NOTE(review): returning None here makes __init__ fail when it
            # unpacks **optim_state -- confirm unsupported solvers should abort.
            logging.warning('Unknown optimization method')
        return optim_state
    def train(self, epoch: int, dataloader: DataLoader, split: str) -> float:
        """Run one training epoch with gradient accumulation.

        Two modes: refine (single-scale flow+mask losses) and coarse
        (per-scale mask/rho/flow/reconstruction losses, weighted so coarser
        scales contribute less). Periodically logs losses and saves result
        images; steps the LR scheduler at the end of the epoch.

        Args:
            epoch: Zero-based epoch index (displayed as epoch+1).
            dataloader: Training data loader.
            split: Split name used for saved results.

        NOTE(review): annotated -> float but returns the value of
        utility.build_loss_string -- confirm whether that is a str.
        """
        gradient_accumulations = self.opt.ga
        num_batches = len(dataloader)
        print('\n====================')
        print(self.optim_state)
        print(f'Training epoch # {epoch+1}, totaling mini batches {num_batches}')
        print('====================\n')
        self.model.train()
        loss_iter = {} # loss every n iterations
        loss_epoch = {} # loss of the entire epoch
        eps = 1e-7
        # Zero gradients
        self.optimizer.zero_grad()
        if self.opt.refine:
            # --- Refinement mode: single-scale flow + mask losses ---
            loss_iter['mask'] = 0
            loss_iter['flow'] = 0
            for iter, sample in enumerate(dataloader):
                input = self.setup_inputs(sample)
                torch.cuda.empty_cache()
                torch.autograd.set_detect_anomaly(True)
                output = self.model.forward(input)
                pred_images = self.single_flow_warping(output) # warp input image with flow
                flow_loss = self.opt.r_flow_w * self.flow_criterion()(output[0], self.flows, \
                    self.masks.unsqueeze(1), self.rhos.unsqueeze(1))
                # eps keeps the log inside CrossEntropyLoss away from zero.
                mask_loss = self.opt.r_mask_w * self.mask_criterion()(output[1] + eps, self.masks.squeeze(1).long())
                loss = flow_loss + mask_loss
                loss_iter['mask'] += mask_loss.item()
                loss_iter['flow'] += flow_loss.item()
                # Perform a backward pass
                (loss / gradient_accumulations).backward()
                # Update the weights
                if (iter + 1) % gradient_accumulations == 0:
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                if (iter+1) % self.opt.train_display == 0:
                    # Log the accumulated losses and reset the window counters.
                    loss_epoch[iter] = self.display(epoch+1, iter+1, num_batches, loss_iter, split)
                    loss_iter['mask'] = 0
                    loss_iter['flow'] = 0
                if (iter+1) % self.opt.train_save == 0:
                    self.save_results(epoch+1, iter+1, output, pred_images, split, 0)
        else:
            # --- Coarse mode: multi-scale losses, coarser scales down-weighted ---
            for i in range(self.opt.ms_num):
                loss_iter[f'Scale {i} mask'] = 0
                loss_iter[f'Scale {i} rho'] = 0
                loss_iter[f'Scale {i} flow'] = 0
                loss_iter[f'Scale {i} rec'] = 0
            for iter, sample in enumerate(dataloader):
                input = self.setup_inputs(sample)
                torch.cuda.empty_cache()
                torch.autograd.set_detect_anomaly(True)
                output = self.model.forward(input)
                pred_images = self.flow_warping(output) # warp input image with flow
                loss = None
                for i in range(self.opt.ms_num):
                    # Each scale's losses are scaled by 1 / 2^(ms_num - i - 1).
                    mask_loss = self.opt.mask_w * self.mask_criterion()(output[i][1] + eps, \
                        self.multi_masks[i].squeeze(1).long()) * (1 / 2 ** (self.opt.ms_num - i - 1))
                    rho_loss = self.opt.rho_w * self.rho_criterion()(output[i][2], \
                        self.multi_rhos[i]) * (1 / 2 ** (self.opt.ms_num - i - 1))
                    flow_loss = self.opt.flow_w * self.flow_criterion()(output[i][0], \
                        self.multi_flows[i], self.multi_masks[i], self.multi_rhos[i]) * (1 / 2 ** (self.opt.ms_num - i - 1))
                    mask = utility.get_mask(output[i][1]).expand(output[i][1].size(0), \
                        3, output[i][1].size(2), output[i][1].size(3))
                    final_pred = utility.get_final_pred(self.multi_ref_images[i], \
                        pred_images[i], mask, output[i][2])
                    rec_loss = self.opt.img_w * self.rec_criterion()(final_pred, \
                        self.multi_tar_images[i]) * (1 / 2 ** (self.opt.ms_num - i - 1))
                    if i == 0:
                        loss = mask_loss + rho_loss + flow_loss + rec_loss
                    else:
                        loss += mask_loss + rho_loss + flow_loss + rec_loss
                    loss_iter[f'Scale {i} mask'] += mask_loss.item()
                    loss_iter[f'Scale {i} rho'] += rho_loss.item()
                    loss_iter[f'Scale {i} flow'] += flow_loss.item()
                    loss_iter[f'Scale {i} rec'] += rec_loss.item()
                # Perform a backward pass
                (loss / gradient_accumulations).backward()
                # Update the weights
                if (iter + 1) % gradient_accumulations == 0:
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                if (iter+1) % self.opt.train_display == 0:
                    # Log the accumulated losses and reset the window counters.
                    loss_epoch[iter] = self.display(epoch+1, iter+1, num_batches, loss_iter, split)
                    for i in range(self.opt.ms_num):
                        loss_iter[f'Scale {i} mask'] = 0
                        loss_iter[f'Scale {i} rho'] = 0
                        loss_iter[f'Scale {i} flow'] = 0
                        loss_iter[f'Scale {i} rec'] = 0
                if (iter+1) % self.opt.train_save == 0:
                    self.save_ms_results(epoch+1, iter+1, output, pred_images, split, 0)
        average_loss = utility.build_loss_string(utility.dict_of_dict_average(loss_epoch))
        print(f'\n\n --> Epoch: [{epoch+1}] Loss summary: \n{average_loss}')
        self.scheduler.step()
        # Keep the printed/checkpointed optim_state in sync with the scheduler.
        self.optim_state['lr'] = self.optimizer.param_groups[0]['lr']
        return average_loss
def get_saving_name(self, log_dir: str, split: str, epoch: int, iter: int, id: int) -> str:
f_path = f'{log_dir}/{split}/Images/'
f_names = f'epoch:{epoch}_iter:{iter}_id:{id}'
return os.path.join(f_path, f_names + '.png')
def save_images(self, pred_images: Tensor, output: List[Tensor], count: int) -> int:
for i in range(pred_images.size()[0]):
print(count)
os.makedirs(f'results/{count}')
mask = torch.squeeze(utility.get_mask(output[1][i].unsqueeze(0))).expand(3, output[1].size(2), output[1].size(3))
rho = output[2][i].repeat(3, 1, 1)
final_img = utility.get_final_pred(self.ref_images[i], pred_images[i], mask, rho)
save_image(final_img, f'results/{count}/in_rec.png')
save_image(mask.float(), f'results/{count}/mask.png')
save_image(rho, f'results/{count}/rho.png')
utility.save_flow(f'results/{count}/flow.flo', output[0][i])
save_image(self.ref_images[i], f'results/{count}/bg.png')
save_image(self.masks[i], f'results/{count}/mask_gt.png')
save_image(self.rhos[i], f'results/{count}/rho_gt.png')
save_image(self.input_image[i], f'results/{count}/input.png')
save_image(self.tar_images[i], f'results/{count}/tar.png')
utility.save_flow(f'results/{count}/flow_gt.flo', self.flows[i][0:2, :, :])
save_image(utility.flow_to_color(torch.mul(output[0][i], self.masks[i])), f'results/{count}/fcolor.png')
save_image(utility.flow_to_color(self.flows[i]), f'results/{count}/fcolor_gt.png')
count += 1
return count
def get_predicts(self, id: int, output: List[Tensor], pred_img: Tensor, m_scale: int) -> List[Tensor]:
pred = []
if m_scale != None:
gt_color_flow = utility.flow_to_color(self.multi_flows[m_scale][id])
else:
gt_color_flow = utility.flow_to_color(self.flows[id])
pred.append(gt_color_flow)
color_flow = utility.flow_to_color(output[0][id])
pred.append(color_flow)
mask = torch.squeeze(utility.get_mask(output[1][id].unsqueeze(0))).expand(3, output[1].size(2), output[1].size(3))
pred.append(mask)
rho = output[2][id].repeat(3, 1, 1)
pred.append(rho)
if m_scale != None:
final_img = utility.get_final_pred(self.multi_ref_images[m_scale][id], pred_img[id], mask, rho)
first_img = self.multi_tar_images[m_scale][id]
else:
final_img = utility.get_final_pred(self.ref_images[id], pred_img[id], mask, rho)
first_img = self.tar_images[id]
pred.insert(0, first_img)
pred.insert(1, final_img)
return pred
def get_first_row(self, id: int) -> List[Union[bool, Tensor]]:
first = []
first.append(self.ref_images[id])
first.append(self.tar_images[id])
first.append(False)
first.append(False)
first.append(self.masks[id])
first.append(self.rhos[id])
return first
def save_ms_results(
self,
epoch: int,
iter: int,
output: List[List[Tensor]],
multi_pred_img: List[List[Tensor]],
split: str,
id: int
) -> None:
id = id or 0
scales = self.opt.ms_num
results = []
first_row = self.get_first_row(id)
for val in first_row:
results.append(val)
for i in range(scales-1, -1, -1):
sub_pred = self.get_predicts(id, output[i], multi_pred_img[i], i)
for val in sub_pred:
results.append(val)
save_name = self.get_saving_name(self.opt.log_dir, split, epoch, iter, id)
utility.save_compact_results(save_name, results, 6)
print('\n\n --> Flow magnitude: Max {}, Min {}, Mean {}'.format(
torch.max(output[scales-1][0][id]), torch.min(output[scales-1][0][id]),
torch.mean(torch.abs(output[scales-1][0][id]))))
def save_results(
self,
epoch: int,
iter: int,
output: List[Tensor],
pred_img: Tensor,
split: str,
id: int
) -> None:
id = id or 0
results = []
first_row = self.get_first_row(id)
for val in first_row:
results.append(val)
sub_pred = self.get_predicts(id, output, pred_img, None)
for val in sub_pred:
results.append(val)
save_name = self.get_saving_name(self.opt.log_dir, split, epoch, iter, id)
utility.save_compact_results(save_name, results, 6)
def flow_warping(self, output: List[List[Tensor]]) -> List[Tensor]:
flows = []
for i in range(self.opt.ms_num):
flows.append(output[i][0])
pred_images= self.warping_module([self.multi_ref_images, flows])
return pred_images
def single_flow_warping(self, output: List[Tensor]) -> Tensor:
pred_images= utility.create_single_warping([self.ref_images, output[0]])
return pred_images
def test(self, epoch: int, dataloader: DataLoader, split: str) -> float:
num_batches = len(dataloader)
loss_iter = {}
loss_epoch = {}
print(f'\n\n===== Testing after {epoch+1} epochs =====')
self.model.eval()
rec_err = 0
rho_err = 0
flow_err = 0
mask_err = 0
size = 400
def iou(pred, tar):
intersection = torch.logical_and(tar, pred)
union = torch.logical_or(tar, pred)
iou_score = torch.true_divide(torch.sum(intersection), torch.sum(union))
return iou_score
def epe(mask_gt, flow_gt, flow):
mask_gt = mask_gt.expand_as(flow_gt)
flow = flow * mask_gt
flow_gt = flow_gt * mask_gt
return torch.norm(flow_gt-flow, dim=1).mean() / 100
if self.opt.refine:
loss_iter['mask'] = 0
loss_iter['flow'] = 0
count = 1
for iter, sample in enumerate(dataloader):
with torch.no_grad():
input = self.setup_inputs(sample)
torch.cuda.empty_cache()
torch.autograd.set_detect_anomaly(True)
output = self.model.forward(input)
pred_images = self.single_flow_warping(output) # warp input image with flow
if self.opt.save_images:
count = self.save_images(pred_images, output, count)
for i in range(output[0].size(0)):
mask = torch.squeeze(utility.get_mask(output[1][i].unsqueeze(0))).expand(3, \
output[1][i].size(1), output[1][i].size(2))
final_pred = utility.get_final_pred(self.ref_images[i], \
pred_images[i], mask, output[2][i])
rec_err += 100 * F.mse_loss(final_pred, self.tar_images[i])
rho_err += 100 * F.mse_loss(output[2][i], self.rhos[i])
flow_err += epe(self.masks[i], self.flows[i][0:2, :, :] * \
self.rhos[i], output[0][i] * self.rhos[i])
mask_err += iou(mask, self.masks[i])
flow_loss = self.opt.r_flow_w * self.flow_criterion()(output[0], self.flows, \
self.masks.unsqueeze(1), self.rhos.unsqueeze(1))
mask_loss = self.opt.r_mask_w * self.mask_criterion()(output[1], self.masks.squeeze(1).long())
loss_iter['mask'] += mask_loss.item()
loss_iter['flow'] += flow_loss.item()
if (iter+1) % self.opt.val_display == 0:
loss_epoch[iter] = self.display(epoch+1, iter+1, num_batches, loss_iter, split)
loss_iter['mask'] = 0
loss_iter['flow'] = 0
if (iter+1) % self.opt.val_save == 0:
self.save_results(epoch+1, iter+1, output, pred_images, | |
import numpy as np
import os
from abc import ABC, abstractmethod
import matplotlib.pyplot as plt
from argparse import ArgumentParser
import astropy.constants as const
import astropy.units as units
# NOTE: the relative imports below fail when this module is run directly as a
# script, so the absolute imports further down are used instead.
# from . import utils
# from . import transformation as transform
# from . import numba_transformation as numba_transform
import utils
from transformation import TransformableImage
import transformation as transform
import numba_transformation as numba_transform
from parameters import pars2theta
import pudb
# Command-line flags used when this module is executed directly
# (--test runs the module's self-tests, --show displays the test plots).
parser = ArgumentParser()

parser.add_argument('--show', action='store_true', default=False,
                    help='Set to show test plots')
parser.add_argument('--test', action='store_true', default=False,
                    help='Set to run tests')
class VelocityModel(object):
    '''
    Default velocity model. Can subclass to
    define your own model.

    A model is fully specified by the parameters listed in
    `_model_params`; `__init__` validates the passed dict against them.
    '''

    name = 'centered'

    _model_params = ['v0', 'vcirc', 'rscale', 'sini',
                     'theta_int', 'g1', 'g2', 'r_unit', 'v_unit']

    def __init__(self, model_pars):
        '''
        model_pars: dict
            Mapping from parameter name to value; must contain exactly
            the parameters listed in `_model_params`.

        Raises TypeError if model_pars is not a dict, and ValueError /
        AttributeError if the parameters are invalid (see _check_model_pars).
        '''
        if not isinstance(model_pars, dict):
            t = type(model_pars)
            raise TypeError(f'model_pars must be a dict, not a {t}!')

        self.pars = model_pars

        self._check_model_pars()

        # Needed if using numba for transformations
        self._build_pars_array()

        return

    def _check_model_pars(self):
        '''
        Validate `self.pars`: no None values, no unknown names, all required
        parameters present, and unit parameters are astropy unit objects.
        '''
        mname = self.name

        # Make sure there are no undefined pars
        for name, val in self.pars.items():
            if val is None:
                # BUG FIX: the original f-string referenced an undefined
                # name `param`, raising NameError instead of this ValueError.
                raise ValueError(f'{name} must be set!')
            if name not in self._model_params:
                raise AttributeError(f'{name} is not a valid model ' +
                                     f'parameter for {mname} velocity model!')

        # Make sure all req pars are present
        for par in self._model_params:
            if par not in self.pars:
                raise AttributeError(f'{par} must be in passed parameters ' +
                                     f'to instantiate {mname} velocity model!')

        # Make sure units are astropy.unit objects (single isinstance with a
        # tuple replaces the original chained negated checks; same classes)
        for u in [self.pars['r_unit'], self.pars['v_unit']]:
            if not isinstance(u, (units.Unit,
                                  units.CompositeUnit,
                                  units.IrreducibleUnit)):
                raise TypeError('unit params must be an astropy unit class!')

        return

    def _build_pars_array(self):
        '''
        Numba requires a pars array instead of a more flexible dict.
        Stores the result on `self.pars_arr`.
        '''
        self.pars_arr = pars2theta(self.pars)
        return

    def get_transform_pars(self):
        '''Return only the shear/orientation subset of the parameters.'''
        pars = {}
        for name in ['g1', 'g2', 'sini', 'theta_int']:
            pars[name] = self.pars[name]

        return pars
class VelocityMap(TransformableImage):
'''
Base class for a velocity map
It is helpful to compute various things in different coordinate
planes. The available ones are defined as follows:
disk: Face-on view of the galactic disk, no inclination angle.
This will be cylindrically symmetric for most models
gal: Galaxy major/minor axis frame with inclination angle same as
source plane. Will now be ~ellipsoidal for sini!=0
source: View from the lensing source plane, rotated version of gal
plane with theta = theta_intrinsic
obs: Observed image plane. Sheared version of source plane
'''
def __init__(self, model_name, model_pars):
    '''
    model_name: str
        The name of the velocity model; must be registered.
    model_pars: dict
        key:val pairs for the model parameters.
    '''
    self.model_name = model_name
    self.model = build_model(model_name, model_pars)

    # The base class only needs the transformation subset of the parameters
    super(VelocityMap, self).__init__(self.model.get_transform_pars())

    return
def __call__(self, plane, x, y, speed=False, normalized=False,
             use_numba=False):
    '''
    Evaluate the velocity map at position (x,y) in the given plane. Note
    that the position must be defined in that same plane.

    speed: bool
        Set to True to return speed map instead of velocity
    normalized: bool
        Set to True to return velocity / c
    use_numba: bool
        Set to True to use numba versions of transformations
    '''
    # Base-class call handles the common (plane, x, y) bookkeeping
    super(VelocityMap, self).__call__(
        plane, x, y, use_numba=use_numba
        )

    # Normalization factor: 1/c (in the model's velocity unit) or identity
    if normalized is True:
        norm = 1. / const.c.to(self.model.pars['v_unit']).value
    else:
        norm = 1.

    vmap = self._eval_map_in_plane(
        plane, x, y, speed=speed, use_numba=use_numba
        )

    return norm * vmap
def _eval_map_in_plane(self, plane, x, y, speed=False, use_numba=False):
    '''
    Evaluate the map in the requested plane via the static methods defined
    in transformation.py (used to speed up these very common calls).
    The input (x,y) position is defined in `plane`.

    speed: bool
        Set to True to return speed map instead of velocity
    use_numba: bool
        Set to True to use numba versions of transformations
    '''
    # numba kernels need a flat parameter array instead of a dict
    pars = self.model.pars_arr if use_numba is True else self.model.pars

    evaluator = self._get_plane_eval_func(plane, use_numba=use_numba)

    return evaluator(pars, x, y, speed=speed)
@classmethod
def _eval_in_obs_plane(cls, pars, x, y, **kwargs):
    '''
    pars: dict
        Holds the model & transformation parameters
    x,y: np.ndarray
        The position coordinates in the obs plane

    kwargs holds any additional params that might be needed
    in subclass evaluations, such as using speed instead of
    velocity
    '''
    # Base-class evaluation, without any systematic velocity offset
    base_vmap = super(VelocityMap, cls)._eval_in_obs_plane(
        pars, x, y, **kwargs)

    # Add the systematic (bulk) velocity on top
    return pars['v0'] + base_vmap
@classmethod
def _eval_in_gal_plane(cls, pars, x, y, **kwargs):
    '''
    pars: dict
        Holds the model & transformation parameters
    x,y: np.ndarray
        The position coordinates in the gal plane

    kwargs holds any additional params that might be needed
    in subclass evaluations, such as using speed instead of
    velocity
    '''
    xp, yp = transform._gal2disk(pars, x, y)

    # The speed map is needed in either case; remember whether the caller
    # actually asked for speed or for line-of-sight velocity
    want_speed = kwargs['speed']
    kwargs['speed'] = True

    speed_map = cls._eval_in_disk_plane(pars, xp, yp, **kwargs)

    if want_speed is True:
        return speed_map

    # Euler-angle factors that handle the vector aspect of the velocity
    # transform: project circular speed onto the line of sight
    sini = pars['sini']
    phi = np.arctan2(yp, xp)

    return sini * np.cos(phi) * speed_map
@classmethod
def _eval_in_disk_plane(cls, pars, x, y, **kwargs):
    '''
    Evaluate the model at position arrays (x,y) in the disk plane, where
    positions are defined relative to the galaxy center.

    pars is a dict with model parameters.

    Evaluates the speed map instead of velocity if kwargs['speed'] is True.
    '''
    if kwargs['speed'] is False:
        # Face-on disk: no line-of-sight velocity component anywhere
        return np.zeros(np.shape(x))

    # Arctan rotation curve, asymptoting to vcirc at large radii
    r = np.sqrt(x**2 + y**2)

    return (2. / np.pi) * pars['vcirc'] * np.arctan(r / pars['rscale'])
def plot(self, plane, x=None, y=None, rmax=None, show=True, close=True,
         title=None, size=(9,8), center=True, outfile=None, speed=False,
         normalized=False):
    '''
    Plot speed or velocity map in given plane. Will create a (x,y)
    grid based off of rmax centered at 0 in the chosen plane unless
    (x,y) are passed.

    plane: str
        One of self._planes; raises ValueError otherwise.
    x, y: array-like or None
        Optional explicit positions (must be passed together, same shape).
    rmax: float or None
        Half-width of the auto-generated grid; defaults to 5 * rscale.
    show / close / outfile / size / center / title: plotting options.
    speed / normalized: forwarded to map evaluation (see __call__).

    Can normalize to v/c if desired.

    Improvements over the original: removed dead locals (`dr`, `r` were
    computed but never used) and a duplicate `runit` assignment.
    '''
    if plane not in self._planes:
        raise ValueError(f'{plane} not a valid image plane to plot!')

    pars = self.model.pars

    if rmax is None:
        rmax = 5. * pars['rscale']

    if (x is None) or (y is None):
        if (x is not None) or (y is not None):
            raise ValueError('Can only pass both (x,y), not just one')

        # make square position grid in given plane
        Nr = 100
        dx = (2*rmax) / (2*Nr+1)
        xarr = np.arange(-rmax, rmax+dx, dx)
        x, y = np.meshgrid(xarr, xarr)

    else:
        # parse given positions
        assert type(x) == type(y)

        if not isinstance(x, np.ndarray):
            x = np.array(x)
            y = np.array(y)

        if x.shape != y.shape:
            raise ValueError('x and y must have the same shape!')

    V = self(plane, x, y, speed=speed, normalized=normalized)

    runit = pars['r_unit']
    vunit = pars['v_unit']

    cstr = ' / c' if normalized is True else ''
    mtype = 'Speed' if speed is True else 'Velocity'

    plt.pcolormesh(x, y, V)
    plt.colorbar(label=f'{vunit}{cstr}')
    plt.xlabel(f'{runit}')
    plt.ylabel(f'{runit}')

    if center is True:
        # mark the galaxy center
        plt.plot(0, 0, 'r', ms=10, markeredgewidth=2, marker='x')

    if title is not None:
        plt.title(title)
    else:
        rscale = pars['rscale']
        plt.title(f'{mtype} map in {plane} plane\n' +
                  f'r_0 = {rscale} {runit}')

    if size is not None:
        plt.gcf().set_size_inches(size)

    if outfile is not None:
        plt.savefig(outfile, bbox_inches='tight', dpi=300)

    if show is True:
        plt.show()

    if close is True:
        plt.close()

    return
def plot_all_planes(self, plot_kwargs={}, show=True, close=True, outfile=None,
size=(9, 8), speed=False, normalized=False, center=True):
if size not in plot_kwargs:
plot_kwargs['size'] = size
# Overwrite defaults if present
if show in plot_kwargs:
show = plot_kwargs['show']
del plot_kwargs['show']
if close in plot_kwargs:
close = plot_kwargs['close']
del plot_kwargs['close']
if outfile in plot_kwargs:
outfile = plot_kwargs['outfile']
del plot_kwargs['outfile']
pars = self.model.pars
if 'rmax' in plot_kwargs:
rmax = plot_kwargs['rmax']
else:
rmax = 5. * pars['rscale']
plot_kwargs['rmax'] = rmax
# Create square grid centered at source for all planes
Nr = 100
dr = rmax / (Nr+1)
r = np.arange(0, rmax+dr, dr)
dx = (2*rmax) / (2*Nr+1)
x = np.arange(-rmax, rmax+dx, dx)
X, Y = np.meshgrid(x,x)
# Figure out subplot grid
Nplanes = len(self._planes)
sqrt = np.sqrt(Nplanes)
if sqrt % 1 == 0:
N = sqrt
else:
N = int(sqrt+1)
k = 1
for plane in self._planes:
plt.subplot(N,N,k)
# X, Y = transform.transform_coords(Xdisk, Ydisk, 'disk', plane, pars)
# X,Y = | |
from typing import Union, Any, List, Set, Dict, Tuple, Optional
import json
import math
from prosto.utils import *
from prosto.resolve import *
import prosto as pr # To resolve circular imports
from prosto.Prosto import *
from prosto.Table import *
from prosto.Column import *
from prosto.Operation import *
class ColumnOperation(Operation):
"""The class represents one column operation."""
def __init__(self, prosto, definition):
    """Store the owning Prosto context and this operation's definition dict."""
    super().__init__(prosto, definition)
def get_dependencies_names(self) -> dict:
    """
    Get all dependencies represented by names like table names and column names as they are specified in the definition.
    These names are not resolved and might not be in the list of schema objects yet (like attributes or column paths).

    The operation kind is matched by prefix ("comp", "calc", "link", "merg",
    "roll", "aggr", "disc"); an unrecognized prefix raises ValueError.

    :return: dict with table names as keys and lists of columns as values (empty in the case of only table as a dependency)
    """
    definition = self.definition
    operation = definition.get("operation", "UNKNOWN")
    output_table_name = definition.get("table")
    outputs = self.get_outputs()
    # Only the first declared output column is used for dependency lookup
    output_column_name = outputs[0]
    columns = self.get_columns()
    dependencies = {}
    # All derived columns depend on their table
    dependencies[output_table_name] = []
    #
    # Collect operation-specific dependencies
    #
    if operation.lower().startswith("comp"):
        # Input column objects for which we need to find definitions
        dependencies[output_table_name].extend(columns)
    elif operation.lower().startswith("calc"):
        # Input column objects for which we need to find definitions
        dependencies[output_table_name].extend(columns)
    elif operation.lower().startswith("link"):
        # Input (fact table) columns or column paths have to be evaluated
        dependencies[output_table_name].extend(columns)
        # Target (linked) table has to be populated
        linked_table_name = self.prosto.get_type_table(output_table_name, output_column_name)
        dependencies[linked_table_name] = []
        # Target columns have to be evaluated in order to contain values. However, they are supposed to be attributes and hence they will be set during population
        linked_columns = definition.get("linked_columns", [])
        dependencies[linked_table_name].extend(linked_columns)
    elif operation.lower().startswith("merg"):
        # Split each column path into its segments and resolve the table each
        # segment belongs to; each table then depends on its segment column.
        segments = list()
        for column_name in columns:
            column_path = column_name.split(pr.Prosto.column_path_separator)
            segments.extend(column_path)
        type_table_names = self.prosto.get_type_tables(output_table_name, segments)
        type_table_names.insert(0, output_table_name)
        # NOTE(review): if the same table appears for two segments, the later
        # assignment overwrites the earlier one -- confirm this is intended
        for i in range(len(segments)):
            dependencies[type_table_names[i]] = [segments[i]]
    elif operation.lower().startswith("roll"):
        # Columns to be aggregated
        dependencies[output_table_name].extend(columns)
        # Link (group) column
        link_column_name = definition.get("link")
        if link_column_name:
            dependencies[output_table_name].append(link_column_name)
        # Linked table (if any) has to be populated. (Yet, it will be added to dependency by the link column.)
        if link_column_name:
            linked_table_name = self.prosto.get_type_table(output_table_name, link_column_name)
            if linked_table_name:
                dependencies[linked_table_name] = []
    elif operation.lower().startswith("aggr"):
        # The fact table has to be already populated
        tables = self.get_tables()
        source_table_name = tables[0]
        dependencies[source_table_name] = []
        # Measure columns of the fact table
        dependencies[source_table_name].extend(columns)
        # Link (group) column
        link_column_name = definition.get("link")
        # NOTE(review): unlike the "roll" branch there is no None-check here;
        # a missing "link" key would append None -- confirm intended
        dependencies[source_table_name].append(link_column_name)
    elif operation.lower().startswith("disc"):
        # Input column objects for which we need to find definitions
        dependencies[output_table_name].extend(columns)
    else:
        raise ValueError("Unknown operation type '{}' in the definition of column '{}'.".format(operation, self.id))
    return dependencies
def evaluate(self) -> None:
    """
    Execute this column operation and evaluate the output column(s).

    A generic sequence of operations:
    - prepare the input slice by selecting input columns and input rows
    - convert the selected slice to the necessary data format expected by UDF
    - process input data by calling UDF or operation and returning some result
    - convert the result to our standard format
    - impose the result to our current data by overwriting the output columns and output values

    Notes:
    - There are two types of definitions: relying on UDFs (calc, roll, aggr), and not using UDFs (link, merge)
    - There are UDFs of two types: value or row based (returning a value or row), and column or table based (returning a whole column or table)
    """
    definition = self.definition
    operation = definition.get("operation", "UNKNOWN")
    input_length = definition.get("input_length", "UNKNOWN")  # value for value-based functions or column for column-based functions
    data_type = definition.get("data_type", "Series")
    model = definition.get("model")
    output_table_name = definition.get("table")
    output_table = self.prosto.get_table(output_table_name)
    outputs = self.get_outputs()
    output_column_name = outputs[0]
    # NOTE(review): output_column is looked up but never used in this
    # method -- confirm whether the lookup has a needed side effect
    output_column = self.prosto.get_column(output_table_name, output_column_name)
    data = output_table.get_df()
    #
    # Operations without UDF
    #
    # Link columns use their own definition format different from computational (functional) definitions
    if operation.lower().startswith("link"):
        out = self._evaluate_link()
        self._impose_output_columns(out)
        return
    # Compose columns use their own definition format different from computational (functional) definitions
    if operation.lower().startswith("merg"):
        out = self._evaluate_merge()
        self._impose_output_columns(out)
        return
    # Discretize column using some logic of partitioning represented in the model
    if operation.lower().startswith("disc"):
        # Determine input columns
        columns = self.get_columns()
        columns = get_columns(columns, data)
        if columns is None:
            raise ValueError("Error reading column list. Skip column definition.")
        # Validation: check if all explicitly specified columns available
        if not all_columns_exist(columns, data):
            raise ValueError("Not all input columns available. Skip column definition.".format())
        # Slice input according to the change status
        # NOTE(review): the local name 'range' shadows the builtin in the
        # rest of this method; it holds the output row-id range
        if self.prosto.incremental:
            data = output_table.data.get_added_slice(columns)
            range = output_table.data.added_range
        else:
            data = output_table.data.get_full_slice(columns)
            range = output_table.data.id_range()
        out = self._evaluate_discretize(data, model)
        self._impose_output_columns(out, range)
        return
    #
    # Operations with UDF
    #
    func_name = definition.get("function")
    if not func_name:
        raise ValueError("Column function '{}' is not specified. Skip column definition.".format(func_name))
    func = resolve_full_name(func_name)
    if not func:
        raise ValueError("Cannot resolve user-defined function '{}'. Skip column definition.".format(func_name))
    if operation.lower().startswith("comp") or operation.lower().startswith("calc"):
        # Determine input columns
        columns = self.get_columns()
        columns = get_columns(columns, data)
        if columns is None:
            raise ValueError("Error reading column list. Skip column definition.")
        # Validation: check if all explicitly specified columns available
        if not all_columns_exist(columns, data):
            raise ValueError("Not all input columns available. Skip column definition.".format())
        # Slice input according to the change status
        if self.prosto.incremental:
            data = output_table.data.get_added_slice(columns)
            range = output_table.data.added_range
        else:
            data = output_table.data.get_full_slice(columns)
            range = output_table.data.id_range()
        if operation.lower().startswith("comp"):  # Equivalently: input_length == "column"
            out = self._evaluate_compute(func, data, data_type, model)
        elif operation.lower().startswith("calc"):  # Equivalently: input_length == "value"
            out = self._evaluate_calculate(func, data, data_type, model)
        else:
            raise ValueError("Unknown input_type parameter '{}'.".format(input_length))
    elif operation.lower().startswith("roll"):
        # Determine input columns
        columns = self.get_columns()
        columns = get_columns(columns, data)
        if columns is None:
            raise ValueError("Error reading input column list. Skip column definition.")
        # Validation: check if all explicitly specified columns available
        if not all_columns_exist(columns, data):
            raise ValueError("Not all input columns available. Skip column definition.".format())
        # It exists only for rolling aggregation with grouping
        link_column_name = definition.get("link")
        # Slice input according to the change status (incremental not implemented)
        data = output_table.data.get_full_slice(columns)
        range = output_table.data.id_range()
        if input_length == "value":
            raise NotImplementedError("Accumulation is not implemented.".format())
        elif input_length == "column":
            gb = output_table._get_or_create_groupby(link_column_name) if link_column_name else None
            out = self._evaluate_roll(func, gb, data, data_type, model)
        else:
            raise ValueError("Unknown input_type parameter '{}'.".format(input_length))
    elif operation.lower().startswith("aggr"):
        #
        # Get parameters
        #
        tables = self.get_tables()
        source_table_name = tables[0]
        source_table = self.prosto.get_table(source_table_name)
        if source_table is None:
            raise ValueError("Cannot find the fact table '{}'.".format(source_table_name))
        link_column_name = definition.get("link")
        link_column = source_table.get_column(link_column_name)
        if link_column is None:
            raise ValueError("Cannot find the link column '{}'.".format(link_column_name))
        data = source_table.get_df()  # Data (to be processed) is a (source) table which is different from the output table
        # Determine input columns
        columns = self.get_columns()
        columns = get_columns(columns, data)
        if columns is None:
            raise ValueError("Error reading input column list. Skip column definition.")
        # Validation: check if all explicitly specified columns available
        if not all_columns_exist(columns, data):
            raise ValueError("Not all input columns available. Skip column definition.".format())
        data = data[columns]  # Select only the specified *input* columns
        # Overrides the earlier default ("Series"); may be None here
        data_type = definition.get("data_type")
        # No incremental. Select full *output* range
        range = output_table.data.id_range()
        if input_length == "value":
            raise NotImplementedError("Accumulation is not implemented.".format())
        elif input_length == "column":
            gb = source_table._get_or_create_groupby(link_column_name)
            out = self._evaluate_aggregate(func, gb, data, data_type, model)
        else:
            raise ValueError("Unknown input_type parameter '{}'.".format(input_length))
    else:
        raise ValueError("Unknown operation type '{}' in the definition of column '{}'.".format(operation, self.id))
    #
    # Append the newly generated column(s) to this table
    #
    self._impose_output_columns(out, range)
def _evaluate_calculate(self, func, data, data_type, model):
"""Calculate column. Apply function to each row of the table."""
#
# Single input: Apply to a series. UDF will get single value
#
if len(data.columns) == 1:
data_arg = data[data.columns[0]] # Series
# Determine format/type of representation
if data_type == "ndarray":
data_arg = data_arg.values
#
# Call UDF depending on the necessary model parameter
#
if model is None:
out = pd.Series.apply(data_arg, func) # Do not pass model to the function
elif isinstance(model, (list, tuple)):
out = pd.Series.apply(data_arg, func, args=model) # Model as positional arguments
elif isinstance(model, dict):
# Pass model by flattening dict (alternative: arbitrary Python object as positional or key argument). UDF has to declare the expected arguments
out = pd.Series.apply(data_arg, func, **model) # Model as keyword arguments
else:
out = pd.Series.apply(data_arg, func, args=(model,)) # Model as an arbitrary object
#
# | |
DataLoader(val_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=num_loader_workers,
pin_memory=True,
drop_last=False,
collate_fn=self._batch_collate_fn)
# Prepare tensorboard writer
tb_writer = self._prepare_tensorboard_writer()
# if user wants to train the model for more epochs, ignore the n_epochs parameter
train_num_epochs = epochs if epochs > 0 else self.n_epochs
# Train model
self._train(train_loader, val_loader, tb_writer, verbose, train_num_epochs)
# Close tensorboard writer
if tb_writer is not None:
tb_writer.flush()
tb_writer.close()
@random_method
def predict(self,
            n: int,
            series: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
            past_covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
            future_covariates: Optional[Union[TimeSeries, Sequence[TimeSeries]]] = None,
            batch_size: Optional[int] = None,
            verbose: bool = False,
            n_jobs: int = 1,
            roll_size: Optional[int] = None,
            num_samples: int = 1,
            num_loader_workers: int = 0
            ) -> Union[TimeSeries, Sequence[TimeSeries]]:
    """ Predict the `n` time step following the end of the training series, or of the specified `series`.

    Below, all possible parameters are documented, but not all models support all parameters. For instance,
    all the :class:`PastCovariatesTorchModel` support only `past_covariates` and not `future_covariates`.
    Darts will complain if you try calling :func:`predict()` on a model with the wrong covariates argument.

    Darts will also complain if the provided covariates do not have a sufficient time span.
    In general, not all models require the same covariates' time spans:

    * | Models relying on past covariates require the last `input_chunk_length` of the `past_covariates`
      | points to be known at prediction time. For horizon values `n > output_chunk_length`, these models
      | require at least the next `n - output_chunk_length` future values to be known as well.
    * | Models relying on future covariates require the next `n` values to be known.
      | In addition (for :class:`DualCovariatesTorchModel` and :class:`MixedCovariatesTorchModel`), they also
      | require the "historic" values of these future covariates (over the past `input_chunk_length`).

    When handling covariates, Darts will try to use the time axes of the target and the covariates
    to come up with the right time slices. So the covariates can be longer than needed; as long as the time axes
    are correct Darts will handle them correctly. It will also complain if their time span is not sufficient.

    Parameters
    ----------
    n
        The number of time steps after the end of the training time series for which to produce predictions
    series
        Optionally, a series or sequence of series, representing the history of the target series whose
        future is to be predicted. If specified, the method returns the forecasts of these
        series. Otherwise, the method returns the forecast of the (single) training series.
    past_covariates
        Optionally, the past-observed covariates series needed as inputs for the model.
        They must match the covariates used for training in terms of dimension.
    future_covariates
        Optionally, the future-known covariates series needed as inputs for the model.
        They must match the covariates used for training in terms of dimension.
    batch_size
        Size of batches during prediction. Defaults to the models' training `batch_size` value.
    verbose
        Optionally, whether to print progress.
    n_jobs
        The number of jobs to run in parallel. `-1` means using all processors. Defaults to `1`.
    roll_size
        For self-consuming predictions, i.e. `n > output_chunk_length`, determines how many
        outputs of the model are fed back into it at every iteration of feeding the predicted target
        (and optionally future covariates) back into the model. If this parameter is not provided,
        it will be set `output_chunk_length` by default.
    num_samples
        Number of times a prediction is sampled from a probabilistic model. Should be left set to 1
        for deterministic models.
    num_loader_workers
        Optionally, an integer specifying the `num_workers` to use in PyTorch ``DataLoader`` instances,
        for the inference/prediction dataset loaders (if any).
        A larger number of workers can sometimes increase performance, but can also incur extra overheads
        and increase memory usage, as more batches are loaded in parallel.

    Returns
    -------
    Union[TimeSeries, Sequence[TimeSeries]]
        One or several time series containing the forecasts of `series`, or the forecast of the training series
        if `series` is not specified and the model has been trained on a single series.
    """
    # Base-class validation of (n, series, covariates)
    super().predict(n, series, past_covariates, future_covariates)
    # Fall back to the series/covariates stored at fit() time when not given
    if series is None:
        raise_if(self.training_series is None, "Input series has to be provided after fitting on multiple series.")
        series = self.training_series
    if past_covariates is None and self.past_covariate_series is not None:
        past_covariates = self.past_covariate_series
    if future_covariates is None and self.future_covariate_series is not None:
        future_covariates = self.future_covariate_series
    # Normalize single-series input to sequences; remember to unwrap on return
    called_with_single_series = False
    if isinstance(series, TimeSeries):
        called_with_single_series = True
        series = [series]
        past_covariates = [past_covariates] if isinstance(past_covariates, TimeSeries) else past_covariates
        future_covariates = [future_covariates] if isinstance(future_covariates, TimeSeries) else future_covariates
    # Let configured encoders extend/generate covariates for the horizon n
    if self.encoders.encoding_available:
        past_covariates, future_covariates = self.encoders.encode_inference(n=n,
                                                                            target=series,
                                                                            past_covariate=past_covariates,
                                                                            future_covariate=future_covariates)
    dataset = self._build_inference_dataset(target=series,
                                            n=n,
                                            past_covariates=past_covariates,
                                            future_covariates=future_covariates)
    # NOTE(review): num_loader_workers is accepted but not forwarded to
    # predict_from_dataset here -- confirm whether it should be passed on
    predictions = self.predict_from_dataset(n, dataset, verbose=verbose, batch_size=batch_size, n_jobs=n_jobs,
                                            roll_size=roll_size, num_samples=num_samples)
    return predictions[0] if called_with_single_series else predictions
def predict_from_dataset(self,
n: int,
input_series_dataset: InferenceDataset,
batch_size: Optional[int] = None,
verbose: bool = False,
n_jobs: int = 1,
roll_size: Optional[int] = None,
num_samples: int = 1,
num_loader_workers: int = 0
) -> Sequence[TimeSeries]:
"""
This method allows for predicting with a specific :class:`darts.utils.data.InferenceDataset` instance.
These datasets implement a PyTorch `Dataset`, and specify how the target and covariates are sliced
for inference. In most cases, you'll rather want to call :func:`predict()` instead, which will create an
appropriate :class:`InferenceDataset` for you.
Parameters
----------
n
The number of time steps after the end of the training time series for which to produce predictions
input_series_dataset
Optionally, a series or sequence of series, representing the history of the target series' whose
future is to be predicted. If specified, the method returns the forecasts of these
series. Otherwise, the method returns the forecast of the (single) training series.
batch_size
Size of batches during prediction. Defaults to the models `batch_size` value.
verbose
Shows the progress bar for batch predicition. Off by default.
n_jobs
The number of jobs to run in parallel. `-1` means using all processors. Defaults to `1`.
roll_size
For self-consuming predictions, i.e. `n > output_chunk_length`, determines how many
outputs of the model are fed back into it at every iteration of feeding the predicted target
(and optionally future covariates) back into the model. If this parameter is not provided,
it will be set `output_chunk_length` by default.
num_samples
Number of times a prediction is sampled from a probabilistic model. Should be left set to 1
for deterministic models.
num_loader_workers
Optionally, an integer specifying the `num_workers` to use in PyTorch ``DataLoader`` instances,
for the inference/prediction dataset loaders (if any).
A larger number of workers can sometimes increase performance, but can also incur extra overheads
and increase memory usage, as more batches are loaded in parallel.
Returns
-------
Sequence[TimeSeries]
Returns one or more forecasts for time series.
"""
self._verify_inference_dataset_type(input_series_dataset)
# check that covariates and dimensions are matching what we had during training
self._verify_predict_sample(input_series_dataset[0])
if roll_size is None:
roll_size = self.output_chunk_length
else:
raise_if_not(0 < roll_size <= self.output_chunk_length,
'`roll_size` must be an integer between 1 and `self.output_chunk_length`.')
# check that `num_samples` is a positive integer
raise_if_not(num_samples > 0, '`num_samples` must be a positive integer.')
# iterate through batches to produce predictions
batch_size = batch_size or self.batch_size
pred_loader = DataLoader(input_series_dataset,
batch_size=batch_size,
shuffle=False,
num_workers=num_loader_workers,
pin_memory=True,
drop_last=False,
collate_fn=self._batch_collate_fn)
predictions = []
iterator = _build_tqdm_iterator(pred_loader, verbose=verbose)
self.model.eval()
with torch.no_grad():
for batch_tuple in iterator:
batch_tuple = self._batch_to_device(batch_tuple)
input_data_tuple, batch_input_series = batch_tuple[:-1], batch_tuple[-1]
# number of individual series to be predicted in current batch
num_series = input_data_tuple[0].shape[0]
# number of of times the input tensor should be tiled to produce predictions for multiple samples
# this variable is larger than 1 only if the batch_size is at least twice as large as the number
# of individual time series being predicted in current batch (`num_series`)
batch_sample_size = min(max(batch_size // num_series, 1), num_samples)
# counts number of produced prediction samples for every series to be predicted in current batch
sample_count = 0
# repeat prediction procedure for every needed sample
batch_predictions = []
while sample_count < num_samples:
# make sure we don't produce too many samples
if sample_count + batch_sample_size > num_samples:
batch_sample_size = num_samples - sample_count
# stack multiple copies of the tensors to produce probabilistic forecasts
input_data_tuple_samples = | |
DFA.unpack(
u"\1\130\11\uffff"
)
# ANTLR-generated serialized tables for DFA #24 (eot/eof/min/max rows are
# above this chunk).  DFA.unpack() run-length decodes each octal-escaped
# unicode string into a short array; "\uffff" encodes -1 ("no entry").
# accept: alternative number chosen when stopping in each state.
DFA24_accept = DFA.unpack(
    u"\1\uffff\1\1\1\2\7\uffff"
    )

# special: index into specialStateTransition for predicate states (none here).
DFA24_special = DFA.unpack(
    u"\12\uffff"
    )

# transition: one packed row of next-state entries per DFA state.
DFA24_transition = [
    DFA.unpack(u"\4\2\1\uffff\1\2\2\uffff\3\2\15\uffff\1\2\36\uffff"
               u"\2\2\6\uffff\1\1\22\uffff\1\2"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# class definition for DFA #24 -- tables are attached by the generated parser.
class DFA24(DFA):
    pass
# lookup tables for DFA #23
# ANTLR-generated, run-length encoded (DFA.unpack decodes octal-escaped
# unicode strings into short arrays; "\uffff" == -1, i.e. "no entry").
DFA23_eot = DFA.unpack(
    u"\20\uffff"
    )

DFA23_eof = DFA.unpack(
    u"\20\uffff"
    )

# min/max: lowest/highest token type with an outgoing transition per state.
DFA23_min = DFA.unpack(
    u"\1\5\17\uffff"
    )

DFA23_max = DFA.unpack(
    u"\1\154\17\uffff"
    )

DFA23_accept = DFA.unpack(
    u"\1\uffff\1\1\15\uffff\1\2"
    )

DFA23_special = DFA.unpack(
    u"\20\uffff"
    )

DFA23_transition = [
    DFA.unpack(u"\1\1\13\uffff\5\1\1\uffff\2\1\2\uffff\2\1\1\uffff\1"
               u"\1\25\uffff\1\1\10\uffff\1\1\7\uffff\1\1\1\17\11\uffff\2\1\6\uffff"
               u"\1\1\10\uffff\14\1"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# class definition for DFA #23
class DFA23(DFA):
    pass
# lookup tables for DFA #28
# ANTLR-generated, run-length encoded (DFA.unpack decodes octal-escaped
# unicode strings into short arrays; "\uffff" == -1, i.e. "no entry").
DFA28_eot = DFA.unpack(
    u"\40\uffff"
    )

DFA28_eof = DFA.unpack(
    u"\40\uffff"
    )

DFA28_min = DFA.unpack(
    u"\1\5\37\uffff"
    )

DFA28_max = DFA.unpack(
    u"\1\154\37\uffff"
    )

DFA28_accept = DFA.unpack(
    u"\1\uffff\1\2\35\uffff\1\1"
    )

DFA28_special = DFA.unpack(
    u"\40\uffff"
    )

DFA28_transition = [
    DFA.unpack(u"\1\1\4\uffff\1\37\6\uffff\5\1\1\uffff\6\1\1\uffff\1"
               u"\1\25\uffff\1\1\10\uffff\2\1\1\uffff\1\1\4\uffff\1\1\2\uffff\1"
               u"\1\3\uffff\1\1\3\uffff\2\1\2\uffff\1\1\3\uffff\2\1\1\uffff\22\1"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# class definition for DFA #28
class DFA28(DFA):
    pass
# lookup tables for DFA #29
# ANTLR-generated, run-length encoded (DFA.unpack decodes octal-escaped
# unicode strings into short arrays; "\uffff" == -1, i.e. "no entry").
DFA29_eot = DFA.unpack(
    u"\37\uffff"
    )

DFA29_eof = DFA.unpack(
    u"\37\uffff"
    )

DFA29_min = DFA.unpack(
    u"\1\5\36\uffff"
    )

DFA29_max = DFA.unpack(
    u"\1\154\36\uffff"
    )

DFA29_accept = DFA.unpack(
    u"\1\uffff\1\2\1\1\34\uffff"
    )

DFA29_special = DFA.unpack(
    u"\37\uffff"
    )

DFA29_transition = [
    DFA.unpack(u"\1\2\13\uffff\5\2\1\uffff\6\2\1\uffff\1\2\25\uffff"
               u"\1\2\10\uffff\2\2\1\uffff\1\1\4\uffff\1\2\2\uffff\1\2\3\uffff\1"
               u"\2\3\uffff\2\2\2\uffff\1\2\3\uffff\2\2\1\uffff\22\2"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# class definition for DFA #29
class DFA29(DFA):
    pass
# lookup tables for DFA #36
# ANTLR-generated, run-length encoded (DFA.unpack decodes octal-escaped
# unicode strings into short arrays; "\uffff" == -1, i.e. "no entry").
# State 1 has special == 0, so it is resolved at runtime by
# specialStateTransition (a semantic predicate decides the alternative).
DFA36_eot = DFA.unpack(
    u"\13\uffff"
    )

DFA36_eof = DFA.unpack(
    u"\1\3\12\uffff"
    )

DFA36_min = DFA.unpack(
    u"\1\36\1\0\11\uffff"
    )

DFA36_max = DFA.unpack(
    u"\1\130\1\0\11\uffff"
    )

DFA36_accept = DFA.unpack(
    u"\2\uffff\1\1\1\2\7\uffff"
    )

DFA36_special = DFA.unpack(
    u"\1\uffff\1\0\11\uffff"
    )

DFA36_transition = [
    DFA.unpack(u"\1\3\36\uffff\1\3\1\uffff\1\3\6\uffff\1\3\17\uffff"
               u"\2\2\1\1"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# class definition for DFA #36
class DFA36(DFA):
    pass
def specialStateTransition(self_, s, input):
    """Resolve DFA #36's single predicated state.

    In special state 0 the next state depends on the semantic predicate
    ``synpred61_sol``: state 2 when it holds, state 3 otherwise.  The input
    stream position is saved and restored around predicate evaluation.
    Any other state is a no-viable-alternative error.
    """
    # Bind the recognizer so semantic predicates can be evaluated on it.
    recognizer = self_.recognizer
    start_state = s

    if s == 0:
        input.LA(1)                 # peek, kept for stream-side parity
        marker = input.index()
        input.rewind()
        next_state = 2 if recognizer.synpred61_sol() else 3
        input.seek(marker)          # restore the lookahead position
        return next_state

    if recognizer._state.backtracking > 0:
        raise BacktrackingFailed

    nvae = NoViableAltException(self_.getDescription(), 36, start_state, input)
    self_.error(nvae)
    raise nvae
# lookup tables for DFA #45
# ANTLR-generated, run-length encoded (DFA.unpack decodes octal-escaped
# unicode strings into short arrays; "\uffff" == -1, i.e. "no entry").
DFA45_eot = DFA.unpack(
    u"\12\uffff"
    )

DFA45_eof = DFA.unpack(
    u"\1\uffff\1\3\10\uffff"
    )

DFA45_min = DFA.unpack(
    u"\2\36\10\uffff"
    )

DFA45_max = DFA.unpack(
    u"\2\130\10\uffff"
    )

DFA45_accept = DFA.unpack(
    u"\2\uffff\1\1\1\2\6\uffff"
    )

DFA45_special = DFA.unpack(
    u"\12\uffff"
    )

DFA45_transition = [
    DFA.unpack(u"\1\3\36\uffff\1\3\30\uffff\2\2\1\1"),
    DFA.unpack(u"\1\2\23\uffff\1\3\6\uffff\1\3\3\uffff\1\2\1\uffff"
               u"\1\3\6\uffff\1\3\21\uffff\1\2"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# class definition for DFA #45
class DFA45(DFA):
    pass
# lookup tables for DFA #46
# ANTLR-generated, run-length encoded (DFA.unpack decodes octal-escaped
# unicode strings into short arrays; "\uffff" == -1, i.e. "no entry").
DFA46_eot = DFA.unpack(
    u"\44\uffff"
    )

DFA46_eof = DFA.unpack(
    u"\1\uffff\1\5\42\uffff"
    )

DFA46_min = DFA.unpack(
    u"\1\21\1\6\42\uffff"
    )

DFA46_max = DFA.unpack(
    u"\1\144\1\u0083\42\uffff"
    )

DFA46_accept = DFA.unpack(
    u"\2\uffff\1\2\1\3\1\4\1\1\1\5\35\uffff"
    )

DFA46_special = DFA.unpack(
    u"\44\uffff"
    )

DFA46_transition = [
    DFA.unpack(u"\5\5\10\uffff\1\2\36\uffff\1\2\16\uffff\1\4\3\uffff"
               u"\1\1\3\uffff\1\3\3\uffff\1\2\10\uffff\4\5"),
    DFA.unpack(u"\4\5\3\uffff\1\5\2\uffff\1\6\15\uffff\1\5\23\uffff"
               u"\2\5\1\uffff\5\5\2\uffff\2\5\1\uffff\2\5\4\uffff\2\5\12\uffff\3"
               u"\5\2\uffff\3\5\15\uffff\4\5\3\uffff\27\5"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# class definition for DFA #46
class DFA46(DFA):
    pass
# lookup tables for DFA #48
# ANTLR-generated, run-length encoded (DFA.unpack decodes octal-escaped
# unicode strings into short arrays; "\uffff" == -1, i.e. "no entry").
# States 30..43 have special indices 0..13 and are resolved at runtime by
# specialStateTransition (a semantic predicate decides the alternative).
DFA48_eot = DFA.unpack(
    u"\55\uffff"
    )

DFA48_eof = DFA.unpack(
    u"\1\1\54\uffff"
    )

DFA48_min = DFA.unpack(
    u"\1\6\12\uffff\1\5\22\uffff\16\0\1\uffff"
    )

DFA48_max = DFA.unpack(
    u"\1\u0083\12\uffff\1\154\22\uffff\16\0\1\uffff"
    )

DFA48_accept = DFA.unpack(
    u"\1\uffff\1\2\52\uffff\1\1"
    )

DFA48_special = DFA.unpack(
    u"\36\uffff\1\0\1\1\1\2\1\3\1\4\1\5\1\6\1\7\1\10\1\11\1\12\1\13"
    u"\1\14\1\15\1\uffff"
    )

DFA48_transition = [
    DFA.unpack(u"\4\1\3\uffff\1\1\20\uffff\1\1\23\uffff\2\1\1\uffff"
               u"\5\1\2\uffff\2\1\1\uffff\2\1\4\uffff\2\1\12\uffff\1\13\2\1\2\uffff"
               u"\3\1\15\uffff\4\1\3\uffff\27\1"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"\1\50\13\uffff\5\53\1\uffff\1\45\1\47\2\uffff\2\46"
               u"\1\uffff\1\51\25\uffff\1\44\10\uffff\1\51\7\uffff\1\37\12\uffff"
               u"\1\53\1\52\1\54\5\uffff\1\51\10\uffff\4\53\1\36\2\40\2\41\2\42"
               u"\1\43"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"\1\uffff"),
    DFA.unpack(u"")
]

# class definition for DFA #48
class DFA48(DFA):
    pass
def specialStateTransition(self_, s, input):
    """Resolve DFA #48's predicated states.

    The generated code contained fourteen byte-identical branches for
    special states 0..13 (differing only in unused local names LA48_30..43);
    they are collapsed into a single branch here.  Each one peeks at the
    stream, marks and rewinds it, evaluates the semantic predicate
    ``synpred76_sol``, restores the stream, and goes to state 44 when the
    predicate holds, state 1 otherwise.  Any other state is a
    no-viable-alternative error.
    """
    # Pretend we are a member of the recognizer so semantic predicates
    # can be evaluated against it (generated-code convention).
    self = self_.recognizer
    _s = s

    if 0 <= s <= 13:
        input.LA(1)             # peek; result unused but kept for parity
        marker = input.index()
        input.rewind()
        s = 44 if self.synpred76_sol() else 1
        input.seek(marker)      # restore lookahead position
        return s

    if self._state.backtracking > 0:
        raise BacktrackingFailed

    nvae = NoViableAltException(self_.getDescription(), 48, _s, input)
    self_.error(nvae)
    raise nvae
# lookup tables for DFA #47
# ANTLR-generated, run-length encoded (DFA.unpack decodes octal-escaped
# unicode strings into short arrays; "\uffff" == -1, i.e. "no entry").
DFA47_eot = DFA.unpack(
    u"\20\uffff"
    )

DFA47_eof = DFA.unpack(
    u"\20\uffff"
    )

DFA47_min = DFA.unpack(
    u"\1\5\17\uffff"
    )

DFA47_max = DFA.unpack(
    u"\1\154\17\uffff"
    )

DFA47_accept = DFA.unpack(
    u"\1\uffff\1\1\15\uffff\1\2"
    )

DFA47_special = DFA.unpack(
    u"\20\uffff"
    )

DFA47_transition = [
    DFA.unpack(u"\1\1\13\uffff\5\1\1\uffff\2\1\2\uffff\2\1\1\uffff\1"
               u"\1\25\uffff\1\1\10\uffff\1\1\7\uffff\1\1\12\uffff\2\1\1\17\5\uffff"
               u"\1\1\10\uffff\14\1"),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u""),
    DFA.unpack(u"")
]

# class definition for DFA #47
class DFA47(DFA):
    pass
# lookup tables for DFA #49
DFA49_eot = DFA.unpack(
u"\41\uffff"
| |
import os
import sys
import cv2
from core.bbreg.bbreg_nn_klloss import BBRegNN
from net.loss.ghmseloss import GHMSE_Loss
from net.loss.mmd import mmd
from tracking.options import opts
import numpy as np
sys.path.append(os.path.abspath('../'))
from torch.autograd import Variable
from core.trackers.tracker_base import tracker
from net.classifier.fc_hf import FC_HF
from net.classifier.HessianFree import HessianFree
from net.classifier.Lookahead import Lookahead
from core.samples.samples import Memory
from net.wae64 import WAE64
from core.samples.sample_generator import SampleGenerator, gen_bboxes
import torch
from core.utils import shuffleTensor, sample_bbox, overlap_ratio
class DCFT_FCHF(tracker):
def __init__(self, frame=None, init_bbox=None, gt=None):
    """Initialise the DCFT_FCHF tracker on the first frame.

    Args:
        frame: first video frame (H x W x C image array — assumed; confirm
            against the caller).
        init_bbox: initial target box, (x, y, w, h).
        gt: optional ground-truth annotation, stored for evaluation.
    """
    tracker.__init__(self, frame, init_bbox)
    # Sequence state
    self.first_frame = frame
    self.init_bbox = init_bbox
    self.current_bbox = init_bbox
    self.img_sz = (frame.shape[1], frame.shape[0])  # (width, height)
    self.gt = gt
    # Frozen WAE encoder used as the feature extractor.
    self.encoder = WAE64().cuda().eval()
    # Online classifier; forward() returns (scores, latent code).
    self.classifier = FC_HF(in_dim=opts['in_dim'], l_dim=opts['l_dim']).cuda()
    # Counts classifier update iterations across the whole sequence.
    self.update_step = 0
    # GHM-weighted regression loss over the classifier scores.
    self.criterion = GHMSE_Loss(bins=30, alpha=0, mu=0.02)
    # Adam wrapped in Lookahead for more stable online updates.
    base_opt = torch.optim.Adam(self.classifier.parameters(), lr=1e-3, betas=(0.9, 0.999))
    self.optimizer = Lookahead(base_opt, k=5, alpha=0.5)
    # Generates candidate boxes around the current estimate.
    self.sample_generator = SampleGenerator('gaussian', self.img_sz, opts['trans'], opts['scale'])
    # Bounding-box regression network.
    self.bbreg = BBRegNN(self.img_sz)
    # Number of frames tracked so far.
    self.step = 1
    # Consecutive-failure counter; elsewhere used to tune the update interval.
    self.fail_num = 0
    # Sample memory backed by the frozen encoder.
    self.memory = Memory(extractor=self.encoder)
    # Centre point used by the trajectory predictor at failure.
    init_cx = int(self.init_bbox[0] + self.init_bbox[2] / 2)
    init_cy = int(self.init_bbox[1] + self.init_bbox[3] / 2)
    # Fix: the np.int alias was deprecated in NumPy 1.20 and removed in
    # 1.24 -- use the builtin int as the dtype instead.
    self.predictedCoords = np.array((init_cx, init_cy), dtype=int)
# hm
def train_cls(self, pos_feats, neg_feats, params, iter=opts['init_freq']):
    """Online update of the classifier with hard-negative mining.

    Args:
        pos_feats: encoded features of positive samples (torch tensor,
            one row per sample — assumed; confirm against Memory).
        neg_feats: encoded features of negative samples, same layout.
        params: pair [pos_ious, neg_ious] of lists of per-sample IoU
            arrays used as soft regression targets.
        iter: number of optimisation iterations.  NOTE: the default is
            evaluated once at import time, and the name shadows the
            builtin ``iter``; callers pass it by keyword, so renaming
            would break them.
    """
    pos_ious = np.concatenate(params[0])
    neg_ious = np.concatenate(params[1])
    self.classifier.train()
    batch_pos = opts['batch_pos']
    batch_neg = opts['batch_neg']
    batch_test = opts['batch_test']
    batch_neg_cand = max(opts['batch_neg_cand'], batch_neg)
    # Shuffled index pools; extended until they cover every iteration.
    pos_idx = np.random.permutation(pos_feats.size(0))
    neg_idx = np.random.permutation(neg_feats.size(0))
    while (len(pos_idx) < batch_pos * iter):
        pos_idx = np.concatenate([pos_idx, np.random.permutation(pos_feats.size(0))])
    while (len(neg_idx) < batch_neg_cand * iter):
        neg_idx = np.concatenate([neg_idx, np.random.permutation(neg_feats.size(0))])
    pos_pointer = 0
    neg_pointer = 0
    for i in range(iter):
        # NOTE(review): zero_grad() is also called right before backward()
        # below, so this call is redundant but harmless.
        self.optimizer.zero_grad()
        # select pos idx
        pos_next = pos_pointer + batch_pos
        pos_cur_idx = pos_idx[pos_pointer:pos_next]
        pos_cur_idx = pos_feats.new(pos_cur_idx).long()
        pos_pointer = pos_next
        # select neg idx
        neg_next = neg_pointer + batch_neg_cand
        neg_cur_idx = neg_idx[neg_pointer:neg_next]
        neg_cur_idx = neg_feats.new(neg_cur_idx).long()
        neg_pointer = neg_next
        # create batch
        batch_pos_feats = pos_feats[pos_cur_idx]
        batch_pos_ious = pos_ious[pos_cur_idx.cpu().numpy()]
        batch_neg_feats = neg_feats[neg_cur_idx]
        batch_neg_ious = neg_ious[neg_cur_idx.cpu().numpy()]
        # hard negative mining: score a larger candidate pool in eval mode
        # and keep only the top-scoring (hardest) negatives.
        if batch_neg_cand > batch_neg:
            self.classifier.eval()
            for start in range(0, batch_neg_cand, batch_test):
                end = min(start + batch_test, batch_neg_cand)
                with torch.no_grad():
                    score, _ = self.classifier.forward(batch_neg_feats[start:end])
                if start == 0:
                    neg_cand_score = score.detach()[:, 1].clone()
                else:
                    neg_cand_score = torch.cat((neg_cand_score, score.detach()[:, 1].clone()), 0)
            _, top_idx = neg_cand_score.topk(batch_neg)
            batch_neg_feats = batch_neg_feats[top_idx]
            batch_neg_ious = batch_neg_ious[top_idx.cpu().numpy()]
            self.classifier.train()
        # Shuffle positives and negatives together; target carries the
        # per-sample (neg, pos) soft labels built from the IoUs.
        code, target = shuffleTensor(batch_pos_feats, batch_neg_feats, [batch_pos_ious, batch_neg_ious])
        # pos_old_code = code[target[:, 1].gt(target[:, 0]), :]
        # neg_old_code = code[target[:, 0].gt(target[:, 1]), :]
        score, new_code = self.classifier(code)
        # calc classifier loss
        classifier_loss = self.criterion(score, target)
        # Split the learned latent codes by label to compare distributions.
        pos_new_code = new_code[target[:, 1].gt(target[:, 0]), :]
        neg_new_code = new_code[target[:, 0].gt(target[:, 1]), :]
        # calc two distribution's mmd loss
        # pos_code_loss = torch.abs(mmd(pos_new_code, pos_old_code, z_var=2))
        # neg_code_loss = torch.abs(mmd(neg_new_code, neg_old_code, z_var=2))
        # Dimension alignment: subsample negatives to the positive count.
        idx = np.random.permutation(pos_new_code.size(0))
        # pn_old_code_loss = torch.abs(mmd(batch_pos_feats, batch_neg_feats[idx], z_var=2))
        pn_new_code_loss = torch.abs(mmd(pos_new_code, neg_new_code[idx], z_var=2))
        # pn_old_code_eu = torch.sqrt(
        #     torch.sum((torch.cat([batch_pos_feats, batch_pos_feats, batch_pos_feats]) - batch_neg_feats).pow(2)))
        # pn_new_code_eu = torch.sqrt(
        #     torch.sum((torch.cat([pos_new_code, pos_new_code, pos_new_code]) - neg_new_code).pow(2)))
        # m = pn_new_code_loss - pn_old_code_loss
        # m > 0 : reward
        # m < 0: punish torch.log(1/pn_new_code_loss) +
        # loss = classifier_loss / (1 + pn_new_code_loss) + (pos_code_loss + neg_code_loss) / (0.1 + torch.exp(m + 2))
        # Total loss: classification plus a reward for separating the
        # positive/negative code distributions (larger MMD -> lower loss).
        loss = classifier_loss - torch.log(pn_new_code_loss)
        # loss = classifier_loss - torch.log(pn_new_code_eu)
        # writer.add_scalar("{}/distance_new_euclidean".format(opts["seq_name"]), pn_new_code_eu, (self.step-1)*iter + i)
        # writer.add_scalar("{}/distance_old_euclidean".format(opts["seq_name"]), pn_old_code_eu, (self.step-1)*iter + i)
        # print((self.step-1)*iter + i)
        #
        # writer.add_scalar("{}/distance_new_mmd".format(opts["seq_name"]), pn_new_code_loss, self.step)
        # writer.add_scalar("{}/distance_old_mmd".format(opts["seq_name"]), pn_old_code_loss, self.step)
        self.optimizer.zero_grad()
        loss.backward()
        if 'grad_clip' in opts:
            torch.nn.utils.clip_grad_norm_(self.classifier.parameters(), opts['grad_clip'])
        self.optimizer.step()
        # delayed update classifier_delay
        # if self.update_step % opts['delay_freq'] == 0:
        #     self.classifier_delay.load_state_dict(deepcopy(self.classifier.state_dict()))
        self.update_step += 1
# Hard-negative and hard-positive mining (OHEM variant of train_cls)
def train_cls_ohem(self, pos_feats, neg_feats, params, iter=opts['init_freq']):
    """Online classifier update with hard-negative AND hard-positive mining.

    Like ``train_cls`` but both classes are mined: the highest-scoring
    negatives and the lowest-scoring positives of a larger candidate pool
    form each training batch.

    Args:
        pos_feats: encoded features of positive samples (torch tensor,
            one row per sample — assumed; confirm against Memory).
        neg_feats: encoded features of negative samples, same layout.
        params: pair [pos_ious, neg_ious] of lists of per-sample IoU
            arrays used as soft regression targets.
        iter: number of optimisation iterations.  NOTE: the default is
            evaluated once at import time, and the name shadows the
            builtin ``iter``; callers pass it by keyword, so it is kept.

    Fixes vs. the original: the dead distribution statistics
    (pos/neg_code_loss, pn_old_code_loss, m) were computed with expensive
    mmd() calls but never contributed to the loss — they have been
    removed (mmd is assumed side-effect free).  The duplicated
    zero_grad() at the top of the loop was dropped.
    """
    pos_ious = np.concatenate(params[0])
    neg_ious = np.concatenate(params[1])
    self.classifier.train()
    batch_pos = opts['batch_pos']
    batch_neg = opts['batch_neg']
    batch_test_neg = opts['batch_test_neg']
    batch_test_pos = opts['batch_test_pos']
    batch_neg_cand = max(opts['batch_neg_cand'], batch_neg)
    batch_pos_cand = max(opts['batch_pos_cand'], batch_pos)
    # Shuffled index pools; extended until they cover every iteration.
    pos_idx = np.random.permutation(pos_feats.size(0))
    neg_idx = np.random.permutation(neg_feats.size(0))
    while (len(pos_idx) < batch_pos_cand * iter):
        pos_idx = np.concatenate([pos_idx, np.random.permutation(pos_feats.size(0))])
    while (len(neg_idx) < batch_neg_cand * iter):
        neg_idx = np.concatenate([neg_idx, np.random.permutation(neg_feats.size(0))])
    pos_pointer = 0
    neg_pointer = 0
    for i in range(iter):
        # Take the next slice of candidate indices for each class.
        pos_next = pos_pointer + batch_pos_cand
        pos_cur_idx = pos_feats.new(pos_idx[pos_pointer:pos_next]).long()
        pos_pointer = pos_next
        neg_next = neg_pointer + batch_neg_cand
        neg_cur_idx = neg_feats.new(neg_idx[neg_pointer:neg_next]).long()
        neg_pointer = neg_next
        # Candidate batches.
        batch_pos_feats = pos_feats[pos_cur_idx]
        batch_pos_ious = pos_ious[pos_cur_idx.cpu().numpy()]
        batch_neg_feats = neg_feats[neg_cur_idx]
        batch_neg_ious = neg_ious[neg_cur_idx.cpu().numpy()]
        # Hard negatives: score candidates in eval mode, keep the
        # highest-scoring (most confusing) ones.
        if batch_neg_cand > batch_neg:
            self.classifier.eval()
            for start in range(0, batch_neg_cand, batch_test_neg):
                end = min(start + batch_test_neg, batch_neg_cand)
                with torch.no_grad():
                    score, _ = self.classifier.forward(batch_neg_feats[start:end])
                if start == 0:
                    neg_cand_score = score.detach()[:, 1].clone()
                else:
                    neg_cand_score = torch.cat((neg_cand_score, score.detach()[:, 1].clone()), 0)
            _, top_idx = neg_cand_score.topk(batch_neg)
            batch_neg_feats = batch_neg_feats[top_idx]
            batch_neg_ious = batch_neg_ious[top_idx.cpu().numpy()]
        # Hard positives: keep the LOWEST-scoring positives (largest=False).
        if batch_pos_cand > batch_pos:
            self.classifier.eval()
            for start in range(0, batch_pos_cand, batch_test_pos):
                end = min(start + batch_test_pos, batch_pos_cand)
                with torch.no_grad():
                    score, _ = self.classifier.forward(batch_pos_feats[start:end])
                if start == 0:
                    pos_cand_score = score.detach()[:, 1].clone()
                else:
                    pos_cand_score = torch.cat((pos_cand_score, score.detach()[:, 1].clone()), 0)
            _, top_idx = pos_cand_score.topk(batch_pos, largest=False)
            batch_pos_feats = batch_pos_feats[top_idx]
            batch_pos_ious = batch_pos_ious[top_idx.cpu().numpy()]
        self.classifier.train()
        # Mixed batch; target carries the per-sample (neg, pos) soft labels.
        code, target = shuffleTensor(batch_pos_feats, batch_neg_feats, [batch_pos_ious, batch_neg_ious])
        score, new_code = self.classifier(code)
        classifier_loss = self.criterion(score, target)
        # Split the learned latent codes by label and reward separation of
        # the two distributions (larger MMD -> lower loss).
        pos_new_code = new_code[target[:, 1].gt(target[:, 0]), :]
        neg_new_code = new_code[target[:, 0].gt(target[:, 1]), :]
        # Dimension alignment: subsample negatives to the positive count.
        idx = np.random.permutation(pos_new_code.size(0))
        pn_new_code_loss = torch.abs(mmd(pos_new_code, neg_new_code[idx], z_var=2))
        loss = classifier_loss - torch.log(pn_new_code_loss)
        self.optimizer.zero_grad()
        loss.backward()
        if 'grad_clip' in opts:
            torch.nn.utils.clip_grad_norm_(self.classifier.parameters(), opts['grad_clip'])
        self.optimizer.step()
        self.update_step += 1
def init_bbreg(self):
    """(Re)initialise the bounding-box regressor.

    The feature-based training path (sampling boxes around the initial
    target, extracting features, and fitting the regressor) is currently
    disabled; only a fresh ``BBRegNN`` sized to the current image is
    installed.
    """
    self.bbreg = BBRegNN(self.img_sz)
def stage_two(self, frame=None, candidate=None):
    """Locate the target in *frame*, with a recovery pass on failure.

    First scores candidate boxes around the previous estimate; if the mean
    top-5 score is below ``opts['success_thr']``, the classifier gets a
    short-term update and a wider search is performed.

    Args:
        frame: current video frame.
        candidate: unused; kept for interface compatibility.

    Returns:
        (target_bbox, bbreg_bbox, target_score) — raw and regressed box
        estimates plus the mean top-5 classifier score.

    Fixes vs. the original: the needless ``global bbreg_bbox`` was removed
    (every path assigns the local before returning); the classifier is put
    back into eval mode before the second scoring pass (``train_cls``
    leaves it in train mode); the redundant second permutation of the
    already-permuted recovery candidates was dropped; features for bbox
    regression are extracted only for the top-5 boxes instead of all
    candidates.
    """
    old_bbox = self.current_bbox
    # NOTE(review): self.bbreg_bbox is set outside this method — confirm
    # it is initialised before the first call.
    old_bbreg_bbox = self.bbreg_bbox

    # --- First pass: score candidates around the previous location. ---
    rects = gen_bboxes(self.sample_generator, old_bbox, opts['n_samples'])
    feats = self.memory.extract_feats(frame, rects)
    self.classifier.eval()
    scores, _ = self.classifier(feats)
    scores = scores[:, 1].detach()
    top_scores, top_idx = scores.topk(5)
    top_idx = top_idx.cpu().numpy()
    target_score = top_scores.mean()
    target_bbox = rects[top_idx].mean(axis=0)
    success = target_score > opts['success_thr']

    # Expand the search area at failure, reset it on success.
    if success:
        self.sample_generator.set_trans(opts['trans'])
    else:
        self.sample_generator.expand_trans(opts['trans_limit'])

    if success:
        self.fail_num = 0
        # Refine the top boxes with the bbox regressor.
        bbreg_samples = rects[top_idx]
        bbreg_feats = self.memory.extract_feats(frame, bbreg_samples)
        bbreg_bbox = self.bbreg.predict(bbreg_feats, bbreg_samples).mean(axis=0)
    else:
        self.fail_num += 1
        # Short-term classifier update from the sample memory.
        pos_feats, neg_feats, pos_ious, neg_ious = self.memory.get_features(
            type='supdate', samples=opts['n_frames_update'])
        self.train_cls(pos_feats, neg_feats, params=[pos_ious, neg_ious],
                       iter=opts['update_freq'])
        # --- Recovery pass: search a wider area around both estimates. ---
        rects = np.random.permutation(np.concatenate([
            gen_bboxes(self.sample_generator, old_bbreg_bbox, opts['n_samples'] * 3),
            gen_bboxes(SampleGenerator('uniform', self.img_sz, opts['trans_neg_init'], opts['scale_neg_init']),
                       old_bbox, opts['n_samples'], opts['overlap_neg_init']),
            gen_bboxes(SampleGenerator('uniform', self.img_sz, opts['trans_neg_init'], opts['scale_neg_init']),
                       old_bbox, opts['n_samples'], opts['overlap_neg_init']),
        ]))
        feats = self.memory.extract_feats(frame, rects)
        self.classifier.eval()  # train_cls left the net in train mode
        scores, _ = self.classifier(feats)
        scores = scores[:, 1].detach()
        top_scores, top_idx = scores.topk(5)
        top_idx = top_idx.cpu().numpy()
        target_score = top_scores.mean()
        success = target_score > opts['success_thr']
        if success:
            target_bbox = rects[top_idx].mean(axis=0)
            bbreg_samples = rects[top_idx]
            bbreg_feats = self.memory.extract_feats(frame, bbreg_samples)
            bbreg_bbox = self.bbreg.predict(bbreg_feats, bbreg_samples).mean(axis=0)

    # Still not successful: fall back to the previous estimates.
    if not success:
        target_bbox = old_bbox
        bbreg_bbox = old_bbreg_bbox
    return target_bbox, bbreg_bbox, target_score
# Data collect
if | |
- 10*x**2 + 1, x, domain='QQ'))
assert Poly(x**2-3, extension=sqrt(2)).sqf_norm() == \
(1, Poly(x**2 - 2*sqrt(2)*x - 1, x, extension=sqrt(2)),
Poly(x**4 - 10*x**2 + 1, x, domain='QQ'))
def test_sqf():
    """Square-free decomposition: sqf_part, sqf_list and sqf."""
    # f factors square-free as g * h**2; p is the square-free part of f.
    f = x**5 - x**3 - x**2 + 1
    g = x**3 + 2*x**2 + 2*x + 1
    h = x - 1
    p = x**4 + x**3 - x - 1
    F, G, H, P = map(Poly, (f, g, h, p))
    # sqf_part: expression and Poly inputs, with/without explicit gens,
    # and the polys= round-trip flag.
    assert F.sqf_part() == P
    assert sqf_part(f) == p
    assert sqf_part(f, x) == p
    assert sqf_part(f, (x,)) == p
    assert sqf_part(F) == P
    assert sqf_part(f, polys=True) == P
    assert sqf_part(F, polys=False) == p
    # sqf_list: (content, [(factor, multiplicity), ...]).
    assert F.sqf_list() == (1, [(G, 1), (H, 2)])
    assert sqf_list(f) == (1, [(g, 1), (h, 2)])
    assert sqf_list(f, x) == (1, [(g, 1), (h, 2)])
    assert sqf_list(f, (x,)) == (1, [(g, 1), (h, 2)])
    assert sqf_list(F) == (1, [(G, 1), (H, 2)])
    assert sqf_list(f, polys=True) == (1, [(G, 1), (H, 2)])
    assert sqf_list(F, polys=False) == (1, [(g, 1), (h, 2)])
    assert sqf_list(f, include=True) == [(g, 1), (h, 2)]
    # Constants without generators are rejected.
    raises(GeneratorsNeeded, "sqf_part(4)")
    raises(GeneratorsNeeded, "sqf_list(4)")
    # sqf: expression-level results, including rational functions (frac=True).
    assert sqf(1) == 1
    assert sqf(1, frac=True) == 1
    assert sqf(f) == g*h**2
    assert sqf(f, x) == g*h**2
    assert sqf(f, (x,)) == g*h**2
    d = x**2 + y**2
    assert sqf(f/d, frac=True) == (g*h**2)/d
    assert sqf(f/d, x, frac=True) == (g*h**2)/d
    assert sqf(f/d, (x,), frac=True) == (g*h**2)/d
    # Content extraction keeps unevaluated Mul only for nontrivial content.
    assert sqf(x - 1) == x - 1
    assert sqf(-x - 1) == -x - 1
    assert sqf(x - 1) != Mul(1, x - 1, evaluate=False)
    assert sqf(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)
    assert sqf((6*x - 10)/(3*x - 6), frac=True) == S(2)/3*((3*x - 5)/(x - 2))
def test_factor():
    """Polynomial factorisation: factor_list and factor."""
    # f factors as u * v**2 * w over the rationals.
    f = x**5 - x**3 - x**2 + 1
    u = x + 1
    v = x - 1
    w = x**2 + x + 1
    F, U, V, W = map(Poly, (f, u, v, w))
    # factor_list: (content, [(factor, multiplicity), ...]) in all calling forms.
    assert F.factor_list() == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(f) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(f, x) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(f, (x,)) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(F) == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(f, polys=True) == (1, [(U, 1), (V, 2), (W, 1)])
    assert factor_list(F, polys=False) == (1, [(u, 1), (v, 2), (w, 1)])
    assert factor_list(f, include=True) == [(u, 1), (v, 2), (w, 1)]
    raises(GeneratorsNeeded, "factor_list(4)")
    # factor: expression-level results, including rational functions.
    assert factor(1) == 1
    assert factor(1, frac=True) == 1
    assert factor(f) == u*v**2*w
    assert factor(f, x) == u*v**2*w
    assert factor(f, (x,)) == u*v**2*w
    g, p, q = x**2 - y**2, x - y, x + y
    assert factor(f/g, frac=True) == (u*v**2*w)/(p*q)
    assert factor(f/g, x, frac=True) == (u*v**2*w)/(p*q)
    assert factor(f/g, (x,), frac=True) == (u*v**2*w)/(p*q)
    # EX domain coefficients stay unfactored.
    f = Poly(sin(1)*x + 1, x, domain=EX)
    assert f.factor_list() == (1, [(f, 1)])
    # x**4 + 1 is irreducible over QQ but splits over QQ(I) and QQ(sqrt(2)).
    f = x**4 + 1
    assert factor(f) == f
    assert factor(f, extension=I) == (x**2 - I)*(x**2 + I)
    assert factor(f, gaussian=True) == (x**2 - I)*(x**2 + I)
    assert factor(f, extension=sqrt(2)) == (x**2 + sqrt(2)*x + 1)*(x**2 - sqrt(2)*x + 1)
    # Algebraic extensions, univariate and multivariate.
    f = x**2 + 2*sqrt(2)*x + 2
    assert factor(f, extension=sqrt(2)) == (x + sqrt(2))**2
    assert factor(f**3, extension=sqrt(2)) == (x + sqrt(2))**6
    assert factor(x**2 - 2*y**2, extension=sqrt(2)) == \
        (x + sqrt(2)*y)*(x - sqrt(2)*y)
    assert factor(2*x**2 - 4*y**2, extension=sqrt(2)) == \
        2*((x + sqrt(2)*y)*(x - sqrt(2)*y))
    # Content extraction keeps unevaluated Mul only for nontrivial content.
    assert factor(x - 1) == x - 1
    assert factor(-x - 1) == -x - 1
    assert factor(x - 1) != Mul(1, x - 1, evaluate=False)
    assert factor(6*x - 10) == Mul(2, 3*x - 5, evaluate=False)
    assert factor((6*x - 10)/(3*x - 6), frac=True) == S(2)/3*((3*x - 5)/(x - 2))
    # Modular factorisation with symmetric/non-symmetric representation.
    assert factor(x**11 + x + 1, modulus=65537, symmetric=True) == \
        (x**2 + x + 1)*(x**9 - x**8 + x**6 - x**5 + x**3 - x** 2 + 1)
    assert factor(x**11 + x + 1, modulus=65537, symmetric=False) == \
        (x**2 + x + 1)*(x**9 + 65536*x**8 + x**6 + 65536*x**5 + x**3 + 65536*x** 2 + 1)
def test_intervals():
    """Real-root isolation intervals, with multiplicities and refinement."""
    f = Poly((2*x/5 - S(17)/3)*(4*x + S(1)/257))
    # sqf=True: bare intervals; sqf=False: (interval, multiplicity) pairs.
    assert f.intervals(sqf=True) == [(-1, 0), (14, 15)]
    assert f.intervals(sqf=False) == [((-1, 0), 1), ((14, 15), 1)]
    # eps shrinks the isolating intervals; Rational and float eps agree.
    assert f.intervals(eps=S(1)/10) == f.intervals(eps=0.1) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/100) == f.intervals(eps=0.01) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/1000) == f.intervals(eps=0.001) == \
        [((-S(1)/1005, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert f.intervals(eps=S(1)/10000) == f.intervals(eps=0.0001) == \
        [((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
    # Same checks through the top-level intervals() function.
    f = (2*x/5 - S(17)/3)*(4*x + S(1)/257)
    assert intervals(f, sqf=True) == [(-1, 0), (14, 15)]
    assert intervals(f, sqf=False) == [((-1, 0), 1), ((14, 15), 1)]
    assert intervals(f, eps=S(1)/10) == intervals(f, eps=0.1) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/100) == intervals(f, eps=0.01) == \
        [((-S(1)/258, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/1000) == intervals(f, eps=0.001) == \
        [((-S(1)/1005, 0), 1), ((S(85)/6, S(85)/6), 1)]
    assert intervals(f, eps=S(1)/10000) == intervals(f, eps=0.0001) == \
        [((-S(1)/1028, -S(1)/1028), 1), ((S(85)/6, S(85)/6), 1)]
    # Multiplicities are reported per isolating interval.
    f = Poly((x**2 - 2)*(x**2-3)**7*(x+1)*(7*x+3)**3)
    assert f.intervals() == \
        [((-2, -S(3)/2), 7), ((-S(3)/2, -1), 1),
         ((-1, -1), 1), ((-1, 0), 3),
         ((1, S(3)/2), 1), ((S(3)/2, 2), 7)]
    raises(GeneratorsNeeded, "intervals(0)")
def test_nroots():
    """Numerical root finding via Poly.nroots and the nroots function."""
    assert Poly(x**2 - 1, x).nroots() == [-1.0, 1.0]
    assert Poly(x**2 + 1, x).nroots() == [-I, I]
    # error=True also returns an error estimate.
    # NOTE(review): the 1e25 bound looks like a typo for 1e-25 -- as
    # written it is vacuous; confirm against upstream before tightening.
    for poly in (Poly(x**2 - 1, x), Poly(x**2/3 - S(1)/3, x)):
        rts, err = poly.nroots(error=True)
        assert rts == [-1.0, 1.0] and err < 1e25
    for poly in (Poly(x**2 + 1, x), Poly(x**2/3 + S(1)/3, x)):
        rts, err = poly.nroots(error=True)
        assert rts == [-I, I] and err < 1e25
    # Complex coefficients, with and without a Gaussian extension.
    assert Poly(x**2 + 2*I, x).nroots() == [-1.0 + I, 1.0 - I]
    assert Poly(x**2 + 2*I, x, extension=I).nroots() == [-1.0 + I, 1.0 - I]
    assert Poly(0.2*x + 0.1).nroots() == [-0.5]
    # Multivariate input is rejected.
    raises(DomainError, "Poly(x+y, x).nroots()")
    raises(PolynomialError, "Poly(x+y).nroots()")
    # Top-level function form.
    assert nroots(x**2 - 1) == [-1.0, 1.0]
    rts, err = nroots(x**2 - 1, error=True)
    assert rts == [-1.0, 1.0] and err < 1e25
    raises(GeneratorsNeeded, "nroots(0)")
def test_cancel():
    """Tests for cancel() and Poly.cancel() over various domains."""
    # Trivial inputs pass through (or normalise) as expected.
    assert cancel(0) == 0
    assert cancel(7) == 7
    assert cancel(x) == x
    assert cancel(oo) == oo

    assert cancel((2, 3)) == (1, 2, 3)
    assert cancel((1, 0), x) == (1, 1, 0)
    assert cancel((0, 1), x) == (1, 0, 1)

    num, den, num_red, den_red = 4*x**2 - 4, 2*x - 2, 2*x + 2, 1
    NUM, DEN, NUM_RED, DEN_RED = [Poly(expr, x) for expr in (num, den, num_red, den_red)]

    assert NUM.cancel(DEN) == (1, NUM_RED, DEN_RED)

    assert cancel((num, den)) == (1, num_red, den_red)
    assert cancel((num, den), x) == (1, num_red, den_red)
    assert cancel((num, den), (x,)) == (1, num_red, den_red)
    assert cancel((NUM, DEN)) == (1, NUM_RED, DEN_RED)

    assert cancel((num, den), polys=True) == (1, NUM_RED, DEN_RED)
    assert cancel((NUM, DEN), polys=False) == (1, num_red, den_red)

    # Algebraic coefficients: untouched when greedy, simplified otherwise.
    expr = (x**2 - 2)/(x + sqrt(2))
    assert cancel(expr) == expr
    assert cancel(expr, greedy=False) == x - sqrt(2)

    expr = (x**2 - 2)/(x - sqrt(2))
    assert cancel(expr) == expr
    assert cancel(expr, greedy=False) == x + sqrt(2)

    assert cancel((x**2/4 - 1, x/2 - 1)) == (S(1)/2, x + 2, 1)

    assert cancel((x**2 - y)/(x - y)) == 1/(x - y)*(x**2 - y)

    assert cancel((x**2 - y**2)/(x - y), x) == x + y
    assert cancel((x**2 - y**2)/(x - y), y) == x + y
    assert cancel((x**2 - y**2)/(x - y)) == x + y

    assert cancel((x**3 - 1)/(x**2 - 1)) == (x**2 + x + 1)/(x + 1)
    assert cancel((x**3/2 - S(1)/2)/(x**2 - 1)) == (x**2 + x + 1)/(2*x + 2)

    assert cancel((exp(2*x) + 2*exp(x) + 1)/(exp(x) + 1)) == exp(x) + 1

    poly_num = Poly(x**2 - a**2, x)
    poly_den = Poly(x - a, x)
    assert cancel((poly_num, poly_den)) == (1, Poly(x + a, x), Poly(1, x))

    num = x**3 + (sqrt(2) - 2)*x**2 - (2*sqrt(2) + 3)*x - 3*sqrt(2)
    den = x**2 - 2
    assert cancel((num, den), extension=True) == (1, x**2 - 2*x - 3, x - sqrt(2))
def test_reduced():
raises(PolynomialError, "reduced(x, [x], x, modulus=3)")
f = 2*x**4 + y**2 - x**2 + y**3
G = [x**3 - x, y**3 - y]
Q = [2*x, 1]
r = x**2 + y**2 + y
assert | |
# -*- coding: utf-8 -*-
# $Id: btresolver.py 70517 2018-01-10 14:14:45Z vboxsync $
# pylint: disable=C0302
"""
Backtrace resolver using external debugging symbols and RTLdrFlt.
"""
__copyright__ = \
"""
Copyright (C) 2016-2017 Oracle Corporation
This file is part of VirtualBox Open Source Edition (OSE), as
available from http://www.virtualbox.org. This file is free software;
you can redistribute it and/or modify it under the terms of the GNU
General Public License (GPL) as published by the Free Software
Foundation, in version 2 as it comes in the "COPYING" file of the
VirtualBox OSE distribution. VirtualBox OSE is distributed in the
hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
The contents of this file may alternatively be used under the terms
of the Common Development and Distribution License Version 1.0
(CDDL) only, as it comes in the "COPYING.CDDL" file of the
VirtualBox OSE distribution, in which case the provisions of the
CDDL are applicable instead of those of the GPL.
You may elect to license modified versions of this file under the
terms and conditions of either the GPL or the CDDL or both.
"""
__version__ = "$Revision: 70517 $"
# Standard Python imports.
import os;
import re;
import shutil;
import subprocess;
# Validation Kit imports.
from common import utils;
def getRTLdrFltPath(asPaths):
    """
    Searches the given directories (recursively) for the RTLdrFlt tool.

    Returns the full path of the first match, or None when the tool was
    not found in any of the directories.
    """
    for sRoot in asPaths:
        for sCurDir, _, asFileNames in os.walk(sRoot):
            if 'RTLdrFlt' in asFileNames:
                return os.path.join(sCurDir, 'RTLdrFlt')
    return None
class BacktraceResolverOs(object):
    """
    Common base class of the OS specific backtrace resolvers.

    Stores the scratch and build root paths and provides a small logging
    helper forwarding to an optional callback.
    """

    def __init__(self, sScratchPath, sBuildRoot, fnLog = None):
        self.sScratchPath = sScratchPath  # Directory for temporary files (extracted symbols, ...).
        self.sBuildRoot   = sBuildRoot    # Root of the build output to search for binaries/symbols.
        self.fnLog        = fnLog         # Optional logging callback taking a single string.

    def log(self, sText):
        """
        Forwards sText to the logging callback when one was given,
        otherwise drops the message silently.
        """
        if self.fnLog is not None:
            self.fnLog(sText)
class BacktraceResolverOsLinux(BacktraceResolverOs):
    """
    Linux specific backtrace resolver.
    """

    def __init__(self, sScratchPath, sBuildRoot, fnLog = None):
        """
        Constructs a Linux host specific backtrace resolver.
        """
        BacktraceResolverOs.__init__(self, sScratchPath, sBuildRoot, fnLog);
        self.asDbgFiles = {};   # Maps binary base names to the extracted debug symbol files.

    def prepareEnv(self):
        """
        Prepares the environment for annotating Linux reports by extracting
        the debug symbol archive from the build output into the scratch
        directory.  Returns True on success, False on failure.
        """
        fRc = False;
        try:
            sDbgArchive = os.path.join(self.sBuildRoot, 'bin', 'VirtualBox-dbg.tar.bz2');

            # Extract debug symbol archive if it was found.
            if os.path.exists(sDbgArchive):
                asMembers = utils.unpackFile(sDbgArchive, self.sScratchPath, self.fnLog,
                                             self.fnLog);
                if asMembers:
                    # Populate the list of debug files.
                    for sMember in asMembers:
                        if os.path.isfile(sMember):
                            self.asDbgFiles[os.path.basename(sMember)] = sMember;

                    fRc = True;
        except Exception:   # Narrowed from a bare except so SystemExit/KeyboardInterrupt propagate.
            self.log('Failed to setup debug symbols');

        return fRc;

    def cleanupEnv(self):
        """
        Cleans up the environment by removing the scratch directory.
        Returns True on success, False on failure.
        """
        fRc = False;
        try:
            shutil.rmtree(self.sScratchPath, True);
            fRc = True;
        except Exception:   # Narrowed from a bare except.
            pass;
        return fRc;

    def getDbgSymPathFromBinary(self, sBinary, sArch):
        """
        Returns the path of the file containing the debug symbols for the
        given binary, or None when nothing is known about it.
        """
        _ = sArch;
        # Simplified: plain dict lookup instead of try/except around a bare except.
        return self.asDbgFiles.get(sBinary, None);

    def getBinaryListWithLoadAddrFromReport(self, asReport):
        """
        Parses the given VM state report and returns a list of binaries and
        their load addresses.

        Returns a list of (load address, binary name) tuples, or an empty
        list on failure.
        """
        asListBinaries = [];

        # Look for the line "Mapped address spaces:"
        iLine = 0;
        while iLine < len(asReport):
            if asReport[iLine].startswith('Mapped address spaces:'):
                break;
            iLine += 1;

        # Each mapping line has five whitespace separated columns; keep the
        # entries with a zero offset column whose path contains 'VirtualBox'.
        for sLine in asReport[iLine:]:
            asCandidate = sLine.split();
            if     len(asCandidate) == 5 \
               and asCandidate[0].startswith('0x') \
               and asCandidate[1].startswith('0x') \
               and asCandidate[2].startswith('0x') \
               and (asCandidate[3] == '0x0' or asCandidate[3] == '0')\
               and 'VirtualBox' in asCandidate[4]:
                asListBinaries.append((asCandidate[0], os.path.basename(asCandidate[4])));

        return asListBinaries;
class BacktraceResolverOsDarwin(BacktraceResolverOs):
    """
    Darwin specific backtrace resolver.
    """

    def __init__(self, sScratchPath, sBuildRoot, fnLog = None):
        """
        Constructs a Darwin host specific backtrace resolver.
        """
        BacktraceResolverOs.__init__(self, sScratchPath, sBuildRoot, fnLog);
        self.asDbgFiles = {};   # Maps binary names to the DWARF debug file inside the .dSYM bundle.

    def prepareEnv(self):
        """
        Prepares the environment for annotating Darwin reports by locating
        the .dSYM bundles in the build output and recording the contained
        DWARF debug files.  Returns True on success, False on failure.
        """
        fRc = False;
        try:
            #
            # Walk the build root directory and look for .dSYM directories,
            # building a list of them.
            #
            asDSymPaths = [];
            for sDirPath, asDirs, _ in os.walk(self.sBuildRoot):
                for sDir in asDirs:
                    if sDir.endswith('.dSYM'):
                        asDSymPaths.append(os.path.join(sDirPath, sDir));

            # Expand the dSYM paths to full DWARF debug files in the next step
            # and add them to the debug files dictionary.
            for sDSymPath in asDSymPaths:
                # Cut off the '.dSYM' suffix.  Note: the original code used
                # str.strip('.dSYM'), which strips *characters* from the set
                # {'.','d','S','Y','M'} at both ends and mangled names like
                # 'Support.dSYM' into 'upport'.
                sBinary = os.path.basename(sDSymPath)[:-len('.dSYM')];
                self.asDbgFiles[sBinary] = os.path.join(sDSymPath, 'Contents', 'Resources',
                                                        'DWARF', sBinary);

            fRc = True;
        except Exception:   # Narrowed from a bare except; keeps best-effort semantics.
            self.log('Failed to setup debug symbols');

        return fRc;

    def cleanupEnv(self):
        """
        Cleans up the environment by removing the scratch directory.
        Returns True on success, False on failure.
        """
        fRc = False;
        try:
            shutil.rmtree(self.sScratchPath, True);
            fRc = True;
        except Exception:   # Narrowed from a bare except.
            pass;
        return fRc;

    def getDbgSymPathFromBinary(self, sBinary, sArch):
        """
        Returns the path of the file containing the debug symbols for the
        given binary, or None when unknown.
        """
        # Hack to exclude executables as RTLdrFlt has some problems with it
        # currently: only dylibs are resolved.
        _ = sArch;
        sDbgSym = self.asDbgFiles.get(sBinary, None);
        if sDbgSym is not None and sDbgSym.endswith('.dylib'):
            return sDbgSym;
        return None;

    def _getReportVersion(self, asReport):
        """
        Returns the version of the given Darwin report (0 when not found).
        """
        # Find the line starting with "Report Version:"
        iLine = 0;
        iVersion = 0;
        while iLine < len(asReport):
            if asReport[iLine].startswith('Report Version:'):
                break;
            iLine += 1;

        if iLine < len(asReport):
            # Extract the run of digits following the label.
            sVersion = asReport[iLine];
            iStartVersion = len('Report Version:');
            iEndVersion = len(sVersion);
            while     iStartVersion < len(sVersion) \
                  and not sVersion[iStartVersion:iStartVersion+1].isdigit():
                iStartVersion += 1;
            while     iEndVersion > 0 \
                  and not sVersion[iEndVersion-1:iEndVersion].isdigit():
                iEndVersion -= 1;
            iVersion = int(sVersion[iStartVersion:iEndVersion]);
        else:
            self.log('Couldn\'t find the report version');

        return iVersion;

    def _getListOfBinariesFromReportPreSierra(self, asReport):
        """
        Returns a list of (load address, binary name) tuples for the loaded
        binaries, obtained from a pre Sierra report.
        """
        asListBinaries = [];

        # Find the line starting with "Binary Images:"
        iLine = 0;
        while iLine < len(asReport):
            if asReport[iLine].startswith('Binary Images:'):
                break;
            iLine += 1;

        if iLine < len(asReport):
            # The list starts after that line.
            iLine += 1;

            # A line for a loaded binary looks like the following:
            #     0x100042000 - 0x100095fff +VBoxDDU.dylib (4.3.15) <EB19C44D-F882-0803-DBDD-9995723111B7> /Application...
            # We need the start address and the library name.
            # To distinguish between our own libraries and ones from Apple we check whether the path at the end starts with
            #     /Applications/VirtualBox.app/Contents/MacOS
            oRegExpPath    = re.compile(r'/VirtualBox.app/Contents/MacOS');
            oRegExpAddr    = re.compile(r'0x\w+');
            oRegExpBinPath = re.compile(r'VirtualBox.app/Contents/MacOS/\S*');
            while iLine < len(asReport):
                asMatches = oRegExpPath.findall(asReport[iLine]);
                if asMatches:
                    # Line contains the path, extract start address and path to binary.
                    sAddr = oRegExpAddr.findall(asReport[iLine]);
                    sPath = oRegExpBinPath.findall(asReport[iLine]);
                    if sAddr and sPath:
                        # Reduce the path to the bare file name of the binary.
                        oRegExp = re.compile(r'\w+\.{0,1}\w*$');
                        sFilename = oRegExp.findall(sPath[0]);
                        asListBinaries.append((sAddr[0], sFilename[0]));
                else:
                    break; # End of image list
                iLine += 1;
        else:
            self.log('Couldn\'t find the list of loaded binaries in the given report');

        return asListBinaries;

    def _getListOfBinariesFromReportSierra(self, asReport):
        """
        Returns a list of (load address, binary name) tuples for the loaded
        binaries, obtained from a Sierra+ report.
        """
        asListBinaries = [];

        # A line for a loaded binary looks like the following:
        #     4   VBoxXPCOMIPCC.dylib    0x00000001139f17ea 0x1139e4000 + 55274
        # We need the start address and the library name.
        # To distinguish between our own libraries and ones from Apple we check whether the library
        # name contains VBox or VirtualBox.
        iLine = 0;
        while iLine < len(asReport):
            asStackTrace = asReport[iLine].split();

            # Check whether the line is made up of 6 elements separated by whitespace
            # and the first one is a number.
            if     len(asStackTrace) == 6 and asStackTrace[0].isdigit() \
               and (asStackTrace[1].find('VBox') != -1 or asStackTrace[1].find('VirtualBox') != -1) \
               and asStackTrace[3].startswith('0x'):

                # Check whether the library is already in our list and only add new ones.
                fFound = False;
                for _, sLibrary in asListBinaries:
                    if asStackTrace[1] == sLibrary:
                        fFound = True;
                        break;

                if not fFound:
                    asListBinaries.append((asStackTrace[3], asStackTrace[1]));
            iLine += 1;

        return asListBinaries;

    def getBinaryListWithLoadAddrFromReport(self, asReport):
        """
        Parses the given VM state report and returns a list of binaries and
        their load addresses.

        Returns a list of (load address, binary name) tuples, or an empty
        list on failure.
        """
        asListBinaries = [];

        iVersion = self._getReportVersion(asReport);
        if iVersion > 0:
            if iVersion <= 11:
                self.log('Pre Sierra Report');
                asListBinaries = self._getListOfBinariesFromReportPreSierra(asReport);
            elif iVersion == 12:
                self.log('Sierra report');
                asListBinaries = self._getListOfBinariesFromReportSierra(asReport);
            else:
                self.log('Unsupported report version %s' % (iVersion, ));

        return asListBinaries;
class BacktraceResolverOsSolaris(BacktraceResolverOs):
"""
Solaris specific backtrace resolver.
"""
def __init__(self, sScratchPath, sBuildRoot, fnLog = None):
"""
Constructs a Linux host specific backtrace resolver.
"""
BacktraceResolverOs.__init__(self, sScratchPath, sBuildRoot, fnLog);
self.asDbgFiles = {};
def prepareEnv(self):
"""
Prepares the | |
import numpy as np
import cv2
import os
from tqdm import tqdm
import random
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.patches as patches
import pickle
from glob import glob
import imgaug as ia
from imgaug import augmenters as iaa
from shapely.geometry import Polygon
# Card geometry in millimetres (presumably a poker-size playing card -- TODO confirm).
cardW=63
cardH=87
# Rectangle delimiting the corner zone that contains the rank and suit
# symbols, in millimetres relative to the top-left corner of the card.
cornerXmin=1
cornerXmax=8.95
cornerYmin=3
cornerYmax=23
# We convert the measures from mm to pixels: multiply by an arbitrary factor 'zoom'
# You shouldn't need to change this
zoom=4
cardW*=zoom
cardH*=zoom
cornerXmin=int(cornerXmin*zoom)
cornerXmax=int(cornerXmax*zoom)
cornerYmin=int(cornerYmin*zoom)
cornerYmax=int(cornerYmax*zoom)
# Locations of the pickled datasets consumed by this script.
data_dir='../data/card_data'
cards_pck_fn=data_dir+"/cards.pkl"
backgrounds_pck_fn=data_dir+"/backgrounds.pkl"
# Size (pixels) of the generated scene images.
imgW=416
imgH=416
# Reference quadrilaterals (pixel coordinates) used by the perspective
# transforms: the card itself, the card rotated by 90 degrees, and the two
# corner zones (top-left 'HL' and bottom-right 'LR') holding the symbols.
refCard=np.array([[0,0],[cardW,0],[cardW,cardH],[0,cardH]],dtype=np.float32)
refCardRot=np.array([[cardW,0],[cardW,cardH],[0,cardH],[0,0]],dtype=np.float32)
refCornerHL=np.array([[cornerXmin,cornerYmin],[cornerXmax,cornerYmin],[cornerXmax,cornerYmax],[cornerXmin,cornerYmax]],dtype=np.float32)
refCornerLR=np.array([[cardW-cornerXmax,cardH-cornerYmax],[cardW-cornerXmin,cardH-cornerYmax],[cardW-cornerXmin,cardH-cornerYmin],[cardW-cornerXmax,cardH-cornerYmin]],dtype=np.float32)
refCorners=np.array([refCornerHL,refCornerLR])
class Cards():
    """
    Accessor for the pickled card images.

    self._cards is a dictionary where keys are card names (ex: 'Kc') and
    values are lists of (img, hullHL, hullLR) tuples.
    """
    def __init__(self, cards_pck_fn=cards_pck_fn):
        # Fixed: use a context manager so the pickle file handle is closed
        # (the original left it open).
        with open(cards_pck_fn, 'rb') as pck:
            self._cards = pickle.load(pck)
        # Number of stored variants per card name.
        self._nb_cards_by_value = {k: len(self._cards[k]) for k in self._cards}
        print("cards loaded per suit/rank:", self._nb_cards_by_value)

    def get_random(self, card_name=None, display=False):
        """
        Returns (img, card_name, hullHL, hullLR) for a random variant of the
        given card name (or of a random card when card_name is None),
        optionally displaying it.
        """
        if card_name is None:
            card_name = random.choice(list(self._cards.keys()))
        card, hull1, hull2 = self._cards[card_name][random.randint(0, self._nb_cards_by_value[card_name] - 1)]
        if display:  # Fixed: was a redundant doubly-nested 'if display:' check.
            display_img(card, [hull1, hull2], "rgb")
        return card, card_name, hull1, hull2
class Backgrounds():
    """
    Accessor for the pickled background images.
    """
    def __init__(self, backgrounds_pck_fn=backgrounds_pck_fn):
        # Fixed: use a context manager so the pickle file handle is closed
        # (the original left it open).
        with open(backgrounds_pck_fn, 'rb') as pck:
            self._images = pickle.load(pck)
        self._nb_images = len(self._images)
        print("images loaded:", self._nb_images)

    def get_random(self, display=False):
        """
        Returns a random background image, optionally displaying it.
        """
        bg = self._images[random.randint(0, self._nb_images - 1)]
        if display: plt.imshow(bg)
        return bg
def display_img(img, polygons=None, channels="bgr", size=9):
    """
    Displays an inline image and draws optional polygons (bounding boxes,
    convex hulls) on it.

    img: image array of shape (h, w, 3) or (h, w, 4).
    polygons: a polygon or a list of polygons; each of shape (n,2) or
              (n,1,2) (cv2 contour style).
    channels: "bgr" for an image coming from the OpenCV world; anything
              else is assumed to already be RGB(A).
    size: figure size in inches.
    """
    # Fixed: avoid a mutable default argument; None now stands for "no polygons".
    if polygons is None:
        polygons = []
    elif not isinstance(polygons, list):
        polygons = [polygons]
    if channels == "bgr":  # bgr (cv2 image)
        nb_channels = img.shape[2]
        if nb_channels == 4:
            img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA)
        else:
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    fig, ax = plt.subplots(figsize=(size, size))
    ax.set_facecolor((0, 0, 0))
    ax.imshow(img)
    for polygon in polygons:
        # A polygon has either shape (n,2), or (n,1,2) if it is a cv2
        # contour (like a convex hull).  In the latter case, reshape to (n,2).
        if len(polygon.shape) == 3:
            polygon = polygon.reshape(-1, 2)
        patch = patches.Polygon(polygon, linewidth=1, edgecolor='g', facecolor='none')
        ax.add_patch(patch)
def give_me_filename(dirname, suffixes, prefix=""):
    """
    Returns a filename (or a list of filenames) in directory 'dirname' that
    does not exist yet.  When 'suffixes' is a list, one filename per suffix
    is produced, all sharing the same random number:
        filename = dirname + "/" + prefix + random number + "." + suffix

    Ex:
        > give_me_filename("dir", "jpg", prefix="prefix")
        'dir/prefix408290659.jpg'
        > give_me_filename("dir", ["jpg", "xml"])
        ['dir/877739594.jpg', 'dir/877739594.xml']
    """
    if not isinstance(suffixes, list):
        suffixes = [suffixes]
    # Normalise every suffix so that it starts with a dot.
    dotted = [s if s[0] == '.' else '.' + s for s in suffixes]

    # Draw random stems until none of the candidate names exists yet.
    while True:
        stem = "%09d" % random.randint(0, 999999999)
        candidates = [os.path.join(dirname, prefix + stem + s) for s in dotted]
        if all(not os.path.isfile(c) for c in candidates):
            break

    return candidates[0] if len(candidates) == 1 else candidates
def varianceOfLaplacian(img):
    """
    Returns a focus measure for 'img': the variance of its Laplacian.
    The lower the value, the blurrier the image.
    Source: A.Rosebrock, https://www.pyimagesearch.com/2015/09/07/blur-detection-with-opencv/
    """
    laplacian = cv2.Laplacian(img, cv2.CV_64F)
    return laplacian.var()
def extract_card (img, alphamask, output_fn=None, min_focus=120, debug=False):
    """
    Extracts the card from the photo 'img' and warps it into the reference
    rectangle of dimensions (cardW, cardH), adding an alpha channel making
    the area outside the card contour transparent.

    img: BGR photo containing one card.
    alphamask: uint8 mask applied to the computed alpha channel to clean
               the card borders.
    output_fn: optional file name to write the extracted card image to.
    min_focus: minimum variance-of-Laplacian; blurrier images are rejected.
    debug: when True, shows the intermediate images.

    Returns (valid, imgwarp): 'valid' tells whether extraction succeeded,
    'imgwarp' is the warped BGRA card image (or None).
    """
    imgwarp = None

    # Check the image is not too blurry.
    focus = varianceOfLaplacian(img)
    if focus < min_focus:
        if debug: print("Focus too low :", focus)
        return False, None

    # Convert to gray.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Noise-reducing and edge-preserving filter.
    gray = cv2.bilateralFilter(gray, 11, 17, 17)

    # Edge extraction.
    edge = cv2.Canny(gray, 30, 200)

    # Find the contours in the edged image.
    cnts, _ = cv2.findContours(edge.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # We suppose that the contour with largest area corresponds to the contour delimiting the card.
    cnt = sorted(cnts, key=cv2.contourArea, reverse=True)[0]

    # We want to check that 'cnt' is the contour of a rectangular shape:
    # first, determine 'box', the minimum area bounding rectangle of 'cnt',
    # then compare the areas of 'cnt' and 'box' -- they should be very close.
    rect = cv2.minAreaRect(cnt)
    box = cv2.boxPoints(rect)
    # Fixed: np.int0 was removed from NumPy (2.0); use an explicit int32
    # conversion, which is also what the OpenCV drawing/area APIs expect.
    box = box.astype(np.int32)
    areaCnt = cv2.contourArea(cnt)
    areaBox = cv2.contourArea(box)
    valid = areaCnt / areaBox > 0.95

    if valid:
        # Transform the zone inside the contour into the reference rectangle
        # of dimensions (cardW, cardH).
        ((xr, yr), (wr, hr), thetar) = rect
        # Determine 'Mp', the transformation that maps 'box' onto the
        # reference rectangle (rotated reference when the box is portrait).
        if wr > hr:
            Mp = cv2.getPerspectiveTransform(np.float32(box), refCard)
        else:
            Mp = cv2.getPerspectiveTransform(np.float32(box), refCardRot)
        # Determine the warped image by applying the transformation to the image.
        imgwarp = cv2.warpPerspective(img, Mp, (cardW, cardH))
        # Add alpha layer.
        imgwarp = cv2.cvtColor(imgwarp, cv2.COLOR_BGR2BGRA)

        # Shape of 'cnt' is (n,1,2), type=int, with n = number of points.
        # We reshape into (1,n,2), type=float32, before feeding to perspectiveTransform.
        cnta = cnt.reshape(1, -1, 2).astype(np.float32)
        # Apply the transformation 'Mp' to the contour.
        cntwarp = cv2.perspectiveTransform(cnta, Mp)
        # Fixed: np.int was removed from NumPy (>= 1.24); drawContours wants int32.
        cntwarp = cntwarp.astype(np.int32)

        # We build the alpha channel so that we have transparency on the
        # external border of the card.
        # First, initialize the alpha channel fully transparent...
        alphachannel = np.zeros(imgwarp.shape[:2], dtype=np.uint8)
        # ...then fill in the contour to make this zone of the card opaque.
        cv2.drawContours(alphachannel, cntwarp, 0, 255, -1)

        # Apply the alphamask onto the alpha channel to clean it.
        alphachannel = cv2.bitwise_and(alphachannel, alphamask)

        # Add the alpha channel to the warped image.
        imgwarp[:, :, 3] = alphachannel

        # Save the image to file.
        if output_fn is not None:
            cv2.imwrite(output_fn, imgwarp)

    if debug:
        cv2.imshow("Gray", gray)
        cv2.imshow("Canny", edge)
        edge_bgr = cv2.cvtColor(edge, cv2.COLOR_GRAY2BGR)
        cv2.drawContours(edge_bgr, [box], 0, (0, 0, 255), 3)
        cv2.drawContours(edge_bgr, [cnt], 0, (0, 255, 0), -1)
        cv2.imshow("Contour with biggest area", edge_bgr)
        if valid:
            cv2.imshow("Alphachannel", alphachannel)
            cv2.imshow("Extracted card", imgwarp)

    return valid, imgwarp
def findHull(img, corner=refCornerHL, debug="no"):
    """
    Finds, in the zone 'corner' of image 'img', the convex hull delimiting
    the value and suit symbols, and returns it in image coordinates.

    corner: array of shape (4,2), 4 points delimiting a rectangular zone;
            takes one of the 2 possible values: refCornerHL or refCornerLR.
    debug: "no" for no debug output; any other value shows intermediate
           images ("pause_always" additionally waits for a key press on
           every frame; pressing ESC aborts and returns None).

    Returns the hull (cv2 contour relative to 'img') or None on failure.
    """
    kernel = np.ones((3, 3), np.uint8)
    # Fixed: np.int was removed from NumPy (>= 1.24); use int32 explicitly.
    corner = corner.astype(np.int32)

    # We will focus on the zone of 'img' delimited by 'corner'.
    x1 = int(corner[0][0])
    y1 = int(corner[0][1])
    x2 = int(corner[2][0])
    y2 = int(corner[2][1])
    w = x2 - x1
    h = y2 - y1
    zone = img[y1:y2, x1:x2].copy()

    strange_cnt = np.zeros_like(zone)
    gray = cv2.cvtColor(zone, cv2.COLOR_BGR2GRAY)
    thld = cv2.Canny(gray, 30, 200)
    thld = cv2.dilate(thld, kernel, iterations=1)
    if debug != "no": cv2.imshow("thld", thld)

    # Find the contours.
    contours, _ = cv2.findContours(thld.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    min_area = 30  # We will reject contours with small area. TWEAK, 'zoom' dependant
    min_solidity = .3  # Reject contours with a low solidity. TWEAK
    concat_contour = None  # We will aggregate in 'concat_contour' the contours that we want to keep

    ok = True
    for c in contours:
        area = cv2.contourArea(c)

        hull = cv2.convexHull(c)
        hull_area = cv2.contourArea(hull)
        # Robustness fix: degenerate contours (zero hull area or zero moment)
        # previously raised ZeroDivisionError; skip them instead.
        if hull_area == 0:
            continue
        solidity = float(area) / hull_area

        # Determine the center of gravity (cx,cy) of the contour.
        M = cv2.moments(c)
        if M['m00'] == 0:
            continue
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])

        # abs(w/2-cx)<w*0.3 and abs(h/2-cy)<h*0.4 : TWEAK, the idea here is to keep only the contours which are closed to the center of the zone
        if area >= min_area and abs(w/2 - cx) < w*0.3 and abs(h/2 - cy) < h*0.4 and solidity > min_solidity:
            if debug != "no":
                cv2.drawContours(zone, [c], 0, (255, 0, 0), -1)
            if concat_contour is None:
                concat_contour = c
            else:
                concat_contour = np.concatenate((concat_contour, c))
        if debug != "no" and solidity <= min_solidity:
            print("Solidity", solidity)
            cv2.drawContours(strange_cnt, [c], 0, 255, 2)
            cv2.imshow("Strange contours", strange_cnt)

    if concat_contour is not None:
        # At this point, we suppose that 'concat_contour' contains only the
        # contours corresponding to the value and suit symbols, so we can
        # now determine the hull.
        hull = cv2.convexHull(concat_contour)
        hull_area = cv2.contourArea(hull)
        # If the area of the hull is too small or too big, there may be a problem.
        min_hull_area = 950  # TWEAK, deck and 'zoom' dependant
        max_hull_area = 2000  # TWEAK, deck and 'zoom' dependant
        if hull_area < min_hull_area or hull_area > max_hull_area:
            ok = False
            if debug != "no":
                print("Hull area=", hull_area, "too large or too small")
        # So far, the coordinates of the hull are relative to 'zone';
        # we need the coordinates relative to the image -> 'hull_in_img'.
        hull_in_img = hull + corner[0]
    else:
        ok = False

    if debug != "no":
        if concat_contour is not None:
            cv2.drawContours(zone, [hull], 0, (0, 255, 0), 1)
            cv2.drawContours(img, [hull_in_img], 0, (0, 255, 0), 1)
        cv2.imshow("Zone", zone)
        cv2.imshow("Image", img)
        if ok and debug != "pause_always":
            key = cv2.waitKey(1)
        else:
            key = cv2.waitKey(0)
        if key == 27:
            return None

    if ok == False:
        return None

    return hull_in_img
# Pascal VOC annotation templates.  xml_body_1 opens the <annotation> element
# with the image metadata, xml_object describes one bounding box, and
# xml_body_2 closes the document.  Placeholders in {BRACES} are filled via
# str.format() in create_voc_xml().
xml_body_1="""<annotation>
<folder>FOLDER</folder>
<filename>{FILENAME}</filename>
<path>{PATH}</path>
<source>
<database>Unknown</database>
</source>
<size>
<width>{WIDTH}</width>
<height>{HEIGHT}</height>
<depth>3</depth>
</size>
"""
# One <object> entry per bounding-box annotation.
xml_object=""" <object>
<name>{CLASS}</name>
<pose>Unspecified</pose>
<truncated>0</truncated>
<difficult>0</difficult>
<bndbox>
<xmin>{XMIN}</xmin>
<ymin>{YMIN}</ymin>
<xmax>{XMAX}</xmax>
<ymax>{YMAX}</ymax>
</bndbox>
</object>
"""
# Closing tag of the annotation document.
xml_body_2="""</annotation>
"""
def create_voc_xml(xml_file, img_file, listbba, display=False):
    """
    Writes a Pascal VOC annotation file 'xml_file' for image 'img_file',
    with one <object> entry per bounding-box annotation in 'listbba'
    (each item exposes classname, x1, y1, x2 and y2).
    """
    with open(xml_file, "w") as out:
        header = xml_body_1.format(**{'FILENAME': os.path.basename(img_file),
                                      'PATH': img_file, 'WIDTH': imgW, 'HEIGHT': imgH})
        out.write(header)
        for bba in listbba:
            entry = xml_object.format(**{'CLASS': bba.classname,
                                         'XMIN': bba.x1, 'YMIN': bba.y1,
                                         'XMAX': bba.x2, 'YMAX': bba.y2})
            out.write(entry)
        out.write(xml_body_2)
        if display: print("New xml", xml_file)
# Scenario with 2 cards:
# The original image of a card has the shape (cardH,cardW,4)
# We first paste it in a zero image of shape (imgH,imgW,4) at position decalX, decalY
# so that the original image is centered in the zero image
decalX=int((imgW-cardW)/2)
decalY=int((imgH-cardH)/2)
# Scenario with 3 cards : decal values are different
# (the card is pasted starting at the horizontal middle, one card-height
# above the vertical middle of the scene image)
decalX3=int(imgW/2)
decalY3=int(imgH/2-cardH)
def kps_to_polygon(kps):
    """
    Converts a sequence of imgaug keypoints into a shapely Polygon.
    """
    return Polygon([(kp.x, kp.y) for kp in kps])
def hull_to_kps(hull, decalX=decalX, decalY=decalY):
    """
    Converts a cv2 convex hull (cv2.Contour of shape Nx1x2) into imgaug
    KeypointsOnImage, translating every point by (decalX, decalY).
    """
    points = hull.reshape(-1, 2)
    keypoints = [ia.Keypoint(x=px + decalX, y=py + decalY) for (px, py) in points]
    return ia.KeypointsOnImage(keypoints, shape=(imgH, imgW, 3))
def kps_to_BB(kps):
    """
    Determines the imgaug bounding box enclosing the given imgaug keypoints,
    clipped to the image and slightly enlarged.  Returns None for a
    degenerate (zero width or zero height) box.
    """
    margin = 3  # Makes the bounding box a little bit bigger.
    xs = [kp.x for kp in kps.keypoints]
    ys = [kp.y for kp in kps.keypoints]
    left = max(0, int(min(xs) - margin))
    right = min(imgW, int(max(xs) + margin))
    top = max(0, int(min(ys) - margin))
    bottom = min(imgH, int(max(ys) + margin))
    if left == right or top == bottom:
        return None
    return ia.BoundingBox(x1=left, y1=top, x2=right, y2=bottom)
# imgaug keypoints of the bounding box of a whole card
# (the card pasted at (decalX, decalY) in the scene image).
cardKP = ia.KeypointsOnImage([
    ia.Keypoint(x=decalX,y=decalY),
    ia.Keypoint(x=decalX+cardW,y=decalY),
    ia.Keypoint(x=decalX+cardW,y=decalY+cardH),
    ia.Keypoint(x=decalX,y=decalY+cardH)
    ], shape=(imgH,imgW,3))
# imgaug transformation for one card in scenario with 2 cards
transform_1card = iaa.Sequential([
    iaa.Affine(scale=[0.65,1]),
    iaa.Affine(rotate=(-180,180)),
    iaa.Affine(translate_percent={"x":(-0.25,0.25),"y":(-0.25,0.25)}),
])
# For the 3 cards scenario, we use 3 imgaug transforms, the first 2 are for individual cards,
# and the third one for the group of 3 cards
trans_rot1 = iaa.Sequential([
    iaa.Affine(translate_px={"x": (10, 20)}),
    iaa.Affine(rotate=(22,30))
])
trans_rot2 = iaa.Sequential([
    iaa.Affine(translate_px={"x": (0, 5)}),
    iaa.Affine(rotate=(10,15))
])
transform_3cards = iaa.Sequential([
    # NOTE(review): decalX-decalX and decalY-decalY are always 0, making this
    # first affine a no-op; it was possibly meant to use decalX3/decalY3 — confirm.
    iaa.Affine(translate_px={"x":decalX-decalX,"y":decalY-decalY}),
    iaa.Affine(scale=[0.65,1]),
    iaa.Affine(rotate=(-180,180)),
    iaa.Affine(translate_percent={"x":(-0.2,0.2),"y":(-0.2,0.2)})
])
# imgaug transformation for the background: resize it to the scene dimensions.
scaleBg=iaa.Resize({"height": imgH, "width": imgW})
def augment(img, list_kps, | |
from graphics import *
import random
#
# CS 177 - milestone2.py
# <NAME> 0029855549
# Following the Coding Standards and Guidelines
# This program will create a trivia quiz. First there will be a control window
# asking the user to play or exit. If play, they will take a quiz with 5
# questions. The # correct will be converted into chips which can be used to
# drop a ball through pins, into various bins for points. The final score will
# be displayed and then the window will close with a click and the control
# will be displayed again. My custom version will include a hardmode version
# which will deduct a point during the take quiz if a question is wrong.
#Create the basic game window with the black and yellow banner,
#and return both the graphics window and exit rectangle
def board():
    """
    Creates the main game window: banner image on top, a white footer strip,
    the two separator lines, and the chip/score counters.
    Returns the window plus the two Text objects (chips, score).
    """
    win = GraphWin("{:^130}".format("Game Board"), 500, 700)
    win.setBackground('light grey')

    banner = Image(Point(250, 50), "Letsplay.gif")
    banner.draw(win)

    footer = Rectangle(Point(0, 700), Point(500, 600))
    footer.setFill("white")
    footer.draw(win)

    for y in (100, 600):
        separator = Line(Point(0, y), Point(500, y))
        separator.setWidth(3)
        separator.draw(win)

    chip_label = Text(Point(50, 650), 'Chips: 0')
    chip_label.draw(win)
    score_label = Text(Point(445, 650), 'Score: 0')
    score_label.draw(win)
    return win, chip_label, score_label
#create separate function to see if click is within rectangle
def isclicked(clickpt, rect):
    """
    Returns True when 'clickpt' (a Point or None) lies strictly inside the
    rectangle 'rect'; False otherwise, including when no click happened
    (clickpt is None).
    """
    if not clickpt:
        return False
    px, py = clickpt.getX(), clickpt.getY()
    left, top = rect.getP1().getX(), rect.getP1().getY()
    right, bottom = rect.getP2().getX(), rect.getP2().getY()
    return left < px < right and top < py < bottom
#create gray bins
def bins(graphicwindow):
    """
    Draws the short divider lines forming the bins at the top and bottom of
    the board, and labels the bottom bins with their point values.
    """
    for y_top, y_bottom in ((100, 139), (560, 599)):
        for i in range(8):
            x = 5 + 70 * i
            divider = Line(Point(x, y_top), Point(x, y_bottom))
            divider.draw(graphicwindow)

    # Point values of the seven bottom bins, left to right.
    for i, value in enumerate(("40", "0", "60", "100", "20", "0", "80")):
        label = Text(Point(40 + 70 * i, 575), value)
        label.draw(graphicwindow)
#draw all the pins
def pins(window):
    """
    Draws the two staggered 5-row grids of black pins and returns all the
    Circle objects in a list.
    """
    circles = []
    # Two offset grids produce the staggered pin layout.
    for x_start, x_stop, y_base in ((40, 461, 180), (75, 426, 220)):
        for row in range(5):
            for x in range(x_start, x_stop, 70):
                pin = Circle(Point(x, y_base + 80 * row), 5)
                pin.setFill("black")
                pin.draw(window)
                circles.append(pin)
    return circles
#create the graphics of the trivia board
def makeQuiz():
    """
    Builds the trivia quiz window: banner, question text field, the four
    golden answer rectangles with their text fields, and the black status
    bar showing the question number and the correct-answer counter.
    Returns every object the caller interacts with.
    """
    backg = GraphWin("Quiz Window", 400, 400)
    backg.setBackground("grey")

    title = Image(Point(200, 40), 'TriviaTime.gif')
    title.draw(backg)

    displayq = Text(Point(200, 135), "")
    displayq.draw(backg)

    # Four 45px-tall answer bands stacked from y=175 to y=355.
    answer_rects = []
    answer_texts = []
    for top in (175, 220, 265, 310):
        band = Rectangle(Point(0, top), Point(400, top + 45))
        band.setFill("gold")
        band.draw(backg)
        answer_rects.append(band)
        label = Text(Point(200, top + 22.5), "")
        label.draw(backg)
        answer_texts.append(label)
    recta, rectb, rectc, rectd = answer_rects
    choicea, choiceb, choicec, choiced = answer_texts

    status_bar = Rectangle(Point(0, 360), Point(400, 400))
    status_bar.setFill('black')
    status_bar.draw(backg)

    qnum = Text(Point(50, 380), 'Question #1')
    qnum.setTextColor('white')
    qnum.draw(backg)
    correct = Text(Point(350, 380), 'Correct: 0')
    correct.setTextColor('white')
    correct.draw(backg)

    # Return all the objects that the user will interact with.
    return backg, displayq, recta, rectb, rectc, rectd, choicea, choiceb, choicec, choiced, qnum, correct
#function that accepts data file
def pickQs(filename):
    """
    Reads the question file and returns a dictionary of 5 randomly chosen
    questions.

    Each non-empty line of the file has the form
        question,answerA,answerB,answerC,answerD,correctLetter
    The returned dictionary maps each chosen question string to the tuple
    (answerA, answerB, answerC, answerD, correctLetter).
    """
    # Use a context manager so the file is always closed.
    with open(filename, 'r', encoding="utf8") as handle:
        listqs = handle.read().split('\n')
    # Robustness fix: only drop the final entry when it is blank (left over
    # from a trailing newline).  The original unconditionally deleted the
    # last element, losing a real question when the file lacked one.
    if listqs and not listqs[-1].strip():
        del listqs[-1]
    # Select five random question/answer strings.
    chosen = random.sample(listqs, 5)
    # Build the dictionary: question -> (answers..., correct letter).
    newdic = {}
    for line in chosen:
        fields = line.split(',')
        newdic[fields[0]] = fields[1], fields[2], fields[3], fields[4], fields[5]
    return newdic
# NOTE(review): this duplicates the isclicked() defined earlier in the file;
# consider keeping a single definition.
def isclicked(clickpt, rect):
    """
    Returns True when the click point lies strictly inside 'rect';
    False when it does not, or when clickpt is None (no click yet).
    """
    if not clickpt:
        return False
    inside_x = rect.getP1().getX() < clickpt.getX() < rect.getP2().getX()
    inside_y = rect.getP1().getY() < clickpt.getY() < rect.getP2().getY()
    return inside_x and inside_y
def _wrap_question(question, words_per_line=5):
    """Return `question` with a trailing space after every word and a
    newline inserted after every `words_per_line`-th word (exactly the
    layout the original inline wrapping loop produced)."""
    wrapped = ""
    for count, word in enumerate(question.split(' '), start=1):
        wrapped += word + ' '
        if count % words_per_line == 0:
            wrapped += '\n'
    return wrapped

def takeQuiz():
    """Run the quiz: show each question from questions.txt, wait for the
    player to click an answer box, show correct/incorrect feedback, and
    return the final score (number of correct answers).

    Replaces the original four near-identical per-letter branches with a
    single letter -> rectangle lookup.
    """
    # Build the quiz window and load the question dictionary.
    (backg, displayq, recta, rectb, rectc, rectd,
     choicea, choiceb, choicec, choiced, qnum, correct) = makeQuiz()
    newdic = pickQs('questions.txt')
    # Pop-up feedback graphics (created once, drawn/undrawn per question).
    whiteback = Rectangle(Point(95, 95), Point(305, 205))
    whiteback.setFill('white')
    yay = Rectangle(Point(100, 100), Point(300, 200))
    yay.setFill('gold')
    boo = Rectangle(Point(100, 100), Point(300, 200))
    boo.setFill('red')
    hooray = Text(Point(200, 135), "You are correct!")
    hooray.setSize(14)
    hooray.setStyle('bold')
    sorry = Text(Point(200, 135), "Sorry, that is incorrect")
    sorry.setSize(13)
    sorry.setStyle('bold')
    continu = Text(Point(200, 165), "Click to continue")
    continu.setStyle('italic')
    score = 0
    # Starting question number comes from the last digit of the label text.
    qupd = int(qnum.getText()[-1])
    # Map each answer letter to its clickable rectangle.
    answer_rects = {'A': recta, 'B': rectb, 'C': rectc, 'D': rectd}
    for each in newdic:
        # Show the (wrapped) question, its four choices and its number.
        displayq.setText(_wrap_question(each))
        choicea.setText(newdic[each][0])
        choiceb.setText(newdic[each][1])
        choicec.setText(newdic[each][2])
        choiced.setText(newdic[each][3])
        qnum.setText("Question #{}".format(qupd))
        backg.checkMouse()  # discard any click left over from before
        # Poll until one of the four answer boxes is clicked.
        chosen = None
        while chosen is None:
            click = backg.checkMouse()
            for letter, rect in answer_rects.items():
                if isclicked(click, rect):
                    chosen = letter
                    break
        # Feedback pop-up: gold for a correct answer, red otherwise.
        whiteback.draw(backg)
        if chosen == newdic[each][4]:
            yay.draw(backg)
            hooray.draw(backg)
            score += 1
        else:
            boo.draw(backg)
            sorry.draw(backg)
        continu.draw(backg)
        correct.setText("Correct: {}".format(score))
        # Wait for a click inside the pop-up area before moving on.
        click = backg.checkMouse()
        while not (isclicked(click, yay) or isclicked(click, boo)):
            click = backg.checkMouse()
        # undraw() is a no-op on objects that were never drawn, so clearing
        # every pop-up item unconditionally is safe.
        for item in (boo, yay, whiteback, hooray, sorry, continu):
            item.undraw()
        qupd += 1
    # All questions answered: show the final score pop-up.
    whiteback.draw(backg)
    alldonee = Rectangle(Point(100, 100), Point(300, 200))
    alldonee.setFill('gold')
    alldonee.draw(backg)
    final = Text(Point(200, 135), "Your final score is: {}".format(score))
    final.setSize(13)
    final.setStyle('bold')
    final.draw(backg)
    continu.draw(backg)
    qnum.setText('All done!')
    # Wait for a click inside the pop-up, then close the window.
    click2 = backg.checkMouse()
    while not isclicked(click2, alldonee):
        click2 = backg.checkMouse()
    backg.close()
    return score
def control():
    """Open the 'Game Control' window with PLAY/EXIT buttons and the
    Hard Mode toggle (custom feature).

    Returns (play_box, exit_box, window, hard_box).
    """
    window = GraphWin('Game Control', 500, 200)
    window.setBackground('light grey')
    banner = Image(Point(250, 50), 'TrinkoTitle.gif')
    banner.draw(window)
    # Red EXIT button box.
    exit_box = Rectangle(Point(270, 115), Point(380, 145))
    exit_box.setFill('red')
    exit_box.draw(window)
    # Green PLAY button box.
    play_box = Rectangle(Point(120, 115), Point(230, 145))
    play_box.setFill('green')
    play_box.draw(window)
    exit_label = Text(Point(325, 130), 'EXIT')
    exit_label.setTextColor('white')
    exit_label.draw(window)
    play_label = Text(Point(175, 130), 'PLAY')
    play_label.setTextColor('white')
    play_label.draw(window)
    # Gold Hard Mode box (custom feature toggle).
    hard_box = Rectangle(Point(190, 160), Point(310, 190))
    hard_box.draw(window)
    hard_box.setFill('gold')
    hard_label = Text(Point(250, 175), 'Hard Mode')
    hard_label.draw(window)
    return play_box, exit_box, window, hard_box
def bounce(ball, apin, dx, dy):
    """React the falling ball to one pin and to the side walls.

    On contact (center distance <= sum of radii) the ball is deflected
    horizontally away from the pin, its vertical speed is damped and
    reversed, and the pin lights up red; otherwise the pin is black.
    Hitting a side wall (x < 1 or x > 699) reverses dx.
    Returns the updated (dx, dy).
    """
    ball_center = ball.getCenter()
    pin_center = apin.getCenter()
    bx, by = ball_center.getX(), ball_center.getY()
    px, py = pin_center.getX(), pin_center.getY()
    separation = ((bx - px) ** 2 + (by - py) ** 2) ** .5
    if separation <= (ball.getRadius() + apin.getRadius()):
        # Deflect away from whichever side of the pin the ball is on,
        # then damp and flip the vertical speed.
        dx += -2.5 if bx < px else 2.5
        dy *= -.7
        apin.setFill('red')
    else:
        apin.setFill('black')
    # Side walls reverse the horizontal direction.
    if bx < 1 or bx > 699:
        dx *= -1
    return dx, dy
#drop function to control the | |
# -*- coding: utf-8 -*-
# Copyright 2014 <NAME> - c01db33f (at) gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""reil.x86.operands - x86 and x86_64 translators
This module generates REIL (reverse engineering intermediate language)
IL from x86 and x86_64 machine code.
This file contains helpers for reading and writing instruction
operands.
"""
import capstone
from reil.error import *
from reil.shorthand import *
from reil.utilities import *
from reil.x86.utilities import *
def _reg_id_from_name(name):
    """Map a textual x86/x86_64 register name to its capstone register id.

    Raises:
        TranslationError: if `name` is not a known register name.
    """
    register_lookup = {
        'al': capstone.x86.X86_REG_AL,
        'ah': capstone.x86.X86_REG_AH,
        'bl': capstone.x86.X86_REG_BL,
        'bh': capstone.x86.X86_REG_BH,
        'cl': capstone.x86.X86_REG_CL,
        'ch': capstone.x86.X86_REG_CH,
        'dl': capstone.x86.X86_REG_DL,
        'dh': capstone.x86.X86_REG_DH,
        'sil': capstone.x86.X86_REG_SIL,
        'dil': capstone.x86.X86_REG_DIL,
        'bpl': capstone.x86.X86_REG_BPL,
        'spl': capstone.x86.X86_REG_SPL,
        'r8b': capstone.x86.X86_REG_R8B,
        'r9b': capstone.x86.X86_REG_R9B,
        'r10b': capstone.x86.X86_REG_R10B,
        'r11b': capstone.x86.X86_REG_R11B,
        'r12b': capstone.x86.X86_REG_R12B,
        'r13b': capstone.x86.X86_REG_R13B,
        'r14b': capstone.x86.X86_REG_R14B,
        'r15b': capstone.x86.X86_REG_R15B,
        'ax': capstone.x86.X86_REG_AX,
        'bx': capstone.x86.X86_REG_BX,
        'cx': capstone.x86.X86_REG_CX,
        'dx': capstone.x86.X86_REG_DX,
        'si': capstone.x86.X86_REG_SI,
        'di': capstone.x86.X86_REG_DI,
        'bp': capstone.x86.X86_REG_BP,
        'sp': capstone.x86.X86_REG_SP,
        'r8w': capstone.x86.X86_REG_R8W,
        'r9w': capstone.x86.X86_REG_R9W,
        'r10w': capstone.x86.X86_REG_R10W,
        'r11w': capstone.x86.X86_REG_R11W,
        'r12w': capstone.x86.X86_REG_R12W,
        'r13w': capstone.x86.X86_REG_R13W,
        'r14w': capstone.x86.X86_REG_R14W,
        'r15w': capstone.x86.X86_REG_R15W,
        'eax': capstone.x86.X86_REG_EAX,
        'ebx': capstone.x86.X86_REG_EBX,
        'ecx': capstone.x86.X86_REG_ECX,
        'edx': capstone.x86.X86_REG_EDX,
        'esi': capstone.x86.X86_REG_ESI,
        'edi': capstone.x86.X86_REG_EDI,
        'ebp': capstone.x86.X86_REG_EBP,
        'esp': capstone.x86.X86_REG_ESP,
        # BUGFIX: r8d/r9d previously mapped to the 64-bit X86_REG_R8/R9 ids.
        'r8d': capstone.x86.X86_REG_R8D,
        'r9d': capstone.x86.X86_REG_R9D,
        'r10d': capstone.x86.X86_REG_R10D,
        'r11d': capstone.x86.X86_REG_R11D,
        'r12d': capstone.x86.X86_REG_R12D,
        'r13d': capstone.x86.X86_REG_R13D,
        'r14d': capstone.x86.X86_REG_R14D,
        'r15d': capstone.x86.X86_REG_R15D,
        # BUGFIX: the 64-bit names previously mapped to the 16-bit ids
        # (e.g. 'rax' -> X86_REG_AX).
        'rax': capstone.x86.X86_REG_RAX,
        'rbx': capstone.x86.X86_REG_RBX,
        'rcx': capstone.x86.X86_REG_RCX,
        'rdx': capstone.x86.X86_REG_RDX,
        'rsi': capstone.x86.X86_REG_RSI,
        'rdi': capstone.x86.X86_REG_RDI,
        'rbp': capstone.x86.X86_REG_RBP,
        'rsp': capstone.x86.X86_REG_RSP,
        'r8': capstone.x86.X86_REG_R8,
        'r9': capstone.x86.X86_REG_R9,
        'r10': capstone.x86.X86_REG_R10,
        'r11': capstone.x86.X86_REG_R11,
        'r12': capstone.x86.X86_REG_R12,
        'r13': capstone.x86.X86_REG_R13,
        'r14': capstone.x86.X86_REG_R14,
        'r15': capstone.x86.X86_REG_R15,
        'rip': capstone.x86.X86_REG_RIP
    }
    if name not in register_lookup:
        raise TranslationError('Invalid Register {}'.format(name))
    return register_lookup[name]
def _memory_address(ctx, i, opnd):
    """Emit REIL code computing the effective address of memory operand
    `opnd` (x86 base + displacement + index*scale, plus optional segment)
    and return the immediate or temporary register holding it.

    Each intermediate sum/product is produced in a double-width temporary
    and masked back down to ctx.word_size bits to model address wrap-around.
    """
    address = None
    # Pick the base+displacement starting value.
    if opnd.mem.disp != 0 and opnd.mem.base == 0:
        # displacement only: a plain immediate, masked to the word size
        address = imm(opnd.mem.disp & mask(ctx.word_size), ctx.word_size)
    elif opnd.mem.disp == 0 and opnd.mem.base != 0:
        # base register only
        address = _get_register(ctx, i, opnd.mem.base)
    elif opnd.mem.disp != 0 and opnd.mem.base != 0:
        # base + displacement: wide add, then mask down to word size
        base = _get_register(ctx, i, opnd.mem.base)
        tmp0 = ctx.tmp(ctx.word_size * 2)
        address = ctx.tmp(ctx.word_size)
        ctx.emit( add_ (base,
                        imm(opnd.mem.disp & mask(ctx.word_size), ctx.word_size),
                        tmp0))
        ctx.emit( and_ (tmp0,
                        imm(mask(ctx.word_size), ctx.word_size * 2),
                        address))
    else:
        # neither base nor displacement: start from zero
        address = imm(0, ctx.word_size)
    # Segment override: add the segment register's value to the address.
    # NOTE(review): assumes ctx.registers maps capstone segment ids to a
    # flat segment base operand — confirm against the translator context.
    if opnd.mem.segment != 0:
        tmp0 = ctx.tmp(ctx.word_size * 2)
        prev_address = address
        address = ctx.tmp(ctx.word_size)
        ctx.emit( add_ (prev_address,
                        ctx.registers[opnd.mem.segment],
                        tmp0))
        ctx.emit( and_ (tmp0,
                        imm(mask(ctx.word_size), ctx.word_size * 2),
                        address))
    # Scaled index: address += index * scale (wide multiply, masked twice).
    if opnd.mem.index != 0:
        index = _get_register(ctx, i, opnd.mem.index)
        tmp0 = ctx.tmp(ctx.word_size * 2)
        tmp1 = ctx.tmp(ctx.word_size)
        tmp2 = ctx.tmp(ctx.word_size * 2)
        prev_address = address
        address = ctx.tmp(ctx.word_size)
        ctx.emit( mul_ (index,
                        imm(opnd.mem.scale, ctx.word_size),
                        tmp0))
        ctx.emit( and_ (tmp0,
                        imm(mask(ctx.word_size), ctx.word_size * 2),
                        tmp1))
        ctx.emit( add_ (tmp1, prev_address, tmp2))
        ctx.emit( and_ (tmp2,
                        imm(mask(ctx.word_size), ctx.word_size * 2),
                        address))
    return address
def _get_memory_size(ctx, i, opnd):
if 'byte' in i.op_str:
return 8
elif 'dword' in i.op_str:
return 32
elif 'qword' in i.op_str:
return 64
elif 'xmmword' in i.op_str:
return 128
elif 'word' in i.op_str:
return 16
else:
return ctx.word_size
def _get_register(ctx, i, reg):
    """Return a REIL operand holding the current value of the capstone
    register id `reg`, emitting extraction code for sub-registers.

    Full native registers come straight from ctx.registers; 8/16/32-bit
    sub-registers are copied into a fresh temporary of the matching size
    (and high byte registers additionally shifted down by 8).

    Raises:
        TranslationError: for register ids with no handling here.
    """
    # we need to handle rip first to shortcut native register handling.
    # rip reads as the address of the *next* instruction: i.address + i.size.
    if reg == capstone.x86.X86_REG_RIP and not ctx.use_rip:
        qword_reg = ctx.tmp(64)
        ctx.emit( str_ (imm(i.address + i.size, 64), qword_reg))
        return qword_reg
    # full native registers
    if reg in ctx.registers:
        return ctx.registers[reg]
    # 8-bit low parts
    # NOTE(review): the r8b-r15b entries map to the full 64-bit registers;
    # presumably str_ into the 8-bit temp below truncates to the low byte —
    # consistent with usage throughout this module, but confirm.
    low_bytes = {
        capstone.x86.X86_REG_AL:ctx.accumulator,
        capstone.x86.X86_REG_BL:ctx.base,
        capstone.x86.X86_REG_CL:ctx.counter,
        capstone.x86.X86_REG_DL:ctx.data,
        capstone.x86.X86_REG_SIL:ctx.source,
        capstone.x86.X86_REG_DIL:ctx.destination,
        capstone.x86.X86_REG_BPL:ctx.frame_ptr,
        capstone.x86.X86_REG_SPL:ctx.stack_ptr,
        capstone.x86.X86_REG_R8B:r('r8', 64),
        capstone.x86.X86_REG_R9B:r('r9', 64),
        capstone.x86.X86_REG_R10B:r('r10', 64),
        capstone.x86.X86_REG_R11B:r('r11', 64),
        capstone.x86.X86_REG_R12B:r('r12', 64),
        capstone.x86.X86_REG_R13B:r('r13', 64),
        capstone.x86.X86_REG_R14B:r('r14', 64),
        capstone.x86.X86_REG_R15B:r('r15', 64),
    }
    if reg in low_bytes:
        byte_reg = ctx.tmp(8)
        ctx.emit( str_ (low_bytes[reg], byte_reg))
        return byte_reg
    # 8-bit high parts (ah/bh/ch/dh): truncate to 16 bits, then shift the
    # high byte (bits 8..15) down into an 8-bit temporary.
    high_bytes = {
        capstone.x86.X86_REG_AH:ctx.accumulator,
        capstone.x86.X86_REG_BH:ctx.base,
        capstone.x86.X86_REG_CH:ctx.counter,
        capstone.x86.X86_REG_DH:ctx.data
    }
    if reg in high_bytes:
        full_reg = high_bytes[reg]
        word_reg = ctx.tmp(16)
        byte_reg = ctx.tmp(8)
        ctx.emit( str_ (full_reg, word_reg))
        ctx.emit( lshr_ (word_reg, imm(8, 8), byte_reg))
        return byte_reg
    # 16-bit low parts
    low_words = {
        capstone.x86.X86_REG_AX:ctx.accumulator,
        capstone.x86.X86_REG_BX:ctx.base,
        capstone.x86.X86_REG_CX:ctx.counter,
        capstone.x86.X86_REG_DX:ctx.data,
        capstone.x86.X86_REG_SI:ctx.source,
        capstone.x86.X86_REG_DI:ctx.destination,
        capstone.x86.X86_REG_BP:ctx.frame_ptr,
        capstone.x86.X86_REG_SP:ctx.stack_ptr,
        capstone.x86.X86_REG_R8W:r('r8', 64),
        capstone.x86.X86_REG_R9W:r('r9', 64),
        capstone.x86.X86_REG_R10W:r('r10', 64),
        capstone.x86.X86_REG_R11W:r('r11', 64),
        capstone.x86.X86_REG_R12W:r('r12', 64),
        capstone.x86.X86_REG_R13W:r('r13', 64),
        capstone.x86.X86_REG_R14W:r('r14', 64),
        capstone.x86.X86_REG_R15W:r('r15', 64),
    }
    if reg in low_words:
        word_reg = ctx.tmp(16)
        ctx.emit( str_ (low_words[reg], word_reg))
        return word_reg
    # 32-bit low parts
    low_dwords = {
        capstone.x86.X86_REG_EAX:ctx.accumulator,
        capstone.x86.X86_REG_EBX:ctx.base,
        capstone.x86.X86_REG_ECX:ctx.counter,
        capstone.x86.X86_REG_EDX:ctx.data,
        capstone.x86.X86_REG_ESI:ctx.source,
        capstone.x86.X86_REG_EDI:ctx.destination,
        capstone.x86.X86_REG_EBP:ctx.frame_ptr,
        capstone.x86.X86_REG_ESP:ctx.stack_ptr,
        capstone.x86.X86_REG_R8D:r('r8', 64),
        capstone.x86.X86_REG_R9D:r('r9', 64),
        capstone.x86.X86_REG_R10D:r('r10', 64),
        capstone.x86.X86_REG_R11D:r('r11', 64),
        capstone.x86.X86_REG_R12D:r('r12', 64),
        capstone.x86.X86_REG_R13D:r('r13', 64),
        capstone.x86.X86_REG_R14D:r('r14', 64),
        capstone.x86.X86_REG_R15D:r('r15', 64),
    }
    if reg in low_dwords:
        dword_reg = ctx.tmp(32)
        ctx.emit( str_ (low_dwords[reg], dword_reg))
        return dword_reg
    raise TranslationError('Unsupported register!')
def _get_register_size(ctx, i, reg_id):
    """Return the width in bits of the register identified by capstone id
    `reg_id`.

    Full native registers report their context size; known sub-register
    ids report their architectural width (8/16/32), and rip is 64 bits.

    Raises:
        TranslationError: for register ids with no handling here.
    """
    # full native registers
    if reg_id in ctx.registers:
        return ctx.registers[reg_id].size
    # 8-bit low parts (sets throughout for O(1) membership; the original
    # mixed lists and sets)
    low_bytes = {
        capstone.x86.X86_REG_AL,
        capstone.x86.X86_REG_BL,
        capstone.x86.X86_REG_CL,
        capstone.x86.X86_REG_DL,
        capstone.x86.X86_REG_SIL,
        capstone.x86.X86_REG_DIL,
        capstone.x86.X86_REG_BPL,
        capstone.x86.X86_REG_SPL,
        capstone.x86.X86_REG_R8B,
        capstone.x86.X86_REG_R9B,
        capstone.x86.X86_REG_R10B,
        capstone.x86.X86_REG_R11B,
        capstone.x86.X86_REG_R12B,
        capstone.x86.X86_REG_R13B,
        capstone.x86.X86_REG_R14B,
        capstone.x86.X86_REG_R15B,
    }
    if reg_id in low_bytes:
        return 8
    # 8-bit high parts
    high_bytes = {
        capstone.x86.X86_REG_AH,
        capstone.x86.X86_REG_BH,
        capstone.x86.X86_REG_CH,
        capstone.x86.X86_REG_DH,
    }
    if reg_id in high_bytes:
        return 8
    # 16-bit low parts
    low_words = {
        capstone.x86.X86_REG_AX,
        capstone.x86.X86_REG_BX,
        capstone.x86.X86_REG_CX,
        capstone.x86.X86_REG_DX,
        capstone.x86.X86_REG_SI,
        capstone.x86.X86_REG_DI,
        capstone.x86.X86_REG_BP,
        capstone.x86.X86_REG_SP,
        capstone.x86.X86_REG_R8W,
        capstone.x86.X86_REG_R9W,
        capstone.x86.X86_REG_R10W,
        capstone.x86.X86_REG_R11W,
        capstone.x86.X86_REG_R12W,
        capstone.x86.X86_REG_R13W,
        capstone.x86.X86_REG_R14W,
        capstone.x86.X86_REG_R15W,
    }
    if reg_id in low_words:
        return 16
    # 32-bit low parts
    low_dwords = {
        capstone.x86.X86_REG_EAX,
        capstone.x86.X86_REG_EBX,
        capstone.x86.X86_REG_ECX,
        capstone.x86.X86_REG_EDX,
        capstone.x86.X86_REG_ESI,
        capstone.x86.X86_REG_EDI,
        capstone.x86.X86_REG_EBP,
        capstone.x86.X86_REG_ESP,
        capstone.x86.X86_REG_R8D,
        capstone.x86.X86_REG_R9D,
        capstone.x86.X86_REG_R10D,
        capstone.x86.X86_REG_R11D,
        capstone.x86.X86_REG_R12D,
        capstone.x86.X86_REG_R13D,
        capstone.x86.X86_REG_R14D,
        capstone.x86.X86_REG_R15D,
    }
    if reg_id in low_dwords:
        return 32
    # BUGFIX: compare with ==, not `is` — identity of int constants is an
    # implementation detail and not guaranteed.
    if reg_id == capstone.x86.X86_REG_RIP:
        return 64
    raise TranslationError('Unsupported register!')
def _get_immediate(ctx, i, opnd, size=0):
    """Return the immediate operand `opnd` as a REIL imm of `size` bits.

    If size is 0, guess a size from the value's bit length (smallest of
    8/16/32/64/128 strictly larger than it; zero gets the word size).
    """
    if size == 0:
        # TODO: This does not work. How to do this better?
        # maybe all immediates should be the minimum possible size to
        # represent them?
        bs = opnd.imm.bit_length()
        if bs == 0:
            size = ctx.word_size
        else:
            # BUGFIX: the loop variable previously reused `i`, shadowing
            # the instruction parameter.
            for candidate in (8, 16, 32, 64, 128):
                if bs < candidate:
                    size = candidate
                    break
    return imm(opnd.imm, size)
def _get_memory(ctx, i, opnd):
    """Load the value of memory operand `opnd`: compute its effective
    address, emit a load, and return the temporary holding the value."""
    # Address first, then the destination temp, to keep temp allocation
    # order identical to the rest of the module.
    addr = _memory_address(ctx, i, opnd)
    loaded = ctx.tmp(_get_memory_size(ctx, i, opnd))
    ctx.emit( ldm_ (addr, loaded))
    return loaded
def get_address(ctx, i, index):
    """Return the effective address of the memory operand at `index` of
    instruction `i` (without loading from it)."""
    return _memory_address(ctx, i, i.operands[index])
def get_register(ctx, i, name):
    """Read a register by textual name (e.g. 'rsp') and return the REIL
    operand holding its value."""
    return _get_register(ctx, i, _reg_id_from_name(name))
def get(ctx, i, index, size=0):
    """Read operand `index` of instruction `i`, whatever its kind
    (register, immediate or memory), and return a REIL operand.

    Raises:
        TranslationError: for operand types with no handling here.
    """
    opnd = i.operands[index]
    op_type = opnd.type
    if op_type == capstone.x86.X86_OP_REG:
        return _get_register(ctx, i, opnd.reg)
    if op_type == capstone.x86.X86_OP_IMM:
        return _get_immediate(ctx, i, opnd, size)
    if op_type == capstone.x86.X86_OP_MEM:
        return _get_memory(ctx, i, opnd)
    raise TranslationError('Unsupported operand type!')
def get_size(ctx, i, index, size=0):
    """Return the width in bits of operand `index` of instruction `i`,
    whatever its kind (register, immediate or memory).

    Raises:
        TranslationError: for operand types with no handling here.
    """
    opnd = i.operands[index]
    op_type = opnd.type
    if op_type == capstone.x86.X86_OP_REG:
        return _get_register_size(ctx, i, opnd.reg)
    if op_type == capstone.x86.X86_OP_IMM:
        return _get_immediate(ctx, i, opnd, size).size
    if op_type == capstone.x86.X86_OP_MEM:
        return _get_memory_size(ctx, i, opnd)
    raise TranslationError('Unsupported operand type!')
def is_register(ctx, i, index):
    """True when operand `index` of instruction `i` is a register."""
    opnd = i.operands[index]
    return opnd.type == capstone.x86.X86_OP_REG

def is_immediate(ctx, i, index):
    """True when operand `index` of instruction `i` is an immediate."""
    opnd = i.operands[index]
    return opnd.type == capstone.x86.X86_OP_IMM

def is_memory(ctx, i, index):
    """True when operand `index` of instruction `i` is a memory reference."""
    opnd = i.operands[index]
    return opnd.type == capstone.x86.X86_OP_MEM
def _set_register(ctx, i, reg_id, value, clear=False, sign_extend=False):
low_bytes = {
capstone.x86.X86_REG_AL:ctx.accumulator,
capstone.x86.X86_REG_BL:ctx.base,
capstone.x86.X86_REG_CL:ctx.counter,
capstone.x86.X86_REG_DL:ctx.data,
capstone.x86.X86_REG_SIL:ctx.source,
capstone.x86.X86_REG_DIL:ctx.destination,
capstone.x86.X86_REG_BPL:ctx.frame_ptr,
capstone.x86.X86_REG_SPL:ctx.stack_ptr,
capstone.x86.X86_REG_R8B:r('r8', 64),
capstone.x86.X86_REG_R9B:r('r9', 64),
capstone.x86.X86_REG_R10B:r('r10', 64),
capstone.x86.X86_REG_R11B:r('r11', 64),
capstone.x86.X86_REG_R12B:r('r12', 64),
capstone.x86.X86_REG_R13B:r('r13', 64),
capstone.x86.X86_REG_R14B:r('r14', 64),
capstone.x86.X86_REG_R15B:r('r15', 64),
}
high_bytes = {
capstone.x86.X86_REG_AH:ctx.accumulator,
capstone.x86.X86_REG_BH:ctx.base,
capstone.x86.X86_REG_CH:ctx.counter,
capstone.x86.X86_REG_DH:ctx.data
}
low_words = {
capstone.x86.X86_REG_AX:ctx.accumulator,
capstone.x86.X86_REG_BX:ctx.base,
capstone.x86.X86_REG_CX:ctx.counter,
capstone.x86.X86_REG_DX:ctx.data,
capstone.x86.X86_REG_SI:ctx.source,
capstone.x86.X86_REG_DI:ctx.destination,
capstone.x86.X86_REG_BP:ctx.frame_ptr,
capstone.x86.X86_REG_SP:ctx.stack_ptr,
capstone.x86.X86_REG_R8W:r('r8', 64),
capstone.x86.X86_REG_R9W:r('r9', 64),
capstone.x86.X86_REG_R10W:r('r10', 64),
capstone.x86.X86_REG_R11W:r('r11', 64),
capstone.x86.X86_REG_R12W:r('r12', 64),
capstone.x86.X86_REG_R13W:r('r13', 64),
capstone.x86.X86_REG_R14W:r('r14', 64),
capstone.x86.X86_REG_R15W:r('r15', 64),
}
low_dwords = {
capstone.x86.X86_REG_EAX:ctx.accumulator,
capstone.x86.X86_REG_EBX:ctx.base,
capstone.x86.X86_REG_ECX:ctx.counter,
capstone.x86.X86_REG_EDX:ctx.data,
capstone.x86.X86_REG_ESI:ctx.source,
capstone.x86.X86_REG_EDI:ctx.destination,
capstone.x86.X86_REG_EBP:ctx.frame_ptr,
capstone.x86.X86_REG_ESP:ctx.stack_ptr,
capstone.x86.X86_REG_R8D:r('r8', 64),
capstone.x86.X86_REG_R9D:r('r9', 64),
capstone.x86.X86_REG_R10D:r('r10', 64),
capstone.x86.X86_REG_R11D:r('r11', 64),
capstone.x86.X86_REG_R12D:r('r12', 64),
capstone.x86.X86_REG_R13D:r('r13', 64),
capstone.x86.X86_REG_R14D:r('r14', 64),
capstone.x86.X86_REG_R15D:r('r15', 64),
}
sse_regs = {
capstone.x86.X86_REG_XMM0:r('xmm0', 128),
capstone.x86.X86_REG_XMM1:r('xmm1', 128),
capstone.x86.X86_REG_XMM2:r('xmm2', 128),
capstone.x86.X86_REG_XMM3:r('xmm3', 128),
capstone.x86.X86_REG_XMM4:r('xmm4', 128),
capstone.x86.X86_REG_XMM5:r('xmm5', 128),
capstone.x86.X86_REG_XMM6:r('xmm6', 128),
capstone.x86.X86_REG_XMM7:r('xmm7', 128),
capstone.x86.X86_REG_XMM8:r('xmm8', 128),
capstone.x86.X86_REG_XMM9:r('xmm9', 128),
capstone.x86.X86_REG_XMM10:r('xmm10', 128),
capstone.x86.X86_REG_XMM11:r('xmm11', 128),
capstone.x86.X86_REG_XMM12:r('xmm12', 128),
capstone.x86.X86_REG_XMM13:r('xmm13', 128),
capstone.x86.X86_REG_XMM14:r('xmm14', 128),
capstone.x86.X86_REG_XMM15:r('xmm15', 128),
}
def truncate_value(value, size):
if value.size > size:
prev_value = value
value = ctx.tmp(size)
ctx.emit( str_ (prev_value, value))
return value
# full native registers
if reg_id in ctx.registers:
reg = ctx.registers[reg_id]
set_mask = imm(mask(reg.size), reg.size)
# 8-bit low parts
elif reg_id in low_bytes:
reg = low_bytes[reg_id]
set_mask = imm(~mask(8), reg.size)
value = truncate_value(value, 8)
# 8-bit high parts
elif reg_id in high_bytes:
reg = high_bytes[reg_id]
value = truncate_value(value, 8)
prev_value = value
value = ctx.tmp(reg.size)
tmp0 = ctx.tmp(reg.size)
tmp1 = ctx.tmp(reg.size)
ctx.emit( and_ (reg, imm(mask(reg.size) ^ 0xff00, reg.size), tmp0))
ctx.emit( str_ (prev_value, tmp1))
ctx.emit( lshl_ (tmp1, imm(8, 8), tmp1))
ctx.emit( or_ (tmp0, tmp1, value))
# 16-bit low parts
elif reg_id in low_words:
reg = low_words[reg_id]
set_mask = imm(~mask(16), reg.size)
value = truncate_value(value, 16)
# 32-bit low parts
elif reg_id in low_dwords:
# NB: this code is only reached in x86_64 mode.
# CF: Intel Manual... 32-bit operands generate a 32-bit result,
# zero-extended to a 64-bit result in the destination register.
reg = low_dwords[reg_id]
set_mask = imm(mask(64), reg.size)
value = truncate_value(value, 32)
clear = True
else:
raise TranslationError('Unsupported register!')
if reg_id in sse_regs:
# NB: We | |
8, 'efConstruction': 100, 'post' : 0},
query_time_params = {'efSearch': 100},
verbose = False,
):
super().__init__(
#jaccard init params
nmslib_method='hnsw',
nmslib_space='l2',
nmslib_data_type=nmslib.DataType.DENSE_VECTOR,
nmslib_dtype = nmslib.DistType.FLOAT,
nmslib_space_params = {},
#other params
X_prep_function = None,
n_neighbors = n_neighbors,
index_time_params = index_time_params,
query_time_params = query_time_params,
verbose = verbose,
)
return
class FastKLDivNN(NMSLibSklearnWrapper):
    """Approximate nearest neighbors under generalized KL-divergence on
    dense vectors, via nmslib's 'sw-graph' method."""

    def __init__(
        self,
        n_neighbors=30,
        index_time_params=None,
        query_time_params=None,
        verbose=False,
    ):
        # BUGFIX: avoid mutable default arguments (dicts shared across
        # instances); resolve the documented defaults here instead.
        if index_time_params is None:
            index_time_params = {'indexThreadQty': 4, 'efConstruction': 100}
        if query_time_params is None:
            query_time_params = {'efSearch': 100}
        super().__init__(
            # kldiv init params
            nmslib_method='sw-graph',
            nmslib_space='kldivgenfast',
            nmslib_data_type=nmslib.DataType.DENSE_VECTOR,
            nmslib_dtype=nmslib.DistType.FLOAT,
            nmslib_space_params={},
            # other params
            X_prep_function=None,
            n_neighbors=n_neighbors,
            index_time_params=index_time_params,
            query_time_params=query_time_params,
            verbose=verbose,
        )
# Cell
from pathlib import Path
import time
import numpy as np
from scipy import sparse
import nmslib
from sklearn.base import BaseEstimator, TransformerMixin
# Cell
def sparsify(*arrs):
    '''
    Return the given arrays as a list, converting any dense array to a
    scipy CSR matrix; inputs that are already sparse are kept as-is.
    '''
    return [a if sparse.issparse(a) else sparse.csr_matrix(a)
            for a in arrs]
def _robust_stack(blocks, stack_method = 'stack', **kwargs):
if any(sparse.issparse(i) for i in blocks):
stacked = getattr(sparse, stack_method)(blocks, **kwargs)
else:
stacked = getattr(np, stack_method)(blocks, **kwargs)
return stacked
def hstack(blocks, **kwargs):
    """Sparse-aware horizontal stack."""
    return _robust_stack(blocks, 'hstack', **kwargs)

def vstack(blocks, **kwargs):
    """Sparse-aware vertical stack."""
    return _robust_stack(blocks, 'vstack', **kwargs)

def stack(blocks, **kwargs):
    """Sparse-aware stack (dense inputs only: scipy.sparse lacks `stack`)."""
    return _robust_stack(blocks, 'stack', **kwargs)
# Cell
class NMSLibSklearnWrapper(BaseEstimator):
    '''
    Generic wrapper exposing nmslib approximate nearest neighbors under the
    sklearn NN API.
    For the available distance types, refer to
    https://github.com/nmslib/nmslib/blob/master/manual/spaces.md
    '''

    def __init__(
        self,
        # init index params
        nmslib_method='hnsw',
        nmslib_space='jaccard_sparse',
        nmslib_data_type=nmslib.DataType.OBJECT_AS_STRING,
        nmslib_dtype=nmslib.DistType.FLOAT,
        nmslib_space_params=None,
        # index creation params
        index_time_params=None,
        query_time_params=None,
        #
        n_neighbors=30,
        verbose=False,
        # x_prep_function
        X_prep_function=None
    ):
        # BUGFIX: avoid mutable default arguments; resolve the documented
        # dict defaults here instead.
        if nmslib_space_params is None:
            nmslib_space_params = {}
        if index_time_params is None:
            index_time_params = {'M': 30, 'indexThreadQty': 4, 'efConstruction': 100, 'post': 0}
        if query_time_params is None:
            query_time_params = {'efSearch': 100}
        self.nmslib_method = nmslib_method
        self.nmslib_space = nmslib_space
        self.nmslib_data_type = nmslib_data_type
        self.nmslib_space_params = nmslib_space_params
        self.nmslib_dtype = nmslib_dtype
        # index creation params
        self.index_time_params = index_time_params
        self.query_time_params = query_time_params
        #
        self.n_neighbors = n_neighbors
        self.verbose = verbose
        # optional callable applied to X before handing it to nmslib
        self.X_prep_function = X_prep_function

    def _preprocess_X(self, X):
        '''
        Apply self.X_prep_function to X if one was provided, otherwise
        return X unchanged.
        '''
        if self.X_prep_function is not None:
            X = self.X_prep_function(X)
        return X

    def _instantiate_index(self,):
        '''
        Instantiate a fresh (empty) nmslib index from the stored parameters.
        Kept as a separate method so unpickling can recreate the index.
        '''
        index = nmslib.init(
            method = self.nmslib_method,
            space = self.nmslib_space,
            data_type = self.nmslib_data_type,
            space_params = self.nmslib_space_params,
            dtype = self.nmslib_dtype,
        )
        return index

    def fit(self, X, y = None, **kwargs):
        '''
        Instantiate the index and add all points of X to it.

        y, if given, is stored alongside the index for later retrieval;
        otherwise an empty placeholder array is stored.
        '''
        # instantiate index
        index = self._instantiate_index()
        # preprocess X
        X_prep = self._preprocess_X(X)
        # add points to index
        index.addDataPointBatch(X_prep)
        # create the index
        index.createIndex(self.index_time_params, self.verbose)
        # handle None for y (data to save under indexes)
        if y is None:
            y = np.zeros((X.shape[0], 0))  # empty array
        # save fitted state
        self.index_ = index
        self.y_ = y
        self.X_ = X
        self.n_samples_fit_ = self.X_.shape[0]
        return self

    def partial_fit(self, X, y = None, **kwargs):
        '''
        Add new datapoints to the existing index and to the stored X/y.

        The estimator must already be fitted: call fit on the first batch
        of data, then partial_fit on each subsequent batch.
        '''
        # assume index is already instantiated
        X_prep = self._preprocess_X(X)
        self.index_.addDataPointBatch(X_prep)
        self.index_.createIndex(self.index_time_params, self.verbose)
        # handle None for y; use zeros for consistency with fit (the shape
        # has no elements either way)
        if y is None:
            y = np.zeros((X.shape[0], 0))  # empty array
        # append to saved state
        self.y_ = vstack([self.y_, y])
        self.X_ = vstack([self.X_, X])
        self.n_samples_fit_ = self.X_.shape[0]
        return self

    def kneighbors(self, X = None, n_neighbors = None, return_distance = True, query_time_params = None, n_jobs = 4):
        '''
        Query neighbors; if X is None, return the neighbors of each point
        in the index.

        Returns (distances, indices) when return_distance is True, else
        just indices; both are lists of per-query arrays.
        '''
        if query_time_params is None:
            query_time_params = self.query_time_params
        if n_neighbors is None:
            n_neighbors = self.n_neighbors
        if X is None:
            X = self.X_
        # preprocess X
        X = self._preprocess_X(X)
        self.index_.setQueryTimeParams(query_time_params)
        # querying
        start = time.time()
        nbrs = self.index_.knnQueryBatch(X, k = n_neighbors, num_threads = n_jobs)
        end = time.time()
        if self.verbose:
            # len() raises TypeError for e.g. scipy sparse matrices; fall
            # back to .shape (previously a bare `except:`).
            try:
                query_qty = len(X)
            except TypeError:
                query_qty = X.shape[0]
            print('kNN time total=%f (sec), per query=%f (sec), per query adjusted for thread number=%f (sec)' %
                  (end-start, float(end-start)/query_qty, n_jobs*float(end-start)/query_qty))
        if return_distance:
            distances = [nb[1] for nb in nbrs]
            nbrs = [nb[0] for nb in nbrs]
            return distances, nbrs
        else:
            nbrs = [nb[0] for nb in nbrs]
            return nbrs

    def kneighbors_graph(self, X=None, n_neighbors=None, mode="connectivity"):
        """Compute the (weighted) graph of k-Neighbors for points in X.
        Parameters
        ----------
        X : array-like of shape (n_queries, n_features), \
                or (n_queries, n_indexed) if metric == 'precomputed', \
                default=None
            The query point or points.
            If not provided, neighbors of each indexed point are returned.
            In this case, the query point is not considered its own neighbor.
            For ``metric='precomputed'`` the shape should be
            (n_queries, n_indexed). Otherwise the shape should be
            (n_queries, n_features).
        n_neighbors : int, default=None
            Number of neighbors for each sample. The default is the value
            passed to the constructor.
        mode : {'connectivity', 'distance','similarity'}, default='connectivity'
            Type of returned matrix: 'connectivity' will return the
            connectivity matrix with ones and zeros, in 'distance' the
            edges are distances between points, type of distance
            depends on the selected subclass. Similarity will return 1 - distance
        Returns
        -------
        A : sparse-matrix of shape (n_queries, n_samples_fit)
            `n_samples_fit` is the number of samples in the fitted data.
            `A[i, j]` gives the weight of the edge connecting `i` to `j`.
            The matrix is of CSR format.
        See Also
        --------
        NearestNeighbors.radius_neighbors_graph : Compute the (weighted) graph
            of Neighbors for points in X.
        Examples
        --------
        >>> X = [[0], [3], [1]]
        >>> from nmslearn.neighbors import FastL2NN
        >>> neigh = FastL2NN(n_neighbors=2)
        >>> neigh.fit(X)
        FastL2NN(n_neighbors=2)
        >>> A = neigh.kneighbors_graph(X)
        >>> A.toarray()
        array([[1., 0., 1.],
               [0., 1., 1.],
               [1., 0., 1.]])
        """
        if n_neighbors is None:
            n_neighbors = self.n_neighbors
        # check the input only in self.kneighbors
        # construct CSR matrix representation of the k-NN graph
        if mode == "connectivity":
            A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
            # BUGFIX: kneighbors returns a list of per-query arrays, so use
            # len(); the original called .shape[0] on a list.
            A_data = np.ones(len(A_ind) * n_neighbors)
        elif mode == "distance":
            A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True)
            A_data = np.ravel(A_data)
        elif mode == "similarity":
            A_data, A_ind = self.kneighbors(X, n_neighbors, return_distance=True)
            A_data = 1 - np.ravel(A_data)
        else:
            raise ValueError(
                'Unsupported mode, must be one of "connectivity", "similarity" '
                'or "distance" but got "%s" instead' % mode
            )
        n_queries = len(A_ind)
        n_samples_fit = self.n_samples_fit_
        n_nonzero = n_queries * n_neighbors
        A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
        kneighbors_graph = sparse.csr_matrix(
            (A_data, np.ravel(A_ind), A_indptr), shape=(n_queries, n_samples_fit)
        )
        return kneighbors_graph

    def __getstate__(self,):
        '''
        Serialize the nmslib index through a temporary file pair and return
        a state dict with the raw bytes substituted for the index object.
        BUGFIX: unlike the original, this does not mutate self, so the
        estimator remains usable after being pickled.
        '''
        tempfile_name = fr'.~nmslib_index_{str(int(time.time()*1e7))}'
        self.index_.saveIndex(tempfile_name, save_data = True)
        with open(tempfile_name, 'rb') as f:
            fb = f.read()
        with open(tempfile_name+'.dat', 'rb') as f:
            fb_dat = f.read()
        # delete tempfiles
        Path(tempfile_name).unlink()
        Path(tempfile_name+'.dat').unlink()
        # ship the binary payload inside a *copy* of the state dict
        state = self.__dict__.copy()
        state['index_'] = (fb, fb_dat)
        return state

    def __setstate__(self, d):
        '''
        Restore state during unpickling: write the stored index bytes to
        temporary files, load them into a fresh nmslib index, and clean up.
        '''
        self.__dict__ = d
        # write tempfiles with binaries to load via index.loadIndex
        tempfile_name = fr'.~nmslib_index_{str(int(time.time()*1e7))}'
        with open(tempfile_name, 'wb') as f:
            f.write(self.index_[0])
        with open(tempfile_name+'.dat', 'wb') as f:
            f.write(self.index_[1])
        index = self._instantiate_index()
        index.loadIndex(tempfile_name, load_data = True)
        self.index_ = index
        # delete tempfiles
        Path(tempfile_name).unlink()
        Path(tempfile_name+'.dat').unlink()
# Cell
def _preprocess_sparse_to_idx_str(X):
'''
encodes sparse rows into str of id of nonzero columns
'''
#ensure is sparse
X = sparse.csr_matrix(X)
indptr = X.indptr
cols = X.tocoo().col.astype(str)
id_strs = [*(' '.join(cols[slice(*indptr[i:i+2])]) for i in range(len(indptr)-1))]
return id_strs
class FastCosineNN(NMSLibSklearnWrapper):
    """Approximate nearest neighbors under cosine distance on sparse
    vectors, via nmslib's 'hnsw' method and string-encoded rows."""

    def __init__(
        self,
        n_neighbors=30,
        index_time_params=None,
        query_time_params=None,
        verbose=False,
    ):
        # BUGFIX: avoid mutable default arguments (dicts shared across
        # instances); resolve the documented defaults here instead.
        if index_time_params is None:
            index_time_params = {'M': 30, 'indexThreadQty': 4, 'efConstruction': 100, 'post': 0}
        if query_time_params is None:
            query_time_params = {'efSearch': 100}
        super().__init__(
            # cosine init params
            nmslib_method='hnsw',
            nmslib_space='cosinesimil_sparse_fast',
            nmslib_data_type=nmslib.DataType.OBJECT_AS_STRING,
            nmslib_dtype=nmslib.DistType.FLOAT,
            nmslib_space_params={},
            # other params
            X_prep_function=_preprocess_sparse_to_idx_str,
            n_neighbors=n_neighbors,
            index_time_params=index_time_params,
            query_time_params=query_time_params,
            verbose=verbose,
        )
class FastJaccardNN(NMSLibSklearnWrapper):
    """Approximate nearest neighbors under Jaccard distance on sparse
    binary vectors, via nmslib's 'hnsw' method and string-encoded rows."""

    def __init__(
        self,
        n_neighbors=30,
        index_time_params=None,
        query_time_params=None,
        verbose=False,
    ):
        # BUGFIX: avoid mutable default arguments (dicts shared across
        # instances); resolve the documented defaults here instead.
        if index_time_params is None:
            index_time_params = {'M': 30, 'indexThreadQty': 4, 'efConstruction': 100, 'post': 0}
        if query_time_params is None:
            query_time_params = {'efSearch': 100}
        super().__init__(
            # jaccard init params
            nmslib_method='hnsw',
            nmslib_space='jaccard_sparse',
            nmslib_data_type=nmslib.DataType.OBJECT_AS_STRING,
            nmslib_dtype=nmslib.DistType.FLOAT,
            nmslib_space_params={},
            # other params
            X_prep_function=_preprocess_sparse_to_idx_str,
            n_neighbors=n_neighbors,
            index_time_params=index_time_params,
            query_time_params=query_time_params,
            verbose=verbose,
        )
def kneighbors(self, X = None, n_neighbors = None, return_distance | |
= [stacked_heads[base_id][i] for i in range(len(stacked_heads[base_id]))]
new_stacked_heads[cc].pop()
if self.grandPar:
new_grand_parents[cc] = [grand_parents[base_id][i] for i in range(len(grand_parents[base_id]))]
new_grand_parents[cc].pop()
if self.sibling:
new_siblings[cc] = [siblings[base_id][i] for i in range(len(siblings[base_id]))]
if self.skipConnect:
new_skip_connects[cc] = [skip_connects[base_id][i] for i in range(len(skip_connects[base_id]))]
new_children[cc] = children[base_id]
new_children[cc, t] = child_id
hypothesis_scores[cc] = new_hyp_score
ids.append(id)
cc += 1
elif valid_hyp(base_id, child_id, head):
int_head = int(head)
new_parent_hn_dict[cc] = copy.copy(parent_hn_dict[base_id])
if int_head not in new_parent_hn_dict[cc]:
new_parent_hn_dict[cc][int_head] = []
for i in range(len(hx_parent)):
new_parent_hn_dict[cc][int_head].append(hx_parent[i][:,base_id,:].clone())
new_sib_hn_dict[cc] = copy.copy(sib_hn_dict[base_id])
new_sib_hn_dict[cc][int_head] = []
for i in range(len(hx)):
new_sib_hn_dict[cc][int_head].append(hx[i][:,base_id,:].clone())
new_constraints[cc] = constraints[base_id]
new_constraints[cc, child_id] = True
new_child_orders[cc] = child_orders[base_id]
new_child_orders[cc, head] = child_id
new_stacked_heads[cc] = [stacked_heads[base_id][i] for i in range(len(stacked_heads[base_id]))]
new_stacked_heads[cc].append(child_id)
if self.grandPar:
new_grand_parents[cc] = [grand_parents[base_id][i] for i in range(len(grand_parents[base_id]))]
new_grand_parents[cc].append(head)
if self.sibling:
new_siblings[cc] = [siblings[base_id][i] for i in range(len(siblings[base_id]))]
new_siblings[cc].append(child_id)
new_siblings[cc].append(0)
if self.skipConnect:
new_skip_connects[cc] = [skip_connects[base_id][i] for i in range(len(skip_connects[base_id]))]
# hack to handle LSTM
if isinstance(hx, tuple):
new_skip_connects[cc].append(hx[0][:, base_id, :].unsqueeze(1))
else:
new_skip_connects[cc].append(hx[:, base_id, :].unsqueeze(1))
new_skip_connects[cc].append(h0)
new_children[cc] = children[base_id]
new_children[cc, t] = child_id
hypothesis_scores[cc] = new_hyp_score
ids.append(id)
cc += 1
if cc == beam:
break
# [num_hyp]
num_hyp = len(ids)
if num_hyp == 0:
return None
else:
index = torch.from_numpy(np.array(ids)).type_as(base_index)
base_index = base_index[index]
child_index = child_index[index]
# predict types for new hypotheses
# compute output for type [num_hyp, num_labels]
out_type = self.bilinear(type_h[base_index], type_c[child_index])
hyp_type_scores = F.log_softmax(out_type, dim=1)
# compute the prediction of types [num_hyp]
hyp_type_scores, hyp_types = hyp_type_scores.max(dim=1)
hypothesis_scores[:num_hyp] = hypothesis_scores[:num_hyp] + hyp_type_scores
sib_hn_dict = new_sib_hn_dict
parent_hn_dict = new_parent_hn_dict
for i in range(num_hyp):
base_id = base_index[i]
new_stacked_types[i] = stacked_types[base_id]
new_stacked_types[i, t] = hyp_types[i]
stacked_heads = [[new_stacked_heads[i][j] for j in range(len(new_stacked_heads[i]))] for i in range(num_hyp)]
if self.grandPar:
grand_parents = [[new_grand_parents[i][j] for j in range(len(new_grand_parents[i]))] for i in range(num_hyp)]
if self.sibling:
siblings = [[new_siblings[i][j] for j in range(len(new_siblings[i]))] for i in range(num_hyp)]
if self.skipConnect:
skip_connects = [[new_skip_connects[i][j] for j in range(len(new_skip_connects[i]))] for i in range(num_hyp)]
constraints = new_constraints
child_orders = new_child_orders
children.copy_(new_children)
stacked_types.copy_(new_stacked_types)
# hx [decoder_layers, num_hyp, hidden_size]
# hack to handle LSTM
if isinstance(hx, tuple):
hx, cx = hx
hx = hx[:, base_index, :]
cx = cx[:, base_index, :]
hx = (hx, cx)
else:
hx = hx[:, base_index, :]
children = children.cpu().numpy()[0]
stacked_types = stacked_types.cpu().numpy()[0]
heads = np.zeros(length, dtype=np.int32)
types = np.zeros(length, dtype=np.int32)
stack = [0]
for i in range(num_step):
head = stack[-1]
child = children[i]
type = stacked_types[i]
if child != head:
heads[child] = head
types[child] = type
stack.append(child)
else:
stacked_types[i] = 0
stack.pop()
return heads, types, length, children, stacked_types
class HPtrNetPSGate(StackPtrNet):
""" receive hidden state from sibling and parent. Using gate. """
def __init__(self, *args, **kwargs):
    """Create the gating layers on top of the base StackPtrNet.

    Three hidden_size -> hidden_size projections transform the previous,
    parent and sibling hidden states; the matching *_gate projections feed
    the sigmoid gate that blends parent and sibling contributions.
    """
    super(HPtrNetPSGate, self).__init__(*args, **kwargs)
    # content transforms for parent / sibling / previous hidden states
    self.parent_hn_dense = nn.Linear(self.hidden_size, self.hidden_size)
    self.sib_hn_dense = nn.Linear(self.hidden_size, self.hidden_size)
    self.pre_hn_dense = nn.Linear(self.hidden_size, self.hidden_size)
    # gate transforms (same shapes) that produce the sigmoid mixing gate
    self.parent_hn_dense_gate = nn.Linear(self.hidden_size, self.hidden_size)
    self.sib_hn_dense_gate = nn.Linear(self.hidden_size, self.hidden_size)
    self.pre_hn_dense_gate = nn.Linear(self.hidden_size, self.hidden_size)
    # NOTE(review): torch.Tensor(n) allocates uninitialized memory — confirm
    # this bias is explicitly initialized (reset/load) before training.
    self.bias_gate = nn.Parameter(torch.Tensor(self.hidden_size))
    # dropout applied to the gated hidden state
    self.dropout_hn = nn.Dropout2d(p=0.33)
def _get_decoder_output(self, output_enc, heads, heads_stack, siblings, hx, mask_d=None, length_d=None):
    """Run the decoder over the head stack, gating parent/sibling hidden states.

    For every decoding step the previous RNN state is combined with the
    stored hidden state of the current head's parent and last sibling via a
    sigmoid gate before being fed to the decoder cell.

    Returns (output, hn, mask_d, length_d) where output is
    [batch, length_decoder, hidden_size].
    """
    batch, _, _ = output_enc.size()
    # create batch index [batch]
    batch_index = torch.arange(0, batch).type_as(output_enc).long()
    # get vector for heads [batch, length_decoder, input_dim],
    src_encoding = output_enc[batch_index, heads_stack.t()].transpose(0, 1)
    if self.sibling:
        # [batch, length_decoder, hidden_size * 2]; sibling id 0 means "none"
        mask_sibs = siblings.ne(0).float().unsqueeze(2)
        output_enc_sibling = output_enc[batch_index, siblings.t()].transpose(0, 1) * mask_sibs
        src_encoding = src_encoding + output_enc_sibling
    if self.grandPar:
        # [length_decoder, batch]
        gpars = heads[batch_index, heads_stack.t()]
        # [batch, length_decoder, hidden_size * 2]
        output_enc_gpar = output_enc[batch_index, gpars].transpose(0, 1)
        src_encoding = src_encoding + output_enc_gpar
    # transform to decoder input
    # [batch, length_decoder, dec_dim]
    src_encoding = F.elu(self.src_dense(src_encoding))
    _, length_decoder, _ = src_encoding.shape
    # output from rnn [batch, length, hidden_size]
    output = torch.zeros(batch, length_decoder, self.hidden_size).type_as(src_encoding)
    hn = hx
    # parent_hn_dict[head][b]: hidden state recorded when `head` was decoded
    parent_hn_dict = {}
    parent_hn_dict[0] = {}
    for b in range(batch):
        parent_hn_dict[0][b] = []
        for i in range(len(hn)):
            parent_hn_dict[0][b].append(hn[i][:,b].clone())
    # sib_hn_dict[head][b]: hidden state after decoding the latest child of `head`
    sib_hn_dict = {}
    for step in range(length_decoder):
        head = heads_stack[:, step]
        hn_pre = [hn[0].clone(), hn[1].clone()]
        # zero placeholders: entries stay zero when no parent/sibling state is stored
        hn_parent = [torch.zeros(hn[0].shape).type_as(hn[0]), torch.zeros(hn[1].shape).type_as(hn[1])]
        hn_sib = [torch.zeros(hn[0].shape).type_as(hn[0]), torch.zeros(hn[1].shape).type_as(hn[1])]
        for b in range(batch):
            curr_head = int(head[b])
            if (curr_head in parent_hn_dict) and (b in parent_hn_dict[curr_head]):
                for i in range(len(hn)):
                    hn_parent[i][:,b] = parent_hn_dict[curr_head][b][i].clone()
            if (curr_head in sib_hn_dict) and (b in sib_hn_dict[curr_head]):
                for i in range(len(hn)):
                    hn_sib[i][:,b] = sib_hn_dict[curr_head][b][i].clone()
        hn = list(hn)
        for i in range(len(hn)):
            tmp_hn_pre = self.pre_hn_dense(hn_pre[i])
            tmp_hn_parent = self.parent_hn_dense(hn_parent[i].clone())
            tmp_hn_sib = self.sib_hn_dense(hn_sib[i].clone())
            tmp_hn_pre_gate = self.pre_hn_dense_gate(hn_pre[i])
            tmp_hn_parent_gate = self.parent_hn_dense_gate(hn_parent[i].clone())
            tmp_hn_sib_gate = self.sib_hn_dense_gate(hn_sib[i].clone())
            # single gate blends parent and sibling contributions
            gate = F.sigmoid(tmp_hn_parent_gate + tmp_hn_sib_gate + self.bias_gate)
            # NOTE(review): tmp_hn_pre / tmp_hn_pre_gate are computed but unused
            # here — confirm the previous-state path is intentionally dropped.
            hn[i] = self.dropout_hn(F.tanh(tmp_hn_parent * gate + tmp_hn_sib * gate))
        hn = tuple(hn)
        for b in range(batch):
            curr_head = int(head[b])
            if curr_head not in parent_hn_dict:
                parent_hn_dict[curr_head] = {}
            if b not in parent_hn_dict[curr_head]:
                # record the parent-side state only the first time this head appears
                parent_hn_dict[curr_head][b] = []
                for i in range(len(hn_parent)):
                    parent_hn_dict[curr_head][b].append(hn_parent[i][:,b].clone())
        step_output, hn = self.decoder(src_encoding[:, step, :].unsqueeze(1),
                                       mask_d[:, step].unsqueeze(1),
                                       hx=hn)
        for b in range(batch):
            curr_head = int(head[b])
            if curr_head not in sib_hn_dict:
                sib_hn_dict[curr_head] = {}
            # always overwrite: keep the state after the *latest* child of this head
            sib_hn_dict[curr_head][b] = []
            for i in range(len(hn)):
                sib_hn_dict[curr_head][b].append(hn[i][:,b].clone())
        for b in range(batch):
            output[b, step, :] = step_output[b]
    # apply dropout
    # [batch, length, hidden_size] --> [batch, hidden_size, length] --> [batch, length, hidden_size]
    output = self.dropout_out(output.transpose(1, 2)).transpose(1, 2)
    return output, hn, mask_d, length_d
def _decode_per_sentence(self, output_enc, arc_c, type_c, hx, length, beam, ordered, leading_symbolic):
def valid_hyp(base_id, child_id, head):
if constraints[base_id, child_id]:
return False
elif not ordered or self.prior_order == PriorOrder.DEPTH or child_orders[base_id, head] == 0:
return True
elif self.prior_order == PriorOrder.LEFT2RIGTH:
return child_id > child_orders[base_id, head]
else:
if child_id < head:
return child_id < child_orders[base_id, head] < head
else:
return child_id > child_orders[base_id, head]
# output_enc [length, hidden_size * 2]
# arc_c [length, arc_space]
# type_c [length, type_space]
# hx [decoder_layers, hidden_size]
if length is not None:
output_enc = output_enc[:length]
arc_c = arc_c[:length]
type_c = type_c[:length]
else:
length = output_enc.size(0)
# [decoder_layers, 1, hidden_size]
# hack to handle LSTM
if isinstance(hx, tuple):
hx, cx = hx
hx = hx.unsqueeze(1)
cx = cx.unsqueeze(1)
h0 = hx
hx = (hx, cx)
else:
hx = hx.unsqueeze(1)
h0 = hx
stacked_heads = [[0] for _ in range(beam)]
grand_parents = [[0] for _ in range(beam)] if self.grandPar else None
siblings = [[0] for _ in range(beam)] if self.sibling else None
skip_connects = [[h0] for _ in range(beam)] if self.skipConnect else None
children = torch.zeros(beam, int(2 * length - 1)).type_as(output_enc).long()
stacked_types = children.new_zeros(children.size())
hypothesis_scores = output_enc.new_zeros(beam)
constraints = np.zeros([beam, length], dtype=np.bool)
constraints[:, 0] = True
child_orders = np.zeros([beam, length], dtype=np.int64)
# temporal tensors for each step.
new_stacked_heads = [[] for _ in range(beam)]
new_grand_parents = [[] for _ in range(beam)] if self.grandPar else None
new_siblings = [[] for _ in range(beam)] if self.sibling else None
new_skip_connects = [[] for _ in range(beam)] if self.skipConnect else None
new_children = children.new_zeros(children.size())
new_stacked_types = stacked_types.new_zeros(stacked_types.size())
num_hyp = 1
num_step = 2 * length - 1
parent_hn_dict = [{} for i in range(beam)]
sib_hn_dict = [{} for i in range(beam)]
# init parent_hn_dict for first step
for n in range(num_hyp):
parent_hn_dict[n][0] = []
for i in range(len(hx)):
parent_hn_dict[n][0].append(hx[i][:,n,:].clone())
for t in range(num_step):
# [num_hyp]
heads = torch.LongTensor([stacked_heads[i][-1] for i in range(num_hyp)]).type_as(children)
gpars = torch.LongTensor([grand_parents[i][-1] for i in range(num_hyp)]).type_as(children) if self.grandPar else None
sibs = torch.LongTensor([siblings[i].pop() for i in range(num_hyp)]).type_as(children) if self.sibling else None
# [decoder_layers, num_hyp, hidden_size]
hs = torch.cat([skip_connects[i].pop() for i in range(num_hyp)], dim=1) if self.skipConnect else None
# [num_hyp, hidden_size * 2]
src_encoding = output_enc[heads]
if self.sibling:
mask_sibs = sibs.ne(0).float().unsqueeze(1)
output_enc_sibling = output_enc[sibs] * mask_sibs
src_encoding = src_encoding + output_enc_sibling
if self.grandPar:
output_enc_gpar = output_enc[gpars]
src_encoding = src_encoding + output_enc_gpar
# transform to decoder input
# [num_hyp, dec_dim]
src_encoding = F.elu(self.src_dense(src_encoding))
hx_pre = (hx[0].clone(), hx[1].clone())
hx_parent = [torch.zeros(hx[0].shape).type_as(hx[0]), torch.zeros(hx[1].shape).type_as(hx[1])]
hx_sib = [torch.zeros(hx[0].shape).type_as(hx[0]), torch.zeros(hx[1].shape).type_as(hx[1])]
# update parent hidden states
for n in range(num_hyp):
head = int(heads[n])
if head in parent_hn_dict[n]:
for i in range(len(hx)):
hx_parent[i][:,n,:] = parent_hn_dict[n][head][i].clone()
if head in sib_hn_dict[n]:
for i in range(len(hx)):
hx_sib[i][:,n,:] = sib_hn_dict[n][head][i].clone()
hx = list(hx)
# update hidden states
for i in range(len(hx)):
tmp_hn_pre = self.pre_hn_dense(hx_pre[i])
tmp_hn_parent = self.parent_hn_dense(hx_parent[i].clone())
tmp_hn_sib= self.sib_hn_dense(hx_sib[i].clone())
tmp_hn_pre_gate = self.pre_hn_dense_gate(hx_pre[i])
tmp_hn_parent_gate = self.parent_hn_dense_gate(hx_parent[i].clone())
tmp_hn_sib_gate = self.sib_hn_dense_gate(hx_sib[i].clone())
gate = F.sigmoid(tmp_hn_parent_gate + tmp_hn_sib_gate + self.bias_gate)
hx[i] = self.dropout_hn(F.tanh(tmp_hn_parent * gate + tmp_hn_sib * gate))
hx = tuple(hx)
# output [num_hyp, hidden_size]
# hx [decoder_layer, num_hyp, hidden_size]
output_dec, hx = self.decoder.step(src_encoding, hx=hx, hs=hs) if self.skipConnect else self.decoder.step(src_encoding, hx=hx)
# | |
ctx.notify_all(f'[Server]: GO')
async def missing(ctx: Context, client: Client, locations: list):
    """Send the client a Missing packet listing the given location ids."""
    payload = {'locations': json.dumps(locations)}
    await ctx.send_msgs(client, [['Missing', payload]])
def get_players_string(ctx: Context):
    """Build a one-line roster grouped by team; absent players are parenthesized."""
    connected = {(endpoint.team, endpoint.slot) for endpoint in ctx.endpoints if endpoint.auth}
    parts = []
    last_team = -1
    for team, slot in sorted(ctx.player_names.keys()):
        name = ctx.player_names[team, slot]
        if team != last_team:
            parts.append(f':: Team #{team + 1}: ')
            last_team = team
        parts.append(f'{name} ' if (team, slot) in connected else f'({name}) ')
    text = ''.join(parts)
    # text[:-1] drops the trailing space
    return f'{len(connected)} players of {len(ctx.player_names)} connected ' + text[:-1]
def get_received_items(ctx: Context, team: int, player: int) -> typing.List[ReceivedItem]:
    """Return the mutable received-items list for (team, player), creating it on first use."""
    key = (team, player)
    if key not in ctx.received_items:
        ctx.received_items[key] = []
    return ctx.received_items[key]
def tuplize_received_items(items):
    """Convert ReceivedItem-like objects into plain (item, location, player) tuples."""
    result = []
    for entry in items:
        result.append((entry.item, entry.location, entry.player))
    return result
def send_new_items(ctx: Context):
    """Push any not-yet-delivered received items to every authenticated client."""
    for endpoint in ctx.endpoints:
        if not endpoint.auth:
            continue
        items = get_received_items(ctx, endpoint.team, endpoint.slot)
        if len(items) <= endpoint.send_index:
            continue  # nothing new for this client
        asyncio.create_task(ctx.send_msgs(endpoint, [
            ['ReceivedItems', (endpoint.send_index, tuplize_received_items(items)[endpoint.send_index:])]]))
        endpoint.send_index = len(items)
def forfeit_player(ctx: Context, team: int, slot: int):
    """Announce a forfeit and mark every location checked, releasing all items."""
    all_locations = set()
    for values in Regions.location_table.values():
        if type(values[0]) is int:
            all_locations.add(values[0])
    ctx.notify_all("%s (Team #%d) has forfeited" % (ctx.player_names[(team, slot)], team + 1))
    register_location_checks(ctx, team, slot, all_locations)
def get_remaining(ctx: Context, team: int, slot: int) -> typing.List[int]:
    """Return sorted item ids sitting at this player's still-unchecked locations."""
    checked = ctx.location_checks[team, slot]
    return sorted(ctx.locations[location, slot][0]  # item ID
                  for (location, location_slot) in ctx.locations
                  if location_slot == slot and location not in checked)
def register_location_checks(ctx: Context, team: int, slot: int, locations):
    """Record newly checked locations for a player and dispatch found items.

    For each location not checked before: if the item belongs to another
    player (or this world uses remote items), append it to the recipient's
    received-item list (skipping duplicates) and announce the send; a local
    pickup only triggers ItemFound notifications for interested clients.
    Updates activity timers, hint points, and persists via ctx.save().
    """
    found_items = False
    new_locations = set(locations) - ctx.location_checks[team, slot]
    if new_locations:
        # any fresh check counts as client activity
        ctx.client_activity_timers[team, slot] = datetime.datetime.now(datetime.timezone.utc)
        for location in new_locations:
            if (location, slot) in ctx.locations:
                target_item, target_player = ctx.locations[(location, slot)]
                if target_player != slot or slot in ctx.remote_items:
                    # deliver through the server; skip if this exact
                    # (location, finder) pair was already delivered
                    found = False
                    recvd_items = get_received_items(ctx, team, target_player)
                    for recvd_item in recvd_items:
                        if recvd_item.location == location and recvd_item.player == slot:
                            found = True
                            break
                    if not found:
                        new_item = ReceivedItem(target_item, location, slot)
                        recvd_items.append(new_item)
                        if slot != target_player:
                            ctx.broadcast_team(team, [['ItemSent', (slot, location, target_player, target_item)]])
                        logging.info('(Team #%d) %s sent %s to %s (%s)' % (
                            team + 1, ctx.player_names[(team, slot)], get_item_name_from_id(target_item),
                            ctx.player_names[(team, target_player)], get_location_name_from_address(location)))
                        found_items = True
                elif target_player == slot:  # local pickup, notify clients of the pickup
                    if location not in ctx.location_checks[team, slot]:
                        for client in ctx.endpoints:
                            if client.team == team and client.wants_item_notification:
                                asyncio.create_task(
                                    ctx.send_msgs(client, [['ItemFound', (target_item, location, slot)]]))
        ctx.location_checks[team, slot] |= new_locations
        send_new_items(ctx)
        if found_items:
            # hint points may have changed now that more checks are registered
            for client in ctx.endpoints:
                if client.team == team and client.slot == slot:
                    asyncio.create_task(ctx.send_msgs(client, [["HintPointUpdate", (get_client_points(ctx, client),)]]))
        ctx.save()
def notify_team(ctx: Context, team: int, text: str):
    """Log a notice and broadcast it as a Print message to one team."""
    logging.info("Notice (Team #%d): %s" % (team + 1, text))
    message = [['Print', text]]
    ctx.broadcast_team(team, message)
def collect_hints(ctx: Context, team: int, slot: int, item: str) -> typing.List[Utils.Hint]:
    """Build a hint for every location holding the named item for this player."""
    seeked_item_id = Items.item_table[item][3]
    hints = []
    for (location_id, finding_player), (item_id, receiving_player) in ctx.locations.items():
        if receiving_player != slot or item_id != seeked_item_id:
            continue
        found = location_id in ctx.location_checks[team, finding_player]
        entrance = ctx.er_hint_data.get(finding_player, {}).get(location_id, "")
        hints.append(Utils.Hint(receiving_player, finding_player, location_id, item_id, found, entrance))
    return hints
def collect_hints_location(ctx: Context, team: int, slot: int, location: str) -> typing.List[Utils.Hint]:
    """Build the hint for the item placed at the named location in this player's world."""
    seeked_location = Regions.location_table[location][0]
    hints = []
    for (location_id, finding_player), (item_id, receiving_player) in ctx.locations.items():
        if finding_player == slot and location_id == seeked_location:
            found = location_id in ctx.location_checks[team, finding_player]
            entrance = ctx.er_hint_data.get(finding_player, {}).get(location_id, "")
            hints.append(Utils.Hint(receiving_player, finding_player, location_id, item_id, found, entrance))
            break  # each location has 1 item
    return hints
def format_hint(ctx: Context, team: int, hint: Utils.Hint) -> str:
    """Render a hint as the human-readable chat line shown to players."""
    parts = [
        f"[Hint]: {ctx.player_names[team, hint.receiving_player]}'s ",
        f"{Items.lookup_id_to_name[hint.item]} is ",
        f"at {get_location_name_from_address(hint.location)} ",
        f"in {ctx.player_names[team, hint.finding_player]}'s World",
    ]
    if hint.entrance:
        parts.append(f" at {hint.entrance}")
    parts.append(". (found)" if hint.found else ".")
    return "".join(parts)
def get_intended_text(input_text: str, possible_answers: typing.Iterable[str]= console_names) -> typing.Tuple[str, bool, str]:
    """Fuzzy-match input_text against possible_answers.

    Returns (best_match, usable, explanation): usable is True when the match
    is unambiguous enough to act on without confirmation.
    """
    picks = fuzzy_process.extract(input_text, possible_answers, limit=2)
    best, score = picks[0][0], picks[0][1]
    if len(picks) <= 1:
        if score > 90:
            return best, True, "Only Option Match"
        return best, False, f"Did you mean {best}? ({score}% sure)"
    if score == 100:
        return best, True, "Perfect Match"
    if score < 75:
        return best, False, f"Didn't find something that closely matches, " \
                            f"did you mean {best}? ({score}% sure)"
    if score - picks[1][1] > 5:
        return best, True, "Close Match"
    return best, False, f"Too many close matches, did you mean {best}? ({score}% sure)"
class CommandMeta(type):
    """Metaclass that collects every `_cmd_*` method into a `commands` dict.

    Commands inherited from base classes are merged first, then overridden by
    handlers declared on the class itself; each is keyed by the lowercased
    name following the `_cmd_` prefix.
    """
    def __new__(cls, name, bases, attrs):
        collected = {}
        for base in bases:
            collected.update(base.commands)
        for attr_name, attr in attrs.items():
            if attr_name.startswith("_cmd_"):
                collected[attr_name[5:].lower()] = attr
        attrs["commands"] = collected
        return super(CommandMeta, cls).__new__(cls, name, bases, attrs)
def mark_raw(function):
    """Flag a command handler as wanting the raw, unsplit argument text."""
    setattr(function, "raw_text", True)
    return function
class CommandProcessor(metaclass=CommandMeta):
    """Base text-command dispatcher.

    CommandMeta gathers every `_cmd_<name>` method into `commands`; __call__
    parses one raw input line and dispatches to the matching handler when the
    line starts with `marker`.
    """
    commands: typing.Dict[str, typing.Callable]
    # prefix distinguishing commands from plain text
    marker = "/"
    def output(self, text: str):
        """Default output sink; subclasses override to route text elsewhere."""
        print(text)
    def __call__(self, raw: str) -> typing.Optional[bool]:
        """Parse one input line and run the matching command handler.

        Returns the handler's result (commonly a success bool), or None for
        empty input, unknown commands, or non-command text.
        """
        if not raw:
            return
        try:
            command = raw.split()
            basecommand = command[0]
            if basecommand[0] == self.marker:
                method = self.commands.get(basecommand[1:].lower(), None)
                if not method:
                    self._error_unknown_command(basecommand[1:])
                else:
                    if getattr(method, "raw_text", False):  # method is requesting unprocessed text data
                        arg = raw.split(maxsplit=1)
                        if len(arg) > 1:
                            return method(self, arg[1])  # argument text was found, so pass it along
                        else:
                            return method(self)  # argument may be optional, try running without args
                    else:
                        return method(self, *command[1:])  # pass each word as argument
            else:
                self.default(raw)
        except Exception as e:
            # a handler crash must not take down the caller; report it instead
            self._error_parsing_command(e)
    def get_help_text(self) -> str:
        """Build a usage listing for every registered command from its signature and docstring."""
        s = ""
        for command, method in self.commands.items():
            spec = inspect.signature(method).parameters
            argtext = ""
            for argname, parameter in spec.items():
                if argname == "self":
                    continue
                # string defaults double as usage hints: empty -> optional, else shown inline
                if isinstance(parameter.default, str):
                    if not parameter.default:
                        argname = f"[{argname}]"
                    else:
                        argname += "=" + parameter.default
                argtext += argname
                argtext += " "
            s += f"{self.marker}{command} {argtext}\n    {method.__doc__}\n"
        return s
    def _cmd_help(self):
        """Returns the help listing"""
        self.output(self.get_help_text())
    def _cmd_license(self):
        """Returns the licensing information"""
        # lazily read the LICENSE file once and cache it on the base class
        license = getattr(CommandProcessor, "license", None)
        if not license:
            with open(Utils.local_path("LICENSE")) as f:
                CommandProcessor.license = license = f.read()
        self.output(CommandProcessor.license)
    def default(self, raw: str):
        """Handle non-command input; subclasses may override."""
        self.output("Echo: " + raw)
    def _error_unknown_command(self, raw: str):
        """Report an unrecognized command together with the known command names."""
        self.output(f"Could not find command {raw}. Known commands: {', '.join(self.commands)}")
    def _error_parsing_command(self, exception: Exception):
        """Surface a handler exception to the user as plain text."""
        self.output(str(exception))
class ClientMessageProcessor(CommandProcessor):
marker = "!"
ctx: Context
def __init__(self, ctx: Context, client: Client):
    # Bind the processor to the server context and the single client it serves.
    self.ctx = ctx
    self.client = client
def output(self, text):
    # Route command feedback to this client instead of stdout.
    self.ctx.notify_client(self.client, text)
def default(self, raw: str):
    # Non-command chat text is intentionally ignored by the processor.
    pass  # default is client sending just text
def _cmd_players(self) -> bool:
    """Get information about connected and missing players"""
    summary = get_players_string(self.ctx)
    # small games broadcast the roster; large games answer privately
    if len(self.ctx.player_names) < 10:
        self.ctx.notify_all(summary)
    else:
        self.output(summary)
    return True
def _cmd_forfeit(self) -> bool:
    """Surrender and send your remaining items out to their recipients"""
    # forfeit_mode is a server setting: "enabled", "disabled", or goal-gated,
    # where forfeiting requires having beaten the game first.
    if "enabled" in self.ctx.forfeit_mode:
        forfeit_player(self.ctx, self.client.team, self.client.slot)
        return True
    elif "disabled" in self.ctx.forfeit_mode:
        self.output(
            "Sorry, client forfeiting has been disabled on this server. You can ask the server admin for a /forfeit")
        return False
    else:  # is auto or goal
        if self.ctx.client_game_state[self.client.team, self.client.slot] == CLIENT_GOAL:
            forfeit_player(self.ctx, self.client.team, self.client.slot)
            return True
        else:
            self.output(
                "Sorry, client forfeiting requires you to have beaten the game on this server."
                " You can ask the server admin for a /forfeit")
            # clients older than 2.1.0 never report game completion, so they
            # can never satisfy the goal condition above
            if self.client.version < [2, 1, 0]:
                self.output(
                    "Your client is too old to send game beaten information. Please update, load you savegame and reconnect.")
            return False
def _cmd_remaining(self) -> bool:
    """List remaining items in your game, but not their location or recipient"""
    # Shared success path (was duplicated in the "enabled" and goal branches):
    # resolve the remaining item ids to names and report them to the client.
    def report_remaining() -> bool:
        remaining_item_ids = get_remaining(self.ctx, self.client.team, self.client.slot)
        if remaining_item_ids:
            self.output("Remaining items: " + ", ".join(Items.lookup_id_to_name.get(item_id, "unknown item")
                                                        for item_id in remaining_item_ids))
        else:
            self.output("No remaining items found.")
        return True
    if self.ctx.remaining_mode == "enabled":
        return report_remaining()
    elif self.ctx.remaining_mode == "disabled":
        self.output(
            "Sorry, !remaining has been disabled on this server.")
        return False
    else:  # is goal
        if self.ctx.client_game_state[self.client.team, self.client.slot] == CLIENT_GOAL:
            return report_remaining()
        else:
            self.output(
                "Sorry, !remaining requires you to have beaten the game on this server")
            # old clients never report game completion and cannot pass the gate
            if self.client.version < [2, 1, 0]:
                self.output(
                    "Your client is too old to send game beaten information. Please update, load you savegame and reconnect.")
            return False
def _cmd_countdown(self, seconds: str = "10") -> bool:
    """Start a countdown in seconds"""
    try:
        duration = int(seconds, 10)
    except ValueError:
        # non-numeric input falls back to the default of 10 seconds
        duration = 10
    asyncio.create_task(countdown(self.ctx, duration))
    return True
def _cmd_missing(self) -> bool:
"""List all missing location checks from the server's perspective"""
locations = []
for location_id, location_name in Regions.lookup_id_to_name.items(): # cheat console is -1, keep in mind
if location_id != -1 and location_id not in self.ctx.location_checks[self.client.team, self.client.slot]:
locations.append(location_name)
if len(locations) > 0:
if self.client.version < [2, 3, 0]:
buffer = ""
for | |
"""!
@brief Neural Network: Self-Organized Feature Map
@details Implementation based on paper @cite article::nnet::som::1, @cite article::nnet::som::2.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
import math
import random
import matplotlib.pyplot as plt
import pyclustering.core.som_wrapper as wrapper
from pyclustering.core.wrapper import ccore_library
from pyclustering.utils import euclidean_distance_square
from pyclustering.utils.dimension import dimension_info
from enum import IntEnum
class type_conn(IntEnum):
    """!
    @brief Enumeration of connection types for SOM.
    @see som
    """
    ## Grid type of connections when each oscillator has connections with left, upper, right, lower neighbors.
    grid_four = 0
    ## Grid type of connections when each oscillator has connections with left, upper-left, upper, upper-right, right, right-lower, lower, lower-left neighbors.
    grid_eight = 1
    ## Grid type of connections when each oscillator has connections with left, upper-left, upper-right, right, right-lower, lower-left neighbors.
    honeycomb = 2
    ## Grid type of connections when existence of each connection is defined by the SOM rule on each step of simulation.
    func_neighbor = 3
class type_init(IntEnum):
    """!
    @brief Enumeration of initialization types for SOM.
    @see som
    """
    ## Weights are randomly distributed using Gaussian distribution (0, 1).
    random = 0
    ## Weights are randomly distributed using Gaussian distribution (input data centroid, 1).
    random_centroid = 1
    ## Weights are randomly distributed using Gaussian distribution (input data centroid, surface of input data).
    random_surface = 2
    ## Weights are distributed as a uniform grid that covers whole surface of the input data.
    uniform_grid = 3
class som_parameters:
    """!
    @brief Represents SOM parameters.
    """
    def __init__(self):
        """!
        @brief Creates SOM parameters.
        """
        ## Defines an initialization way for neuron weights (random, random in center of the input data, random distributed in data, distributed in line with uniform grid).
        self.init_type = type_init.uniform_grid
        ## Initial radius. If the initial radius is not specified (equals to `None`) then it will be calculated by SOM.
        self.init_radius = None
        ## Rate of learning.
        self.init_learn_rate = 0.1
        ## Condition that defines when the learning process should be stopped. It is used when the autostop mode is on.
        self.adaptation_threshold = 0.001
        ## Seed for random state (by default is `None`, current system time is used).
        self.random_state = None
class som:
"""!
@brief Represents self-organized feature map (SOM).
@details The self-organizing feature map (SOM) method is a powerful tool for the visualization of
of high-dimensional data. It converts complex, nonlinear statistical relationships between
high-dimensional data into simple geometric relationships on a low-dimensional display.
@details `ccore` option can be specified in order to control using C++ implementation of pyclustering library. By
default C++ implementation is on. C++ implementation improves performance of the self-organized feature
map.
Example:
@code
import random
from pyclustering.utils import read_sample
from pyclustering.nnet.som import som, type_conn, type_init, som_parameters
from pyclustering.samples.definitions import FCPS_SAMPLES
# read sample 'Lsun' from file
sample = read_sample(FCPS_SAMPLES.SAMPLE_LSUN)
# create SOM parameters
parameters = som_parameters()
# create self-organized feature map with size 7x7
rows = 10 # five rows
cols = 10 # five columns
structure = type_conn.grid_four; # each neuron has max. four neighbors.
network = som(rows, cols, structure, parameters)
# train network on 'Lsun' sample during 100 epouchs.
network.train(sample, 100)
# simulate trained network using randomly modified point from input dataset.
index_point = random.randint(0, len(sample) - 1)
point = sample[index_point] # obtain randomly point from data
point[0] += random.random() * 0.2 # change randomly X-coordinate
point[1] += random.random() * 0.2 # change randomly Y-coordinate
index_winner = network.simulate(point)
# check what are objects from input data are much close to randomly modified.
index_similar_objects = network.capture_objects[index_winner]
# neuron contains information of encoded objects
print("Point '%s' is similar to objects with indexes '%s'." % (str(point), str(index_similar_objects)))
print("Coordinates of similar objects:")
for index in index_similar_objects: print("\tPoint:", sample[index])
# result visualization:
# show distance matrix (U-matrix).
network.show_distance_matrix()
# show density matrix (P-matrix).
network.show_density_matrix()
# show winner matrix.
network.show_winner_matrix()
# show self-organized map.
network.show_network()
@endcode
There is a visualization of 'Target' sample that was done by the self-organized feature map:
@image html target_som_processing.png
"""
@property
def size(self):
"""!
@brief Return size of self-organized map that is defined by total number of neurons.
@return (uint) Size of self-organized map (number of neurons).
"""
if self.__ccore_som_pointer is not None:
self._size = wrapper.som_get_size(self.__ccore_som_pointer)
return self._size
@property
def weights(self):
"""!
@brief Return weight of each neuron.
@return (list) Weights of each neuron.
"""
if self.__ccore_som_pointer is not None:
self._weights = wrapper.som_get_weights(self.__ccore_som_pointer)
return self._weights
@property
def awards(self):
"""!
@brief Return amount of captured objects by each neuron after training.
@return (list) Amount of captured objects by each neuron.
@see train()
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
return self._award
@property
def capture_objects(self):
"""!
@brief Returns indexes of captured objects by each neuron.
@details For example, a network with size 2x2 has been trained on a sample with five objects. Suppose neuron #1
won an object with index `1`, neuron #2 won objects `0`, `3`, `4`, neuron #3 did not won anything and
finally neuron #4 won an object with index `2`. Thus, for this example we will have the following
output `[[1], [0, 3, 4], [], [2]]`.
@return (list) Indexes of captured objects by each neuron.
"""
if self.__ccore_som_pointer is not None:
self._capture_objects = wrapper.som_get_capture_objects(self.__ccore_som_pointer)
return self._capture_objects
def __init__(self, rows, cols, conn_type=type_conn.grid_eight, parameters=None, ccore=True):
    """!
    @brief Constructor of self-organized map.
    @param[in] rows (uint): Number of neurons in the column (number of rows).
    @param[in] cols (uint): Number of neurons in the row (number of columns).
    @param[in] conn_type (type_conn): Type of connection between oscillators in the network (grid four, grid eight, honeycomb, function neighbour).
    @param[in] parameters (som_parameters): Other specific parameters.
    @param[in] ccore (bool): If True simulation is performed by CCORE library (C++ implementation of pyclustering).
    """
    # some of these parameters are required despite core implementation, for example, for network visualization.
    self._cols = cols
    self._rows = rows
    self._size = cols * rows
    self._conn_type = conn_type
    self._data = None
    self._neighbors = None
    self._local_radius = 0.0
    self._learn_rate = 0.0
    self.__ccore_som_pointer = None
    self._params = parameters or som_parameters()
    if self._params.init_radius is None:
        self._params.init_radius = self.__initialize_initial_radius(rows, cols)
    if (ccore is True) and ccore_library.workable():
        self.__ccore_som_pointer = wrapper.som_create(rows, cols, conn_type, self._params)
    else:
        # location
        self._location = self.__initialize_locations(rows, cols)
        # default weights: build independent inner lists — `[[0.0]] * size`
        # would alias a single shared row, so writing weights[i][j] would
        # silently modify every neuron at once.
        self._weights = [[0.0] for _ in range(self._size)]
        # awards
        self._award = [0] * self._size
        # captured objects
        self._capture_objects = [[] for i in range(self._size)]
        # distances - calculate and store them only during training
        self._sqrt_distances = None
        # connections
        if conn_type != type_conn.func_neighbor:
            self._create_connections(conn_type)
def __del__(self):
    """!
    @brief Destructor of the self-organized feature map.

    Releases the C-core SOM instance if one was created; a pure-Python
    network has nothing to free explicitly.

    """
    if self.__ccore_som_pointer is not None:
        wrapper.som_destroy(self.__ccore_som_pointer)
def __len__(self):
    """!
    @brief Returns the size of the network, defined by the amount of neurons in it.

    @return (uint) Size of self-organized map (amount of neurons).

    """
    return self._size
def __getstate__(self):
    """
    @brief Returns state of SOM network that can be used to store network.

    If the network lives in the C-core, its state is first downloaded to
    the Python side so that the resulting dump is complete.
    """
    if self.__ccore_som_pointer is not None:
        self.__download_dump_from_ccore()
        return self.__get_dump_from_python(True)

    return self.__get_dump_from_python(False)
def __setstate__(self, som_state):
    """
    @brief Set state of SOM network that can be used to load network.

    Restores into the C-core implementation when the dump was produced by
    one and the C-core library is available; otherwise restores the pure
    Python representation.
    """
    if som_state['ccore'] is True and ccore_library.workable():
        self.__upload_dump_to_ccore(som_state['state'])
    else:
        self.__upload_dump_to_python(som_state['state'])
def __initialize_initial_radius(self, rows, cols):
    """!
    @brief Initialize initial radius using map sizes.

    @param[in] rows (uint): Number of neurons in the column (number of rows).
    @param[in] cols (uint): Number of neurons in the row (number of columns).

    @return (float) Value of initial radius.

    """
    # Large maps get the widest neighbourhood radius.
    if (rows + cols) > 4.0:
        return 2.0

    # A genuinely two-dimensional (but small) map.
    if (rows > 1) and (cols > 1):
        return 1.5

    # Degenerate one-row or one-column map.
    return 1.0
def __initialize_locations(self, rows, cols):
    """!
    @brief Initialize locations (coordinates in SOM grid) of each neuron in the map.

    @param[in] rows (uint): Number of neurons in the column (number of rows).
    @param[in] cols (uint): Number of neurons in the row (number of columns).

    @return (list) List of coordinates of each neuron in map.

    """
    # Row-major enumeration of the grid: neuron index == row * cols + col.
    return [[float(row), float(col)]
            for row in range(rows)
            for col in range(cols)]
def __initialize_distances(self, size, | |
# Classes
Similar to functions where we introduced functions like `len()` and `type()` before discussing how to define your own functions, there are many available object types (either in the standard library or that can be imported - like `date`-type objects) *and* you can define your own objects and their associated attributes and methods. Just as we used `def` to define functions, in this chapter we'll discuss `class` to create your own classes of objects.
We'll also introduce the concept of instances and return to the concept of object-oriented programming and what that means in python.
As discussed earlier, objects are a way to combine associated data (attributes) and procedures (methods) that can be carried out on that data in a systematized and organized manner.
## `class` definition
With the definition of objects fresh in our mind, we can delve into how to create our own object types. To do this, we use the `class` keyword. This opens a code block within which we can specify the instructions to create objects of our specified type.
<div class="alert alert-success">
<b>Classes</b> define objects. The <code>class</code> keyword opens a code block for instructions on how to create objects of a particular type.
</div>
A helpful analogy is to think of classes as the _blueprint_ for creating and defining objects and their properties (methods, attributes, etc.). They specify the plan, explain how each component fits together, and keeps everything organized.
## `class`: Dog
Here, we introduce our first object type. We use `class` to define a `Dog`-type object.
Note that, unlike variables and functions (where we use snake_case), for `class`es, we'll use CapWords to specify class names.
Here, the `class` keyword indicates that we're defining a new `class` of objects that we're going to call `Dog`. The colon (`:`) opens up the code block.
Within the `class` definition, we define one attribute `sound`. Note that since attributes store pieces of data, attribute definition will take the format of variable assignment.
After defining the `sound` attribute, we see what looks like a function definition. This is because methods *are* functions. They're just a specific kind of function, one that is defined within a `class` definition and designed to operate directly on the object type within which it is defined.
However, one unique aspect about `class` definitions is the concept of an **instance**. We'll define this in more detail in a second, but this comes into play in the method definition below. We see the method `speak` takes an input parameter `self`. This `self` specifies that we want to operate on the current object (or the current 'instance' of the object'). Thus, within the function, whenever you see `self`, you know it refers to the current `Dog`. We'll delve into this more momentarily!
A final note about `class` objects for now is that, like functions, a new namespace is created within a `class`. They only have access to the variables passed into or defined within them.
# Define a class with `class`.
class Dog():
    # Class attributes for objects of type Dog
    sound = 'Woof'

    # Class methods for objects of type Dog
    def speak(self):
        print('Dog says:', self.sound)
After a `class` has been defined, we can create objects of that type. For example, here we create a `Dog` type object, storing that object in the variable `lexi`.
# Initialize a dog object
lexi = Dog()
After `lexi` has been defined, we're able to access information stored in this object type. We do this using the syntax described in the previous chapter, first specifying the object, followed by a `.` and then the attribute name.
# lexi, has 'sound' attribute(s) from Dog()
print(lexi.sound)
As a reminder, when we access an attribute, we are looking up information about the object and that information is returned. There are no operations being carried out.
Alternatively, when we execute a method, we are running the code within the method specified directly on the object itself.
For example, when we call `speak()` on `lexi`, the code within the `speak()` method executes.
# lexi, has 'Dog' method(s)
# remember we used `self`
lexi.speak()
### **`class`** Summary:
- classes tend to use **CapWords** convention
- instead of snake_case (functions and variable names)
- `()` after `Dog` indicate that this is callable
- like functions, Classes must be executed before any objects are created
- can define **attributes** & **methods** within `class`
- `self` is a special parameter for use by an object
- refers to the thing (object) itself
- like functions, a new namespace is created within a Class
### Using our `Dog` Object
Now that we've defined a `Dog`-type object, we can create multiple `Dog`s.
For example, we could create a list of four different `Dog` type objects. Note that every time `Dog()` is called, a new `Dog`-type object is created. This is what we refer to as an **instance** of an object. When you call `Dog()` here, you are creating another **instance** of a `Dog()`-type object.
# Initialize a group of dogs
pack_of_dogs = [Dog(), Dog(), Dog(), Dog()]
After defining `pack_of_dogs`, if we were to take a look at what is stored within this list, you'll notice that each element of the list indicates that a `Dog` type object is being stored.
# take a look at this
pack_of_dogs
We can then iterate over this list (as we've done for lists previously) and call a method on each `dog` in our list, using the following approach:
for dog in pack_of_dogs:
dog.speak()
We demonstrate this here for two reasons: 1) to indicate that all of the code constructs previously introduced (loops, conditionals, etc.) are still available when working with classes and 2) to demonstrate that methods operate directly on their associated objects using the syntax `object.method()`.
## Instances & `self`
While referenced above, we've yet to formally define what an **instance** is. An **instance** refers to a particular instantiation of a `class` object. Every time a `class` object is executed (created), a new **instance** of that `class` object is created.
To refer to the *current* instance, we use the word `self`.
<div class="alert alert-success">
An <b>instance</b> is a particular instantiation of a class object. <code>self</code> refers to the current instance.
</div>
# Initialize a dog object
lexi = Dog()
From the example we discussed above:
- Dog is the `class` object we created
- `lexi` was an _instance_ of that class
- `self` refers to whatever the _current_ instance is
### Instance Attributes
With this concept of instances now made a little more clear, we can introduce the concept of **instance attributes**. So far, we have only demonstrated how to define a **class attribute**. A **class attribute** is an attribute that *all* objects of this `class` will share. For our `Dog` example, all `Dog`-type objects shared the class attribute 'sound'. Every dog stored 'Woof' in the attribute `sound`.
Alternatively, an instance attribute is specific to the current instance of the `class` object. This allows for different instances of the `class` object to store different data in an instance attribute.
To do this, we use the special `__init__` method when defining instance attributes. (Note that those are two leading and two trailing underscores around the word 'init'.)
<div class="alert alert-success">
Instance attributes are attributes that we can make be different for each instance of a class. <code>__init__</code> is a special method used to define instance attributes.
</div>
class Dog():
    # Class attribute shared by all Dogs
    sound = 'Woof'

    # Initializer, allows us to specify instance-specific attributes;
    # leading and trailing double underscores indicate that this is special to Python
    def __init__(self, name):
        self.name = name

    def speak(self):
        print('Dog says:', self.sound)
With this updated `Dog` class definition, we can now initialize a new instance of `Dog()`:
# Initialize a dog
gary = Dog(name = 'Gary')
In the code above, we now see that we have to pass `name` when we create an instance of the `Dog()` object, specifying what the `name` is of *this* particular instance of `Dog()`.
As with class attributes, we can access what is stored in | |
#!/usr/bin/env python
# encoding: utf-8
#
# bpt.py
#
# Created by <NAME> on 19 Jan 2017.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from packaging.version import parse
import warnings
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.axes_grid1 import ImageGrid
from marvin.core.exceptions import MarvinDeprecationWarning, MarvinError
from marvin.utils.plot import bind_to_figure
__ALL__ = ('get_snr', 'kewley_sf_nii', 'kewley_sf_sii', 'kewley_sf_oi',
'kewley_comp_nii', 'kewley_agn_sii', 'kewley_agn_oi',
'bpt_kewley06')
def get_snr(snr_min, emission_line, default=3):
    """Convenience function to get the minimum SNR for a certain emission line.

    If ``snr_min`` is a dictionary and ``emission_line`` is one of its keys,
    returns that value. If the emission line is not included in the
    dictionary, returns ``default``. If ``snr_min`` is a float, returns that
    value regardless of the ``emission_line``.

    """
    # Per-line thresholds: look the line up, falling back to the default.
    if isinstance(snr_min, dict):
        return snr_min.get(emission_line, default)

    # A scalar threshold applies to every emission line.
    return snr_min
def get_masked(maps, emline, snr=1):
    """Convenience function to get masked arrays without negative values."""
    gflux = maps['emline_gflux_' + emline]
    masked = gflux.masked

    # Reject non-positive fluxes, spaxels below the SNR cutoff, and spaxels
    # with zero inverse variance (i.e., no valid measurement).
    masked.mask |= (masked.data <= 0)
    masked.mask |= (gflux.snr < snr)
    masked.mask |= (gflux.ivar == 0)

    return masked
def _get_kewley06_axes(use_oi=True):
    """Creates custom axes for displaying Kewley06 plots.

    Returns ``(fig, grid_bpt, gal_ax)``: the figure, the ``ImageGrid`` of BPT
    diagnostic panels (NII and SII, plus OI when ``use_oi`` is True) with the
    classification boundary lines already drawn, and the single axis used to
    display the spatial map of the galaxy.
    """
    fig = plt.figure(None, (8.5, 10))
    fig.clf()
    plt.subplots_adjust(top=0.99, bottom=0.08, hspace=0.01)

    # The axes for the three classification plots.
    # 'add_all' was removed in matplotlib 3.5, so only pass it to older versions.
    imgrid_kwargs = {'add_all': True} if parse(matplotlib.__version__) < parse('3.5.0') else {}
    grid_bpt = ImageGrid(fig, 211,
                         nrows_ncols=(1, 3) if use_oi else (1, 2),
                         direction='row',
                         axes_pad=0.1,
                         label_mode='L',
                         share_all=False, **imgrid_kwargs)

    # The axes for the galaxy display
    gal_bpt = ImageGrid(fig, 212, nrows_ncols=(1, 1))

    # Plots the classification boundary lines. Each x range stops short of
    # the asymptote of the corresponding boundary curve.
    xx_sf_nii = np.linspace(-1.281, 0.045, int(1e4))
    xx_sf_sii = np.linspace(-2, 0.315, int(1e4))
    xx_sf_oi = np.linspace(-2.5, -0.7, int(1e4))
    xx_comp_nii = np.linspace(-2, 0.4, int(1e4))
    xx_agn_sii = np.array([-0.308, 1.0])
    xx_agn_oi = np.array([-1.12, 0.5])

    grid_bpt[0].plot(xx_sf_nii, kewley_sf_nii(xx_sf_nii), 'k--', zorder=90)
    grid_bpt[1].plot(xx_sf_sii, kewley_sf_sii(xx_sf_sii), 'r-', zorder=90)
    if use_oi:
        grid_bpt[2].plot(xx_sf_oi, kewley_sf_oi(xx_sf_oi), 'r-', zorder=90)
    grid_bpt[0].plot(xx_comp_nii, kewley_comp_nii(xx_comp_nii), 'r-', zorder=90)
    grid_bpt[1].plot(xx_agn_sii, kewley_agn_sii(xx_agn_sii), 'b-', zorder=80)
    if use_oi:
        grid_bpt[2].plot(xx_agn_oi, kewley_agn_oi(xx_agn_oi), 'b-', zorder=80)

    # Adds captions labelling each classification region.
    grid_bpt[0].text(-1, -0.5, 'SF', ha='center', fontsize=12, zorder=100, color='c')
    grid_bpt[0].text(0.5, 0.5, 'AGN', ha='left', fontsize=12, zorder=100)
    grid_bpt[0].text(-0.08, -1.2, 'Comp', ha='left', fontsize=12, zorder=100, color='g')
    grid_bpt[1].text(-1.2, -0.5, 'SF', ha='center', fontsize=12, zorder=100)
    grid_bpt[1].text(-1, 1.2, 'Seyfert', ha='left', fontsize=12, zorder=100, color='r')
    grid_bpt[1].text(0.3, -1, 'LINER', ha='left', fontsize=12, zorder=100, color='m')
    if use_oi:
        grid_bpt[2].text(-2, -0.5, 'SF', ha='center', fontsize=12, zorder=100)
        grid_bpt[2].text(-1.5, 1, 'Seyfert', ha='left', fontsize=12, zorder=100)
        grid_bpt[2].text(-0.1, -1, 'LINER', ha='right', fontsize=12, zorder=100)

    # Sets the ticks, ticklabels, and other details
    xtick_limits = ((-2, 1), (-1.5, 1), (-2.5, 0.5))
    axes = [0, 1, 2] if use_oi else [0, 1]
    for ii in axes:
        grid_bpt[ii].get_xaxis().set_tick_params(direction='in')
        grid_bpt[ii].get_yaxis().set_tick_params(direction='in')

        grid_bpt[ii].set_xticks(np.arange(xtick_limits[ii][0], xtick_limits[ii][1] + 0.5, 0.5))
        grid_bpt[ii].set_xticks(np.arange(xtick_limits[ii][0],
                                          xtick_limits[ii][1] + 0.1, 0.1), minor=True)

        grid_bpt[ii].set_yticks(np.arange(-1.5, 2.0, 0.5))
        grid_bpt[ii].set_yticks(np.arange(-1.5, 1.6, 0.1), minor=True)

        grid_bpt[ii].grid(which='minor', alpha=0.2)
        grid_bpt[ii].grid(which='major', alpha=0.5)

        grid_bpt[ii].set_xlim(xtick_limits[ii][0], xtick_limits[ii][1])
        grid_bpt[ii].set_ylim(-1.5, 1.6)
        if use_oi:
            # NOTE(review): this widens the y range of *every* panel whenever
            # the OI panel is shown, not only of the OI panel itself --
            # presumably to make room for the captions; confirm intentional.
            grid_bpt[ii].set_ylim(-1.5, 1.8)

        grid_bpt[ii].spines['top'].set_visible(True)

        # Hides the last x tick label of non-rightmost panels so that labels
        # of adjacent panels do not overlap.
        if ii in [0, 1]:
            if not use_oi and ii == 1:
                continue
            grid_bpt[ii].get_xticklabels()[-1].set_visible(False)

    grid_bpt[0].set_ylabel(r'log([OIII]/H$\beta$)')
    grid_bpt[0].set_xlabel(r'log([NII]/H$\alpha$)')
    grid_bpt[1].set_xlabel(r'log([SII]/H$\alpha$)')
    if use_oi:
        grid_bpt[2].set_xlabel(r'log([OI]/H$\alpha$)')

    gal_bpt[0].grid(False)

    return fig, grid_bpt, gal_bpt[0]
def kewley_sf_nii(log_nii_ha):
    """Star forming classification line for log([NII]/Ha).

    Returns the log([OIII]/Hb) value of the boundary at ``log_nii_ha``.
    """
    denominator = log_nii_ha - 0.05
    return 1.3 + 0.61 / denominator
def kewley_sf_sii(log_sii_ha):
    """Star forming classification line for log([SII]/Ha).

    Returns the log([OIII]/Hb) value of the boundary at ``log_sii_ha``.
    """
    denominator = log_sii_ha - 0.32
    return 1.3 + 0.72 / denominator
def kewley_sf_oi(log_oi_ha):
    """Star forming classification line for log([OI]/Ha).

    Returns the log([OIII]/Hb) value of the boundary at ``log_oi_ha``.
    """
    denominator = log_oi_ha + 0.59
    return 1.33 + 0.73 / denominator
def kewley_comp_nii(log_nii_ha):
    """Composite classification line for log([NII]/Ha).

    Returns the log([OIII]/Hb) value of the boundary at ``log_nii_ha``.
    """
    denominator = log_nii_ha - 0.47
    return 1.19 + 0.61 / denominator
def kewley_agn_sii(log_sii_ha):
    """Seyfert/LINER classification line for log([SII]/Ha).

    Linear boundary; returns the log([OIII]/Hb) value at ``log_sii_ha``.
    """
    slope, intercept = 1.89, 0.76
    return slope * log_sii_ha + intercept
def kewley_agn_oi(log_oi_ha):
    """Seyfert/LINER classification line for log([OI]/Ha).

    Linear boundary; returns the log([OIII]/Hb) value at ``log_oi_ha``.
    """
    slope, intercept = 1.18, 1.30
    return slope * log_oi_ha + intercept
def bpt_kewley06(maps, snr_min=3, return_figure=True, use_oi=True, **kwargs):
"""Returns a classification of ionisation regions, as defined in Kewley+06.
Makes use of the classification system defined by
`Kewley et al. (2006) <https://ui.adsabs.harvard.edu/#abs/2006MNRAS.372..961K/abstract>`_
to return classification masks for different ionisation mechanisms. If ``return_figure=True``,
produces and returns a matplotlib figure with the classification plots (based on
Kewley+06 Fig. 4) and the 2D spatial distribution of classified spaxels (i.e., a map of the
galaxy in which each spaxel is colour-coded based on its emission mechanism).
While it is possible to call this function directly, its normal use will be via the
:func:`~marvin.tools.maps.Maps.get_bpt` method.
Parameters:
maps (a Marvin :class:`~marvin.tools.maps.Maps` object)
The Marvin Maps object that contains the emission line maps to be used to determine
the BPT classification.
snr_min (float or dict):
The signal-to-noise cutoff value for the emission lines used to generate the BPT
diagram. If ``snr_min`` is a single value, that signal-to-noise will be used for all
the lines. Alternatively, a dictionary of signal-to-noise values, with the
emission line channels as keys, can be used.
E.g., ``snr_min={'ha': 5, 'nii': 3, 'oi': 1}``. If some values are not provided,
they will default to ``SNR>=3``. Note that the value ``sii`` will be applied to both
``[SII 6718]`` and ``[SII 6732]``.
return_figure (bool):
If ``True``, it also returns the matplotlib figure_ of the BPT diagram plot,
which can be used to modify the style of the plot.
use_oi (bool):
If ``True``, uses the OI diagnostic diagram for spaxel classification.
Returns:
bpt_return:
``bpt_kewley06`` returns a dictionary of dictionaries of classification masks.
The classification masks (not to be confused with bitmasks) are boolean arrays with the
same shape as the Maps or Cube (without the spectral dimension) that can be used
to select spaxels belonging to a certain excitation process (e.g., star forming).
The returned dictionary has the following keys: ``'sf'`` (star forming), ``'comp'``
(composite), ``'agn'``, ``'seyfert'``, ``'liner'``, ``'invalid'``
(spaxels that are masked out at the DAP level), and ``'ambiguous'`` (good spaxels that
do not fall in any classification or fall in more than one). Each key provides access
to a new dictionary with keys ``'nii'`` (for the constraints in the diagram NII/Halpha
vs OIII/Hbeta), ``'sii'`` (SII/Halpha vs OIII/Hbeta), ``'oi'`` (OI/Halpha vs
OIII/Hbeta; only if ``use_oi=True``), and ``'global'``, which applies all the previous
constraints at once. The ``'ambiguous'`` mask only contains the ``'global'``
subclassification, while the ``'comp'`` dictionary only contains ``'nii'``.
``'nii'`` is not available for ``'seyfert'`` and ``'liner'``. All the global masks are
unique (a spaxel can only belong to one of them) with the exception of ``'agn'``, which
intersects with ``'seyfert'`` and ``'liner'``. Additionally, if ``return_figure=True``,
``bpt_kewley06`` will also return the matplotlib figure for the generated plot, and a
list of axes for each one of the subplots.
Example:
>>> maps_8485_1901 = Maps(plateifu='8485-1901')
>>> bpt_masks, fig, axes = bpt_kewley06(maps_8485_1901)
Gets the global mask for star forming spaxels
>>> sf = bpt_masks['sf']['global']
Gets the seyfert mask based only on the SII/Halpha vs OIII/Hbeta diagnostics
>>> seyfert_sii = bpt_masks['seyfert']['sii']
"""
if 'snr' in kwargs:
warnings.warn('snr is deprecated. Use snr_min instead. '
'snr will be removed in a future version of marvin',
MarvinDeprecationWarning)
snr_min = kwargs.pop('snr')
if len(kwargs.keys()) > 0:
raise MarvinError('unknown keyword {0}'.format(list(kwargs.keys())[0]))
# Gets the necessary emission line maps
oiii = get_masked(maps, 'oiii_5008', snr=get_snr(snr_min, 'oiii'))
nii = get_masked(maps, 'nii_6585', snr=get_snr(snr_min, 'nii'))
ha = get_masked(maps, 'ha_6564', snr=get_snr(snr_min, 'ha'))
hb = get_masked(maps, 'hb_4862', snr=get_snr(snr_min, 'hb'))
oi = get_masked(maps, 'oi_6302', snr=get_snr(snr_min, 'oi'))
sii_6718 = get_masked(maps, 'sii_6718', snr=get_snr(snr_min, 'sii'))
sii_6732 = get_masked(maps, 'sii_6732', snr=get_snr(snr_min, 'sii'))
sii = sii_6718 + sii_6732
# Calculate masked logarithms
log_oiii_hb = np.ma.log10(oiii / hb)
log_nii_ha = np.ma.log10(nii / ha)
log_sii_ha = np.ma.log10(sii / ha)
log_oi_ha = np.ma.log10(oi / ha)
# Calculates masks for each emission mechanism according to the paper boundaries.
# The log_nii_ha < 0.05, log_sii_ha < 0.32, etc are necessary because the classification lines
# diverge and we only want the region before the asymptota.
sf_mask_nii = ((log_oiii_hb < kewley_sf_nii(log_nii_ha)) & (log_nii_ha < 0.05)).filled(False)
sf_mask_sii = ((log_oiii_hb < kewley_sf_sii(log_sii_ha)) & (log_sii_ha < 0.32)).filled(False)
sf_mask_oi = ((log_oiii_hb < kewley_sf_oi(log_oi_ha)) & (log_oi_ha < -0.59)).filled(False)
sf_mask = sf_mask_nii & sf_mask_sii & sf_mask_oi if use_oi else sf_mask_nii & sf_mask_sii
comp_mask = ((log_oiii_hb > kewley_sf_nii(log_nii_ha)) & (log_nii_ha < 0.05)).filled(False) & \
((log_oiii_hb < kewley_comp_nii(log_nii_ha)) & (log_nii_ha < 0.465)).filled(False)
comp_mask &= (sf_mask_sii & sf_mask_oi) if use_oi else sf_mask_sii
agn_mask_nii = ((log_oiii_hb > kewley_comp_nii(log_nii_ha)) |
(log_nii_ha > 0.465)).filled(False)
agn_mask_sii = ((log_oiii_hb > kewley_sf_sii(log_sii_ha)) |
(log_sii_ha > 0.32)).filled(False)
agn_mask_oi = ((log_oiii_hb > kewley_sf_oi(log_oi_ha)) |
(log_oi_ha > -0.59)).filled(False)
agn_mask = agn_mask_nii & agn_mask_sii & agn_mask_oi if use_oi else agn_mask_nii & agn_mask_sii
| |
<filename>tests/test_fields.py
import re
from datetime import datetime
from decimal import Decimal
import pytest
from bson import ObjectId, Decimal128
from aiomongodel import Document, EmbeddedDocument
from aiomongodel.errors import ValidationError
from aiomongodel.fields import (
AnyField, StrField, IntField, FloatField, BoolField, DateTimeField,
ObjectIdField, EmbDocField, ListField, RefField, EmailField,
DecimalField, SynonymField)
from aiomongodel.utils import _Empty
class EmbDoc(EmbeddedDocument):
    # Embedded-document fixture with a single required int field.
    int_field = IntField(required=True)
class WrongEmbDoc(EmbeddedDocument):
    # Embedded document with a different schema, used for negative tests.
    wrong = StrField(required=True)
class RefDoc(Document):
    # Referenced-document fixture with a single optional string field.
    str_field = StrField(required=False)
class WrongRefDoc(Document):
    # Referenced document with a different schema, used for negative tests.
    wrong = IntField(required=False)
# Reference datetime used by the DateTimeField tests.
dt = datetime.strptime('1985-09-14 12:00:00', '%Y-%m-%d %H:%M:%S')

# Document / embedded-document fixtures; the 'wrong_*' ones are constructed
# with an unexpected field name to exercise validation failures.
ref_doc = RefDoc(_id=ObjectId('58ce6d537e592254b67a503d'), str_field='xxx')
emb_doc = EmbDoc(int_field=1)
wrong_ref_doc = RefDoc(_id=ObjectId('58ce6d537e592254b67a503d'), wrong=1)
wrong_emb_doc = EmbDoc(wrong='xxx')

# (field class, sample default value) pairs for the parametrized tests below.
FIELD_DEFAULT = [
    (AnyField, 'xxx'),
    (StrField, 'xxx'),
    (IntField, 13),
    (FloatField, 1.3),
    (BoolField, True),
    (DateTimeField, dt),
    (ObjectIdField, ObjectId('58ce6d537e592254b67a503d')),
    (EmailField, '<EMAIL>'),
    (DecimalField, Decimal('0.005')),
]
@pytest.mark.parametrize('field, expected', [
    (StrField(required=False), None),
    (IntField(required=False), None),
    (FloatField(required=False), None),
    (BoolField(required=False), None),
    (DateTimeField(required=False), None),
    (ObjectIdField(required=False), None),
    (EmbDocField(EmbDoc, required=False), None),
    (ListField(EmbDocField(EmbDoc), required=False), None),
    (RefField(RefDoc, required=False), None),
    (EmailField(required=False), None),
])
def test_field_not_exist_get_value(field, expected):
    """Accessing a non-required field that was never set returns None."""
    class Doc(Document):
        value = field

    assert Doc().value is expected
@pytest.mark.parametrize('field, default', FIELD_DEFAULT)
def test_field_attributes(field, default):
    """Field attributes take documented defaults and honour constructor kwargs."""
    class Doc(Document):
        value = field(required=False)

    assert isinstance(Doc.value, field)
    assert Doc.value.name == 'value'
    assert Doc.value.mongo_name == 'value'
    assert Doc.value.s == 'value'
    assert Doc.value.required is False
    assert Doc.value.default is _Empty
    assert Doc.value.choices is None
    assert Doc.value.allow_none is False

    class DocWithMongo(Document):
        value = field(required=True, default=default, mongo_name='val',
                      choices=[default], allow_none=True)

    assert isinstance(DocWithMongo.value, field)
    assert DocWithMongo.value.name == 'value'
    assert DocWithMongo.value.mongo_name == 'val'
    assert DocWithMongo.value.s == 'val'
    assert DocWithMongo.value.required is True
    assert DocWithMongo.value.default == default
    # choices are normalised to a set regardless of the input container.
    assert DocWithMongo.value.choices == {default}
    assert DocWithMongo.value.allow_none is True
@pytest.mark.parametrize('field, default', FIELD_DEFAULT)
def test_field_default(field, default):
    """A field's default is _Empty unless given; callable defaults are resolved."""
    class Doc(Document):
        value = field()

    assert Doc.value.default is _Empty

    class DocWithDefault(Document):
        value = field(required=True, default=default)

    assert DocWithDefault.value.default == default

    class DocWithCallableDefault(Document):
        value = field(required=True, default=lambda: default)

    assert DocWithCallableDefault.value.default == default
def test_compound_field_name():
    """Dotted mongo paths ('a.b.c') are composed from mongo_name at each level."""
    class EmbDoc(EmbeddedDocument):
        int_field = IntField(mongo_name='intf')

    class ComplexEmbDoc(EmbeddedDocument):
        emb_field = EmbDocField(EmbDoc, mongo_name='emb')

    class ComplexListDoc(EmbeddedDocument):
        lst_field = ListField(EmbDocField(ComplexEmbDoc))

    class Doc(Document):
        int_field = IntField()
        emb_field = EmbDocField(EmbDoc, mongo_name='emb')
        complex_emb_field = EmbDocField(ComplexEmbDoc, mongo_name='cmplx_emb')
        lst_field = ListField(EmbDocField(EmbDoc), mongo_name='lst')
        lst_int_field = ListField(IntField(), mongo_name='lst_int')
        complex_lst_emb_field = EmbDocField(ComplexListDoc, mongo_name='clef')

    assert EmbDoc.int_field.s == 'intf'
    assert Doc.int_field.s == 'int_field'
    assert Doc.emb_field.s == 'emb'
    assert Doc.complex_emb_field.s == 'cmplx_emb'
    assert Doc.lst_field.s == 'lst'
    assert Doc.lst_int_field.s == 'lst_int'
    assert Doc.emb_field.int_field.s == 'emb.intf'
    assert Doc.complex_emb_field.emb_field.s == 'cmplx_emb.emb'
    assert Doc.lst_field.int_field.s == 'lst.intf'
    assert Doc.complex_emb_field.emb_field.int_field.s == 'cmplx_emb.emb.intf'
    mn = 'clef.lst_field.emb.intf'
    assert (
        Doc.complex_lst_emb_field.lst_field.emb_field.int_field.s == mn)

    # Accessing an unknown sub-field raises AttributeError at any nesting level.
    with pytest.raises(AttributeError):
        Doc.int_field.wrong_field.s

    with pytest.raises(AttributeError):
        Doc.emb_field.int_field.wrong_field.s

    with pytest.raises(AttributeError):
        Doc.lst_int_field.wrong_field.s

    with pytest.raises(AttributeError):
        Doc.complex_emb_field.emb_field.wrong.s

    with pytest.raises(AttributeError):
        Doc.complex_lst_emb_field.lst_field.wrong.s
def test_compound_field_document_class():
    """document_class resolves dotted import strings lazily and validates types."""
    class Doc(Document):
        emb = EmbDocField('test_fields.EmbDoc')
        ref = RefField('test_fields.RefDoc')
        lst_emb = ListField(EmbDocField('test_fields.EmbDoc'))
        lst_ref = ListField(RefField('test_fields.RefDoc'))
        lst_int = ListField(IntField())
        wrong_emb = EmbDocField('xxx')
        wrong_ref = RefField('xxx')
        wrong_lst_emb = ListField(EmbDocField('xxx'))
        wrong_emb_doc = EmbDocField('test_fields.RefDoc')
        wrong_ref_doc = RefField('test_fields.EmbDoc')

    assert Doc.emb.document_class is EmbDoc
    assert Doc.ref.document_class is RefDoc
    assert Doc.lst_emb.document_class is EmbDoc
    assert Doc.lst_ref.document_class is None
    assert Doc.lst_int.document_class is None

    # Unresolvable import strings only raise when document_class is accessed.
    with pytest.raises(ImportError):
        Doc.wrong_emb.document_class

    with pytest.raises(ImportError):
        Doc.wrong_lst_emb.document_class

    with pytest.raises(ImportError):
        Doc.wrong_ref.document_class

    # Passing the wrong kind of class object fails eagerly at class creation...
    with pytest.raises(TypeError):
        class WrongEmbDoc(Document):
            wrong_emb = EmbDocField(RefDoc)

    with pytest.raises(TypeError):
        class WrongRefDoc(Document):
            wrong_ref = RefField(EmbDoc)

    # ...while a wrong *string* reference fails lazily on attribute access.
    with pytest.raises(TypeError):
        Doc.wrong_ref_doc.document_class

    with pytest.raises(TypeError):
        Doc.wrong_emb_doc.document_class
@pytest.mark.parametrize('field, value, expected', [
    (AnyField(), '1', '1'),
    (AnyField(), 1, 1),
    (AnyField(), True, True),
    (AnyField(), None, None),
    (StrField(), 'xxx', 'xxx'),
    (StrField(), None, None),
    (IntField(), 1, 1),
    (IntField(), None, None),
    (FloatField(), 13.0, pytest.approx(13.0)),
    (FloatField(), None, None),
    (BoolField(), True, True),
    (BoolField(), False, False),
    (BoolField(), None, None),
    (DateTimeField(), dt, dt),
    (DateTimeField(), None, None),
    (ObjectIdField(),
     ObjectId('58ce6d537e592254b67a503d'),
     ObjectId('58ce6d537e592254b67a503d')),
    (ObjectIdField(), None, None),
    # Embedded documents serialise to plain dicts; references to their _id.
    (EmbDocField(EmbDoc), emb_doc, {'int_field': 1}),
    (EmbDocField(EmbDoc), None, None),
    (ListField(IntField()), [], []),
    (ListField(IntField()), [1, 2, 3], [1, 2, 3]),
    (ListField(IntField()), None, None),
    (ListField(EmbDocField(EmbDoc)), [emb_doc], [{'int_field': 1}]),
    (ListField(EmbDocField(EmbDoc)), None, None),
    (RefField(RefDoc),
     ObjectId('58ce6d537e592254b67a503d'),
     ObjectId('58ce6d537e592254b67a503d')),
    (RefField(RefDoc), ref_doc, ref_doc._id),
    (RefField(RefDoc), None, None),
    (EmailField(), '<EMAIL>', '<EMAIL>'),
    (EmailField(), None, None),
    (DecimalField(), Decimal('0.005'), Decimal128(Decimal('0.005'))),
    (DecimalField(), None, None),
])
def test_field_to_mongo(field, value, expected):
    """to_mongo converts Python values to their BSON representation."""
    class Doc(Document):
        value = field

    assert Doc.value.to_mongo(value) == expected
@pytest.mark.parametrize('field, value, expected', [
    (AnyField(), '1', '1'),
    (AnyField(), 1, 1),
    (AnyField(), True, True),
    (AnyField(), None, None),
    (StrField(), 'xxx', 'xxx'),
    (StrField(), None, None),
    (IntField(), 1, 1),
    (IntField(), None, None),
    (FloatField(), 13.0, pytest.approx(13.0)),
    (FloatField(), None, None),
    (BoolField(), True, True),
    (BoolField(), False, False),
    (BoolField(), None, None),
    (DateTimeField(), dt, dt),
    (DateTimeField(), None, None),
    (ObjectIdField(),
     ObjectId('58ce6d537e592254b67a503d'),
     ObjectId('58ce6d537e592254b67a503d')),
    (ObjectIdField(), None, None),
    (ListField(IntField()), [], []),
    (ListField(IntField()), [1, 2, 3], [1, 2, 3]),
    (ListField(IntField()), None, None),
    (ListField(IntField()), [None], [None]),
    (RefField(RefDoc),
     ObjectId('58ce6d537e592254b67a503d'),
     ObjectId('58ce6d537e592254b67a503d')),
    (RefField(RefDoc), None, None),
    (EmailField(), '<EMAIL>', '<EMAIL>'),
    (EmailField(), None, None),
    # Decimal128 and numeric strings/floats are all converted to Decimal.
    (DecimalField(), Decimal128(Decimal('0.005')), Decimal('0.005')),
    (DecimalField(), float(0.005), Decimal('0.005')),
    (DecimalField(), str(0.005), Decimal('0.005')),
    (DecimalField(), None, None),
    (EmbDocField(EmbDoc, allow_none=True), None, None)
])
def test_field_from_mongo(field, value, expected):
    """from_mongo converts BSON values back to their Python representation."""
    class Doc(Document):
        value = field

    assert Doc.value.from_mongo(value) == expected
# (field, input value, expected converted value) triples for from_data.
# Inputs that cannot be coerced are expected to pass through unchanged --
# validation is performed separately (see Document.validate tests).
FROM_DATA = [
    (AnyField(), '1', '1'),
    (AnyField(), 1, 1),
    (AnyField(), True, True),
    (StrField(), '', ''),
    (StrField(), 'xxx', 'xxx'),
    (StrField(choices=('xxx', 'yyy')), 'xxx', 'xxx'),
    (StrField(), 1, '1'),
    (StrField(), True, 'True'),
    (StrField(allow_blank=False), '', ''),
    (StrField(choices=('xxx', 'yyy')), 'zzz', 'zzz'),
    (StrField(choices=('xxx', 'yyy')), 1, '1'),
    (IntField(), 1, 1),
    (IntField(), '1', 1),
    (IntField(choices=[*range(10)]), 5, 5),
    (IntField(choices=[*range(10)]), 'xxx', 'xxx'),
    (IntField(choices=[*range(10)]), 100, 100),
    (IntField(), 'xxx', 'xxx'),
    (IntField(), 1.3, 1),
    (IntField(gte=1, lte=13), 1, 1),
    (IntField(gte=1, lte=13), 13, 13),
    (IntField(gte=1, lte=13), 10, 10),
    (IntField(gte=1, lte=13), 0, 0),
    (IntField(gte=1, lte=13), 20, 20),
    (IntField(gt=1, lt=13), 10, 10),
    (IntField(gt=1, lt=13), 1, 1),
    (IntField(gt=1, lt=13), 13, 13),
    (IntField(gt=1, lt=13), 0, 0),
    (IntField(gt=1, lt=13), 20, 20),
    (FloatField(), 1, pytest.approx(1.0)),
    (FloatField(), 1.0, pytest.approx(1.0)),
    (FloatField(), '1.0', pytest.approx(1.0)),
    (FloatField(), '1', pytest.approx(1.0)),
    (FloatField(), 'x', 'x'),
    (FloatField(gt=1.0, lt=13.0), 10.0, pytest.approx(10.0)),
    (FloatField(gt=1.0, lt=13.0), 0.0, pytest.approx(0.0)),
    (FloatField(gt=1.0, lt=13.0), 20.0, pytest.approx(20.0)),
    (BoolField(), True, True),
    (BoolField(), False, False),
    (BoolField(), 13, True),
    (DateTimeField(), dt, dt),
    (DateTimeField(), True, True),
    (ObjectIdField(),
     ObjectId('58ce6d537e592254b67a503d'),
     ObjectId('58ce6d537e592254b67a503d')),
    (ObjectIdField(), '58ce6d537e592254b67a503d',
     ObjectId('58ce6d537e592254b67a503d')),
    (ListField(IntField()), [], []),
    (ListField(IntField()), [1, 2, 3], [1, 2, 3]),
    (ListField(IntField()), ['1', '2', '3'], [1, 2, 3]),
    (ListField(IntField()), [0, 'xxx', 1], [0, 'xxx', 1]),
    (ListField(IntField(), min_length=3, max_length=5), [0, 1], [0, 1]),
    (ListField(IntField(), min_length=3, max_length=5), [0, 1, 2], [0, 1, 2]),
    (ListField(IntField(), min_length=3, max_length=5),
     [0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]),
    (ListField(RefField(RefDoc)), [ref_doc], [ref_doc]),
    (ListField(RefField(RefDoc)), [1], [1]),
    (ListField(EmbDocField(EmbDoc)), [emb_doc], [emb_doc]),
    (ListField(EmbDocField(EmbDoc)), [1], [1]),
    (RefField(RefDoc),
     ObjectId('58ce6d537e592254b67a503d'),
     ObjectId('58ce6d537e592254b67a503d')),
    (RefField(RefDoc), ref_doc, ref_doc),
    (RefField(RefDoc), wrong_ref_doc, wrong_ref_doc),
    (RefField(RefDoc), 'xxx', 'xxx'),
    (EmbDocField(EmbDoc), emb_doc, emb_doc),
    (EmbDocField(EmbDoc), wrong_emb_doc, wrong_emb_doc),
    (EmbDocField(EmbDoc), 1, 1),
    (EmbDocField(EmbDoc), ref_doc, ref_doc),
    (EmailField(), '<EMAIL>', '<EMAIL>'),
    (EmailField(), 'example.com', 'example.com'),
    (EmailField(), '@example.com', '@example.com'),
    (EmailField(), '<EMAIL>', '<EMAIL>'),
    (EmailField(), 1, '1'),
    (DecimalField(), Decimal(1), Decimal(1)),
    (DecimalField(), '0.005', Decimal('0.005')),
    (DecimalField(gte=1, lte=13), '1.0', Decimal('1.0')),
    (DecimalField(gte=1, lte=13), '13', Decimal('13')),
    (DecimalField(gte=1, lte=13), '10.5', Decimal('10.5')),
    (DecimalField(gte=Decimal(1), lte=13), 0, 0),
    (DecimalField(gte=1, lte=13), Decimal('20.5'), Decimal('20.5')),
    (DecimalField(gt=1, lt=13), 10, Decimal(10)),
    (DecimalField(gt=1, lt=13), 1, 1),
    (DecimalField(gt=1, lt=Decimal('13.0')), 13, 13),
    (DecimalField(gt=1, lt=Decimal('13.0')), Decimal('0'), Decimal('0')),
    (DecimalField(gt=1, lt=13), Decimal('20'), Decimal('20'))
]
@pytest.mark.parametrize('field, value, expected', FROM_DATA)
def test_field_from_data(field, value, expected):
    """from_data() should coerce the raw value to the field's native type."""
    class DataDoc(Document):
        value = field

    assert DataDoc.value.from_data(value) == expected
@pytest.mark.parametrize('field, value, expected', FROM_DATA)
def test_field_init(field, value, expected):
    """Constructing a document should apply the same coercion as from_data()."""
    class InitDoc(Document):
        value = field

    assert InitDoc(value=value).value == expected
@pytest.mark.parametrize('field, value, expected', FROM_DATA)
def test_field_assign(field, value, expected):
    """Attribute assignment should apply the same coercion as from_data()."""
    class AssignDoc(Document):
        value = field

    doc = AssignDoc(_empty=True)
    doc.value = value
    assert doc.value == expected
def test_emb_doc_field():
    """EmbDocField should wrap plain dicts into EmbDoc on every input path."""
    class HostDoc(Document):
        emb_field = EmbDocField(EmbDoc)

    # Coercion on construction.
    assert isinstance(HostDoc(emb_field={'int_field': 1}).emb_field, EmbDoc)

    # Coercion on attribute assignment.
    doc = HostDoc(_empty=True)
    doc.emb_field = {'int_field': 1}
    assert isinstance(doc.emb_field, EmbDoc)

    # Coercion through the descriptor's from_data().
    assert isinstance(HostDoc.emb_field.from_data({'int_field': 1}), EmbDoc)

    # Coercion when loading a raw mongo document.
    loaded = HostDoc.from_mongo({'emb_field': {'int_field': 1}})
    assert isinstance(loaded.emb_field, EmbDoc)
    assert loaded.emb_field.int_field == 1
def test_list_field():
    """ListField must reject a bare type; it expects a field *instance*."""
    with pytest.raises(TypeError):
        class BadDoc(Document):
            lst_field = ListField(int)
# NOTE(review): the name looks like a typo for "test_field_choices"; kept
# as-is so the collected pytest id does not change.
def test_filed_choices():
    """Set- and dict-style choices accept their keys and reject other values."""
    class ChoiceDoc(Document):
        set_choices = StrField(choices={'xxx', 'yyy'})
        dict_choices = StrField(choices={'xxx': 'AAA', 'yyy': 'BBB'})

    valid = ChoiceDoc(set_choices='xxx', dict_choices='yyy')
    valid.validate()

    # Dict choices match on the *keys*, so the dict's values are rejected.
    invalid = ChoiceDoc(set_choices='AAA', dict_choices='BBB')
    with pytest.raises(ValidationError) as excinfo:
        invalid.validate()
    assert excinfo.value.as_dict() == {
        'set_choices': 'value does not match any variant',
        'dict_choices': 'value does not match any variant',
    }
@pytest.mark.parametrize('field, value, expected', [
# AnyField
(AnyField(), '1', None),
(AnyField(), 1, None),
(AnyField(), True, None),
(AnyField(allow_none=True), None, None),
(AnyField(allow_none=False), None,
ValidationError('none value is not allowed')),
(AnyField(choices={'xxx', 'yyy'}), 'xxx', None),
(AnyField(choices={'xxx', 'yyy'}), 1,
ValidationError('value does not match any variant')),
# StrField
(StrField(), 'xxx', None),
(StrField(allow_none=True), None, None),
(StrField(allow_blank=True), '', None),
(StrField(choices=('xxx', 'yyy')), 'xxx', None),
(StrField(choices=('xxx', 'yyy'), max_length=2), 'xxx', None),
(StrField(choices=('xxx', 'yyy'), regex=r'zzz'), 'xxx', None),
(StrField(regex=r'[abc]+'), 'aa', None),
(StrField(regex=re.compile(r'[abc]+')), 'aa', None),
(StrField(min_length=2, max_length=3), 'aa', None),
(StrField(allow_none=False), None,
ValidationError('none value is not allowed')),
(StrField(), 1, ValidationError("invalid value type")),
(StrField(allow_none=True), True, ValidationError("invalid value type")),
(StrField(allow_blank=False), '',
ValidationError("blank value is not allowed")),
(StrField(choices=('xxx', 'yyy')), 'zzz',
ValidationError("value does not match any variant")),
(StrField(choices=('xxx', 'yyy')), 1,
ValidationError("invalid value type")),
(StrField(regex=r'[abc]+'), 'd',
ValidationError('value does not match pattern [abc]+')),
(StrField(regex=re.compile(r'[abc]+')), 'd',
ValidationError('value does not match pattern [abc]+')),
(StrField(min_length=2, max_length=3), 'a',
ValidationError('length is less than 2')),
(StrField(min_length=2, max_length=3), 'aaaa',
ValidationError('length is greater than 3')),
# IntField
(IntField(), 1, None),
(IntField(allow_none=True), None, None),
(IntField(choices=[*range(10)]), 5, None),
(IntField(choices=[*range(10)]), 'xxx',
ValidationError("invalid value type")),
(IntField(choices=[*range(10)]), 100,
ValidationError("value does not match any variant")),
(IntField(), 'xxx', ValidationError("invalid value type")),
(IntField(gte=1, lte=13), 1, None),
(IntField(gte=1, lte=13), 13, None),
(IntField(gte=1, lte=13), 10, None),
(IntField(gte=1, lte=13), 0, ValidationError('value is | |
if buf:
blocks.append(Block(buf))
buf = []
j = i
if j + 1 >= len(tokens):
raise BadExpectedToken("### BAD TOKEN at %s" % (t.location))
directive = tokens[j+1].id
if directive == 'define':
if i+2 >= len(tokens):
raise BadExpectedToken("### BAD TOKEN at %s" %
(tokens[i].location))
# Skip '#' and 'define'.
extent = tokens[i].cursor.extent
i += 2
id = ''
# We need to separate the id from the remaining of
# the line, especially for the function-like macro.
if (i + 1 < len(tokens) and tokens[i+1].id == '(' and
(tokens[i].location.column + len(tokens[i].spelling) ==
tokens[i+1].location.column)):
while i < len(tokens):
id += tokens[i].id
if tokens[i].spelling == ')':
i += 1
break
i += 1
else:
id += tokens[i].id
# Advance to the next token that follows the macro id
i += 1
(i, ret) = consume_extent(i, tokens, extent=extent)
blocks.append(Block(ret, directive=directive,
lineno=t.location.line, identifier=id))
else:
(i, ret) = consume_extent(i, tokens)
blocks.append(Block(ret[2:], directive=directive,
lineno=t.location.line))
elif cursor.kind == CursorKind.INCLUSION_DIRECTIVE:
if buf:
blocks.append(Block(buf))
buf = []
directive = tokens[i+1].id
(i, ret) = consume_extent(i, tokens)
blocks.append(Block(ret[2:], directive=directive,
lineno=t.location.line))
elif cursor.kind == CursorKind.VAR_DECL:
if buf:
blocks.append(Block(buf))
buf = []
(i, ret) = consume_extent(i, tokens, detect_change=True)
buf += ret
elif cursor.kind == CursorKind.FUNCTION_DECL:
if buf:
blocks.append(Block(buf))
buf = []
(i, ret) = consume_extent(i, tokens, detect_change=True)
buf += ret
else:
(i, ret) = consume_line(i, tokens)
buf += ret
if buf:
blocks.append(Block(buf))
# _parsed=True indicates a successful parsing, although may result an
# empty BlockList.
self._parsed = True
return BlockList(blocks)
    def parse(self, tokzer):
        """Parse the given tokenizer's stream and return a BlockList."""
        return self.getBlocks(tokzer)
    def parseFile(self, path):
        """Parse the file at *path* and return a BlockList."""
        return self.getBlocks(CppFileTokenizer(path))
class BlockParserTests(unittest.TestCase):
    """BlockParser unit tests."""

    def get_blocks(self, lines):
        """Parse *lines* and return the stringified blocks as a list.

        Returns a real list rather than a lazy ``map`` object: under
        Python 3 a map iterator never compares equal to a list, so
        ``assertEqual(map(...), [...])`` would always fail.
        """
        blocks = BlockParser().parse(CppStringTokenizer('\n'.join(lines)))
        return [str(block) for block in blocks]

    def test_hash(self):
        self.assertEqual(self.get_blocks(["#error hello"]), ["#error hello"])

    def test_empty_line(self):
        self.assertEqual(self.get_blocks(["foo", "", "bar"]), ["foo bar"])

    def test_hash_with_space(self):
        # We currently cannot handle the following case with libclang properly.
        # Fortunately it doesn't appear in current headers.
        #self.assertEqual(self.get_blocks(["foo", " # ", "bar"]), ["foo", "bar"])
        pass

    def test_with_comment(self):
        self.assertEqual(self.get_blocks(["foo",
                                          " # /* ahah */ if defined(__KERNEL__) /* more */",
                                          "bar", "#endif"]),
                         ["foo", "#ifdef __KERNEL__", "bar", "#endif"])
################################################################################
################################################################################
##### #####
##### B L O C K L I S T O P T I M I Z A T I O N #####
##### #####
################################################################################
################################################################################
def find_matching_endif(blocks, i):
    """Traverse the blocks to find out the matching #endif.

    Scans forward from index *i*, tracking conditional nesting depth.
    Stops at the first same-level #else/#elif, or at the #endif that
    balances the conditional already open when the scan starts.
    Returns len(blocks) when the conditional is unterminated.
    """
    depth = 1
    total = len(blocks)
    while i < total:
        block = blocks[i]
        if block.isDirective():
            directive = block.directive
            if directive in ("if", "ifndef", "ifdef"):
                depth += 1
            elif directive in ("else", "elif"):
                # Only an else/elif belonging to the outermost conditional
                # terminates the scan; nested ones are skipped over.
                if depth == 1:
                    return i
            elif directive == "endif":
                depth -= 1
                if depth == 0:
                    return i
        i += 1
    return i
def optimize_if01(blocks):
    """Remove the code between #if 0 .. #endif in a list of CppBlocks.

    Returns a new list of blocks.  Conditionals whose controlling
    expression folds to the constant 0 or 1 are removed or rewritten;
    blocks whose expression cannot be folded are kept unchanged.
    NOTE(review): mutates directive/expr/tokens on the input blocks in
    place while building the result — callers should not reuse *blocks*.
    """
    i = 0
    n = len(blocks)
    result = []
    while i < n:
        # Copy every block up to the next #if/#elif into the result verbatim.
        j = i
        while j < n and not blocks[j].isIf():
            j += 1
        if j > i:
            logging.debug("appending lines %d to %d", blocks[i].lineno,
                          blocks[j-1].lineno)
            result += blocks[i:j]
        if j >= n:
            break
        expr = blocks[j].expr
        r = expr.toInt()
        if r is None:
            # Expression does not fold to a constant; keep the block as-is.
            result.append(blocks[j])
            i = j + 1
            continue
        if r == 0:
            # if 0 => skip everything until the corresponding #endif
            start_dir = blocks[j].directive
            j = find_matching_endif(blocks, j + 1)
            if j >= n:
                # unterminated #if 0, finish here
                break
            dir_ = blocks[j].directive
            if dir_ == "endif":
                logging.debug("remove 'if 0' .. 'endif' (lines %d to %d)",
                              blocks[i].lineno, blocks[j].lineno)
                if start_dir == "elif":
                    # Put an endif since we started with an elif.
                    result += blocks[j:j+1]
                i = j + 1
            elif dir_ == "else":
                # convert 'else' into 'if 1'
                logging.debug("convert 'if 0' .. 'else' into 'if 1' (lines %d "
                              "to %d)", blocks[i].lineno, blocks[j-1].lineno)
                if start_dir == "elif":
                    blocks[j].directive = "elif"
                else:
                    blocks[j].directive = "if"
                blocks[j].expr = CppExpr(CppStringTokenizer("1").tokens)
                i = j
            elif dir_ == "elif":
                # convert 'elif' into 'if'
                logging.debug("convert 'if 0' .. 'elif' into 'if'")
                if start_dir == "elif":
                    blocks[j].directive = "elif"
                else:
                    blocks[j].directive = "if"
                i = j
            continue
        # if 1 => find corresponding endif and remove/transform them
        k = find_matching_endif(blocks, j + 1)
        if k >= n:
            # unterminated #if 1, finish here
            logging.debug("unterminated 'if 1'")
            result += blocks[j+1:k]
            break
        start_dir = blocks[j].directive
        dir_ = blocks[k].directive
        if dir_ == "endif":
            logging.debug("convert 'if 1' .. 'endif' (lines %d to %d)",
                          blocks[j].lineno, blocks[k].lineno)
            if start_dir == "elif":
                # Add the elif in to the results and convert it to an elif 1.
                blocks[j].tokens = CppStringTokenizer("1").tokens
                result += blocks[j:j+1]
            # Recurse into the body: it may itself contain foldable #ifs.
            result += optimize_if01(blocks[j+1:k])
            if start_dir == "elif":
                # Add the endif in to the results.
                result += blocks[k:k+1]
            i = k + 1
        elif dir_ == "else":
            # convert 'else' into 'if 0'
            logging.debug("convert 'if 1' .. 'else' (lines %d to %d)",
                          blocks[j].lineno, blocks[k].lineno)
            if start_dir == "elif":
                # Add the elif in to the results and convert it to an elif 1.
                blocks[j].tokens = CppStringTokenizer("1").tokens
                result += blocks[j:j+1]
            result += optimize_if01(blocks[j+1:k])
            if start_dir == "elif":
                blocks[k].directive = "elif"
            else:
                blocks[k].directive = "if"
            blocks[k].expr = CppExpr(CppStringTokenizer("0").tokens)
            i = k
        elif dir_ == "elif":
            # convert 'elif' into 'if 0'
            logging.debug("convert 'if 1' .. 'elif' (lines %d to %d)",
                          blocks[j].lineno, blocks[k].lineno)
            result += optimize_if01(blocks[j+1:k])
            blocks[k].expr = CppExpr(CppStringTokenizer("0").tokens)
            i = k
    return result
class OptimizerTests(unittest.TestCase):
    """Tests for constant-folding of #if/#elif conditionals (optimizeAll)."""

    def parse(self, text, macros=None):
        """Parse *text*, optimize with the given macro map, return the output."""
        out = utils.StringOutput()
        blocks = BlockParser().parse(CppStringTokenizer(text))
        blocks.optimizeAll(macros)
        blocks.write(out)
        return out.get()

    def test_if1(self):
        text = """\
#if 1
#define GOOD
#endif
"""
        expected = """\
#define GOOD
"""
        self.assertEqual(self.parse(text), expected)

    def test_if0(self):
        text = """\
#if 0
#define SHOULD_SKIP1
#define SHOULD_SKIP2
#endif
"""
        expected = ""
        self.assertEqual(self.parse(text), expected)

    def test_if1_else(self):
        text = """\
#if 1
#define GOOD
#else
#define BAD
#endif
"""
        expected = """\
#define GOOD
"""
        self.assertEqual(self.parse(text), expected)

    def test_if0_else(self):
        text = """\
#if 0
#define BAD
#else
#define GOOD
#endif
"""
        expected = """\
#define GOOD
"""
        self.assertEqual(self.parse(text), expected)

    def test_if_elif1(self):
        text = """\
#if defined(something)
#define EXISTS
#elif 1
#define GOOD
#endif
"""
        expected = """\
#ifdef something
#define EXISTS
#elif 1
#define GOOD
#endif
"""
        self.assertEqual(self.parse(text), expected)

    def test_if_elif1_macro(self):
        text = """\
#if defined(something)
#define EXISTS
#elif defined(WILL_BE_ONE)
#define GOOD
#endif
"""
        expected = """\
#ifdef something
#define EXISTS
#elif 1
#define GOOD
#endif
"""
        self.assertEqual(self.parse(text, {"WILL_BE_ONE": "1"}), expected)

    def test_if_elif1_else(self):
        text = """\
#if defined(something)
#define EXISTS
#elif 1
#define GOOD
#else
#define BAD
#endif
"""
        expected = """\
#ifdef something
#define EXISTS
#elif 1
#define GOOD
#endif
"""
        self.assertEqual(self.parse(text), expected)

    # This test was previously defined twice with identical bodies; the
    # second definition shadowed the first, so only one copy is kept.
    def test_if_elif1_else_macro(self):
        text = """\
#if defined(something)
#define EXISTS
#elif defined(WILL_BE_ONE)
#define GOOD
#else
#define BAD
#endif
"""
        expected = """\
#ifdef something
#define EXISTS
#elif 1
#define GOOD
#endif
"""
        self.assertEqual(self.parse(text, {"WILL_BE_ONE": "1"}), expected)

    def test_macro_set_to_undefined_single(self):
        text = """\
#if defined(__KERNEL__)
#define BAD_KERNEL
#endif
"""
        expected = ""
        macros = {"__KERNEL__": kCppUndefinedMacro}
        self.assertEqual(self.parse(text, macros), expected)

    def test_macro_set_to_undefined_if(self):
        text = """\
#if defined(__KERNEL__) || !defined(__GLIBC__) || (__GLIBC__ < 2)
#define CHECK
#endif
"""
        expected = """\
#if !defined(__GLIBC__) || __GLIBC__ < 2
#define CHECK
#endif
"""
        macros = {"__KERNEL__": kCppUndefinedMacro}
        self.assertEqual(self.parse(text, macros), expected)

    def test_endif_comment_removed(self):
        text = """\
#ifndef SIGRTMAX
#define SIGRTMAX 123
#endif /* SIGRTMAX */
"""
        expected = """\
#ifndef SIGRTMAX
#define SIGRTMAX 123
#endif
"""
        self.assertEqual(self.parse(text), expected)

    def test_multilevel_if0(self):
        text = """\
#if 0
#if 1
#define BAD_6
#endif
#endif
"""
        expected = ""
        self.assertEqual(self.parse(text), expected)
class RemoveStructsTests(unittest.TestCase):
    def parse(self, text, structs):
        """Parse *text*, remove the named *structs*, and return the output."""
        out = utils.StringOutput()
        blocks = BlockParser().parse(CppStringTokenizer(text))
        blocks.removeStructs(structs)
        blocks.write(out)
        return out.get()
    def test_remove_struct_from_start(self):
        """A struct listed for removal at the start of the input is dropped."""
        text = """\
struct remove {
int val1;
int val2;
};
struct something {
struct timeval val1;
struct timeval val2;
};
"""
        expected = """\
struct something {
struct timeval val1;
struct timeval val2;
};
"""
        self.assertEqual(self.parse(text, set(["remove"])), expected)
    def test_remove_struct_from_end(self):
        """A struct listed for removal at the end of the input is dropped."""
        text = """\
struct something {
struct timeval val1;
struct timeval val2;
};
struct remove {
int val1;
int val2;
};
"""
        expected = """\
struct something {
struct timeval val1;
struct timeval val2;
};
"""
        self.assertEqual(self.parse(text, set(["remove"])), expected)
def test_remove_minimal_struct(self):
text = """\
struct remove {
};
"""
expected = "";
self.assertEqual(self.parse(text, set(["remove"])), expected)
    def test_remove_struct_with_struct_fields(self):
        """Removal drops the struct definition only; *references* to the
        removed struct inside kept structs are left untouched."""
        text = """\
struct something {
struct remove val1;
struct remove val2;
};
struct remove {
int val1;
struct something val3;
int val2;
};
"""
        expected = """\
struct something {
struct remove val1;
struct remove val2;
};
"""
        self.assertEqual(self.parse(text, set(["remove"])), expected)
    def test_remove_consecutive_structs(self):
        """Two back-to-back removed structs do not confuse the scanner."""
        text = """\
struct keep1 {
struct timeval val1;
struct timeval val2;
};
struct remove1 {
int val1;
int val2;
};
struct remove2 {
int val1;
int val2;
int val3;
};
struct keep2 {
struct timeval val1;
struct timeval val2;
};
"""
        expected = """\
struct keep1 {
struct timeval val1;
struct timeval val2;
};
struct keep2 {
struct timeval val1;
struct timeval val2;
};
"""
        self.assertEqual(self.parse(text, set(["remove1", "remove2"])), expected)
def test_remove_multiple_structs(self):
text = """\
struct keep1 {
int val;
};
struct remove1 {
int val1;
| |
# Source repository: SmartPhoenix/Persistent-Kingdoms (dataset metadata marker, commented out — was invalid Python)
from module_constants import *
from header_items import *
from header_operations import *
from header_triggers import *
import header_debug as dbg
import header_lazy_evaluation as lazy
####################################################################################################################
# Each item record contains the following fields:
# 1) Item id: used for referencing items in other files.
# The prefix itm_ is automatically added before each item id.
# 2) Item name. Name of item as it'll appear in inventory window
# 3) List of meshes. Each mesh record is a tuple containing the following fields:
# 3.1) Mesh name.
# 3.2) Modifier bits that this mesh matches.
# Note that the first mesh record is the default.
# 4) Item flags. See header_items.py for a list of available flags.
# 5) Item capabilities. Used for which animations this item is used with. See header_items.py for a list of available flags.
# 6) Item value.
# 7) Item stats: Bitwise-or of various stats about the item such as:
# weight, abundance, difficulty, head_armor, body_armor,leg_armor, etc...
# 8) Modifier bits: Modifiers that can be applied to this item.
# 9) [Optional] Triggers: List of simple triggers to be associated with the item.
# 10) [Optional] Factions: List of factions that item can be found as merchandise.
####################################################################################################################
# Some constants for ease of use.

# Modifier-bit masks: each names the set of item modifiers that can roll
# on a given category of item (see header_items for the imodbit_* flags).
imodbits_none = 0
imodbits_horse_basic = imodbit_swaybacked|imodbit_lame|imodbit_spirited|imodbit_heavy|imodbit_stubborn
imodbits_cloth = imodbit_tattered|imodbit_ragged|imodbit_sturdy|imodbit_thick|imodbit_hardened
imodbits_armor = imodbit_rusty|imodbit_battered|imodbit_crude|imodbit_thick|imodbit_reinforced|imodbit_lordly
imodbits_plate = imodbit_cracked|imodbit_rusty|imodbit_battered|imodbit_crude|imodbit_thick|imodbit_reinforced|imodbit_lordly
imodbits_polearm = imodbit_cracked|imodbit_bent|imodbit_balanced
imodbits_shield = imodbit_cracked|imodbit_battered|imodbit_thick|imodbit_reinforced
imodbits_sword = imodbit_rusty|imodbit_chipped|imodbit_balanced|imodbit_tempered
imodbits_sword_high = imodbit_rusty|imodbit_chipped|imodbit_balanced|imodbit_tempered|imodbit_masterwork
imodbits_axe = imodbit_rusty|imodbit_chipped|imodbit_heavy
imodbits_mace = imodbit_rusty|imodbit_chipped|imodbit_heavy
imodbits_pick = imodbit_rusty|imodbit_chipped|imodbit_balanced|imodbit_heavy
imodbits_bow = imodbit_cracked|imodbit_bent|imodbit_strong|imodbit_masterwork
imodbits_crossbow = imodbit_cracked|imodbit_bent|imodbit_masterwork
imodbits_missile = imodbit_bent|imodbit_large_bag
imodbits_thrown = imodbit_bent|imodbit_heavy|imodbit_balanced|imodbit_large_bag
imodbits_thrown_minus_heavy = imodbit_bent|imodbit_balanced|imodbit_large_bag
imodbits_horse_good = imodbit_spirited|imodbit_heavy
# Generic quality groupings.
imodbits_good = imodbit_sturdy|imodbit_thick|imodbit_hardened|imodbit_reinforced
imodbits_bad = imodbit_rusty|imodbit_chipped|imodbit_tattered|imodbit_ragged|imodbit_cracked|imodbit_bent
# NOTE(review): the "meek" modifier bit appears to be repurposed to mark
# female-styled items — confirm against the mod's modifier handling.
imodbit_female = imodbit_meek

# Sentinel tag values consumed by the itm_* helper functions below; the
# item-tuple post-processing recognizes records starting with these tags.
tag_item_class = -100.0
tag_item_herd_animal = -101.0
# Set a class for this item - listed in module_constants prefixed with item_class_ - with an optional associated value.
def itm_class(class_id, value=0):
    """Build a tagged record assigning an item class (item_class_* from
    module_constants), with an optional associated value."""
    return (tag_item_class, class_id, value)
# Mark a horse item as a herd animal. Only use for the adult item, not the child.
def itm_herd_animal(child_item=-1, grow_age=10, max_in_herd=20, attack_reaction=animal_reaction_flee, death_sound="snd_cow_slaughter", meat=0, hide=0, wildness=1):
    """Build the tagged record marking a horse item as a herd animal.

    Only use for the adult item, not the child.  The record carries the
    child item id, growth/herd parameters, reaction to attacks, death
    sound, and the meat/hide yields.
    """
    return [[tag_item_herd_animal, child_item, grow_age, max_in_herd, attack_reaction, death_sound, meat, hide, wildness]]
# Display the agent's heraldic banner or color on special item meshes, specifying the appropriate entry from module_tableau_materials.
def init_heraldic_item(tableau):
    """Trigger list that paints the wearer's heraldic banner or colors.

    On item init, runs script_item_set_banner with the given
    module_tableau_materials entry for the initializing agent/troop,
    and tags the item with the heraldic item class.
    """
    return [(ti_on_init_item,
             [(store_trigger_param_1, ":agent_id"),
              (store_trigger_param_2, ":troop_id"),
              (call_script, "script_item_set_banner", tableau, ":agent_id", ":troop_id"),
              ]),
            itm_class(item_class_heraldic)]
# Template for faction banner items.
# When adding a new banner texture, remember to add an entry to module_meshes in the correct place, and set an appropriate background color in module_scripts.
def itm_faction_banner(banner_id):
    """Build a carryable banner-pole item for the given banner texture id.

    When adding a new banner texture, remember to add an entry to
    module_meshes in the correct place and set an appropriate background
    color in module_scripts.
    """
    return ["pw_banner_pole_" + banner_id, "Banner", [("pw_banner_pole",0)], itp_type_polearm|itp_two_handed|itp_primary|itp_wooden_parry, itc_parry_polearm|itcf_carry_spear,
            1200, weight(7.0)|difficulty(14)|spd_rtng(70)|weapon_length(250)|swing_damage(10, blunt)|thrust_damage(5, blunt), imodbits_none,
            [(ti_on_init_item, [(cur_item_set_tableau_material, "tableau_faction_banner_pole", "mesh_banner_" + banner_id)])]]
# Template for castle capture point banner items (display only, not allowing pickup).
def itm_castle_banner(faction, suffix):
    """Build a castle capture-point banner item (display only — the
    itp_no_pick_up_from_ground flag prevents players taking it)."""
    return ["pw_banner_castle_" + faction + suffix, "Castle Banner", [("pw_banner_castle",0)], itp_no_pick_up_from_ground, 0,
            0, 0, imodbits_none, [(ti_on_init_item, [(cur_item_set_tableau_material, "tableau_castle_banner_" + suffix, faction)])]]
# Template for castle wall banner items (display only, not allowing pickup).
def itm_wall_banner(faction, suffix):
    """Build a castle wall banner item (display only — the
    itp_no_pick_up_from_ground flag prevents players taking it)."""
    return ["pw_banner_wall_" + faction + suffix, "Wall Banner", [("pw_banner_wall",0)], itp_no_pick_up_from_ground, 0,
            0, 0, imodbits_none, [(ti_on_init_item, [(cur_item_set_tableau_material, "tableau_castle_banner_" + suffix, faction)])]]
def itm_throw_wheat_trigger():
    """On-attack trigger: burst a wheat particle effect (server side only)."""
    return (ti_on_weapon_attack,
            [(multiplayer_is_server),
             # Reposition/rotate pos1 before bursting the particles
             # (the offsets are tuning values for the spray placement).
             (position_move_x, pos1, -10),
             (position_move_y, pos1, 50),
             (position_move_z, pos1, -50),
             (position_rotate_x, pos1, -30),
             (position_rotate_y, pos1, -30),
             (particle_system_burst, "psys_throw_wheat", pos1, 50),
             ])
def itm_read_book_trigger(string_id):
    """On-attack trigger: run the read-book script with the given string id
    for the attacking agent."""
    return (ti_on_weapon_attack,
            [(store_trigger_param_1, ":agent_id"),
             (call_script, "script_cf_read_book", string_id, ":agent_id"),
             ])
# Swap between different items when swinging, to change visual appearance.
def itm_swap_item_trigger(this_item, other_item):
    """On-attack trigger: swap this_item for other_item on the agent and
    wield it — used to change an item's visual appearance when swinging."""
    return (ti_on_weapon_attack,
            [(store_trigger_param_1, ":agent_id"),
             (assign, ":loop_end", ek_item_3 + 1),
             # Scan the equipment slots for this_item; once found, setting
             # :loop_end to -1 makes the try_for_range stop looping.
             (try_for_range, ":equip_slot", ek_item_0, ":loop_end"),
             (agent_get_item_slot, ":equip_item_id", ":agent_id", ":equip_slot"),
             (eq, ":equip_item_id", this_item),
             (assign, ":loop_end", -1),
             # NOTE(review): the slot index is incremented before the
             # unequip/equip calls — presumably those operations expect a
             # 1-based slot; confirm against header_operations.
             (val_add, ":equip_slot", 1),
             (agent_unequip_item, ":agent_id", this_item, ":equip_slot"),
             (agent_equip_item, ":agent_id", other_item, ":equip_slot"),
             (agent_set_wielded_item, ":agent_id", other_item),
             (try_end),
             ])
# Attempt to process a nearby animal when swinging.
def itm_butchering_knife():
    """On-attack trigger: attempt to butcher a nearby animal (server only).

    Requires the swinging agent's troop to have at least one point in
    the herding skill before the butchering script is called.
    """
    return (ti_on_weapon_attack,
            [(multiplayer_is_server),
             (store_trigger_param_1, ":agent_id"),
             (agent_get_troop_id, ":troop_id", ":agent_id"),
             (store_skill_level, ":skill", "skl_herding", ":troop_id"),
             (gt, ":skill", 0),
             (call_script, "script_cf_use_butchering_knife", ":agent_id"),
             ])
items = [
["no_item", "INVALID ITEM", [("invalid_item", 0)], itp_type_one_handed_wpn|itp_primary|itp_secondary|itp_no_parry, itc_dagger,
0, weight(1)|spd_rtng(1)|weapon_length(1)|swing_damage(1, blunt)|thrust_damage(1, blunt), imodbits_none],
["no_head", "INVALID HEAD", [("invalid_item", 0)], itp_type_head_armor, 0,
0, weight(1)|head_armor(1)|difficulty(0), imodbits_none],
["no_body", "INVALID BODY", [("invalid_item", 0)], itp_type_body_armor, 0,
0, weight(1)|body_armor(1)|difficulty(0), imodbits_none],
["no_foot", "INVALID FOOT", [("invalid_item", 0)], itp_type_foot_armor, 0,
0, weight(1)|leg_armor(1)|difficulty(0), imodbits_none],
["no_hand", "INVALID HAND", [("invalid_item", 0)], itp_type_hand_armor, 0,
0, weight(1)|body_armor(1)|difficulty(0), imodbits_none],
["no_horse", "INVALID HORSE", [("invalid_item", 0)], itp_type_horse, 0,
0, hit_points(1)|body_armor(1)|difficulty(0)|horse_speed(10)|horse_maneuver(40)|horse_charge(1)|horse_scale(1), imodbits_none],
["tattered_headcloth", "Tattered Headcloth", [("headcloth_a_new", 0)], itp_type_head_armor, 0,
14, weight(0.5)|head_armor(3)|difficulty(0), imodbits_cloth, init_heraldic_item("tableau_colored_helmets_new_d")],
["ragged_woolen_cap", "Ragged Woolen Cap", [("woolen_cap_new", 0)], itp_type_head_armor, 0,
16, weight(1)|head_armor(4)|difficulty(0), imodbits_cloth, init_heraldic_item("tableau_colored_helmets_new_e")],
["stained_felt_hat_b", "Stained Felt Hat", [("felt_hat_b_new", 0)], itp_type_head_armor, 0,
15, weight(1)|head_armor(4)|difficulty(0), imodbits_cloth, init_heraldic_item("tableau_colored_helmets_new_b")],
["straw_hat", "Straw Hat", [("straw_hat_new", 0)], itp_type_head_armor, 0,
29, weight(1)|head_armor(2)|difficulty(0), imodbits_cloth],
["head_wrappings", "Head Wrapping", [("head_wrapping", 0)], itp_type_head_armor|itp_fit_to_head, 0,
26, weight(0.25)|head_armor(3), imodbits_cloth],
["headcloth", "Headcloth", [("headcloth_a_new", 0)], itp_type_head_armor, 0,
80, weight(0.5)|head_armor(4)|difficulty(0), imodbits_cloth, init_heraldic_item("tableau_colored_helmets_new_d")],
["woolen_cap", "Woolen Cap", [("woolen_cap_new", 0)], itp_type_head_armor, 0,
62, weight(1)|head_armor(6)|difficulty(0), imodbits_cloth, init_heraldic_item("tableau_colored_helmets_new_e")],
["sarranid_felt_hat", "Felt Hat", [("sar_helmet3",0)], itp_type_head_armor, 0,
55, weight(1)|head_armor(5)|difficulty(0), imodbits_cloth],
["sarranid_felt_head_cloth", "Head Cloth", [("common_tulbent",0)], itp_type_head_armor|itp_attach_armature, 0,
221, weight(0.5)|head_armor(4)|difficulty(0), imodbits_cloth|imodbit_female],
["sarranid_felt_head_cloth_b", "Head Cloth", [("common_tulbent_b",0)], itp_type_head_armor|itp_attach_armature, 0,
225, weight(0.5)|head_armor(4)|difficulty(0), imodbits_cloth|imodbit_female],
["bride_crown", "Crown of Flowers", [("bride_crown",0)], itp_type_head_armor|itp_doesnt_cover_hair|itp_attach_armature, 0,
302, weight(0.5)|head_armor(0)|difficulty(0), imodbits_cloth|imodbit_female],
["khergit_lady_hat", "Blue Head Scarf", [("khergit_lady_hat",0)], itp_type_head_armor|itp_doesnt_cover_hair|itp_fit_to_head, 0,
239, weight(0.5)|head_armor(1)|difficulty(0), imodbits_cloth|imodbit_female],
["khergit_lady_hat_b", "Leather Head Scarf", [("khergit_lady_hat_b",0)], itp_type_head_armor|itp_doesnt_cover_hair|itp_fit_to_head, 0,
267, weight(0.5)|head_armor(4)|difficulty(0), imodbits_cloth|imodbit_female],
["sarranid_head_cloth", "Lady Head Cloth", [("tulbent",0)], itp_type_head_armor|itp_doesnt_cover_hair|itp_attach_armature, 0,
321, weight(0.5)|head_armor(4)|difficulty(0), imodbits_cloth|imodbit_female],
["sarranid_head_cloth_b", "Lady Head Cloth", [("tulbent_b",0)], itp_type_head_armor|itp_doesnt_cover_hair|itp_attach_armature, 0,
340, weight(0.5)|head_armor(4)|difficulty(0), imodbits_cloth|imodbit_female],
["wimple_a", "Wimple", [("wimple_a_new",0)], itp_type_head_armor|itp_fit_to_head, 0,
230, weight(0.5)|head_armor(4)|difficulty(0), imodbits_cloth|imodbit_female],
["wimple_b", "Wimple", [("wimple_b_new",0)], itp_type_head_armor|itp_fit_to_head, 0,
230, weight(0.5)|head_armor(4)|difficulty(0), imodbits_cloth|imodbit_female],
["barbette", "Barbette", [("barbette_new",0)], itp_type_head_armor|itp_fit_to_head, 0,
403, weight(1.0)|head_armor(8)|difficulty(0), imodbits_cloth|imodbit_female],
["arming_cap", "Arming Cap", [("arming_cap_a_new", 0)], itp_type_head_armor, 0,
78, weight(1)|head_armor(7)|difficulty(0), imodbits_cloth, init_heraldic_item("tableau_colored_helmets_new_d")],
["ladys_hood", "Woolen Hood", [("ladys_hood_new", 0)], itp_type_head_armor, 0,
120, weight(1)|head_armor(8)|difficulty(0), imodbits_cloth],
["fur_hat", "Fur Hat", [("fur_hat_a_new", 0)], itp_type_head_armor, 0,
110, weight(0.5)|head_armor(8)|difficulty(0), imodbits_cloth],
["felt_hat", "Felt Hat", [("felt_hat_a_new", 0)], itp_type_head_armor, 0,
74, weight(1)|head_armor(8)|difficulty(0), imodbits_cloth, init_heraldic_item("tableau_colored_helmets_new_b")],
["felt_hat_b", "Felt Hat", [("felt_hat_b_new", 0)], itp_type_head_armor, 0,
85, weight(1)|head_armor(8)|difficulty(0), imodbits_cloth, init_heraldic_item("tableau_colored_helmets_new_b")],
["leather_cap", "Leather Cap", [("leather_cap_a_new", 0)], itp_type_head_armor, 0,
106, weight(1)|head_armor(10)|difficulty(0), imodbits_cloth],
["common_hood", "Hood", [("hood_new", 0)], itp_type_head_armor, 0,
129, weight(1)|head_armor(10)|difficulty(0), imodbits_cloth, init_heraldic_item("tableau_colored_helmets_new_e")],
["hood_b", "Hood", [("hood_b", 0)], itp_type_head_armor, 0,
116, weight(1)|head_armor(10)|difficulty(0), imodbits_cloth],
["hood_c", "Hood", [("hood_c", 0)], itp_type_head_armor, 0,
124, weight(1)|head_armor(10)|difficulty(0), imodbits_cloth],
["hood_d", "Hood", [("hood_d", 0)], itp_type_head_armor, 0,
131, weight(1)|head_armor(10)|difficulty(0), imodbits_cloth],
["nomad_cap", "Nomad Cap", [("nomad_cap_a_new", 0)], itp_type_head_armor, 0,
272, weight(2.25)|head_armor(15)|difficulty(8), imodbits_cloth],
["nomad_cap_b", "Nomad Cap", [("nomad_cap_b_new",0)], itp_type_head_armor, 0,
243, weight(0.75)|head_armor(13)|difficulty(0), imodbits_cloth],
["black_hood", "Black Hood", [("hood_black",0)], itp_type_head_armor, 0,
143, weight(2)|head_armor(11)|difficulty(0), imodbits_cloth],
["surgeon_coif", "Surgeon's Coif", [("pw_surgeon_coif",0)], itp_type_head_armor, 0,
182, weight(1.5)|head_armor(10)|difficulty(0), imodbits_cloth],
["pilgrim_hood", "Pilgrim Hood", [("pilgrim_hood",0)], itp_type_head_armor, 0,
123, weight(1.25)|head_armor(11)|difficulty(0), imodbits_cloth],
["priest_coif", "Priestly Coif", [("pw_priest_coif",0)], itp_type_head_armor, 0,
265, weight(1)|head_armor(10)|difficulty(0), imodbits_cloth],
["padded_coif", "Padded Coif", [("padded_coif_a_new", 0)], itp_type_head_armor, 0,
116, weight(1)|head_armor(11)|difficulty(0), imodbits_cloth, init_heraldic_item("tableau_colored_helmets_new_b")],
["turban", "Turban", [("tuareg_open",0)], itp_type_head_armor, 0,
143, weight(2)|head_armor(11)|difficulty(0), imodbits_cloth],
["leather_steppe_cap_a", "Steppe Cap", [("leather_steppe_cap_a_new",0)], itp_type_head_armor, 0,
203, weight(1)|head_armor(12)|difficulty(7), imodbits_cloth],
["leather_steppe_cap_b", "Steppe Cap", [("tattered_steppe_cap_b_new",0)], itp_type_head_armor, 0,
234, weight(1)|head_armor(14)|difficulty(7), imodbits_cloth],
["nordic_archer_helmet", "Leather Helmet", [("Helmet_A_vs2",0)], itp_type_head_armor, 0,
240, weight(1.25)|head_armor(14)|difficulty(7), imodbits_plate],
["vaegir_fur_cap", "Cap with Fur", [("vaeg_helmet3",0)], itp_type_head_armor, 0,
250, weight(2)|head_armor(15)|difficulty(7), imodbits_plate],
["steppe_cap", "Steppe Cap", [("steppe_cap_a_new", 0)], itp_type_head_armor, 0,
276, weight(1)|head_armor(16)|difficulty(7), imodbits_cloth],
["leather_warrior_cap", "Leather Warrior Cap", [("skull_cap_new_b",0)], itp_type_head_armor, 0,
340, weight(1)|head_armor(18)|difficulty(7), imodbits_cloth],
["sarranid_warrior_cap", "Turban with Warrior Cap", [("tuareg_helmet",0)], itp_type_head_armor|itp_covers_beard, 0,
344, weight(2)|head_armor(19)|difficulty(7), imodbits_plate],
["nordic_veteran_archer_helmet", "Leather Helmet", [("Helmet_A",0)], itp_type_head_armor, 0,
356, weight(1.5)|head_armor(20)|difficulty(7), imodbits_plate],
["skullcap", "Skullcap", [("skull_cap_new_a",0)], itp_type_head_armor, 0,
376, weight(1.0)|head_armor(20)|difficulty(7), imodbits_plate],
["vaegir_fur_helmet", "Fur Helmet", [("vaeg_helmet2",0)], itp_type_head_armor, 0,
395, weight(1.5)|head_armor(21)|difficulty(7), imodbits_plate],
["bishop_mitre", "Bishop's Mitre", [("pw_bishop_mitre",0)], itp_type_head_armor, 0,
546, weight(1.5)|head_armor(13)|difficulty(8), imodbits_cloth],
["mail_coif", "Mail Coif", [("mail_coif_new",0)], itp_type_head_armor, 0,
403, weight(1.25)|head_armor(22)|difficulty(9), imodbits_armor],
["footman_helmet", "Footman's Helmet", [("skull_cap_new",0)], itp_type_head_armor, 0,
495, weight(1.5)|head_armor(24)|difficulty(9), imodbits_plate],
["sarranid_horseman_helmet", "Horseman Helmet", [("sar_helmet2",0)], itp_type_head_armor, 0,
580, weight(2)|head_armor(25)|difficulty(9), imodbits_plate],
["nasal_helmet", "Nasal Helmet", [("nasal_helmet_b",0)], itp_type_head_armor, 0,
605, weight(1.25)|head_armor(26)|difficulty(9), imodbits_plate],
["norman_helmet", "Helmet with Cap", [("norman_helmet_a",0)], itp_type_head_armor|itp_fit_to_head, 0,
654, weight(1.25)|head_armor(28)|difficulty(9), imodbits_plate],
["nordic_footman_helmet", "Footman Helmet", [("Helmet_B_vs2",0)], itp_type_head_armor|itp_fit_to_head, 0,
670, weight(1.75)|head_armor(30)|difficulty(9), imodbits_plate],
["khergit_war_helmet", "War Helmet", [("tattered_steppe_cap_a_new",0)], itp_type_head_armor, 0,
710, weight(2)|head_armor(31)|difficulty(9), imodbits_plate],
["segmented_helmet", "Segmented Helmet", [("segmented_helm_new",0)], itp_type_head_armor, 0,
724, weight(1.25)|head_armor(31)|difficulty(9), imodbits_plate],
["vaegir_spiked_helmet", "Spiked Cap", [("vaeg_helmet1",0)], itp_type_head_armor, 0,
823, weight(2)|head_armor(32)|difficulty(9), imodbits_plate],
["helmet_with_neckguard", "Helmet with Neckguard", [("neckguard_helm_new",0)], itp_type_head_armor, 0,
840, weight(1.5)|head_armor(33)|difficulty(10), imodbits_plate],
["flat_topped_helmet", "Flat Topped Helmet", [("flattop_helmet_new",0)], itp_type_head_armor, 0,
869, weight(1.75)|head_armor(33)|difficulty(10), imodbits_plate],
["nordic_fighter_helmet", "Fighter Helmet", [("Helmet_B",0)], itp_type_head_armor|itp_fit_to_head, 0,
912, weight(2)|head_armor(34)|difficulty(10), imodbits_plate],
["kettle_hat", "Kettle Hat", [("kettle_hat_new",0)], itp_type_head_armor, 0,
940, weight(1.75)|head_armor(35)|difficulty(10), imodbits_plate],
["sarranid_helmet1", "Keffiyeh Helmet", [("sar_helmet1",0)], itp_type_head_armor, 0,
923, weight(2)|head_armor(35)|difficulty(11), imodbits_plate],
["vaegir_lamellar_helmet", "Helmet with Lamellar Guard", [("vaeg_helmet4",0)], itp_type_head_armor, 0,
960, weight(2)|head_armor(38)|difficulty(11), imodbits_plate],
["spiked_helmet", "Spiked Helmet", [("spiked_helmet_new",0)], itp_type_head_armor, 0,
1078, weight(2)|head_armor(38)|difficulty(11), imodbits_plate],
["sarranid_mail_coif", "Mail Coif", [("tuareg_helmet2",0)], itp_type_head_armor, 0,
1230, weight(2)|head_armor(39)|difficulty(13), imodbits_plate],
["nordic_huscarl_helmet", "Huscarl's Helmet", [("Helmet_C_vs2",0)], itp_type_head_armor, 0,
1420, weight(2)|head_armor(40)|difficulty(14), imodbits_plate],
["bascinet", "Bascinet", [("bascinet_avt_new",0)], itp_type_head_armor, 0,
1579, weight(2)|head_armor(42)|difficulty(14), imodbits_plate],
["bascinet_2", "Bascinet with Aventail", [("bascinet_new_a",0)], itp_type_head_armor, 0,
1654, weight(2.25)|head_armor(44)|difficulty(14), imodbits_plate],
["bascinet_3", "Bascinet with Nose Guard", [("bascinet_new_b",0)], itp_type_head_armor, 0,
1683, weight(2.5)|head_armor(45)|difficulty(14), imodbits_plate],
["vaegir_noble_helmet", "Nobleman Helmet", [("vaeg_helmet7",0)], itp_type_head_armor, 0,
1710, weight(2)|head_armor(45)|difficulty(14), imodbits_plate],
["guard_helmet", "Guard Helmet", [("reinf_helmet_new",0)], itp_type_head_armor, 0,
2355, weight(2.5)|head_armor(47)|difficulty(15), imodbits_plate],
["sarranid_veiled_helmet", "Veiled Helmet", [("sar_helmet4",0)], itp_type_head_armor|itp_covers_beard, 0,
2341, weight(3)|head_armor(47)|difficulty(15), imodbits_plate],
["vaegir_war_helmet", "War Helmet", [("vaeg_helmet6",0)], itp_type_head_armor, 0,
2420, weight(2.25)|head_armor(47)|difficulty(15), imodbits_plate],
["nordic_warlord_helmet", "Warlord Helmet", [("Helmet_C",0)], itp_type_head_armor, 0,
3180, weight(2.25)|head_armor(48)|difficulty(15), imodbits_plate],
["bishop_helm", "Bishop's Helm", [("pw_bishop_helm",0)], itp_type_head_armor|itp_covers_head, 0,
6034, weight(3.0)|head_armor(49)|difficulty(15), imodbits_plate],
["full_helm", "Full Helm", [("great_helmet_new_b",0)], itp_type_head_armor|itp_covers_head, 0,
5121, weight(2.5)|head_armor(51)|difficulty(15), imodbits_plate],
["vaegir_mask", "War Mask", [("vaeg_helmet8",0)], itp_type_head_armor, 0,
6950, weight(2.5)|head_armor(50)|difficulty(15), imodbits_plate],
["vaegir_mask_b", "War Mask", [("vaeg_helmet9",0)], itp_type_head_armor|itp_covers_beard, 0,
7012, weight(2.75)|head_armor(52)|difficulty(15), imodbits_plate],
["great_helmet", "Great Helmet", [("great_helmet_new",0)], itp_type_head_armor|itp_covers_head, 0,
7380, weight(2.75)|head_armor(53)|difficulty(15), imodbits_plate],
["winged_great_helmet", "Winged Great Helmet", [("maciejowski_helmet_new",0)], itp_type_head_armor|itp_covers_head, 0,
9240, | |
# gh_stars: 100-1000 (dataset metadata marker, commented out — was invalid Python)
#!/usr/bin/env python3
# Collection of utility functions
import os
import sys
import logging
import fcntl
import subprocess
import shutil
import requests
import time
import apprise
import random
import re
import psutil
from arm.config.config import cfg
from arm.ripper import apprise_bulk
from arm.ui import app, db
import arm.models.models as m
# Title used for every notification ARM sends (may be decorated in notify()).
NOTIFY_TITLE = "ARM notification"
def notify(job, title, body):
    """Dispatch a notification to every configured provider.

    :param job: current Job instance (used for the job-id title suffix)
    :param title: notification title
    :param body: notification body text
    """
    # Optionally decorate the title with the site name and the job id.
    if cfg["ARM_NAME"] != "":
        title = f"[{cfg['ARM_NAME']}] - {title}"
    if cfg["NOTIFY_JOBID"]:
        title = f"{title} - {job.job_id}"
    # Collect the apprise URLs of every configured service, then register
    # them all on a single Apprise instance.
    urls = []
    if cfg["PB_KEY"] != "":
        urls.append('pbul://' + str(cfg["PB_KEY"]))
    if cfg["IFTTT_KEY"] != "":
        urls.append('ifttt://' + str(cfg["IFTTT_KEY"]) + "@" + str(cfg["IFTTT_EVENT"]))
    if cfg["PO_USER_KEY"] != "":
        urls.append('pover://' + str(cfg["PO_USER_KEY"]) + "@" + str(cfg["PO_APP_KEY"]))
    if cfg["JSON_URL"] != "":
        urls.append(str(cfg["JSON_URL"]).replace("http://", "json://").replace("https://", "jsons://"))
    apobj = apprise.Apprise()
    for url in urls:
        apobj.add(url)
    # Notification failures are logged but never abort a rip.
    try:
        apobj.notify(body, title=title)
    except Exception as error:  # noqa: E722
        logging.error(f"Failed sending notifications. error:{error}. Continuing processing...")
    # A bulk apprise config file is handled separately by apprise_bulk.
    if cfg["APPRISE"] != "":
        try:
            apprise_bulk.apprise_notify(cfg["APPRISE"], title, body)
            logging.debug("apprise-config: " + str(cfg["APPRISE"]))
        except Exception as error:  # noqa: E722
            logging.error("Failed sending apprise notifications. " + str(error))
def notify_entry(job):
    """Send the on-entry notification matching the detected disc type.

    Exits the process when the disc could not be identified.
    """
    if job.disctype in ("dvd", "bluray"):
        # Video disc: link the user straight to the job-detail page so the
        # entry can be corrected while ripping is still in progress.
        message = (
            f"Found disc: {job.title}. Disc type is {job.disctype}. Main Feature is {cfg['MAINFEATURE']}"
            f". Edit entry here: http://{check_ip()}:"
            f"{cfg['WEBSERVER_PORT']}/jobdetail?job_id={job.job_id}"
        )
    elif job.disctype == "music":
        message = f"Found music CD: {job.label}. Ripping all tracks"
    elif job.disctype == "data":
        message = "Found data disc. Copying data."
    else:
        # Unknown media is fatal for this job.
        notify(job, NOTIFY_TITLE, "Could not identify disc. Exiting.")
        sys.exit()
    notify(job, NOTIFY_TITLE, message)
def scan_emby(job):
    """Trigger a library scan on an Emby media server.

    Best-effort: any HTTP error status or network failure is logged and
    otherwise ignored so a rip is never aborted by an unreachable server.

    :param job: current Job instance (kept for interface parity; unused here)
    """
    if not cfg["EMBY_REFRESH"]:
        logging.info("EMBY_REFRESH config parameter is false. Skipping emby scan.")
        return
    logging.info("Sending Emby library scan request")
    url = f"http://{cfg['EMBY_SERVER']}:{cfg['EMBY_PORT']}/Library/Refresh?api_key={cfg['EMBY_API_KEY']}"
    try:
        req = requests.post(url)
        if req.status_code > 299:
            req.raise_for_status()
        logging.info("Emby Library Scan request successful")
    except requests.exceptions.HTTPError:
        logging.error(f"Emby Library Scan request failed with status code: {req.status_code}")
    except requests.exceptions.RequestException as error:
        # requests.post itself raises (connection refused, DNS failure,
        # timeout, ...) before any response exists; previously this
        # propagated and crashed the caller.
        logging.error(f"Emby Library Scan request failed: {error}")
def sleep_check_process(process_str, transcode_limit):
    """Wait until there is room in the transcode queue.

    Polls the process table until fewer than ``transcode_limit`` processes
    named ``process_str`` are running, sleeping a randomised 20-120 second
    interval between checks so concurrent ARM instances don't poll in
    lock-step.

    :param process_str: the process name from arm.yaml to count
    :param transcode_limit: user-defined maximum simultaneous transcodes;
        values <= 0 disable the limit
    :return: True once there is free space in the queue,
        False when the limit is disabled
    """
    if transcode_limit <= 0:
        logging.info("Transcode limit is disabled")
        return False
    loop_count = transcode_limit + 1
    logging.debug("loop_count " + str(loop_count))
    logging.info("Starting A sleep check of " + str(process_str))
    while loop_count >= transcode_limit:
        loop_count = sum(1 for proc in psutil.process_iter() if proc.name() == process_str)
        if transcode_limit > loop_count:
            return True
        # Randomise the wait so each instance checks at different times.
        # (The old message wrongly claimed a fixed 12-second wait.)
        sleep_time = random.randrange(20, 120, 10)
        logging.debug(f"Number of processes running is: {loop_count}; sleeping for {sleep_time} seconds.")
        time.sleep(sleep_time)
def convert_job_type(video_type):
    """Map a job's video type onto its destination media sub-folder name."""
    sub_folders = {"movie": "movies", "series": "tv"}
    return sub_folders.get(video_type, "unidentified")
def fix_job_title(job):
    """Return the display title for a job, appending the year when known.

    A missing, empty or "0000" year is treated as unknown.
    """
    has_real_year = bool(job.year) and job.year != "0000"
    return f"{job.title} ({job.year})" if has_real_year else f"{job.title}"
def move_files(basepath, filename, job, ismainfeature=False):
    """
    Move files into final media directory\n\n
    :param basepath: path to source directory\n
    :param filename: name of file to be moved\n
    :param job: instance of Job class (provides video_type, title, year, hasnicetitle)\n
    :param ismainfeature: True when this file is the main feature; otherwise it is treated as an extra
    :return: None
    """
    type_sub_folder = convert_job_type(job.video_type)
    videotitle = fix_job_title(job)
    # NOTE(review): the literal '(unknown)' in the log strings below looks like
    # a redacted placeholder for the filename -- confirm against upstream.
    logging.debug(f"Arguments: {basepath} : (unknown) : {job.hasnicetitle} : {videotitle} : {ismainfeature}")
    # Destination layout: COMPLETED_PATH/<movies|tv|unidentified>/<Title (Year)>/
    m_path = os.path.join(cfg["COMPLETED_PATH"], str(type_sub_folder), videotitle)
    # For series there are no extras as we never get a main feature
    e_path = os.path.join(m_path, cfg["EXTRAS_SUB"]) if job.video_type != "series" else m_path
    make_dir(m_path)
    if ismainfeature is True:
        logging.info(f"Track is the Main Title. Moving '(unknown)' to {m_path}")
        # The main feature is renamed to "<Title (Year)>.<DEST_EXT>".
        m_file = os.path.join(m_path, videotitle + "." + cfg["DEST_EXT"])
        if not os.path.isfile(m_file):
            try:
                shutil.move(os.path.join(basepath, filename), m_file)
            except Exception as e:
                logging.error(f"Unable to move '(unknown)' to '{m_path}' - Error: {e}")
        else:
            # Never overwrite an existing rip.
            logging.info(f"File: {m_file} already exists. Not moving.")
    else:
        make_dir(e_path)
        logging.info(f"Moving '(unknown)' to {e_path}")
        # Extras keep their original filename; e_file is only used for the
        # existence check below (it mirrors the main-feature naming).
        e_file = os.path.join(e_path, videotitle + "." + cfg["DEST_EXT"])
        if not os.path.isfile(e_file):
            try:
                shutil.move(os.path.join(basepath, filename), os.path.join(e_path, filename))
            except Exception as e:
                logging.error(f"Unable to move '(unknown)' to {e_path} - {e}")
        else:
            logging.info(f"File: {e_file} already exists. Not moving.")
def make_dir(path):
    """Create ``path`` (including parents) unless it already exists.

    path = Path to directory
    Returns True when the directory was created, False when it already
    existed.  Exits the process when creation fails (usually permissions).
    """
    if os.path.exists(path):
        return False
    logging.debug(f"Creating directory: {path}")
    try:
        os.makedirs(path)
    except OSError:
        err = f"Couldn't create a directory at path: {path} Probably a permissions error. Exiting"
        logging.error(err)
        sys.exit(err)
    return True
def get_cdrom_status(devpath):
    """get the status of the cdrom drive\n
    devpath = path to cdrom\n
    returns int
    CDS_NO_INFO 0\n
    CDS_NO_DISC 1\n
    CDS_TRAY_OPEN 2\n
    CDS_DRIVE_NOT_READY 3\n
    CDS_DISC_OK 4\n
    see linux/cdrom.h for specifics\n
    Exits the process (status 2) when the device cannot be opened.
    """
    try:
        fd = os.open(devpath, os.O_RDONLY | os.O_NONBLOCK)
    except OSError:
        # Sometimes ARM tries to open hard-drive nodes (hdX/sdX/loopN);
        # suppress the log line for those, but always bail out: without a
        # file descriptor the ioctl below cannot run.  (The old code fell
        # through for hd/sd/loop paths and crashed with an unbound `fd`.)
        if not re.search(r'hd[a-j]|sd[a-j]|loop[0-9]', devpath):
            logging.info(f"Failed to open device {devpath} to check status.")
        sys.exit(2)
    try:
        # 0x5326 is CDROM_DRIVE_STATUS from linux/cdrom.h.
        return fcntl.ioctl(fd, 0x5326, 0)
    finally:
        # The old code leaked the descriptor on every call.
        os.close(fd)
def find_file(filename, search_path):
    """Recursively search ``search_path`` for a file named ``filename``.

    filename = filename to look for
    search_path = path to search recursively
    Returns True as soon as a match is found, otherwise False.
    """
    return any(filename in names for _, _, names in os.walk(search_path))
def rip_music(job, logfile):
    """
    Rip music CD using abcde using abcde config\n
    job = job object\n
    logfile = location of logfile\n
    returns True/False for success/fail (implicitly None when the disc is not a music CD)
    """
    abcfile = cfg["ABCDE_CONFIG_FILE"]
    if job.disctype == "music":
        logging.info("Disc identified as music")
        # If user has set a cfg file with ARM use it
        if os.path.isfile(abcfile):
            cmd = f'abcde -d "{job.devpath}" -c {abcfile} >> "{logfile}" 2>&1'
        else:
            cmd = f'abcde -d "{job.devpath}" >> "{logfile}" 2>&1'
        logging.debug(f"Sending command: {cmd}")
        # NOTE(review): shell=True with interpolated paths -- devpath/logfile
        # come from local config and hardware, but this is worth hardening.
        try:
            subprocess.check_output(cmd, shell=True).decode("utf-8")
            logging.info("abcde call successful")
            return True
        except subprocess.CalledProcessError as ab_error:
            err = f"Call to abcde failed with code: {ab_error.returncode} ({ab_error.output})"
            logging.error(err)
            return False
def rip_data(job, datapath, logfile):
    """
    Rip data disc using dd on the command line\n
    job = job object\n
    datapath = path to copy data to\n
    logfile = location of logfile\n
    returns True/False for success/fail (implicitly None when the disc is not a data disc)
    """
    if job.disctype == "data":
        logging.info("Disc identified as data")
        # Fall back to a generic label when the disc has none.
        if job.label == "" or job.label is None:
            job.label = "datadisc"
        # Write to a ".part" file first and only rename to ".iso" on success,
        # so an interrupted rip never leaves a complete-looking image behind.
        incomplete_filename = os.path.join(datapath, job.label + ".part")
        final_filename = os.path.join(datapath, job.label + ".iso")
        logging.info("Ripping data disc to: " + incomplete_filename)
        # Added from pull 366
        cmd = 'dd if="{0}" of="{1}" {2} 2>> {3}'.format(
            job.devpath,
            incomplete_filename,
            cfg["DATA_RIP_PARAMETERS"],
            logfile
        )
        logging.debug("Sending command: " + cmd)
        # NOTE(review): shell=True with interpolated paths -- values come from
        # local config and hardware, but this is worth hardening.
        try:
            subprocess.check_output(
                cmd,
                shell=True
            ).decode("utf-8")
            logging.info("Data rip call successful")
            os.rename(incomplete_filename, final_filename)
            return True
        except subprocess.CalledProcessError as dd_error:
            err = "Data rip failed with code: " + str(dd_error.returncode) + "(" + str(dd_error.output) + ")"
            logging.error(err)
            # Remove the partial image so a retry starts clean.
            os.unlink(incomplete_filename)
            # sys.exit(err)
            return False
def set_permissions(job, directory_to_traverse):
    """Apply CHMOD_VALUE (and optionally ownership) to a directory tree.

    :param job: job object; ``job.config`` carries SET_MEDIA_OWNER,
        CHOWN_USER and CHOWN_GROUP
    :param directory_to_traverse: directory whose permissions are fixed
    :return: False when SET_MEDIA_PERMISSIONS is disabled, None otherwise
    """
    if not cfg['SET_MEDIA_PERMISSIONS']:
        return False
    try:
        # CHMOD_VALUE is configured as an octal string (e.g. "777").
        corrected_chmod_value = int(str(cfg["CHMOD_VALUE"]), 8)
        logging.info("Setting permissions to: " + str(cfg["CHMOD_VALUE"]) + " on: " + directory_to_traverse)
        os.chmod(directory_to_traverse, corrected_chmod_value)
        # Ownership is only changed when user AND group are configured.
        # Previously SET_MEDIA_OWNER alone made the walk below hit an
        # unbound uid/gid (NameError, swallowed by the broad except).
        set_owner = bool(job.config.SET_MEDIA_OWNER and job.config.CHOWN_USER and job.config.CHOWN_GROUP)
        if set_owner:
            import pwd
            import grp
            uid = pwd.getpwnam(job.config.CHOWN_USER).pw_uid
            gid = grp.getgrnam(job.config.CHOWN_GROUP).gr_gid
            os.chown(directory_to_traverse, uid, gid)
        for dirpath, l_directories, l_files in os.walk(directory_to_traverse):
            for cur_dir in l_directories:
                logging.debug("Setting path: " + cur_dir + " to permissions value: " + str(cfg["CHMOD_VALUE"]))
                os.chmod(os.path.join(dirpath, cur_dir), corrected_chmod_value)
                if set_owner:
                    os.chown(os.path.join(dirpath, cur_dir), uid, gid)
            for cur_file in l_files:
                logging.debug("Setting file: " + cur_file + " to permissions value: " + str(cfg["CHMOD_VALUE"]))
                os.chmod(os.path.join(dirpath, cur_file), corrected_chmod_value)
                if set_owner:
                    os.chown(os.path.join(dirpath, cur_file), uid, gid)
        logging.info("Permissions set successfully: True")
    except Exception as error:
        # Best-effort: permission problems are logged, never fatal.
        logging.error(f"Permissions setting failed as: {error}")
def check_db_version(install_path, db_file):
"""
Check if db exists and is up to date.
If it doesn't exist create it. If it's out of date update it.
"""
from alembic.script import ScriptDirectory
from alembic.config import Config
import sqlite3
import flask_migrate
mig_dir = os.path.join(install_path, "arm/migrations")
config = Config()
config.set_main_option("script_location", mig_dir)
script = ScriptDirectory.from_config(config)
# create db file if it doesn't exist
if not os.path.isfile(db_file):
logging.info("No database found. Initializing arm.db...")
make_dir(os.path.dirname(db_file))
with app.app_context():
flask_migrate.upgrade(mig_dir)
if not os.path.isfile(db_file):
logging.error("Can't create database file. This could be a permissions issue. Exiting...")
sys.exit()
# check to see if db is at current revision
head_revision = script.get_current_head()
logging.debug("Head is: " + head_revision)
conn = sqlite3.connect(db_file)
c = conn.cursor()
c.execute("SELECT {cn} FROM {tn}".format(cn="version_num", tn="alembic_version"))
db_version = c.fetchone()[0]
logging.debug("Database version is: " + db_version)
if head_revision == db_version:
logging.info("Database is up to date")
else:
logging.info(
| |
import random
import platform
if platform.system() == 'Darwin':
import matplotlib
matplotlib.use('MacOSX')
import matplotlib.pyplot as plt
import networkx
import plotly.graph_objects as go
from matplotlib.collections import LineCollection
from plotly import graph_objects
from plotly.offline import iplot, plot
from statistics import mean
from cdtea.space_time import SpaceTime
from cdtea.visualization.coordinates import *
from cdtea import face as Face
"""
Valuable details
https://chart-studio.plotly.com/~empet/14742/mesh3d-with-intensity-tests/
https://plotly.com/python/reference/mesh3d/
"""
# default styles used by plotly_triangular_mesh for edge and surface traces
line_settings = {'opacity': 1, 'line': dict(color='rgb(20,20,20)', width=5)}
mesh_settings = {'opacity': .9}
# display configuration: hide all axes/chrome in the 3d scene
no_axis = {'showbackground': False, 'showgrid': False, 'showline': False, 'showticklabels': False, 'ticks': '', 'title': '', 'zeroline': False}
layout = {'width': 1000, 'height': 1000, 'showlegend': False, 'scene': {'xaxis': no_axis, 'yaxis': no_axis, 'zaxis': no_axis}, 'hovermode': False}
# edge colors keyed by edge type (used by plot_3d_nx)
EDGE_TYPE_COLOR = {'spacelike': '#ff0000', 'timelike': '#0000ff', }
def plotly_triangular_mesh(nodes, faces, face_color=None, node_color=None, name="", plot_edges=True, line_set=line_settings, mesh_set=mesh_settings):
    """Build plotly trace dicts for a triangular mesh.

    nodes -- (x, y, z) coordinate sequences
    faces -- (i, j, k) vertex-index sequences, one entry per triangle
    Returns [mesh] when plot_edges is False, otherwise [mesh, lines].
    """
    x, y, z = nodes
    i, j, k = faces
    base = {'type': 'mesh3d', 'x': x, 'y': y, 'z': z, 'i': i, 'j': j, 'k': k, 'name': name, 'hoverinfo': 'skip'}
    mesh = {**base, **mesh_set}
    # Vertex colouring takes precedence over per-face colouring.
    if node_color:
        mesh["vertexcolor"] = node_color
    elif face_color:
        mesh["facecolor"] = face_color
    if plot_edges is False:
        # the triangle sides are not plotted
        return [mesh]
    # Trace each triangle's outline; a None entry breaks the line between
    # consecutive triangles.
    x_e, y_e, z_e = [], [], []
    for a, b, c in zip(i, j, k):
        x_e.extend([x[a], x[b], x[c], x[a], None])
        y_e.extend([y[a], y[b], y[c], y[a], None])
        z_e.extend([z[a], z[b], z[c], z[a], None])
    base_lines = {'type': 'scatter3d', 'x': x_e, 'y': y_e, 'z': z_e, 'mode': 'lines', 'name': name}
    lines = {**base_lines, **line_set}
    return [mesh, lines]
# 3d plots
# color functions
# BROKEN, ideally prompts some change to space-time
def face_color_time_direction(face):
    """Return an RGB tuple colouring a triangle by its time orientation.

    Past-pointing triangles come back blue (0, 0, 200), future-pointing red
    (200, 0, 0).

    NOTE(review): broken as flagged above -- ``st`` and ``theta_t`` are not
    defined in this scope (they are locals of plot_3d), so calling this
    raises NameError.  ``T`` is also computed but never used.
    """
    import statistics
    T = len(st.get_layers())  # `st` is undefined here -- see NOTE above
    face = list(face)
    # Layer (time) index of each of the three vertices; `theta_t` is also
    # undefined at module scope.
    layers = [theta_t[n] for n in face]
    mode = statistics.mode(layers)
    # The single vertex whose layer differs from the other two.
    outlier = [theta_t[n] for n in face if theta_t[n] != mode][0]
    # this colors past pointing triangles blue
    if mode > outlier:  # past pointing
        return ((0, 0, 200))
    # this colors future pointing triangles red
    elif mode < outlier:  # future pointing
        return ((200, 0, 0))
def node_color_random(n):
    """Return a random RGB tuple (each channel in [0, 1)); ``n`` is ignored."""
    return tuple(random.random() for _ in range(3))
def plot_3d(st, type="torus", filename=None, get_coords=get_naive_coords, radius_1=2, radius_2=1, plot_edges=True, node_color_function=node_color_random):
    """
    Generate a 3d plot of the space-time embedded in the surface of a torus.

    st -- the SpaceTime to plot
    type -- embedding surface: "torus" or "cylinder"
    filename -- when given, write an offline html plot to ../plots/<filename>.html;
        otherwise display inline with iplot
    get_coords -- maps st to (theta_x, theta_t) angular coordinate dicts
    radius_1, radius_2 -- major/minor radii of the torus
    plot_edges -- forwarded to plotly_triangular_mesh
    node_color_function -- node -> color callable used for vertex colors
    """
    theta_x, theta_t = get_coords(st)
    x, y, z, i, j, k, color = [], [], [], [], [], [], []
    idx = 0
    node_to_idx = {}  # maps an event to an index.
    node_color = []
    # loop through each node and append its coordinates
    for n in st.nodes:
        node_to_idx[n] = idx
        v = theta_x[n]
        u = theta_t[n]
        # node color is set here
        node_color.append(node_color_function(n))
        if type == "torus":
            # torus parametrisation (np presumably comes from the
            # coordinates star-import -- TODO confirm)
            x.append((radius_1 + radius_2 * np.cos(v)) * np.cos(u))
            y.append((radius_1 + radius_2 * np.cos(v)) * np.sin(u))
            z.append(radius_2 * np.sin(v))
        if type == "cylinder":
            x.append(radius_2 * np.cos(v))
            y.append(radius_2 * np.sin(v))
            z.append(radius_2 * u * np.sqrt(3) / 2.)
        """
        new types can be added here
        """
        idx += 1
    face_color = []  # stays empty: per-face coloring is disabled (see BROKEN note above)
    for face in st.faces:
        face = list(face)
        # doesn't draw any triangles that would stretch across two time slices. This removes the middle triangles connecting the top to the bottom
        # needs more info from space time to find triangle orientation, should this be stored in each triangle?
        # if type == "cylinder" and abs(mode - outlier) > 2 * pi / T * 2:
        # continue
        i.append(node_to_idx[face[0]])
        j.append(node_to_idx[face[1]])
        k.append(node_to_idx[face[2]])
    # generate the mesh
    data = plotly_triangular_mesh((x, y, z), (i, j, k), face_color=face_color, node_color=node_color, name=filename, plot_edges=plot_edges)
    layout["title"] = filename  # NOTE: mutates the module-level layout dict
    fig = dict(data=data, layout=layout)
    if filename:
        plot(fig, filename="../plots/" + filename + ".html")
    else:
        iplot(fig)
# 2d plot (cutting a space and time slice)
def plot_2d(st, offset=2 * pi / 600., get_coords=get_naive_coords, labels=False):
    """
    Draw a flat (theta_x, theta_t) matplotlib plot of the space-time.

    st -- the SpaceTime to plot
    offset -- small perpendicular shift that separates overlapping edge lines
    get_coords -- maps st to (theta_x, theta_t) coordinate dicts
    labels -- when True, annotate nodes and faces with their keys

    Faces/edges whose vertex coordinates differ by >= pi (i.e. that would be
    drawn across the plot) are skipped.
    """
    theta_x, theta_t = get_coords(st)
    coords = {}
    # Cache each event's (x, t) coordinate pair, keyed by event key.
    for n in event.events(st, st.nodes):
        v = theta_x[n.key]
        u = theta_t[n.key]
        coords[n.key] = (v, u)
    face_coordinate = {}  # face -> centroid coordinate
    face_is_display = {}  # face -> whether the face is drawn
    for face in Face.faces(st):
        face_lst = list(face.nodes)
        xx, yy = [], []
        for n in face_lst:
            xx.append(theta_x[n])
            yy.append(theta_t[n])
        avg_x = np.mean(xx)
        avg_y = np.mean(yy)
        # Shrink each triangle halfway towards its centroid so that
        # neighbouring triangles don't touch.
        xx = [p - (p - avg_x) / 2 for p in xx]
        yy = [p - (p - avg_y) / 2 for p in yy]
        face_coordinate[face] = (avg_x, avg_y)
        face_is_display[face] = False
        # Only draw faces whose vertex coordinates stay within pi of each other.
        if max([abs(theta_t[face_lst[0]] - theta_t[face_lst[1]]), abs(theta_t[face_lst[0]] - theta_t[face_lst[2]])]) < pi:
            if max([abs(theta_x[face_lst[0]] - theta_x[face_lst[1]]), abs(theta_x[face_lst[0]] - theta_x[face_lst[2]])]) < pi:
                face_is_display[face] = True
                # type 0 faces are filled blue, others red.
                c = (.5, 0, 0, .6)
                if face.type == 0:
                    c = (0, 0, .5, .6)
                plt.fill(xx, yy, c=c)
                if labels:
                    plt.annotate(face.key, (avg_x, avg_y), va="center", ha="center", c="white")
    # Links between neighbouring faces (dual-graph edges).
    face_right_connections = []
    face_left_connections = []
    face_time_connections = []
    for face in Face.faces(st):
        x, y = face_coordinate[face]
        x_r, y_r = face_coordinate[face.right]
        x_l, y_l = face_coordinate[face.left]
        x_t, y_t = face_coordinate[face.temporal_neighbor]
        # right
        if face_is_display[face] and face_is_display[face.right]:
            if abs(y - y_r) < 10:
                if abs(x - x_r) < 10:
                    face_right_connections.append([(x, y-offset), (x_r, y_r-offset), ])
        # left
        if face_is_display[face] and face_is_display[face.left]:
            if abs(y - y_l) < 10:
                if abs(x - x_l) < 10:
                    face_left_connections.append([(x, y+offset), (x_l, y_l+offset), ])
        # temporal
        if face_is_display[face] and face_is_display[face.temporal_neighbor]:
            if abs(y - y_t) < 10:
                if abs(x - x_t) < 10:
                    if face.type == 0:
                        face_time_connections.append([(x+offset/2., y), (x_t+offset/2., y_t), ])
                    else:
                        face_time_connections.append([(x-offset/2., y), (x_t-offset/2., y_t), ])
    # Event-to-event edges, grouped by direction so each group gets its own color.
    edges_past_pointing, edges_future_pointing, edges_left_pointing, edges_right_pointing = [], [], [], []
    for e in event.events(st, st.nodes):
        past_asj = None
        for adjacent in e.past:
            # presumably a duplicate-neighbour debug check -- prints repeats
            if past_asj == adjacent:
                print(adjacent)
            past_asj = adjacent
            if abs(theta_t[e.key] - theta_t[adjacent.key]) < pi:
                if abs(theta_x[e.key] - theta_x[adjacent.key]) < pi:
                    edges_past_pointing.append([coords[e.key] + np.array([0, offset]), coords[adjacent.key] + np.array([0, offset]), ])
        for adjacent in e.future:
            if abs(theta_t[e.key] - theta_t[adjacent.key]) < pi:
                if abs(theta_x[e.key] - theta_x[adjacent.key]) < pi:
                    edges_future_pointing.append([coords[e.key] - np.array([0, offset]), coords[adjacent.key] - np.array([0, offset]), ])
        if abs(theta_t[e.key] - theta_t[e.left.key]) < pi:
            if abs(theta_x[e.key] - theta_x[e.left.key]) < pi:
                edges_left_pointing.append([coords[e.key] + np.array([0, offset]), coords[e.left.key] + np.array([0, offset]), ])
        if abs(theta_t[e.key] - theta_t[e.right.key]) < pi:
            if abs(theta_x[e.key] - theta_x[e.right.key]) < pi:
                edges_right_pointing.append([coords[e.key] + np.array([0, -offset]), coords[e.right.key] + np.array([0, -offset]), ])
    # One LineCollection per group (cheaper than per-line plot calls).
    plt.gca().add_collection(LineCollection(edges_future_pointing, color=(1, 0, 0, 1), antialiaseds=True, linewidth=0.6, ))
    plt.gca().add_collection(LineCollection(edges_past_pointing, color=(0, 0, 1, 1), antialiaseds=True, linewidth=0.6, ))
    plt.gca().add_collection(LineCollection(edges_left_pointing, color=(0, 1, 0, 1), antialiaseds=True, linewidth=0.6, ))
    plt.gca().add_collection(LineCollection(edges_right_pointing, color=(.5, 0, .5, 1), antialiaseds=True, linewidth=0.6, ))
    plt.gca().add_collection(LineCollection(face_right_connections, color=(1, 0, 0, 1), antialiaseds=True, linewidth=.6, ))
    plt.gca().add_collection(LineCollection(face_left_connections, color=(0, 0, 1, 1), antialiaseds=True, linewidth=.6, ))
    plt.gca().add_collection(LineCollection(face_time_connections, color=(0, 1, 0, 1), antialiaseds=True, linewidth=.6, ))
    # Larger markers when labels are drawn so the key text fits inside a node.
    s = 100
    if labels == True:
        for n in st.nodes:
            plt.annotate(n, coords[n], va="center", ha="center", c="black")
        s = 600
    x = [coords[n][0] for n in st.nodes]
    y = [coords[n][1] for n in st.nodes]
    plt.scatter(x, y, color="white", zorder=2, s=s, edgecolors="black")
    plt.axis("off")
    plt.show()
def plot_3d_nx(st: SpaceTime, render: bool = True, iterations: int = 50, layout_type: str = 'spring'):
G = st.to_networkx()
if layout_type == 'spring':
layout = networkx.spring_layout(G, iterations=iterations, dim=3)
elif layout_type == 'spectral':
layout = networkx.spectral_layout(G, dim=3)
else:
raise ValueError('Unknown layout type: {}'.format(layout_type))
edges = G.edges()
spacelike_edges = [e for e in edges if G.get_edge_data(e[0], e[1])['type'] == 'spacelike']
timelike_edges = [e for e in edges if G.get_edge_data(e[0], e[1])['type'] == 'timelike']
spacelike_edge_x = []
spacelike_edge_y = []
spacelike_edge_z = []
for edge in spacelike_edges:
x0, y0, z0 = layout[edge[0]]
x1, y1, z1 = layout[edge[1]]
spacelike_edge_x.extend([x0, x1, None])
spacelike_edge_y.extend([y0, y1, None])
spacelike_edge_z.extend([z0, z1, None])
timelike_edge_x = []
timelike_edge_y = []
timelike_edge_z = []
for edge in timelike_edges:
x0, y0, z0 = layout[edge[0]]
x1, y1, z1 = layout[edge[1]]
timelike_edge_x.extend([x0, x1, None])
timelike_edge_y.extend([y0, y1, None])
timelike_edge_z.extend([z0, z1, None])
spacelike_edge_trace = graph_objects.Scatter3d(x=spacelike_edge_x, y=spacelike_edge_y, z=spacelike_edge_z, line=dict(width=0.5, color=EDGE_TYPE_COLOR['spacelike']),
hoverinfo='none', mode='lines')
timelike_edge_trace = graph_objects.Scatter3d(x=timelike_edge_x, y=timelike_edge_y, z=timelike_edge_z, line=dict(width=0.5, color=EDGE_TYPE_COLOR['timelike']),
hoverinfo='none', mode='lines')
node_x = []
node_y = []
node_z = []
for node in G.nodes():
x, y, z = layout[node]
node_x.append(x)
node_y.append(y)
node_z.append(z)
layer_dict = networkx.get_node_attributes(G, 'layer')
node_trace = graph_objects.Scatter3d(x=node_x, y=node_y, z=node_z, mode='markers', marker=dict( # showscale=True,
# colorscale options
# 'Greys' | 'YlGnBu' | 'Greens' | 'YlOrRd' | 'Bluered' | 'RdBu' |
# 'Reds' | 'Blues' | 'Picnic' | 'Rainbow' | 'Portland' | 'Jet' |
# 'Hot' | 'Blackbody' | 'Earth' | 'Electric' | 'Viridis' |
colorscale='Viridis', # reversescale=True,
color=[layer_dict[n] for n in G.nodes()], size=3, opacity=0.8, colorbar=dict(thickness=150, title='Time Layer', xanchor='left', | |
import asyncio
import logging
import disnake
from disnake.ext import commands
from cogs.mixins import AceMixin
from utils.configtable import ConfigTable
from utils.context import can_prompt
from utils.converters import EmojiConverter, MaxLengthConverter
from utils.string import po, shorten
log = logging.getLogger(__name__)
# Texts shown on spawned role-selector messages.
FOOTER_TEXT = 'Click a reaction to add/remove roles.'
RERUN_PROMPT = 'Re-run `roles spawn` for changes to take effect.'
# Reaction emoji used as "buttons" in the selector editor (see RoleHead.embed).
UP_EMOJI = '🔼'
DOWN_EMOJI = '🔽'
MOVEUP_EMOJI = '⏫'
MOVEDOWN_EMOJI = '⏬'
ADD_ROLE_EMOJI = '🇷'
ADD_SEL_EMOJI = '🇸'
DEL_EMOJI = '➖'
EDIT_EMOJI = '✏️'
SAVE_EMOJI = '💾'
ABORT_EMOJI = '🚮'
# All editor buttons, in the order they are added to the editor message.
EMBED_EMOJIS = (
    ADD_SEL_EMOJI, ADD_ROLE_EMOJI, UP_EMOJI, DOWN_EMOJI,
    MOVEUP_EMOJI, MOVEDOWN_EMOJI, EDIT_EMOJI, DEL_EMOJI, ABORT_EMOJI, SAVE_EMOJI
)
class SelectorEmojiConverter(EmojiConverter):
    """Emoji converter that rejects emoji already used in the current selector."""

    async def convert(self, ctx, argument):
        emoji = await super().convert(ctx, argument)
        # ctx.head is the RoleHead attached by the editor command.
        used = {role.emoji for role in ctx.head.selector.roles}
        if emoji in used:
            raise commands.CommandError('This emoji already exists in this selector.')
        return emoji
# Length-capped converters for user-supplied names/descriptions
# (presumably sized to Discord embed field limits -- confirm).
role_title_converter = MaxLengthConverter(199)
role_desc_converter = MaxLengthConverter(1024)
selector_title_converter = MaxLengthConverter(256)
selector_desc_converter = MaxLengthConverter(1024)
class SelectorInlineConverter(commands.Converter):
    """Parse a yes/no style answer into a bool."""

    _TRUE = frozenset(('yes', 'y', 'true', 't', '1', 'enable', 'on'))
    _FALSE = frozenset(('no', 'n', 'false', 'f', '0', 'disable', 'off'))

    async def convert(self, ctx, argument):
        lowered = argument.lower()
        if lowered in self._TRUE:
            return True
        if lowered in self._FALSE:
            return False
        raise commands.CommandError('Input could not be interpreted as boolean.')
class CustomRoleConverter(commands.RoleConverter):
    """RoleConverter with extra guild-specific safety checks.

    Rejects the @everyone role, roles already used by another selector,
    roles at/above the invoker's top role, and the configured moderator
    role.  Note: returns the role *id*, not the Role object.
    """

    async def convert(self, ctx, argument):
        try:
            role = await super().convert(ctx, argument)
        except commands.CommandError as exc:
            # Re-raise as a plain CommandError so the message is shown as-is.
            raise commands.CommandError(str(exc))
        if role == ctx.guild.default_role:
            raise commands.CommandError('The *everyone* role is not allowed.')
        # ctx.head is the RoleHead the editor attached; scan every selector.
        if role.id in (other_role.role_id for selector in ctx.head.selectors for other_role in selector.roles):
            raise commands.CommandError('This role already exists somewhere else.')
        if ctx.author != ctx.guild.owner and role >= ctx.author.top_role:
            raise commands.CommandError('Sorry, you can\'t add roles higher than your top role.')
        config = await ctx.bot.config.get_entry(ctx.guild.id)
        if role == config.mod_role:
            raise commands.CommandError('Can\'t add moderation role to selector.')
        return role.id
# (prompt, converter) pairs asked in order when interactively adding a role.
NEW_ROLE_PREDS = (
    ('What role do you want to add? (Send a role mention or just the role ID)', CustomRoleConverter()),
    ('What name should this role entry have?', role_title_converter),
    ('What emoji should be associated with this role?', SelectorEmojiConverter()),
    ('What description should this role have?', role_desc_converter),
)
# (prompt, converter) pairs asked when creating a new selector.
NEW_SEL_PREDS = (
    ('What should the name of the selector be?', selector_title_converter),
)
# Texts used while prompting during an edit session.
EDIT_FOOTER = 'Send a message with your answer! Send \'exit\' to cancel.'
RETRY_MSG = 'Please try again, or send \'exit\' to cancel.'
class MaybeDirty:
    """Mixin tracking whether an object has unsaved modifications."""

    # Class-level default; set_dirty/set_clean shadow it per instance.
    dirty = False

    def set_dirty(self):
        """Mark the object as modified since the last save."""
        self.dirty = True

    def set_clean(self):
        """Mark the object as saved/unmodified."""
        self.dirty = False
class MaybeNew:
    """Mixin: an object is "new" until it has been given a database id."""

    @property
    def is_new(self):
        """True while ``self.id`` is still None (not yet stored)."""
        return self.id is None
class Role(MaybeDirty, MaybeNew):
    """In-memory representation of one role entry inside a selector."""

    def __init__(self, role_id, name, emoji, desc):
        # Database id; stays None until the entry is stored.
        self.id = None
        self.role_id = role_id
        self.name = name
        self.emoji = emoji
        self.description = desc

    @classmethod
    def from_record(cls, record):
        """Build a Role from a database record (dict-like)."""
        role = cls(record.get('role_id'), record.get('name'), record.get('emoji'), record.get('description'))
        role.id = record.get('id')
        return role
class Selector(MaybeDirty, MaybeNew):
    """In-memory representation of one role selector (a group of Role entries)."""

    def __init__(self, title, desc, roles: list):
        # Database id; stays None until the selector is stored.
        self.id = None
        self.title = title
        self.description = desc
        self.inline = True
        self.roles = roles

    @classmethod
    def from_record(cls, record, roles):
        """Build a Selector from a database record plus its Role objects."""
        selector = cls(record.get('title'), record.get('description'), roles)
        selector.inline = record.get('inline')
        selector.id = record.get('id')
        return selector

    def add_role(self, index, role):
        """Insert ``role`` at ``index`` and mark the selector dirty."""
        self.set_dirty()
        self.roles.insert(index, role)
class RoleHead(MaybeDirty):
    """Editor state: the full list of selectors plus a movable cursor.

    The cursor points either at a selector (``role_pos is None``) or at a
    role inside the current selector (``role_pos`` is an index).
    """
    # Markers wrapped around the currently selected item in the embed.
    front = '-> '
    back = ' <-'
    def __init__(self, conf, selectors: list):
        # conf: guild config entry; persists the ordered selector id list.
        self.conf = conf
        self.selectors = selectors
        self.selector_pos = 0
        # None means the selector itself (not one of its roles) is selected.
        self.role_pos = None
    @property
    def selector(self):
        # Currently selected selector.
        return self.selectors[self.selector_pos]
    @property
    def role(self):
        # Currently selected role, or None when a selector is selected.
        if self.role_pos is None:
            return None
        return self.selector.roles[self.role_pos]
    @property
    def selector_max(self):
        # Highest valid selector index.
        return len(self.selectors) - 1
    @property
    def role_max(self):
        # Highest valid role index within the current selector.
        return len(self.selector.roles) - 1
    def add_selector(self, index, selector):
        """Insert a selector at ``index`` and mark the head dirty."""
        self.set_dirty()
        self.selectors.insert(index, selector)
    def move_selector(self, direction):
        """Swap the current selector with its neighbour (+1 down, -1 up; wraps)."""
        self.set_dirty()
        swap_with = (self.selector_pos + direction) % (self.selector_max + 1)
        self.selectors[self.selector_pos], self.selectors[swap_with] = self.selectors[swap_with], self.selectors[self.selector_pos]
        self.selector_pos = swap_with
    def move_role(self, direction):
        """Move the selected role up/down, crossing selector boundaries."""
        sel = self.selector
        sel.set_dirty()
        new_sel_pos = (self.selector_pos + direction) % (self.selector_max + 1)
        new_sel = self.selectors[new_sel_pos]
        selector_count = len(self.selectors)
        # if this is the last role in this selector and we're moving down
        if selector_count > 1 and direction == 1 and self.role_pos == self.role_max:
            # move the role to the first role slot in the selector below
            new_sel.add_role(0, sel.roles.pop(self.role_pos))
            self.selector_pos = new_sel_pos
            self.role_pos = 0
        # if this is the first role in this selector and we're moving up
        elif selector_count > 1 and direction == -1 and self.role_pos == 0:
            # move the role to the last role slot in the selector above
            new_role_pos = len(new_sel.roles)
            new_sel.add_role(new_role_pos, sel.roles.pop(self.role_pos))
            self.selector_pos = new_sel_pos
            self.role_pos = new_role_pos
        # otherwise, just swap the two roles in this selector
        elif len(self.selector.roles) > 1:
            swap_with = (self.role_pos + direction) % len(sel.roles)
            sel.roles[self.role_pos], sel.roles[swap_with] = sel.roles[swap_with], sel.roles[self.role_pos]
            self.role_pos = swap_with
    def up(self):
        """Move the cursor one step up, wrapping across selectors."""
        if self.role_pos is None:
            # get the above selector
            self.selector_pos = (self.selector_pos - 1) % (self.selector_max + 1)
            role_count = len(self.selector.roles)
            # if it has items, select the last item in that selector
            if role_count:
                self.role_pos = role_count - 1
            else:
                self.role_pos = None
        # in a selector
        else:
            if self.role_pos > 0:
                self.role_pos -= 1
            else:
                # step from the first role up to the selector header itself
                self.role_pos = None
    def down(self):
        """Move the cursor one step down, wrapping across selectors."""
        # selector is currently selected
        if self.role_pos is None:
            # check if there's a role in the selector we can select
            if len(self.selector.roles) > 0:
                self.role_pos = 0
            else:
                # otherwise go to the selector below
                self.selector_pos = (self.selector_pos + 1) % (self.selector_max + 1)
        # role is currently selected
        else:
            # if there's a role below to select...
            if self.role_pos != self.role_max:
                self.role_pos += 1
            # otherwise, select next selector
            else:
                self.role_pos = None
                self.selector_pos = (self.selector_pos + 1) % (self.selector_max + 1)
    def embed(self, footer=''):
        """Render the editor embed: button legend plus one field per selector."""
        e = disnake.Embed(
            description=(
                f'{ADD_SEL_EMOJI} Add selector\n{ADD_ROLE_EMOJI} Add role\n{UP_EMOJI} {DOWN_EMOJI} Move up/down\n'
                f'{MOVEUP_EMOJI} {MOVEDOWN_EMOJI} Move item up/down\n{EDIT_EMOJI} Edit item\n'
                f'{DEL_EMOJI} Delete item\n{ABORT_EMOJI} Discard changes\n{SAVE_EMOJI} Save changes\n\nEditor:'
            )
        )
        if not self.selectors:
            e.description = 'Click {} to create your first role selector!'.format(ADD_SEL_EMOJI)
            return e
        e.set_footer(text=footer)
        def wrap(to_wrap):
            # Mark ``to_wrap`` as the item the cursor is on.
            return self.front + to_wrap + self.back
        for sel_idx, selector in enumerate(self.selectors):
            rls = list()
            for role_idx, (role) in enumerate(selector.roles):
                string = '{} {}'.format(role.emoji, shorten(role.name, 64))
                rls.append(wrap(string) if sel_idx == self.selector_pos and role_idx == self.role_pos else string)
            e.add_field(
                name=wrap(selector.title) if self.role_pos is None and sel_idx == self.selector_pos else selector.title,
                value='\n'.join(rls) if rls else 'Select the selector and press {} to add a role!'.format(ADD_ROLE_EMOJI),
                inline=False
            )
        return e
    async def store(self, ctx):
        """Persist the editor state: delete removed rows, insert new ones,
        update dirty ones, then write the ordered selector id list to conf."""
        db = ctx.bot.db
        # delete role entries
        selector_ids = list(selector.id for selector in self.selectors if selector.id is not None)
        role_ids = list(role.id for selector in self.selectors for role in selector.roles if role.id is not None)
        # delete role entries that don't exist anymore
        await db.execute(
            'DELETE FROM role_entry WHERE guild_id=$1 AND id!=ALL($2::INTEGER[])',
            ctx.guild.id, role_ids
        )
        # delete role selectors that don't exist anymore
        await db.execute(
            'DELETE FROM role_selector WHERE guild_id=$1 AND id!=ALL($2::INTEGER[])',
            ctx.guild.id, selector_ids
        )
        sel_ids = list()
        for selector in self.selectors:
            ids = list()
            for role in selector.roles:
                if role.is_new:
                    # brand-new role: insert and remember the generated id
                    ids.append(await db.fetchval(
                        'INSERT INTO role_entry (guild_id, role_id, name, emoji, description) values ($1, $2, $3, $4, $5) RETURNING id',
                        ctx.guild.id, role.role_id, role.name, role.emoji, role.description
                    ))
                else:
                    if role.dirty:
                        await db.execute(
                            'UPDATE role_entry SET name=$2, emoji=$3, description=$4 WHERE id=$1',
                            role.id, role.name, role.emoji, role.description
                        )
                    ids.append(role.id)
            if selector.is_new:
                sel_ids.append(await db.fetchval(
                    'INSERT INTO role_selector (guild_id, title, description, inline, roles) VALUES ($1, $2, $3, $4, $5) RETURNING id',
                    ctx.guild.id, selector.title, selector.description, selector.inline, ids
                ))
            else:
                if selector.dirty:
                    await db.execute(
                        'UPDATE role_selector SET title=$2, description=$3, inline=$4, roles=$5 WHERE id=$1',
                        selector.id, selector.title, selector.description, selector.inline, ids
                    )
                sel_ids.append(selector.id)
        await self.conf.update(selectors=sel_ids)
class Roles(AceMixin, commands.Cog):
'''Create role selection menu(s).'''
def __init__(self, bot):
super().__init__(bot)
self.editing = set()
self.messages = dict()
self.footer_tasks = dict()
self.footer_lock = asyncio.Lock()
self.config = ConfigTable(bot, table='role', primary='guild_id')
async def bot_check(self, ctx):
return (ctx.channel.id, ctx.author.id) not in self.editing
	async def cog_check(self, ctx):
		# every command in this cog requires moderator permissions
		return await ctx.is_mod()
def set_editing(self, ctx):
self.editing.add((ctx.channel.id, ctx.author.id))
def unset_editing(self, ctx):
try:
self.editing.remove((ctx.channel.id, ctx.author.id))
except KeyError:
pass
	@commands.group(hidden=True, invoke_without_command=True)
	async def roles(self, ctx):
		# a bare `roles` invocation just shows the group help
		await ctx.send_help(self.roles)
	@roles.command()
	@can_prompt()
	@commands.bot_has_permissions(manage_messages=True)
	async def editor(self, ctx):
		'''Editor for selectors and roles.'''
		# ignore command input from user while editor is open
		self.set_editing(ctx)
		conf = await self.config.get_entry(ctx.guild.id)
		# fetch this guild's selectors, preserving the stored order
		slcs = await self.db.fetch(
			'''
			SELECT rs.*
			FROM role_selector as rs
			JOIN unnest($1::INTEGER[]) WITH ORDINALITY t(id, ord) USING (id)
			WHERE id=ANY($1::INTEGER[])
			ORDER BY t.ord
			''',
			conf.selectors
		)
		selectors = list()
		for slc in slcs:
			# fetch the selector's role entries, also in stored order
			roles = await self.db.fetch(
				'''
				SELECT re.*
				FROM role_entry as re
				JOIN unnest($1::INTEGER[]) WITH ORDINALITY t(id, ord) USING (id)
				WHERE id=ANY($1::INTEGER[])
				ORDER BY t.ord
				''',
				slc.get('roles')
			)
			selector = Selector.from_record(slc, list(Role.from_record(role) for role in roles))
			selectors.append(selector)
		head = RoleHead(conf, selectors)
		# so converters can access the head for data integrity tests...
		ctx.head = head
		msg = await ctx.send(embed=disnake.Embed(description='Please wait while reactions are being added...'))
		self.messages[ctx.guild.id] = msg
		for emoji in EMBED_EMOJIS:
			await msg.add_reaction(emoji)
		def pred(reaction, user):
			# only react to the invoking author's reactions on the editor message
			return reaction.message.id == msg.id and user.id == ctx.author.id
		async def close():
			# release the command lock and clean up the editor message
			self.unset_editing(ctx)
			try:
				await msg.delete()
				self.messages.pop(ctx.guild.id)
			except disnake.HTTPException:
				pass
		# main editor loop: re-render, wait for a reaction, dispatch on emoji
		while True:
			await msg.edit(embed=head.embed())
			try:
				reaction, user = await self.bot.wait_for('reaction_add', check=pred, timeout=300.0)
			except asyncio.TimeoutError:
				await close()
				raise commands.CommandError('Role editor closed after 5 minutes of inactivity.')
			else:
				await msg.remove_reaction(reaction.emoji, user)
				reac = str(reaction)
				if reac == ADD_SEL_EMOJI:
					if len(head.selectors) > 7:
						await ctx.send(
							embed=disnake.Embed(description='No more than 8 selectors, sorry!'),
							delete_after=6
						)
						continue
					selector_data = await self._multiprompt(ctx, msg, NEW_SEL_PREDS)
					if selector_data is None:
						continue
					selector = Selector(selector_data[0], None, list())
					selector.set_dirty()
					# insert right after the currently selected selector (or first)
					new_pos = 0 if not head.selectors else head.selector_pos + 1
					head.add_selector(new_pos, selector)
					head.selector_pos = new_pos
					head.role_pos = None
				if reac == ABORT_EMOJI:
					await close()
					raise commands.CommandError('Editing aborted, no changes saved.')
				if reac == SAVE_EMOJI:
					await head.store(ctx)
					await close()
					await ctx.send('New role selectors saved. Do `roles spawn` to see!')
					break
				# rest of the actions assume at least one item (selector) is present
				if not head.selectors:
					continue
				if reac == ADD_ROLE_EMOJI:
					if len(head.selector.roles) > 24:
						await ctx.send(
							embed=disnake.Embed(description='No more than 25 roles in one selector, sorry!'),
							delete_after=6
						)
						continue
					role_data = await self._multiprompt(ctx, msg, NEW_ROLE_PREDS)
					if role_data is None:
						continue
					role = Role(*role_data)
					new_pos = 0 if head.role_pos is None else head.role_pos + 1
					head.selector.add_role(new_pos, role)
					head.role_pos = new_pos
				if reac == DOWN_EMOJI:
					head.down()
				if reac == UP_EMOJI:
					head.up()
				if reac in (MOVEUP_EMOJI, MOVEDOWN_EMOJI):
					direction = -1 if reac == MOVEUP_EMOJI else 1
					if head.role_pos is None:
						head.move_selector(direction)
					else:
						head.move_role(direction)
				if reac == DEL_EMOJI:
					if head.role_pos is None:
						if len(head.selector.roles):
							# confirm before deleting a selector that still has roles in it
							p = ctx.prompt(
								'Delete selector?',
								'The selector you\'re trying to delete has {} roles inside it.'.format(
									len(head.selector.roles)
								)
							)
							if not await p:
								continue
						head.selectors.pop(head.selector_pos)
						# clamp the cursor if we deleted the last selector
						if head.selector_pos > head.selector_max:
							head.selector_pos = head.selector_max
						head.role_pos = None
					else:
						head.selector.roles.pop(head.role_pos)
						if len(head.selector.roles) == 0:
							head.role_pos = None
						elif head.role_pos > head.role_max:
							head.role_pos = head.role_max
				if reac == EDIT_EMOJI:
					await self._edit_item(
						ctx, msg,
						head.selector if head.role_pos is None else head.selector.roles[head.role_pos]
					)
# similarly to 'tag make', unset editing if an error occurs to not lock the users from using the bot
@editor.error
async def editor_error(self, ctx, error):
self.unset_editing(ctx)
# try to delete the embed message if it exists
try:
msg = self.messages.pop(ctx.guild.id)
await msg.delete()
except (KeyError, disnake.HTTPException):
pass
async def _multiprompt(self, ctx, msg, preds):
outs | |
import os
from datetime import datetime, timedelta
from shutil import copy
from unittest.mock import Mock, patch
import numpy as np
import pandas as pd
from lighthouse.constants.fields import (
FIELD_COORDINATE,
FIELD_PLATE_BARCODE,
FIELD_RESULT,
FIELD_ROOT_SAMPLE_ID,
FIELD_SOURCE,
)
from lighthouse.helpers.reports import (
add_cherrypicked_column,
delete_reports,
get_cherrypicked_samples,
get_distinct_plate_barcodes,
get_fit_to_pick_samples,
get_new_report_name_and_path,
report_query_window_start,
unpad_coordinate,
)
# ----- get_new_report_name_and_path tests -----
def test_get_new_report_name_and_path(app, freezer):
    """The generated report name embeds the (frozen) current timestamp."""
    timestamp = datetime.now().strftime("%y%m%d_%H%M")
    with app.app_context():
        report_name, _ = get_new_report_name_and_path()
    assert report_name == f"{timestamp}_fit_to_pick_with_locations.xlsx"
# ----- unpad_coordinate tests -----
def test_unpad_coordinate_A01():
    # a zero-padded column number gets unpadded
    result = unpad_coordinate("A01")
    assert result == "A1"
def test_unpad_coordinate_A1():
    # an already-unpadded coordinate is returned unchanged
    result = unpad_coordinate("A1")
    assert result == "A1"
def test_unpad_coordinate_A10():
    # a trailing zero that is significant must be kept
    result = unpad_coordinate("A10")
    assert result == "A10"
def test_unpad_coordinate_B01010():
    # only the zeros directly after the row letter are stripped
    result = unpad_coordinate("B01010")
    assert result == "B1010"
# ----- delete_reports tests -----
def test_delete_reports(app):
    """delete_reports removes each named report file from the reports dir."""
    copies_of_reports_folder = "tests/data/reports_copies"
    filenames = [
        "200716_1345_positives_with_locations.xlsx",
        "200716_1618_positives_with_locations.xlsx",
        "200716_1640_positives_with_locations.xlsx",
        "200716_1641_fit_to_pick_with_locations.xlsx",
        "200716_1642_fit_to_pick_with_locations.xlsx",
    ]
    # Copy the fixture reports into the app's reports dir so they exist to be
    # deleted. Fixed: the f-strings contained a literal "(unknown)" instead of
    # the loop variable, so no per-file copy/check actually happened.
    for filename in filenames:
        copy(f"{copies_of_reports_folder}/{filename}", f"{app.config['REPORTS_DIR']}/{filename}")
    with app.app_context():
        delete_reports(filenames)
    # every report must be gone afterwards
    for filename in filenames:
        assert os.path.isfile(f"{app.config['REPORTS_DIR']}/{filename}") is False
# ----- get_cherrypicked_samples tests -----
def test_get_cherrypicked_samples_test_db_connection_close(app):
    """The DB connection must be closed after get_cherrypicked_samples runs."""
    root_sample_ids = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
    barcodes = ["123", "456"]
    with app.app_context(), patch("sqlalchemy.create_engine") as mock_sql_engine:
        mock_connection = Mock()
        mock_sql_engine().connect.return_value = mock_connection
        get_cherrypicked_samples(root_sample_ids, barcodes)
        mock_connection.close.assert_called_once()
def test_get_cherrypicked_samples_test_db_connection_close_on_exception(app):
    """The DB connection must be closed even when the query raises."""
    root_sample_ids = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
    barcodes = ["123", "456"]
    with app.app_context(), patch("sqlalchemy.create_engine") as mock_sql_engine:
        with patch("pandas.read_sql", side_effect=Exception("Boom!")):
            mock_connection = Mock()
            mock_sql_engine().connect.return_value = mock_connection
            get_cherrypicked_samples(root_sample_ids, barcodes)
            mock_connection.close.assert_called_once()
# Test Scenario
# - Mocking database responses
# - Only the Sentinel query returns matches (No Beckman)
# - No chunking: a single query is made in which all matches are returned
# - No duplication of returned matches
def test_get_cherrypicked_samples_no_beckman(app):
    """Single un-chunked query; only the Sentinel workflow returns matches."""
    mock_frames = [
        # Cherrypicking query response
        pd.DataFrame(["MCM001", "MCM003", "MCM005"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0, 1, 2]),
    ]
    root_sample_ids = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
    barcodes = ["123", "456"]
    with app.app_context(), patch("sqlalchemy.create_engine", return_value=Mock()):
        with patch("pandas.read_sql", side_effect=mock_frames):
            returned_samples = get_cherrypicked_samples(root_sample_ids, barcodes)
    assert returned_samples.at[0, FIELD_ROOT_SAMPLE_ID] == "MCM001"
    assert returned_samples.at[1, FIELD_ROOT_SAMPLE_ID] == "MCM003"
    assert returned_samples.at[2, FIELD_ROOT_SAMPLE_ID] == "MCM005"
# Test Scenario
# - Mocking database responses
# - Only the Sentinel queries return matches (No Beckman)
# - Chunking: multiple queries are made, with all matches contained in the sum of these queries
# - No duplication of returned matches
def test_get_cherrypicked_samples_chunking_no_beckman(app):
    """Chunked Sentinel-only queries; results are concatenated across chunks."""
    # Each chunk's frame is indexed from 0 (one frame per Sentinel query);
    # keep these indices unless the query behaviour itself changes.
    chunk_frames = [
        pd.DataFrame(["MCM001"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0]),
        pd.DataFrame(["MCM003"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0]),
        pd.DataFrame(["MCM005"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0]),
    ]
    expected = pd.DataFrame(["MCM001", "MCM003", "MCM005"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0, 1, 2])
    root_sample_ids = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
    barcodes = ["123", "456"]
    with app.app_context(), patch("sqlalchemy.create_engine", return_value=Mock()):
        with patch("pandas.read_sql", side_effect=chunk_frames):
            returned_samples = get_cherrypicked_samples(root_sample_ids, barcodes, 2)
    pd.testing.assert_frame_equal(expected, returned_samples)
# Test Scenario
# - Actual database responses
# - Only the Sentinel queries return matches (No Beckman)
# - Chunking: multiple queries are made, with all matches contained in the sum of these queries
# - Duplication of returned matches across different chunks: duplicates should be filtered out
def test_get_cherrypicked_samples_repeat_tests_no_beckman(app, mlwh_sentinel_cherrypicked, event_wh_data):
    """Chunked Sentinel-only query against real fixture data; duplicates filtered."""
    # the following come from MLWH_SAMPLE_STOCK_RESOURCE in fixture_data
    root_sample_ids = ["root_1", "root_2", "root_1"]
    plate_barcodes = ["pb_1", "pb_2", "pb_3"]
    # root_1 will match 2 samples, but only one of those will match an event (on Sanger Sample Id)
    # therefore we only get 1 of the samples called 'root_1' back (the one on plate 'pb_1').
    # This also checks we don't get a duplicate row for root_1 / pb_1, despite it cropping up
    # in 2 different 'chunks'.
    expected_rows = [["root_1", "pb_1", "positive", "A1"], ["root_2", "pb_2", "positive", "A1"]]
    expected_columns = [FIELD_ROOT_SAMPLE_ID, FIELD_PLATE_BARCODE, "Result_lower", FIELD_COORDINATE]
    expected = pd.DataFrame(np.array(expected_rows), columns=expected_columns, index=[0, 1])
    with app.app_context():
        chunk_size = 2
        returned_samples = get_cherrypicked_samples(root_sample_ids, plate_barcodes, chunk_size)
        # (removed a leftover debug print of returned_samples)
        pd.testing.assert_frame_equal(expected, returned_samples)
# Test Scenario
# - Mocking database responses
# - Only the Beckman query returns matches (No Sentinel)
# - No chunking: a single query is made in which all matches are returned
# - No duplication of returned matches
def test_get_cherrypicked_samples_no_sentinel(app):
    """Single un-chunked query; only the Beckman workflow returns matches."""
    mock_frames = [
        # Cherrypicking query response
        pd.DataFrame(["MCM001", "MCM003", "MCM005"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0, 1, 2]),
    ]
    root_sample_ids = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
    barcodes = ["123", "456"]
    with app.app_context(), patch("sqlalchemy.create_engine", return_value=Mock()):
        with patch("pandas.read_sql", side_effect=mock_frames):
            returned_samples = get_cherrypicked_samples(root_sample_ids, barcodes)
    assert returned_samples.at[0, FIELD_ROOT_SAMPLE_ID] == "MCM001"
    assert returned_samples.at[1, FIELD_ROOT_SAMPLE_ID] == "MCM003"
    assert returned_samples.at[2, FIELD_ROOT_SAMPLE_ID] == "MCM005"
# Test Scenario
# - Mocking database responses
# - Only the Beckman queries return matches (No Sentinel)
# - Chunking: multiple queries are made, with all matches contained in the sum of these queries
# - No duplication of returned matches
def test_get_cherrypicked_samples_chunking_no_sentinel(app):
    """Chunked Beckman-only queries; results are concatenated across chunks."""
    # Each chunk's frame is indexed from 0 (one frame per query); keep these
    # indices unless the query behaviour itself changes.
    chunk_frames = [
        pd.DataFrame(["MCM001"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0]),
        pd.DataFrame(["MCM003"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0]),
        pd.DataFrame(["MCM005"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0]),
    ]
    expected = pd.DataFrame(["MCM001", "MCM003", "MCM005"], columns=[FIELD_ROOT_SAMPLE_ID], index=[0, 1, 2])
    root_sample_ids = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005"]
    barcodes = ["123", "456"]
    with app.app_context(), patch("sqlalchemy.create_engine", return_value=Mock()):
        with patch("pandas.read_sql", side_effect=chunk_frames):
            returned_samples = get_cherrypicked_samples(root_sample_ids, barcodes, 2)
    pd.testing.assert_frame_equal(expected, returned_samples)
# Test Scenario
# - Actual database responses
# - Only the Beckman queries return matches (No Sentinel)
# - Chunking: multiple queries are made, with all matches contained in the sum of these queries
# - Duplication of returned matches across different chunks: duplicates should be filtered out
def test_get_cherrypicked_samples_repeat_tests_no_sentinel(app, mlwh_beckman_cherrypicked, event_wh_data):
    """Chunked Beckman-only query against real fixture data; duplicates filtered."""
    # fixture rows come from MLWH_SAMPLE_LIGHTHOUSE_SAMPLE in fixture_data
    root_sample_ids = ["root_4", "root_5", "root_4"]
    plate_barcodes = ["pb_4", "pb_5", "pb_6"]
    # root_4 matches 2 samples, but only the one on plate 'pb_4' matches an event (on sample
    # uuid); its appearance in 2 different 'chunks' must not produce a duplicate row
    expected = pd.DataFrame(
        np.array([["root_4", "pb_4", "positive", "A1"], ["root_5", "pb_5", "positive", "A1"]]),
        columns=[FIELD_ROOT_SAMPLE_ID, FIELD_PLATE_BARCODE, "Result_lower", FIELD_COORDINATE],
        index=[0, 1],
    )
    with app.app_context():
        returned_samples = get_cherrypicked_samples(root_sample_ids, plate_barcodes, 2)
    # the view may return rows in any order: sort and reindex before comparing
    resorted = returned_samples.sort_values(by=FIELD_ROOT_SAMPLE_ID, ignore_index=True)
    pd.testing.assert_frame_equal(expected, resorted)
# Test Scenario
# - Mocking database responses
# - Both Sentinel and Beckman queries return matches
# - No chunking: a single query is made (per workflow) in which all matches are returned
# - Duplication of returned matches across different workflows: duplicates should be filtered out
def test_get_cherrypicked_samples_sentinel_and_beckman(app):
    """Both workflows match in one un-chunked query; the shared match is deduplicated."""
    mock_frames = [
        pd.DataFrame(
            [
                # Sentinel rows
                "MCM001",
                "MCM006",
                # Beckman rows (MCM001 duplicates a Sentinel match)
                "MCM001",
                "MCM003",
                "MCM005",
            ],
            columns=[FIELD_ROOT_SAMPLE_ID],
            index=[0, 1, 2, 3, 4],
        ),
    ]
    root_sample_ids = ["MCM001", "MCM002", "MCM003", "MCM004", "MCM005", "MCM006"]
    barcodes = ["123", "456"]
    with app.app_context(), patch("sqlalchemy.create_engine", return_value=Mock()):
        with patch("pandas.read_sql", side_effect=mock_frames):
            returned_samples = get_cherrypicked_samples(root_sample_ids, barcodes)
    assert returned_samples.at[0, FIELD_ROOT_SAMPLE_ID] == "MCM001"
    assert returned_samples.at[1, FIELD_ROOT_SAMPLE_ID] == "MCM006"
    assert returned_samples.at[2, FIELD_ROOT_SAMPLE_ID] == "MCM003"
    assert returned_samples.at[3, FIELD_ROOT_SAMPLE_ID] == "MCM005"
# Test Scenario
# - Mocking database responses
# - Both Sentinel and Beckman queries return matches
# - Chunking: multiple queries are made (per workflow), with all matches contained in the sum
# - Duplication of returned matches across different workflows: duplicates should be filtered out
def test_get_cherrypicked_samples_chunking_sentinel_and_beckman(app):
# Note: This represents the results of three different (Sentinel, Beckman) sets of
    # database queries, each query getting indexed from 0. Do not change the
    # indices here unless you have modified the behaviour of the query.
query_results = [
pd.DataFrame(
[
# Sentinel
"MCM001",
# Beckman
"MCM001",
"MCM002",
],
columns=[FIELD_ROOT_SAMPLE_ID],
index=[0, 1, 2],
), # Cherrypicking info
pd.DataFrame(
[
# Sentinel
"MCM003",
# Beckman
"MCM003",
"MCM004",
],
columns=[FIELD_ROOT_SAMPLE_ID],
index=[0, 1, 2],
), # Cherrypicking info
pd.DataFrame(
[
# Sentinel
"MCM005",
# Beckman
"MCM005",
"MCM006",
],
columns=[FIELD_ROOT_SAMPLE_ID],
index=[0, 1, 2],
), # Cherrypicking info
]
expected = | |
define output pulses only in ticks of the
timebase.
"""
val = ctypes.c_double()
cfunc = lib_importer.windll.DAQmxGetCOCtrTimebaseRate
if cfunc.argtypes is None:
with cfunc.arglock:
if cfunc.argtypes is None:
cfunc.argtypes = [
lib_importer.task_handle, ctypes_byte_str,
ctypes.POINTER(ctypes.c_double)]
error_code = cfunc(
self._handle, self._name, ctypes.byref(val))
check_for_error(error_code)
return val.value
    @co_ctr_timebase_rate.setter
    def co_ctr_timebase_rate(self, val):
        cfunc = lib_importer.windll.DAQmxSetCOCtrTimebaseRate
        # Bind the C function's argtypes once, with double-checked locking.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.c_double]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_ctr_timebase_rate.deleter
    def co_ctr_timebase_rate(self):
        # Reset the counter timebase rate to its driver default.
        cfunc = lib_importer.windll.DAQmxResetCOCtrTimebaseRate
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_ctr_timebase_src(self):
        """
        str: Specifies the terminal of the timebase to use for the
            counter. Typically, NI-DAQmx uses one of the internal
            counter timebases when generating pulses. Use this property
            to specify an external timebase and produce custom pulse
            widths that are not possible using the internal timebases.
        """
        cfunc = lib_importer.windll.DAQmxGetCOCtrTimebaseSrc
        # Bind the C function's argtypes once, with double-checked locking.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.c_char_p, ctypes.c_uint]
        # Query-then-retrieve loop: first call returns the needed buffer size.
        temp_size = 0
        while True:
            val = ctypes.create_string_buffer(temp_size)
            size_or_code = cfunc(
                self._handle, self._name, val, temp_size)
            if is_string_buffer_too_small(size_or_code):
                # Buffer size must have changed between calls; check again.
                temp_size = 0
            elif size_or_code > 0 and temp_size == 0:
                # Buffer size obtained, use to retrieve data.
                temp_size = size_or_code
            else:
                break
        check_for_error(size_or_code)
        return val.value.decode('ascii')
    @co_ctr_timebase_src.setter
    def co_ctr_timebase_src(self, val):
        # Set the counter timebase source terminal.
        cfunc = lib_importer.windll.DAQmxSetCOCtrTimebaseSrc
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_ctr_timebase_src.deleter
    def co_ctr_timebase_src(self):
        # Reset the counter timebase source to its driver default.
        cfunc = lib_importer.windll.DAQmxResetCOCtrTimebaseSrc
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_data_xfer_mech(self):
        """
        :class:`nidaqmx.constants.DataTransferActiveTransferMode`:
            Specifies the data transfer mode for the device. For
            buffered operations, use DMA or USB Bulk. For non-buffered
            operations, use Polled.
        """
        val = ctypes.c_int()
        cfunc = lib_importer.windll.DAQmxGetCODataXferMech
        # Bind the C function's argtypes once, with double-checked locking.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(ctypes.c_int)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        # Wrap the raw int in the corresponding enum.
        return DataTransferActiveTransferMode(val.value)
    @co_data_xfer_mech.setter
    def co_data_xfer_mech(self, val):
        # Convert the enum member to its raw int value for the C API.
        val = val.value
        cfunc = lib_importer.windll.DAQmxSetCODataXferMech
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.c_int]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_data_xfer_mech.deleter
    def co_data_xfer_mech(self):
        # Reset the data transfer mode to its driver default.
        cfunc = lib_importer.windll.DAQmxResetCODataXferMech
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_data_xfer_req_cond(self):
        """
        :class:`nidaqmx.constants.OutputDataTransferCondition`:
            Specifies under what condition to transfer data from the
            buffer to the onboard memory of the device.
        """
        val = ctypes.c_int()
        cfunc = lib_importer.windll.DAQmxGetCODataXferReqCond
        # Bind the C function's argtypes once, with double-checked locking.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(ctypes.c_int)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        # Wrap the raw int in the corresponding enum.
        return OutputDataTransferCondition(val.value)
    @co_data_xfer_req_cond.setter
    def co_data_xfer_req_cond(self, val):
        # Convert the enum member to its raw int value for the C API.
        val = val.value
        cfunc = lib_importer.windll.DAQmxSetCODataXferReqCond
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.c_int]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_data_xfer_req_cond.deleter
    def co_data_xfer_req_cond(self):
        # Reset the transfer-request condition to its driver default.
        cfunc = lib_importer.windll.DAQmxResetCODataXferReqCond
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_enable_initial_delay_on_retrigger(self):
        """
        bool: Specifies whether to apply the initial delay to
            retriggered pulse trains.
        """
        val = c_bool32()
        cfunc = lib_importer.windll.DAQmxGetCOEnableInitialDelayOnRetrigger
        # Bind the C function's argtypes once, with double-checked locking.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(c_bool32)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        return val.value
    @co_enable_initial_delay_on_retrigger.setter
    def co_enable_initial_delay_on_retrigger(self, val):
        # Enable/disable the initial delay on retriggered pulse trains.
        cfunc = lib_importer.windll.DAQmxSetCOEnableInitialDelayOnRetrigger
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str, c_bool32]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_enable_initial_delay_on_retrigger.deleter
    def co_enable_initial_delay_on_retrigger(self):
        # Reset the retrigger initial-delay flag to its driver default.
        cfunc = lib_importer.windll.DAQmxResetCOEnableInitialDelayOnRetrigger
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_mem_map_enable(self):
        """
        bool: Specifies for NI-DAQmx to map hardware registers to the
            memory space of the application, if possible. Normally, NI-
            DAQmx maps hardware registers to memory accessible only to
            the kernel. Mapping the registers to the memory space of the
            application increases performance. However, if the
            application accesses the memory space mapped to the
            registers, it can adversely affect the operation of the
            device and possibly result in a system crash.
        """
        val = c_bool32()
        cfunc = lib_importer.windll.DAQmxGetCOMemMapEnable
        # Bind the C function's argtypes once, with double-checked locking.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(c_bool32)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        return val.value
    @co_mem_map_enable.setter
    def co_mem_map_enable(self, val):
        # Enable/disable register memory mapping for this channel.
        cfunc = lib_importer.windll.DAQmxSetCOMemMapEnable
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str, c_bool32]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_mem_map_enable.deleter
    def co_mem_map_enable(self):
        # Reset the memory-map flag to its driver default.
        cfunc = lib_importer.windll.DAQmxResetCOMemMapEnable
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_output_state(self):
        """
        :class:`nidaqmx.constants.Level`: Indicates the current state of
            the output terminal of the counter.
        """
        val = ctypes.c_int()
        cfunc = lib_importer.windll.DAQmxGetCOOutputState
        # Bind the C function's argtypes once, with double-checked locking.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(ctypes.c_int)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        # Wrap the raw int in the corresponding enum.
        return Level(val.value)
    @property
    def co_output_type(self):
        """
        :class:`nidaqmx.constants.UsageTypeCO`: Indicates how to define
            pulses generated on the channel.
        """
        val = ctypes.c_int()
        cfunc = lib_importer.windll.DAQmxGetCOOutputType
        # Bind the C function's argtypes once, with double-checked locking.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(ctypes.c_int)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        # Wrap the raw int in the corresponding enum.
        return UsageTypeCO(val.value)
    @property
    def co_prescaler(self):
        """
        int: Specifies the divisor to apply to the signal you connect to
            the counter source terminal. Pulse generations defined by
            frequency or time take this setting into account, but pulse
            generations defined by ticks do not. You should use a
            prescaler only when you connect an external signal to the
            counter source terminal and when that signal has a higher
            frequency than the fastest onboard timebase.
        """
        val = ctypes.c_uint()
        cfunc = lib_importer.windll.DAQmxGetCOPrescaler
        # Bind the C function's argtypes once, with double-checked locking.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(ctypes.c_uint)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        return val.value
    @co_prescaler.setter
    def co_prescaler(self, val):
        # Set the source-signal prescaler divisor.
        cfunc = lib_importer.windll.DAQmxSetCOPrescaler
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.c_uint]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_prescaler.deleter
    def co_prescaler(self):
        # Reset the prescaler to its driver default.
        cfunc = lib_importer.windll.DAQmxResetCOPrescaler
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
    @property
    def co_pulse_done(self):
        """
        bool: Indicates if the task completed pulse generation. Use this
            value for retriggerable pulse generation when you need to
            determine if the device generated the current pulse. For
            retriggerable tasks, when you query this property, NI-DAQmx
            resets it to False.
        """
        val = c_bool32()
        cfunc = lib_importer.windll.DAQmxGetCOPulseDone
        # Bind the C function's argtypes once, with double-checked locking.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(c_bool32)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        return val.value
    @property
    def co_pulse_duty_cyc(self):
        """
        float: Specifies the duty cycle of the pulses. The duty cycle of
            a signal is the width of the pulse divided by period. NI-
            DAQmx uses this ratio and the pulse frequency to determine
            the width of the pulses and the delay between pulses.
        """
        val = ctypes.c_double()
        cfunc = lib_importer.windll.DAQmxGetCOPulseDutyCyc
        # Bind the C function's argtypes once, with double-checked locking.
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.POINTER(ctypes.c_double)]
        error_code = cfunc(
            self._handle, self._name, ctypes.byref(val))
        check_for_error(error_code)
        return val.value
    @co_pulse_duty_cyc.setter
    def co_pulse_duty_cyc(self, val):
        # Set the pulse duty cycle (pulse width / period ratio).
        cfunc = lib_importer.windll.DAQmxSetCOPulseDutyCyc
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str,
                        ctypes.c_double]
        error_code = cfunc(
            self._handle, self._name, val)
        check_for_error(error_code)
    @co_pulse_duty_cyc.deleter
    def co_pulse_duty_cyc(self):
        # Reset the duty cycle to its driver default.
        cfunc = lib_importer.windll.DAQmxResetCOPulseDutyCyc
        if cfunc.argtypes is None:
            with cfunc.arglock:
                if cfunc.argtypes is None:
                    cfunc.argtypes = [
                        lib_importer.task_handle, ctypes_byte_str]
        error_code = cfunc(
            self._handle, self._name)
        check_for_error(error_code)
@property
def co_pulse_freq(self):
"""
float: Specifies the frequency of the pulses to generate. This
value is in the units you specify with
**co_pulse_freq_units** or when you create the channel.
"""
val = ctypes.c_double()
cfunc = | |
parameter is None. If the session
# option isn't set, then use the core's default reset type.
if reset_type is None:
if self.session.options.get('reset_type') is None:
reset_type = self.default_reset_type
else:
try:
# Convert session option value to enum.
resetOption = self.session.options.get('reset_type')
reset_type = cmdline.convert_reset_type(resetOption)
# The converted option will be None if the option value is 'default'.
if reset_type is None:
reset_type = self.default_reset_type
except ValueError:
reset_type = self.default_reset_type
else:
assert isinstance(reset_type, Target.ResetType)
# If the reset type is just SW, then use our default software reset type.
if reset_type is Target.ResetType.SW:
reset_type = self.default_software_reset_type
# Fall back to emulated sw reset if the vectreset is specified and the core doesn't support it.
if (reset_type is Target.ResetType.SW_VECTRESET) and (not self._supports_vectreset):
reset_type = Target.ResetType.SW_EMULATED
return reset_type
    def _perform_reset(self, reset_type):
        """! @brief Perform a reset of the specified type.

        @param reset_type A Target.ResetType member selecting hardware (HW), emulated
            (SW_EMULATED), or software register-based (SW_SYSRESETREQ / SW_VECTRESET) reset.
        @exception InternalError Raised for an unhandled reset type.
        """
        assert isinstance(reset_type, Target.ResetType)
        if reset_type is Target.ResetType.HW:
            # Tell DP to not send reset notifications because we are doing it.
            self.session.target.dp.reset(send_notifications=False)
        elif reset_type is Target.ResetType.SW_EMULATED:
            self._perform_emulated_reset()
        else:
            # Pick the AIRCR bit corresponding to the requested software reset.
            if reset_type is Target.ResetType.SW_SYSRESETREQ:
                mask = CortexM.NVIC_AIRCR_SYSRESETREQ
            elif reset_type is Target.ResetType.SW_VECTRESET:
                mask = CortexM.NVIC_AIRCR_VECTRESET
            else:
                raise exceptions.InternalError("unhandled reset type")
            # Transfer errors are ignored on the AIRCR write for resets. On a few systems, the reset
            # apparently happens so quickly that we can't even finish the SWD transaction.
            try:
                self.write_memory(CortexM.NVIC_AIRCR, CortexM.NVIC_AIRCR_VECTKEY | mask)
                # Without a flush a transfer error can occur
                self.flush()
            except exceptions.TransferError:
                self.flush()
        # Post reset delay.
        sleep(self.session.options.get('reset.post_delay'))
    def _post_reset_core_accessibility_test(self):
        """! @brief Wait for the system to come out of reset and this core to be accessible.
        Keep reading the DHCSR until we get a good response with S_RESET_ST cleared, or we time out. There's nothing
        we can do if the test times out, and in fact if this is a secondary core on a multicore system then timing out
        is almost guaranteed.
        A 'reset.core_recover.timeout' option value of 0 disables the test entirely.
        """
        recover_timeout = self.session.options.get('reset.core_recover.timeout')
        if recover_timeout == 0:
            return
        with timeout.Timeout(recover_timeout, self._RESET_RECOVERY_SLEEP_INTERVAL) as time_out:
            # dhcsr stays None until the first successful read, which lets the timeout
            # path below distinguish "never accessible" from "still in reset".
            dhcsr = None
            while time_out.check():
                try:
                    dhcsr = self.read32(CortexM.DHCSR)
                    if (dhcsr & CortexM.S_RESET_ST) == 0:
                        break
                except exceptions.TransferError:
                    # Ignore errors caused by flushing.
                    try:
                        self.flush()
                    except exceptions.TransferError:
                        pass
            else:
                # while/else: this branch runs only when the loop exhausted the timeout
                # without hitting the break above.
                # If dhcsr is None then we know that we never were able to read the register.
                if dhcsr is None:
                    LOG.warning("Core #%d is not accessible after reset", self.core_number)
                else:
                    LOG.debug("Core #%d did not come out of reset within timeout", self.core_number)
    def reset(self, reset_type=None):
        """! @brief Reset the core.
        The reset method is selectable via the reset_type parameter as well as the reset_type
        session option. If the reset_type parameter is not specified or None, then the reset_type
        option will be used. If the option is not set, or if it is set to a value of 'default',
        then the core's default_reset_type property value is used. So, the session option
        overrides the core's default, while the parameter overrides everything.
        Note that only v7-M cores support the `VECTRESET` software reset method. If this method
        is chosen but the core doesn't support it, then the reset method will fall back to an
        emulated software reset.
        After a call to this function, the core is running.
        @param self The core.
        @param reset_type Optional Target.ResetType enumerator, or None to use the session
            option / core default.
        """
        reset_type = self._get_actual_reset_type(reset_type)
        LOG.debug("reset, core %d, type=%s", self.core_number, reset_type.name)
        self.session.notify(Target.Event.PRE_RESET, self)
        # Bump the run token so clients can detect that the core has run since they last checked.
        self._run_token += 1
        # Give the delegate a chance to override reset. If the delegate returns True, then it
        # handled the reset on its own.
        if not self.call_delegate('will_reset', core=self, reset_type=reset_type):
            self._perform_reset(reset_type)
        # Post reset recovery tests.
        # We only need to test accessibility after reset for system-level resets.
        # If a hardware reset is being used, then the DP will perform its post-reset recovery for us. Out of the
        # other reset types, only a system-level reset by SW_SYSRESETREQ requires us to ensure the DP reset
        # recovery is performed; VECTRESET and emulated resets only affect this core, not the whole system.
        if reset_type is Target.ResetType.SW_SYSRESETREQ:
            self.ap.dp.post_reset_recovery()
        if reset_type in (Target.ResetType.HW, Target.ResetType.SW_SYSRESETREQ):
            # Now run the core accessibility test.
            self._post_reset_core_accessibility_test()
        self.call_delegate('did_reset', core=self, reset_type=reset_type)
        self.session.notify(Target.Event.POST_RESET, self)
    def set_reset_catch(self, reset_type=None):
        """! @brief Prepare to halt core on reset.
        Gives the delegate first chance to handle reset catch; the delegate's result is
        saved so clear_reset_catch() knows whether to restore state. The default
        behaviour halts the core, saves DEMCR, and enables the reset vector catch bit.
        @param self The core.
        @param reset_type Optional Target.ResetType enumerator, forwarded to the delegate.
        """
        LOG.debug("set reset catch, core %d", self.core_number)
        self._reset_catch_delegate_result = self.call_delegate('set_reset_catch', core=self, reset_type=reset_type)
        # Default behaviour if the delegate didn't handle it.
        if not self._reset_catch_delegate_result:
            # Halt the target.
            self.halt()
            # Save CortexM.DEMCR so clear_reset_catch() can restore the original value.
            self._reset_catch_saved_demcr = self.read_memory(CortexM.DEMCR)
            # Enable reset vector catch if needed.
            if (self._reset_catch_saved_demcr & CortexM.DEMCR_VC_CORERESET) == 0:
                self.write_memory(CortexM.DEMCR, self._reset_catch_saved_demcr | CortexM.DEMCR_VC_CORERESET)
def clear_reset_catch(self, reset_type=None):
"""! @brief Disable halt on reset."""
LOG.debug("clear reset catch, core %d", self.core_number)
self.call_delegate('clear_reset_catch', core=self, reset_type=reset_type)
if not self._reset_catch_delegate_result:
# restore vector catch setting
self.write_memory(CortexM.DEMCR, self._reset_catch_saved_demcr)
    def reset_and_halt(self, reset_type=None):
        """! @brief Perform a reset and stop the core on the reset handler.
        @param self The core.
        @param reset_type Optional Target.ResetType enumerator, or None to use the
            session option / core default.
        """
        # Set up reset catch.
        self.set_reset_catch(reset_type)
        # Perform the reset.
        self.reset(reset_type)
        # wait until the unit resets
        with timeout.Timeout(self.session.options.get('reset.halt_timeout')) as t_o:
            while t_o.check():
                if self.get_state() not in (Target.State.RESET, Target.State.RUNNING):
                    break
                sleep(0.01)
            else:
                # while/else: the loop timed out without ever seeing a halted/sleeping/lockup state.
                LOG.warning("Timed out waiting for core to halt after reset (state is %s)", self.get_state().name)
        # Make sure the thumb bit is set in XPSR in case the reset handler
        # points to an invalid address. Only do this if the core is actually halted, otherwise we
        # can't access XPSR.
        if self.get_state() == Target.State.HALTED:
            xpsr = self.read_core_register('xpsr')
            if xpsr & self.XPSR_THUMB == 0:
                self.write_core_register('xpsr', xpsr | self.XPSR_THUMB)
        # Restore to original state.
        self.clear_reset_catch(reset_type)
    def get_state(self):
        """! @brief Return the current Target.State of the core.
        Reads DHCSR and maps its status bits to a state, with priority order
        RESET, LOCKUP, SLEEPING, HALTED, then RUNNING.
        @return A Target.State enumerator.
        """
        dhcsr = self.read_memory(CortexM.DHCSR)
        if dhcsr & CortexM.S_RESET_ST:
            # Reset is a special case because the bit is sticky and really means
            # "core was reset since last read of DHCSR". We have to re-read the
            # DHCSR, check if S_RESET_ST is still set and make sure no instructions
            # were executed by checking S_RETIRE_ST.
            newDhcsr = self.read_memory(CortexM.DHCSR)
            if (newDhcsr & CortexM.S_RESET_ST) and not (newDhcsr & CortexM.S_RETIRE_ST):
                return Target.State.RESET
        if dhcsr & CortexM.S_LOCKUP:
            return Target.State.LOCKUP
        elif dhcsr & CortexM.S_SLEEP:
            return Target.State.SLEEPING
        elif dhcsr & CortexM.S_HALT:
            return Target.State.HALTED
        else:
            # No status bit set means the core is executing normally.
            return Target.State.RUNNING
def get_security_state(self):
"""! @brief Returns the current security state of the processor.
@return @ref pyocd.core.target.Target.SecurityState "Target.SecurityState" enumerator. For
v6-M and v7-M cores, SecurityState.NONSECURE is always returned.
"""
return Target.SecurityState.NONSECURE
    @property
    def run_token(self):
        """! @brief Monotonic counter incremented each time the core is reset or resumed.
        Clients can compare saved tokens against the current value to detect that the
        core has run in the meantime.
        """
        return self._run_token
def is_running(self):
return self.get_state() == Target.State.RUNNING
def is_halted(self):
return self.get_state() == Target.State.HALTED
    def resume(self):
        """! @brief Resume execution of the core.
        Does nothing (beyond a debug log) unless the core is currently halted.
        Sends PRE_RUN/POST_RUN notifications around the actual resume.
        """
        if self.get_state() != Target.State.HALTED:
            LOG.debug('cannot resume: target not halted')
            return
        LOG.debug("resuming core %d", self.core_number)
        self.session.notify(Target.Event.PRE_RUN, self, Target.RunType.RESUME)
        # Bump the run token so clients can detect that the core has run.
        self._run_token += 1
        self.clear_debug_cause_bits()
        # Clearing C_HALT (writing DBGKEY | C_DEBUGEN only) starts the core running.
        self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN)
        self.flush()
        self.session.notify(Target.Event.POST_RUN, self, Target.RunType.RESUME)
    def find_breakpoint(self, addr):
        """! @brief Look up the breakpoint set at the given address.
        Delegates to the breakpoint manager; presumably returns the breakpoint object
        or None when no breakpoint exists at @a addr — confirm against bp_manager.
        """
        return self.bp_manager.find_breakpoint(addr)
def check_reg_list(self, reg_list):
"""! @brief Sanity check register values and raise helpful errors."""
for reg in reg_list:
if reg not in self.core_registers.by_index:
# Invalid register, try to give useful error. An invalid name will already
# have raised a KeyError above.
info = CortexMCoreRegisterInfo.get(reg)
if info.is_fpu_register and (not self.has_fpu):
raise KeyError("attempt to read FPU register %s without FPU", info.name)
else:
raise KeyError("register %s not available in this CPU", info.name)
def read_core_register(self, reg):
"""! @brief Read one core register.
The core must be halted or reads will fail.
@param self The core.
@param reg Either the register's name in lowercase or an integer register index.
@return The current value of the register. Most core registers return an integer value,
while the floating point single and double precision register return a float value.
@exception KeyError Invalid or unsupported register was requested.
@exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to
read the register.
"""
reg_info = CortexMCoreRegisterInfo.get(reg)
regValue = self.read_core_register_raw(reg_info.index)
return reg_info.from_raw(regValue)
def read_core_register_raw(self, reg):
"""! @brief Read a core register without type conversion.
The core must be halted or reads will fail.
@param self The core.
@param reg Either the register's name in lowercase or an integer register index.
@return The current integer value of the register. Even float register values are returned
as integers (thus the "raw").
@exception KeyError Invalid or unsupported register was requested.
@exception @ref pyocd.core.exceptions.CoreRegisterAccessError "CoreRegisterAccessError" Failed to
read the register.
"""
vals = self.read_core_registers_raw([reg])
| |
python index +1
# return -1 for invalid:
if model_list is None:
return -1
# return 1 if only one choice:
if len(model_list) == 1:
return 0 + 1
# model list is assumed to be in order simple -> complex.
# philosophy: prefer simpler models.
aic = vget_aic(model_list, error=np.nan)
# if all nans, then return -1 (invalid)
if np.all(np.isnan(aic)):
return -1
# print(np.array(aic)-aic[0])
# now we have different ways of choosing based on if 2 or 3 models:
# TODO: generalize to more than 3. Should be easy enough, given the
# explanation of the algorithm in the docstring.
if len(model_list) == 2:
if (aic[1] - aic[0]) < d_aic:
return (
1 + 1
) # these +1's are for translation to human interaction indexing....
else:
return 0 + 1
if len(model_list) == 3:
if (aic[1] - aic[0]) < d_aic:
# True, this means 2 gaussians better than 1.
# Eliminates id 0 as option and do more tests:
if (aic[2] - aic[1]) < d_aic:
# True, this means 3 gaussians better than 2. choose this.
return 2 + 1
else:
return 1 + 1
else:
# False, this means 2 gaussians not better than 1.
# Eliminates id 1 as option and do more tests:
if (aic[2] - aic[0]) < d_aic:
# True, this means 3 gaussians better than 1. choose this.
return 2 + 1
else:
return 0 + 1
# safest thing to return is 0 i guess?
return 0 + 1
def choose_model_aic(model_list, d_aic=-150):
    """Broadcast :func:`choose_model_aic_single` over array.
    Parameters
    ----------
    model_list : array-like, containing :class:`lmfit.model.ModelResult`
        Array whose leading dimensions are spatial and whose last dimension
        holds the different models fitted to that spaxel. A plain 1-d list of
        model results for a single pixel is also accepted.
    d_aic : float, optional
        The change in fit aic (Akaike Information Criterion) indicating
        a significantly better fit, by default -150.
    Returns
    -------
    array of shape model_list.shape[:-1] containing int, or int
        Spatial array containing the chosen model number, starting with 1.
        invalid entries are given the value -1.
    See Also
    --------
    :func:`choose_model_aic_single` : A detailed discussion of this function.
    """
    model_list = np.array(model_list)
    # A 1-d input is a single pixel's model list: no spatial loop required.
    if model_list.ndim == 1:
        return choose_model_aic_single(model_list, d_aic=d_aic)
    # Otherwise iterate every spatial index and choose per spaxel.
    spatial_shape = model_list.shape[:-1]
    chosen = np.empty(spatial_shape, dtype=int)
    for idx in np.ndindex(*spatial_shape):
        chosen[idx] = choose_model_aic_single(model_list[idx], d_aic=d_aic)
    return chosen
def marginal_fits(fit_list, choices, flux=0.25, dmu=0.5):
    """Determine which fits should be inspected by hand.
    We noticed at times that when fitting multiple gaussians, there was often
    a case of an "embedded" gaussian. This is when a small flux narrow
    gaussian was fit at the same center wavelength as the highest flux
    gaussian. This function is intended to identify those cases and ask the
    user to verify that this is actually the desired fit.
    This function analyzes the selected model (provided by the combination of
    `fit_list` and `choices`), and determines if user inspection is needed based
    on the relative characteristics of the gaussians. This procedure depends on
    the analyzed components having parameter names ending in "flux" and "center",
    and was originally programmed to analyze a multi-gaussian model.
    Note that ("amplitude" is used as a fallback for "flux",
    because lmfit's gaussian use this name to denote integrated flux).
    The "main" gaussian is selected as the model
    component with highest flux, lets call this main gaussian `g0`.
    For the other components `g#`, we compute `g#_flux/g0_flux` and
    `g#_center` - `g0_center`. If any component has both the following:
    * `g#_flux/g0_flux < flux`
    * `g#_center - g0_center < dmu`
    then that fit will be flagged for examination.
    Parameters
    ----------
    fit_list : array-like of :class:`lmfit.model.ModelResult`
        This assumes a spatial array (n,m) of ModelResults, with an outer dimension
        varying in the model used for that spaxel. e.g. shape = (3,n,m) for 3
        different models.
    choices : array-like of shape `fit_list[0]` containing int
        This will be used to select which model for a given spaxel will be
        analyzed in this function. For example, if there are 3 models, the value
        for choices must be 1,2,or 3 (or negative to indicate invalid).
    flux : float, optional
        The gaussian flux ratio to main gaussian component indicating that
        this should be inspected by hand, by default 0.25
    dmu : float, optional
        dmu in my head meant delta mu, the change in the x center between a
        gaussian component and the main gaussian, by default 0.5.
    Returns
    -------
    numpy array of boolean
        Array of same shape as choices, where True means the user should inspect
        this spaxel's fit, and False means this spaxel should be okay with the
        automatic guessing.
    """
    # returns boolean array of shape choices.
    # True indicates you need to manually look at them.
    # Python starts counting at 0 (0-based indexing.). choices starts counting at 1.
    # subtract 1 from choices to convert between these 2 indexing regimes.
    # NOTE: mode="clip" means negative (invalid) choices silently select model 0;
    # such pixels are expected to hold None and are filtered below.
    chosen_models = np.choose(choices - 1, fit_list, mode="clip")
    output = np.empty_like(choices, dtype=bool)
    # get the gaussian component comparison for the chosen models.
    # 2d index iteration.
    for index, modelresult in np.ndenumerate(chosen_models):
        if (
            modelresult is None
        ):  # this means pixel was not fit, because it didn't pass snr test.
            # therefore user does not need to check.
            output[index] = False
            continue
        # if chosen model fit has not succeeded, then user checks.
        if not modelresult.success:
            output[index] = True
            continue  # continues to next iteration in the for loop.
        # if model is a single gaussian, user does not need to check.
        if get_ngaussians(modelresult) == 1:
            output[index] = False
            continue
        # more than 1 gaussian component:
        # do tests based on flux and dmu parameters.
        # array of [g_flux/g0_flux, g_center - g0_center]
        components = get_gcomponent_comparison(modelresult)
        # test if the conditions are met for any component.
        # if component[0] < flux AND component[1] < dmu, then user must decide.
        # (set to True here.)
        user_decides = False
        for component in components:
            # Note the center offset is compared by absolute value.
            if (component[0] < flux) and (np.abs(component[1]) < dmu):
                user_decides = True
                break  # stop the inner loop because we know the answer already.
        output[index] = user_decides
    return output
def get_gcomponent_comparison(fit):
    """Compare every gaussian component against the highest-flux gaussian.
    The gaussian with the largest flux is taken as the reference (call it g0).
    For every other gaussian component g#, the pair
    [g#_flux / g0_flux, g#_center - g0_center] is computed.
    Parameters
    ----------
    fit : :class:`lmfit.model.ModelResult`
        The ModelResult whose components are analyzed. The flux parameter name
        falls back to "amplitude" when "<prefix>flux" is absent (lmfit's
        built-in gaussian uses "amplitude" for the integrated flux).
    Returns
    -------
    list of [float, float]
        One [flux ratio, center offset] entry per non-reference gaussian
        component; an empty list when there is only one gaussian.
    """
    gauss_prefixes = [c.prefix for c in fit.components if "gaussian" in c._name]
    # With a single gaussian there is nothing to compare against.
    if len(gauss_prefixes) == 1:
        return []
    # Gather flux (with amplitude fallback) and center per component.
    fluxes = np.array([
        fit.params.get(p + "flux", fit.params.get(p + "amplitude")).value
        for p in gauss_prefixes
    ])
    mus = np.array([fit.params[p + "center"].value for p in gauss_prefixes])
    # Reference gaussian = the one with maximum flux; drop it from the output.
    main = np.argmax(fluxes)
    ratios = np.delete(fluxes / fluxes[main], main)
    offsets = np.delete(mus - mus[main], main)
    # Row k of the result is [flux_k/flux_main, center_k - center_main].
    return np.column_stack([ratios, offsets])
def get_ngaussians(fit):
"""Determine the number of gaussians in a :class:`lmfit.model.Model`.
Parameters
----------
fit : :class:`lmfit.model.Model` or :class:`lmfit.model.ModelResult`
The model to analyze.
Returns
-------
int
The number of components whose name contains "gaussian".
"""
return | |
self.invertImage)
_count = 0
for i in range(0, len(self.logic.imagePoints)):
_count = _count + len(self.logic.imagePoints[i])
self.labelPointsCollected.text = _count
elif self.intrinsicArucoButton.checked:
ret = self.logic.findAruco(im, self.invertImage)
_count = 0
for i in range(0, len(self.logic.arucoCorners)):
_count = _count + len(self.logic.arucoCorners[i])
self.labelPointsCollected.text = _count
elif self.intrinsicCharucoButton.checked:
ret = self.logic.findCharuco(im, self.invertImage)
_count = 0
for i in range(0, len(self.logic.charucoCorners)):
_count = _count + len(self.logic.charucoCorners[i])
self.labelPointsCollected.text = _count
else:
pass
if ret:
self.labelResult.text = "Success (" + str(self.logic.countIntrinsics()) + ")"
else:
self.labelResult.text = "Failure."
def onIntrinsicModeChanged(self):
if self.intrinsicCheckerboardButton.checked:
self.checkerboardContainer.enabled = True
self.squareSizeDoubleSpinBox.enabled = True
self.flagsContainer.enabled = True
self.checkerboardFlags.enabled = True
self.circleGridFlags.enabled = False
self.arucoDictContainer.enabled = False
self.arucoContainer.enabled = False
self.charucoContainer.enabled = False
self.logic.calculateObjectPattern(self.rowsSpinBox.value, self.columnsSpinBox.value, 'checkerboard', self.squareSizeDoubleSpinBox.value, 0)
elif self.intrinsicCircleGridButton.checked:
self.checkerboardContainer.enabled = True
self.squareSizeDoubleSpinBox.enabled = False
self.flagsContainer.enabled = True
self.checkerboardFlags.enabled = False
self.circleGridFlags.enabled = True
self.arucoDictContainer.enabled = False
self.arucoContainer.enabled = False
self.charucoContainer.enabled = False
self.logic.calculateObjectPattern(self.rowsSpinBox.value, self.columnsSpinBox.value, 'circlegrid', self.squareSizeDoubleSpinBox.value, 0)
elif self.intrinsicArucoButton.checked:
self.checkerboardContainer.enabled = True
self.squareSizeDoubleSpinBox.enabled = False
self.flagsContainer.enabled = False
self.checkerboardFlags.enabled = False
self.circleGridFlags.enabled = False
self.arucoDictContainer.enabled = True
self.arucoContainer.enabled = True
self.charucoContainer.enabled = False
self.logic.calculateObjectPattern(self.rowsSpinBox.value, self.columnsSpinBox.value, 'aruco', self.arucoMarkerSizeSpinBox.value, self.arucoMarkerSeparationSpinBox.value)
elif self.intrinsicCharucoButton.checked:
self.checkerboardContainer.enabled = True
self.squareSizeDoubleSpinBox.enabled = False
self.flagsContainer.enabled = False
self.checkerboardFlags.enabled = False
self.circleGridFlags.enabled = False
self.arucoDictContainer.enabled = True
self.arucoContainer.enabled = False
self.charucoContainer.enabled = True
self.logic.calculateObjectPattern(self.rowsSpinBox.value, self.columnsSpinBox.value, 'charuco', self.charucoSquareSizeSpinBox.value, self.charucoMarkerSizeSpinBox.value)
else:
pass
  def onStylusTipTransformSelected(self):
    """Switch the observed stylus tip transform node when the selector changes.
    Removes the observer from the previously watched node (if any), starts observing
    TransformModifiedEvent on the new selection, and refreshes UI enable states.
    """
    if self.stylusTipTransformObserverTag is not None:
      self.stylusTipTransformNode.RemoveObserver(self.stylusTipTransformObserverTag)
      self.stylusTipTransformObserverTag = None
    self.stylusTipTransformNode = self.stylusTipTransformSelector.currentNode()
    if self.stylusTipTransformNode is not None:
      self.stylusTipTransformObserverTag = self.stylusTipTransformNode.AddObserver(slicer.vtkMRMLTransformNode.TransformModifiedEvent, self.onStylusTipTransformModified)
    self.updateUI()
  def onCalibrateButtonClicked(self):
    """Run pinhole camera calibration and store the result on the current camera node.
    On success, copies the intrinsic matrix, distortion coefficients, and reprojection
    error into the node selected in the intrinsics widget and reports the error in the
    result label. On failure nothing is updated or reported.
    """
    done, error, mtx, dist = self.logic.calibratePinholeCamera()
    if done:
      self.videoCameraIntrinWidget.GetCurrentNode().SetAndObserveIntrinsicMatrix(mtx)
      self.videoCameraIntrinWidget.GetCurrentNode().SetNumberOfDistortionCoefficients(dist.GetNumberOfValues())
      # Copy each distortion coefficient into the camera node.
      for i in range(0, dist.GetNumberOfValues()):
        self.videoCameraIntrinWidget.GetCurrentNode().SetDistortionCoefficientValue(i, dist.GetValue(i))
      self.videoCameraIntrinWidget.GetCurrentNode().SetReprojectionError(error)
      self.labelResult.text = "Calibration reprojection error: " + str(error) + "."
  @vtk.calldata_type(vtk.VTK_OBJECT)
  def onStylusTipTransformModified(self, caller, event):
    """Update tracking-status UI whenever the stylus tip transform changes.
    An identity world transform is treated as "not tracking": the status icon shows
    the not-OK pixmap and manual capture is disabled. Any other transform enables it.
    """
    mat = vtk.vtkMatrix4x4()
    self.stylusTipTransformNode.GetMatrixTransformToWorld(mat)
    if PinholeCameraCalibrationWidget.areSameVTK4x4(mat, self.IdentityMatrix):
      self.stylusTipTransformStatusLabel.setPixmap(self.notOkPixmap)
      self.manualButton.enabled = False
    else:
      self.stylusTipTransformStatusLabel.setPixmap(self.okPixmap)
      self.manualButton.enabled = True
def updateUI(self):
self.capIntrinsicButton.enabled = self.imageSelector.currentNode() is not None \
and self.videoCameraSelector.currentNode() is not None
self.intrinsicsContainer.enabled = self.imageSelector.currentNode() is not None \
and self.videoCameraSelector.currentNode() is not None
self.trackerContainer.enabled = self.imageSelector.currentNode() is not None \
and self.stylusTipTransformSelector.currentNode() is not None \
and self.videoCameraSelector.currentNode() is not None \
and self.canSelectFiducials
def onProcessingModeChanged(self):
if self.manualModeButton.checked:
self.manualButton.setVisible(True)
self.semiAutoButton.setVisible(False)
self.autoButton.setVisible(False)
self.autoSettingsContainer.setVisible(False)
elif self.semiAutoModeButton.checked:
self.manualButton.setVisible(False)
self.semiAutoButton.setVisible(True)
self.autoButton.SetVisible(False)
self.autoSettingsContainer.setVisible(True)
else:
self.manualButton.setVisible(False)
self.semiAutoButton.setVisible(False)
self.autoButton.setVisible(True)
self.autoSettingsContainer.setVisible(True)
  def endManualCapturing(self):
    """Finish (or cancel) a manual capture: unfreeze video and restore the UI.
    Restores the live volume to the red slice view, removes the frozen copy node,
    and re-enables the input controls disabled by onManualButton().
    """
    self.isManualCapturing = False
    self.manualButton.setText('Capture')
    # Resume playback
    slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID(self.centerFiducialSelectionNode.GetID())
    slicer.mrmlScene.RemoveNode(self.copyNode)
    self.copyNode = None
    # Re-enable UI
    self.inputsContainer.setEnabled(True)
    self.resetPtLButton.setEnabled(True)
  def onManualButton(self):
    """Toggle manual capture: freeze the current frame and start fiducial placement.
    If a capture is already active, this acts as a cancel button. Otherwise it records
    the stylus tip pose, freezes the current video frame into a copy volume node, and
    starts markup placement so the user can click the sphere center.
    """
    if self.isManualCapturing:
      # Cancel button hit
      self.endManualCapturing()
      slicer.modules.annotations.logic().StopPlaceMode()
      return()
    # Record tracker data at time of freeze and store
    self.stylusTipTransformSelector.currentNode().GetMatrixTransformToWorld(self.stylusTipToPinholeCamera)
    # Make a copy of the volume node (aka freeze cv capture) to allow user to play with detection parameters or click on center
    self.centerFiducialSelectionNode = slicer.mrmlScene.GetNodeByID(slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceCompositeNode().GetBackgroundVolumeID())
    self.copyNode = slicer.mrmlScene.CopyNode(self.centerFiducialSelectionNode)
    # Deep-copy the image data so later live frames don't overwrite the frozen frame.
    imData = vtk.vtkImageData()
    imData.DeepCopy(self.centerFiducialSelectionNode.GetImageData())
    self.copyNode.SetAndObserveImageData(imData)
    self.copyNode.SetName('FrozenImage')
    slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceCompositeNode().SetBackgroundVolumeID(self.copyNode.GetID())
    # Initiate fiducial selection
    self.markupsNode = slicer.vtkMRMLMarkupsFiducialNode()
    slicer.mrmlScene.AddNode(self.markupsNode)
    self.markupsNode.SetName('SphereCenter')
    self.markupsLogic.SetActiveListID(self.markupsNode)
    self.markupsLogic.StartPlaceMode(False)
    self.pointModifiedObserverTag = self.markupsNode.AddObserver(slicer.vtkMRMLMarkupsNode.PointModifiedEvent, self.onPointModified)
    # Disable input changing while capture is active
    self.inputsContainer.setEnabled(False)
    self.resetPtLButton.setEnabled(False)
    self.isManualCapturing = True
    self.manualButton.setText('Cancel')
  @vtk.calldata_type(vtk.VTK_INT)
  def onPointModified(self, caller, event, callData):
    """Handle placement of the sphere-center fiducial during manual capture.
    Once the control point position is defined, ends the capture, converts the clicked
    pixel into an undistorted ray in camera space, and records the (stylus tip point,
    ray origin, ray direction) triple for point-to-line registration. When enough
    points are collected, runs the registration and reports the result.
    """
    if callData is None:
      return()
    if self.markupsNode.GetNthControlPointPositionStatus(callData) == slicer.vtkMRMLMarkupsNode.PositionDefined:
      self.endManualCapturing()
      # Calculate point and line pair
      arr = [0, 0, 0]
      self.markupsNode.GetNthControlPointPosition(callData, arr)
      point = np.zeros((1, 1, 2), dtype=np.float64)
      # NOTE(review): abs() presumably converts the slice-view RAS coordinates to
      # positive pixel coordinates — confirm against the slice view orientation.
      point[0, 0, 0] = abs(arr[0])
      point[0, 0, 1] = abs(arr[1])
      # Get PinholeCamera parameters
      mtx = PinholeCameraCalibrationWidget.vtk3x3ToNumpy(self.videoCameraSelector.currentNode().GetIntrinsicMatrix())
      if self.videoCameraSelector.currentNode().GetNumberOfDistortionCoefficients() != 0:
        dist = np.asarray(np.zeros((1, self.videoCameraSelector.currentNode().GetNumberOfDistortionCoefficients()), dtype=np.float64))
        for i in range(0, self.videoCameraSelector.currentNode().GetNumberOfDistortionCoefficients()):
          dist[0, i] = self.videoCameraSelector.currentNode().GetDistortionCoefficientValue(i)
      else:
        dist = np.asarray([], dtype=np.float64)
      # Stylus tip position in camera space (translation column of the frozen pose).
      tip_cam = [self.stylusTipToPinholeCamera.GetElement(0, 3), self.stylusTipToPinholeCamera.GetElement(1, 3), self.stylusTipToPinholeCamera.GetElement(2, 3)]
      # Origin - defined in camera, typically 0,0,0
      origin_sen = np.asarray(np.zeros((3, 1), dtype=np.float64))
      for i in range(0, 3):
        origin_sen[i, 0] = self.videoCameraSelector.currentNode().GetCameraPlaneOffsetValue(i)
      # Calculate the direction vector for the given pixel (after undistortion)
      undistPoint = cv2.undistortPoints(point, mtx, dist, P=mtx)
      pixel = np.vstack((undistPoint[0].transpose(), np.array([1.0], dtype=np.float64)))
      # Find the inverse of the videoCamera intrinsic param matrix
      # Calculate direction vector by multiplying the inverse of the intrinsic param matrix by the pixel
      # NOTE(review): '*' here must be matrix multiplication, which assumes
      # vtk3x3ToNumpy returns np.matrix rather than ndarray — confirm.
      directionVec_sen = np.linalg.inv(mtx) * pixel / np.linalg.norm(np.linalg.inv(mtx) * pixel)
      # And add it to the list!)
      self.logic.addPointLinePair(tip_cam, origin_sen, directionVec_sen)
      if self.developerMode:
        self.rayList.append([tip_cam, origin_sen, directionVec_sen])
      countString = str(self.logic.countMarkerToSensor()) + "/" + str(self.captureCountSpinBox.value) + " points captured."
      if self.logic.countMarkerToSensor() >= self.captureCountSpinBox.value:
        # Enough points collected: run the point-to-line registration.
        result, videoCameraToImage, string = self.calcRegAndBuildString()
        if result and self.developerMode:
          # Developer diagnostics: reproject the stylus tip through the solved
          # transform and log it next to the undistorted click.
          for combination in self.rayList:
            logging.debug("x: " + str(combination[0]))
            logging.debug("origin: " + str(combination[1]))
            logging.debug("dir: " + str(combination[2]))
          trans = vtk.vtkTransform()
          trans.PostMultiply()
          trans.Identity()
          trans.Concatenate(self.stylusTipToPinholeCamera)
          trans.Concatenate(videoCameraToImage)
          posePosition = trans.GetPosition()
          xPrime = posePosition[0] / posePosition[2]
          yPrime = posePosition[1] / posePosition[2]
          u = (mtx[0, 0] * xPrime) + mtx[0, 2]
          v = (mtx[1, 1] * yPrime) + mtx[1, 2]
          logging.debug("undistorted point: " + str(undistPoint[0, 0, 0]) + "," + str(undistPoint[0, 0, 1]))
          logging.debug("u,v: " + str(u) + "," + str(v))
        self.trackerResultsLabel.text = countString + " " + string
      else:
        self.trackerResultsLabel.text = countString
      # Allow markups module some time to process the new markup, but then quickly delete it
      # Avoids VTK errors in log
      qt.QTimer.singleShot(10, self.removeMarkup)
def calcRegAndBuildString(self):
result, markerToSensor = self.logic.calculateMarkerToSensor()
string = ""
if result:
self.videoCameraSelector.currentNode().SetAndObserveMarkerToImageSensorTransform(markerToSensor)
string = "Registration complete. Error: " + str(self.logic.getErrorMarkerToSensor())
else:
string = "Registration failed."
return result, markerToSensor, string
def removeMarkup(self):
if self.markupsNode is not None:
self.markupsNode.RemoveObserver(self.pointModifiedObserverTag)
self.pointModifiedObserverTag = None
self.markupsNode.RemoveAllMarkups()
slicer.mrmlScene.RemoveNode(self.markupsNode)
self.markupsNode = None
  def onSemiAutoButton(self):
    """Handler for the semi-automatic capture button. Not yet implemented."""
    pass
  def onAutoButton(self):
    """Handler for the automatic capture button. Not yet implemented."""
    pass
  def onArucoDictChanged(self):
    """Push the newly selected aruco dictionary to the logic and rebuild the board."""
    self.logic.changeArucoDict(self.arucoDictComboBox.currentText)
    # Recompute the object pattern/board for the current mode with the new dictionary.
    self.onIntrinsicModeChanged()
# PinholeCameraCalibrationLogic
class PinholeCameraCalibrationLogic(ScriptedLoadableModuleLogic):
  def __init__(self):
    """Initialize calibration accumulation state.
    NOTE(review): ScriptedLoadableModuleLogic.__init__ is never called here —
    confirm whether the base class requires explicit initialization.
    """
    # Accumulated 3D pattern points and matching 2D detections, one entry per capture.
    self.objectPoints = []
    self.imagePoints = []
    # TODO logic should not have state, move these out to client (UI in this case)
    self.arucoDict = None
    self.arucoBoard = None
    # Per-capture aruco/charuco detections.
    self.arucoCorners = []
    self.arucoIDs = []
    self.arucoCount = []
    self.charucoCorners = []
    self.charucoIDs = []
    # OpenCV detection flags (set via setFlags).
    self.flags = 0
    self.imageSize = (0,0)
    # Pattern geometry (set via calculateObjectPattern).
    self.objPatternRows = 0
    self.objPatternColumns = 0
    self.objSize = 0
    # Half-size of the cornerSubPix search window, in pixels.
    self.subPixRadius = 5
    self.objPattern = None
    self.terminationCriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)
    self.pointToLineRegistrationLogic = slicer.vtkSlicerPointToLineRegistrationLogic()
    self.pointToLineRegistrationLogic.SetLandmarkRegistrationModeToRigidBody()
  def setTerminationCriteria(self, criteria):
    """Set the OpenCV termination criteria tuple used by sub-pixel refinement."""
    self.terminationCriteria = criteria
def calculateObjectPattern(self, rows, columns, type, param1, param2):
self.objPatternRows = rows
self.objPatternColumns = columns
self.objSize = param1
pattern_size = (self.objPatternColumns, self.objPatternRows)
self.objPattern = np.zeros((np.prod(pattern_size), 3), np.float32)
self.objPattern[:, :2] = np.indices(pattern_size).T.reshape(-1, 2)
self.objPattern *= param1
self.createBoard(type, param1, param2)
def createBoard(self, type, param1_mm, param2_mm):
if self.arucoDict is not None:
param1 = param1_mm * 0.001;
param2 = param2_mm * 0.001;
if type.find('charuco') != -1:
# square size, marker size
try:
self.arucoBoard = aruco.CharucoBoard_create(self.objPatternColumns, self.objPatternRows, param1, param2, self.arucoDict)
except:
pass
elif type.find('aruco') != -1:
# marker size, marker separation
try:
self.arucoBoard = aruco.GridBoard_create(self.objPatternColumns, self.objPatternRows, param1, param2, self.arucoDict)
except:
pass
  def setSubPixRadius(self, radius):
    """Set the half-size (pixels) of the cornerSubPix refinement search window."""
    self.subPixRadius = radius
def resetIntrinsic(self):
self.objectPoints = []
self.imagePoints = []
self.arucoCorners = []
self.arucoIDs = []
self.arucoCount = []
self.charucoCorners = []
self.charucoIDs = []
  def setFlags(self, flags):
    """Set the OpenCV flag bitmask used by the pattern detection functions."""
    self.flags = flags
def findCheckerboard(self, image, invert):
try:
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
except:
gray = image
self.imageSize = gray.shape[::-1]
if invert:
gray = cv2.bitwise_not(gray)
# Find the chess board corners
ret, corners = cv2.findChessboardCorners(gray, (self.objPatternColumns, self.objPatternRows), self.flags)
# If found, add object points, image points (after refining them)
if ret:
self.objectPoints.append(self.objPattern)
corners2 = cv2.cornerSubPix(gray, corners, (self.subPixRadius, self.subPixRadius), (-1, -1), self.terminationCriteria)
self.imagePoints.append(corners.reshape(-1,2))
return ret
def findCircleGrid(self, image, invert):
try:
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
except:
gray = image
self.imageSize = gray.shape[::-1]
if invert:
gray = cv2.bitwise_not(gray)
ret, centers = cv2.findCirclesGrid(gray, (self.objPatternRows, self.objPatternColumns), self.flags)
if ret:
self.objectPoints.append(self.objPattern)
self.imagePoints.append(centers)
string = "Success (" + str(self.logic.countIntrinsics()) + ")"
done, result, error, mtx, dist = self.logic.calibratePinholeCamera()
if done:
self.videoCameraIntrinWidget.GetCurrentNode().SetAndObserveIntrinsicMatrix(mtx)
self.videoCameraIntrinWidget.GetCurrentNode().SetNumberOfDistortionCoefficients(dist.GetNumberOfValues())
for i in range(0, dist.GetNumberOfValues()):
self.videoCameraIntrinWidget.GetCurrentNode().SetDistortionCoefficientValue(i, dist.GetValue(i))
string += ". Calibration reprojection error: " + str(error)
self.labelResult.text = string
else:
self.labelResult.text = "Failure."
return ret
def findAruco(self, image, invert):
    """Detect ArUco markers in ``image`` and accumulate their corners and
    IDs for a later aruco-board calibration.

    :param image: RGB image or an already-grayscale single-channel image.
    :param invert: If True, invert the grayscale image before detection.
    :return: True if at least one marker was detected, False otherwise.
    """
    if self.arucoDict is None or self.arucoBoard is None:
        return False
    try:
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    except cv2.error:
        # cvtColor rejects single-channel input; assume it is already gray.
        gray = image
    self.imageSize = gray.shape[::-1]
    if invert:
        gray = cv2.bitwise_not(gray)
    corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(gray, self.arucoDict)
    if len(corners) > 0:
        # BUG FIX: the original stored `ids` (an ndarray) on the first
        # frame and then called .append(corners[1]) / .append(corners[2])
        # on later frames — appending only the 2nd detected marker's
        # corners, storing *corners* as IDs, and raising AttributeError on
        # the ndarray. Accumulate every marker's corners and its ID in
        # plain lists instead; arucoCount keeps the per-frame marker count
        # (TODO confirm the calibration step concatenates these as
        # cv2.aruco.calibrateCameraAruco expects).
        self.arucoCorners.extend(corners)
        self.arucoIDs.extend(ids)
        self.arucoCount.append(len(ids))
    return len(corners) > 0
def findCharuco(self, image, invert):
if self.arucoDict is None or self.arucoBoard is None:
return False
try:
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
except:
gray = image
self.imageSize = gray.shape
if invert:
gray = cv2.bitwise_not(gray)
# SUB PIXEL CORNER DETECTION CRITERION
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(gray, self.arucoDict)
res = None
| |
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by HazzaCheng on 2019-09-26
import librosa
import numpy as np
import random
import keras.backend as K
from tensorflow.python.keras import Input
from tensorflow.python.keras.engine import InputLayer
from tensorflow.python.keras.engine import InputSpec
from tensorflow.python.keras.engine import Layer
from tensorflow.python.keras.layers import Wrapper,Dense,MaxPool2D
from tensorflow import keras
import numpy.linalg as nl
from scipy import interpolate
from scipy.spatial.distance import pdist, cdist, squareform
class LayerNormalization(keras.layers.Layer):
    """Layer Normalization (Ba et al., 2016).

    Normalizes the inputs over the last axis to zero mean and unit
    variance, then optionally applies a learned scale (``gamma``) and
    offset (``beta``).
    """
    def __init__(self,
                 center=True,
                 scale=True,
                 epsilon=None,
                 gamma_initializer='ones',
                 beta_initializer='zeros',
                 gamma_regularizer=None,
                 beta_regularizer=None,
                 gamma_constraint=None,
                 beta_constraint=None,
                 **kwargs):
        """Layer normalization layer
        See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf)
        :param center: Add an offset parameter if it is True.
        :param scale: Add a scale parameter if it is True.
        :param epsilon: Epsilon for calculating variance.
        :param gamma_initializer: Initializer for the gamma weight.
        :param beta_initializer: Initializer for the beta weight.
        :param gamma_regularizer: Optional regularizer for the gamma weight.
        :param beta_regularizer: Optional regularizer for the beta weight.
        :param gamma_constraint: Optional constraint for the gamma weight.
        :param beta_constraint: Optional constraint for the beta weight.
        :param kwargs:
        """
        super(LayerNormalization, self).__init__(**kwargs)
        self.supports_masking = True
        self.center = center
        self.scale = scale
        if epsilon is None:
            # Default epsilon: square of the backend float epsilon.
            epsilon = K.epsilon() * K.epsilon()
        self.epsilon = epsilon
        self.gamma_initializer = keras.initializers.get(gamma_initializer)
        self.beta_initializer = keras.initializers.get(beta_initializer)
        self.gamma_regularizer = keras.regularizers.get(gamma_regularizer)
        self.beta_regularizer = keras.regularizers.get(beta_regularizer)
        self.gamma_constraint = keras.constraints.get(gamma_constraint)
        self.beta_constraint = keras.constraints.get(beta_constraint)
        # Weight variables are created in build().
        self.gamma, self.beta = None, None

    def get_config(self):
        # Serialize constructor arguments so the layer can be re-created
        # from a saved model config.
        config = {
            'center': self.center,
            'scale': self.scale,
            'epsilon': self.epsilon,
            'gamma_initializer': keras.initializers.serialize(self.gamma_initializer),
            'beta_initializer': keras.initializers.serialize(self.beta_initializer),
            'gamma_regularizer': keras.regularizers.serialize(self.gamma_regularizer),
            'beta_regularizer': keras.regularizers.serialize(self.beta_regularizer),
            'gamma_constraint': keras.constraints.serialize(self.gamma_constraint),
            'beta_constraint': keras.constraints.serialize(self.beta_constraint),
        }
        base_config = super(LayerNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_output_shape(self, input_shape):
        # Normalization does not change the tensor shape.
        return input_shape

    def compute_mask(self, inputs, input_mask=None):
        # Pass any mask through unchanged.
        return input_mask

    def build(self, input_shape):
        # One gamma/beta value per feature on the last axis.
        shape = input_shape[-1:]
        if self.scale:
            self.gamma = self.add_weight(
                shape=shape,
                initializer=self.gamma_initializer,
                regularizer=self.gamma_regularizer,
                constraint=self.gamma_constraint,
                name='gamma',
            )
        if self.center:
            self.beta = self.add_weight(
                shape=shape,
                initializer=self.beta_initializer,
                regularizer=self.beta_regularizer,
                constraint=self.beta_constraint,
                name='beta',
            )
        super(LayerNormalization, self).build(input_shape)

    def call(self, inputs, training=None):
        # Normalize over the last axis only.
        mean = K.mean(inputs, axis=-1, keepdims=True)
        variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True)
        std = K.sqrt(variance + self.epsilon)
        outputs = (inputs - mean) / std
        if self.scale:
            outputs *= self.gamma
        if self.center:
            outputs += self.beta
        return outputs
#x = DropConnect(Dense(64, activation='relu'), prob=0.5)(x)
class DropConnectDense(Dense):
    """Dense layer with DropConnect: randomly drops individual *weights*
    (rather than activations) during training.

    Usage: ``DropConnectDense(64, activation='relu', prob=0.5)``.
    ``prob`` is the drop probability; outside (0, 1) the layer behaves
    like a plain Dense layer.
    """

    def __init__(self, *args, **kwargs):
        # Pop our extra keyword before delegating to Dense.
        self.prob = kwargs.pop('prob', 0.5)
        if 0. < self.prob < 1.:
            self.uses_learning_phase = True
        super(DropConnectDense, self).__init__(*args, **kwargs)

    def call(self, x, mask=None):
        # BUG FIX: the original referenced the Keras-1 attributes
        # ``self.W`` / ``self.b`` (which do not exist on a Keras-2 Dense)
        # and tested the bias *tensor* as a boolean flag. Keras 2 exposes
        # ``kernel`` / ``bias`` and the ``use_bias`` flag.
        if 0. < self.prob < 1.:
            # Drop weights only in the training phase.
            self.kernel = K.in_train_phase(K.dropout(self.kernel, self.prob), self.kernel)
            if self.use_bias:
                self.bias = K.in_train_phase(K.dropout(self.bias, self.prob), self.bias)
        # Same as the original Dense forward pass.
        output = K.dot(x, self.kernel)
        if self.use_bias:
            output += self.bias
        return self.activation(output)
class DropConnect(Wrapper):
    """Wrapper applying DropConnect to the wrapped layer's kernel/bias
    during training, e.g.
    ``x = DropConnect(Dense(64, activation='relu'), prob=0.5)(x)``.
    """
    def __init__(self, layer, prob=1., **kwargs):
        # prob: drop probability; outside (0, 1) the wrapper is a no-op.
        self.prob = prob
        self.layer = layer
        super(DropConnect, self).__init__(layer, **kwargs)
        if 0. < self.prob < 1.:
            self.uses_learning_phase = True

    def build(self, input_shape):
        if not self.layer.built:
            self.layer.build(input_shape)
            self.layer.built = True
        super(DropConnect, self).build()

    def compute_output_shape(self, input_shape):
        return self.layer.compute_output_shape(input_shape)

    def call(self, x):
        if 0. < self.prob < 1.:
            # K.dropout scales kept entries by 1/(1-prob); the extra
            # multiplication by (1-prob) undoes that scaling so surviving
            # weights keep their original magnitude (DropConnect style).
            self.layer.kernel = K.in_train_phase(K.dropout(self.layer.kernel, self.prob) * (1-self.prob), self.layer.kernel)
            self.layer.bias = K.in_train_phase(K.dropout(self.layer.bias, self.prob) * (1-self.prob), self.layer.bias)
        return self.layer.call(x)
#DropBlock2D(block_size=5, keep_prob=0.8, name='Dropout-1')
class DropBlock2D(Layer):
    """See: https://arxiv.org/pdf/1810.12890.pdf

    Drops contiguous square regions ("blocks") of a 4-D feature map
    during training instead of independent units.
    Usage: ``DropBlock2D(block_size=5, keep_prob=0.8, name='Dropout-1')``.
    """
    def __init__(self,
                 block_size,
                 keep_prob,
                 sync_channels=False,
                 data_format=None,
                 **kwargs):
        """Initialize the layer.
        :param block_size: Size for each mask block.
        :param keep_prob: Probability of keeping the original feature.
        :param sync_channels: Whether to use the same dropout for all channels.
        :param data_format: 'channels_first' or 'channels_last' (default).
        :param kwargs: Arguments for parent class.
        """
        super(DropBlock2D, self).__init__(**kwargs)
        self.block_size = block_size
        self.keep_prob = keep_prob
        self.sync_channels = sync_channels
        self.data_format = K.normalize_data_format(data_format)
        # 4-D input only: (batch, H, W, C) or (batch, C, H, W).
        self.input_spec = InputSpec(ndim=4)
        self.supports_masking = True

    def get_config(self):
        # Serialize constructor arguments for model saving/loading.
        config = {'block_size': self.block_size,
                  'keep_prob': self.keep_prob,
                  'sync_channels': self.sync_channels,
                  'data_format': self.data_format}
        base_config = super(DropBlock2D, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    def compute_mask(self, inputs, mask=None):
        # Pass any mask through unchanged.
        return mask

    def compute_output_shape(self, input_shape):
        # Dropout does not change the tensor shape.
        return input_shape

    def _get_gamma(self, height, width):
        """Get the number of activation units to drop"""
        # Eq. (1) of the DropBlock paper: seed probability adjusted so the
        # expected dropped fraction matches (1 - keep_prob).
        height, width = K.cast(height, K.floatx()), K.cast(width, K.floatx())
        block_size = K.constant(self.block_size, dtype=K.floatx())
        return ((1.0 - self.keep_prob) / (block_size ** 2)) *\
               (height * width / ((height - block_size + 1.0) * (width - block_size + 1.0)))

    def _compute_valid_seed_region(self, height, width):
        # Mask of positions far enough from the border that a full
        # block_size x block_size block fits around them.
        positions = K.concatenate([
            K.expand_dims(K.tile(K.expand_dims(K.arange(height), axis=1), [1, width]), axis=-1),
            K.expand_dims(K.tile(K.expand_dims(K.arange(width), axis=0), [height, 1]), axis=-1),
        ], axis=-1)
        half_block_size = self.block_size // 2
        valid_seed_region = K.switch(
            K.all(
                K.stack(
                    [
                        positions[:, :, 0] >= half_block_size,
                        positions[:, :, 1] >= half_block_size,
                        positions[:, :, 0] < height - half_block_size,
                        positions[:, :, 1] < width - half_block_size,
                    ],
                    axis=-1,
                ),
                axis=-1,
            ),
            K.ones((height, width)),
            K.zeros((height, width)),
        )
        return K.expand_dims(K.expand_dims(valid_seed_region, axis=0), axis=-1)

    def _compute_drop_mask(self, shape):
        # Sample block "seeds", then expand each seed to a full block with
        # a max-pool; return the keep-mask (1 = keep, 0 = drop).
        height, width = shape[1], shape[2]
        mask = K.random_binomial(shape, p=self._get_gamma(height, width))
        mask *= self._compute_valid_seed_region(height, width)
        mask = MaxPool2D(
            pool_size=(self.block_size, self.block_size),
            padding='same',
            strides=1,
            data_format='channels_last',
        )(mask)
        return 1.0 - mask

    def call(self, inputs, training=None):
        def dropped_inputs():
            outputs = inputs
            # Work internally in channels_last layout.
            if self.data_format == 'channels_first':
                outputs = K.permute_dimensions(outputs, [0, 2, 3, 1])
            shape = K.shape(outputs)
            if self.sync_channels:
                # One shared mask broadcast over all channels.
                mask = self._compute_drop_mask([shape[0], shape[1], shape[2], 1])
            else:
                mask = self._compute_drop_mask(shape)
            # Rescale so the expected activation magnitude is preserved.
            outputs = outputs * mask *\
                (K.cast(K.prod(shape), dtype=K.floatx()) / K.sum(mask))
            if self.data_format == 'channels_first':
                outputs = K.permute_dimensions(outputs, [0, 3, 1, 2])
            return outputs
        return K.in_train_phase(dropped_inputs, inputs, training=training)
def mix_up(data, one_hot_labels, alpha=1):
    """Mixup augmentation (Zhang et al., 2017): convex-combine each sample
    with a randomly chosen partner sample, and combine the labels with the
    same weights.

    :param data: array-like batch of samples, indexable along axis 0.
    :param one_hot_labels: one-hot label array aligned with ``data``.
    :param alpha: Beta-distribution parameter for the mixing weights.
    :return: tuple ``(x, y)`` of mixed samples and mixed labels.
    """
    # NOTE(review): the fixed seed makes every call produce identical
    # weights/permutation — kept for backward compatibility, but confirm
    # this determinism is intended.
    np.random.seed(2333)
    batch_size = len(data)
    weights = np.random.beta(alpha, alpha, batch_size)
    index = np.random.permutation(batch_size)
    x1, x2 = data, data[index]
    # Per-sample loop handles samples of any dimensionality.
    x = np.array([x1[i] * weights[i] + x2[i] * (1 - weights[i]) for i in range(batch_size)])
    # BUG FIX: np.float is a removed deprecated alias (NumPy >= 1.24);
    # use the explicit np.float64 instead.
    y1 = np.array(one_hot_labels).astype(np.float64)
    y2 = y1[index]
    y = np.array([y1[i] * weights[i] + y2[i] * (1 - weights[i]) for i in range(batch_size)])
    return x, y
def noise(data):
    """
    Adding White Noise.

    The noise amplitude is a random fraction (up to 5%) of the signal's
    maximum value.
    """
    # you can take any distribution from
    # https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.random.html
    # more noise reduce the value to 0.5
    noise_amp = 0.05 * np.random.uniform() * np.amax(data)
    # BUG FIX: np.random.normal() returns a single scalar, so the original
    # merely shifted the whole signal by one constant. Draw one Gaussian
    # sample per element so the result is actually white noise.
    data = data.astype('float64') + noise_amp * \
        np.random.normal(size=data.shape)
    return data
def shift(data):
    """
    Random Shifting.

    Circularly rolls the signal by a random number of samples drawn
    uniformly from roughly [-5000, 5000).
    """
    offset = int(np.random.uniform(low=-5, high=5) * 1000)  # default at 500
    return np.roll(data, offset)
def stretch(data, rate=0.8):
    """
    Streching the Sound. Note that this expands the dataset slightly.

    Time-stretches the signal with librosa and truncates the result back
    to the original number of samples.
    """
    # keep the same length, drop any samples beyond the original length
    stretched = librosa.effects.time_stretch(data, rate)
    return stretched[:len(data)]
def pitch(data, sr=16000):
    """
    Pitch Tuning.

    Shifts the pitch by a random fractional number of semitones using
    librosa.
    """
    bins_per_octave = 12
    pitch_pm = 2
    # NOTE(review): uniform() is in [0, 1), so the shift is always
    # non-negative (0 .. 2*pitch_pm semitones) — confirm this is intended.
    pitch_change = pitch_pm * 2 * (np.random.uniform())
    return librosa.effects.pitch_shift(
        data.astype('float64'),
        sr,
        n_steps=pitch_change,
        bins_per_octave=bins_per_octave,
    )
def dyn_change(data):
    """
    Random Value Change.

    Scales the whole signal by one random gain factor.
    """
    gain = np.random.uniform(low=-0.5, high=7)  # default low = 1.5, high = 3
    return data * gain
def speed_npitch(data):
    """
    speed and Pitch Tuning.

    Resamples the signal at a random rate (a combined speed/pitch
    change), then writes the result back into ``data`` in place,
    zero-filling the tail if the resampled signal is shorter.
    """
    # you can change low and high here
    length_change = np.random.uniform(low=0.8, high=1)
    speed_fac = 1.2 / length_change  # try changing 1.0 to 2.0 ... =D
    sample_points = np.arange(0, len(data), speed_fac)
    resampled = np.interp(sample_points, np.arange(0, len(data)), data)
    keep = min(data.shape[0], resampled.shape[0])
    # NOTE: mutates the caller's array in place (original behaviour).
    data *= 0
    data[0:keep] = resampled[0:keep]
    return data
def makeT(cp):
    """Build the thin-plate-spline system matrix for the control points.

    :param cp: [K x 2] array of control points.
    :return: T, the [(K+3) x (K+3)] TPS system matrix: affine terms in
        the first three columns/last three rows, radial-basis kernel
        values U(r) = r^2 * ln(r^2) in the remaining block.
    """
    num = cp.shape[0]
    mat = np.zeros((num + 3, num + 3))
    # Affine part: [1 | x y] for each control point, plus the transposed
    # constraint rows at the bottom.
    mat[:num, 0] = 1
    mat[:num, 1:3] = cp
    mat[num, 3:] = 1
    mat[num + 1:, 3:] = cp.T
    # Radial-basis part.
    dist = squareform(pdist(cp, metric='euclidean'))
    rsq = dist * dist
    rsq[rsq == 0] = 1  # trick: makes r^2 * ln(r^2) evaluate to 0 there
    basis = rsq * np.log(rsq)
    np.fill_diagonal(basis, 0)
    mat[:num, 3:] = basis
    return mat
def liftPts(p, cp):
    """Lift 2-D points into the TPS basis spanned by the control points.

    :param p: [N x 2] input points.
    :param cp: [K x 2] control points.
    :return: pLift, [N x (3+K)]: a constant column, the (x, y) affine
        columns, then the kernel values U(r) = r^2 * ln(r^2) against each
        control point.
    """
    n_pts, n_ctrl = p.shape[0], cp.shape[0]
    lifted = np.zeros((n_pts, n_ctrl + 3))
    lifted[:, 0] = 1       # constant term
    lifted[:, 1:3] = p     # affine (x, y) terms
    rsq = cdist(p, cp, 'euclidean') ** 2
    rsq[rsq == 0] = 1      # avoid log(0); the kernel is 0 at r == 0
    lifted[:, 3:] = rsq * np.log(rsq)
    return lifted
def spec_augment(spec):
W=40
T=30
F=13
mt=2
mf=2
# Nframe : number of spectrum frame
Nframe = spec.shape[1]
# Nbin : number of spectrum freq bin
Nbin = spec.shape[0]
# check input length
if Nframe < W*2+1:
W = int(Nframe/4)
if Nframe < T*2+1:
T = int(Nframe/mt)
if Nbin < F*2+1:
F = int(Nbin/mf)
# warping parameter initialize
w = random.randint(-W,W)
center = random.randint(W,Nframe-W)
src = np.asarray([[ float(center), 1], [ float(center), 0], [ float(center), 2], [0, 0], [0, 1], [0, 2], [Nframe-1, 0], [Nframe-1, 1], [Nframe-1, 2]])
dst = np.asarray([[ float(center+w), 1], [ float(center+w), 0], [ float(center+w), 2], [0, 0], [0, 1], [0, 2], [Nframe-1, 0], [Nframe-1, 1], [Nframe-1, 2]])
#print(src,dst)
# source control points
xs, ys = src[:,0],src[:,1]
cps = np.vstack([xs, ys]).T
# target control points
xt, yt = dst[:,0],dst[:,1]
# construct TT
TT = makeT(cps)
# solve cx, cy (coefficients for x and y)
xtAug = np.concatenate([xt, np.zeros(3)])
ytAug = np.concatenate([yt, | |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 11:33:55 2020
@author: User
"""
import sys
from pathlib import Path
import functools
# import collections
from collections import Counter
import pickle
# import types
# import post_helper
# import plotting
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.stats import linregress, zscore
import pandas as pd
import numpy as np
import datetime as dt
import pandas as pd
mpl.style.use("seaborn")
mpl.rcParams["figure.dpi"] = 100
# from sklearn.cluster import KMeans
# print ('Name prepare input:', __name__ )
if __name__ == "__main__":
# print(f'Package: {__package__}, File: {__file__}')
# FH_path = Path(__file__).parent.parent.parent.joinpath('FileHelper')
# sys.path.append(str(FH_path))
# sys.path.append(str(Path(__file__).parent.parent.joinpath('indexer')))
sys.path.append(str(Path(__file__).parent.parent.parent))
# sys.path.append("..")
# print(sys.path)
# import FileHelper
from FileHelper.PostChar import Characterization_TypeSetting, SampleCodesChar
from FileHelper.PostPlotting import *
from FileHelper.FindSampleID import GetSampleID
from FileHelper.FindFolders import FindExpFolder
# from FileHelper.FileFunctions.FileOperations import PDreadXLorCSV
from collect_load import Load_from_Indexes, CollectLoadPars
# from FileHelper.FindExpFolder import FindExpFolder
from plotting import eisplot
from prep_postchar import postChar
import EIS_export
elif "prepare_input" in __name__:
pass
# import RunEC_classifier
# from FileHelper.FindSampleID import FindSampleID
import logging
_logger = logging.getLogger(__name__)
# from FileHelper.PostChar import SampleSelection, Characterization_TypeSetting
def mkfolder(folder):
    """Create *folder* (including missing parents) if needed and return it."""
    folder.mkdir(exist_ok=True, parents=True)
    return folder
def filter_cols(_df, n):
    """Return the columns of ``_df`` matching the pattern spec ``n``.

    ``n`` is a sequence whose *last* element is the pattern. If any
    element of ``n`` contains the word "startswith", a prefix match is
    used; otherwise a substring match.
    """
    pattern = n[-1]
    if any("startswith" in item for item in n):
        return [col for col in _df.columns if col.startswith(pattern)]
    return [col for col in _df.columns if pattern in col]
OriginColors = Characterization_TypeSetting.OriginColorList()
Pfolder = FindExpFolder().TopDir.joinpath(
Path("Preparation-Thesis/SiO2_projects/SiO2_Me_ECdepth+LC")
)
plotsfolder = mkfolder(Pfolder.joinpath("correlation_plots"))
EC_folder = Pfolder.joinpath("EC_data")
EC_index, SampleCodes = Load_from_Indexes.get_EC_index()
print("finished")
# SampleCodesChar().load
def multiIndex_pivot(df, index=None, columns=None, values=None):
    """Pivot that supports a MultiIndex on both axes.

    Workaround for https://github.com/pandas-dev/pandas/issues/23955:
    packs the chosen index (and column) levels into tuple-valued helper
    columns, pivots on those, then restores proper MultiIndex objects on
    the result.

    :param df: input DataFrame (not modified; a deep copy is taken).
    :param index: list of columns for the pivoted index; if None, the
        frame's existing index levels are used.
    :param columns: column name or list of names for the pivoted columns.
    :param values: column(s) used to fill the pivoted frame.
    :return: the pivoted DataFrame with MultiIndex rows (and columns,
        when ``columns`` is a list).
    """
    # https://github.com/pandas-dev/pandas/issues/23955
    output_df = df.copy(deep=True)
    if index is None:
        # Use the frame's current index levels as the pivot index.
        names = list(output_df.index.names)
        output_df = output_df.reset_index()
    else:
        names = index
    # Collapse the index columns into one tuple-valued (hashable) column
    # so DataFrame.pivot accepts them.
    output_df = output_df.assign(
        tuples_index=[tuple(i) for i in output_df[names].values]
    )
    if isinstance(columns, list):
        output_df = output_df.assign(
            tuples_columns=[tuple(i) for i in output_df[columns].values]
        )  # hashable
        output_df = output_df.pivot(
            index="tuples_index", columns="tuples_columns", values=values
        )
        output_df.columns = pd.MultiIndex.from_tuples(
            output_df.columns, names=columns
        )  # reduced
    else:
        output_df = output_df.pivot(
            index="tuples_index", columns=columns, values=values
        )
    # Restore a proper MultiIndex on the rows.
    output_df.index = pd.MultiIndex.from_tuples(output_df.index, names=names)
    return output_df
def get_float_cols(df):
    """Return the names of the float64-dtype columns of ``df``."""
    return [col for col, dtype in df.dtypes.to_dict().items() if "float64" in str(dtype)]
def cm2inch(value):
    """Convert centimetres to inches (1 inch = 2.54 cm)."""
    return value / 2.54
# class PorphSamples():
# def __init__(self):
# self.template = PorphSamples.template()
def decorator(func):
    """No-op decorator template: preserves metadata and forwards the call."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Do something before
        result = func(*args, **kwargs)
        # Do something after
        return result
    return wrapper
def read_load_pkl(_pklstem):
    """Reload a cached DataFrame pickle from the shared compare folder.

    Returns an empty DataFrame when the file is missing or unreadable.
    """
    path = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
    if not path.exists():
        print("read error not existing", path)
        return pd.DataFrame()
    try:
        print("pkl reloading:", path)
        frame = pd.read_pickle(path)
        frame.columns
        return frame
    except Exception as exc:
        print("reading error", exc)
        return pd.DataFrame()
def save_DF_pkl(_pklstem, _DF):
    """Pickle ``_DF`` under the shared compare folder; return the path.

    Save failures are printed, not raised.
    """
    path = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
    try:
        print("pkl saving to:", path)
        _DF.to_pickle(path)
    except Exception as exc:
        print("pkl saving error", exc, path)
    return path
def load_dict_pkl(_pklstem):
    """Reload a pickled dict from the shared compare folder.

    Returns an empty dict when the file is missing or unreadable.
    """
    path = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
    if not path.exists():
        print("read error not existing", path)
        return {}
    try:
        print("pkl reloading:", path)
        with open(path, "rb") as handle:
            return pickle.load(handle)
    except Exception as exc:
        print("reading error", exc)
        return {}
def save_dict_pkl(_pklstem, _dict):
    """Pickle ``_dict`` under the shared compare folder; return the path.

    Save failures are printed, not raised.
    """
    path = EC_PorphSiO2.folder.joinpath(_pklstem).with_suffix(".pkl")
    try:
        print("pkl saving to:", path)
        with open(path, "wb") as handle:
            pickle.dump(_dict, handle)
    except Exception as exc:
        print("pkl saving error", exc, path)
    return path
def PorphSiO2_template():
    """Return the sample-code template DataFrame for the Porph_SiO2 series
    (SampleID, Metal, plot color per sample)."""
    # 'SerieIDs' : ('Porph_SiO2')*5,
    series = {
        "SampleID": ("JOS1", "JOS2", "JOS3", "JOS4", "JOS5"),
        "Metal": ("Fe", "Co", "MnTPP", "FeTPP", "H2"),
        "color": (2, 4, 6, 15, 3),
    }
    # Reference data on the porphyrin compounds (not used in the returned
    # frame; kept for documentation).
    porphyrins = {
        "TMPP": {"Formula": "C48H38N4O4", "MW": 734.8382},
        "TMPP-Fe(III)Cl": {"Formula": "C48H36ClFeN4O4", "MW": 824.1204},
        "TMPP-Co(II)": {"Formula": "C48H36CoN4O4", "MW": 791.7556},
        "TTP-Mn(III)Cl": {"Formula": "C44H28ClMnN4", "MW": 703.1098},
        "TPP-Fe(III)Cl": {"Formula": "C44H28ClFeN4", "MW": 704.0168},
        "TPP": {"Formula": "C44H30N4", "MW": 614.7346},
    }
    return pd.DataFrame(series)
def EC_types_grp():
    """Return, per EC technique, the grouping columns: the shared
    experimental conditions plus technique-specific extras."""
    # KL ['ORR_E_AppV_RHE', 'ORR_KL_E_AppV_RHE','Electrode']
    shared = ["postAST_post", "Sweep_Type", "pH", "Loading_cm2"]
    extras = {
        "N2CV": [],
        "N2": [],
        "ORR": ["RPM_DAC_uni"],
        "KL": ["Electrode", "ORR_E_AppV_RHE"],
        "EIS": ["E_RHE"],
        "HER": ["HER_RPM_post"],
        "OER": [],
    }
    return {tech: shared + extra for tech, extra in extras.items()}
def save_EC_index_PorphSiO2(EC_index, EC_folder):
    """Write the PorphSiO2-sample subset of ``EC_index`` to an Excel file
    inside ``EC_folder``."""
    template_ids = PorphSiO2_template().SampleID
    subset = EC_index.loc[EC_index.SampleID.isin(template_ids)]
    subset.to_excel(EC_folder.joinpath("EC_index_PorphSiO2.xlsx"))
# save_EC_index_PorphSiO2(EC_index, EC_folder)
class EC_PorphSiO2:
    """Selects the electrochemistry experiments (pre/post-AST day pairs)
    for the PorphSiO2 sample series from the module-level ``EC_index``."""
    folder = FindExpFolder("PorphSiO2").compare
    Porph_template = PorphSiO2_template()
    # globals EC_index
    # ['Model(Singh2015_RQRQ)', 'Model(Singh2015_RQRQR)', 'Model(Bandarenka_2011_RQRQR)',
    # 'Model(Singh2015_RQRWR)', 'Model(Randles_RQRQ)', 'Model(Singh2015_R3RQ)']
    # model_select = EC_PorphSiO2.EIS_models[1]
    # self = EC_PorphSiO2()
    def __init__(self):
        """Populate the sample/AST selections from the global EC_index."""
        # self.index, self.AST_days = EC_PorphSiO2.select_ECexps(EC_folder)
        self.select_EC_ASTexps_from_ECindex()
        # self.pars = EC_PorphSiO2.mergedEC()
        # self.par_export = EC_OHC.to_excel(self.folder.joinpath('EC_ORR_HPRR.xlsx'))
    def select_EC_ASTexps_from_ECindex(self):
        """Filter the global ``EC_index`` to this sample series and to the
        measurement days that belong to an AST pre/post pair; sets
        ``EC_idx_PorphSiO2_samples``, ``AST_days`` and ``EC_idx_PorphSiO2``."""
        EC_idx_PorphSiO2_samples = EC_index.loc[
            EC_index.SampleID.isin(self.Porph_template.SampleID.unique())
        ]
        # pd.read_excel(list(EC_folder.rglob('*EC_index*'))[0])
        # Derive a plain date column from the PAR_date timestamps.
        EC_idx_PorphSiO2_samples = EC_idx_PorphSiO2_samples.assign(
            **{
                "PAR_date_day_dt": [
                    dt.date.fromisoformat(np.datetime_as_string(np.datetime64(i, "D")))
                    for i in EC_idx_PorphSiO2_samples.PAR_date.to_numpy()
                ]
            }
        )
        self.EC_idx_PorphSiO2_samples = EC_idx_PorphSiO2_samples
        self.get_AST_days()
        # LC_idx_fp = list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx')
        # Keep only experiments measured on one of the AST pre/post days.
        EC_idx_PorphSiO2_AST = EC_idx_PorphSiO2_samples.loc[
            EC_idx_PorphSiO2_samples.PAR_date_day_dt.isin(
                [i for a in self.AST_days.to_numpy() for i in a]
            )
        ]
        # AST_days = EC_PorphSiO2.get_AST_days()
        # EC_idx_PorphSiO2_AST.to_excel(list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx'))
        self.EC_idx_PorphSiO2 = EC_idx_PorphSiO2_AST
        # if LC_idx_fp.exists():
        # else:
        # try:
        # LC_fls = pd.read_excel(LC_idx_fp,index_col=[0])
        # except Exception as e:
        # print(f'Excel load fail: {e}\n,file: {LC_idx_fp}')
        # LC_fls = pd.DataFrame()
        # return LC_fls, AST_days
    def get_AST_days(self):
        """Pair each measurement day containing an AST experiment with the
        following day (post-AST measurements) and store the pairs as the
        ``AST_days`` DataFrame."""
        gr_idx = self.EC_idx_PorphSiO2_samples.groupby("PAR_date_day_dt")
        AST_days = []
        for n, gr in gr_idx:
            # n,gr
            exps = gr.PAR_exp.unique()
            # gr.PAR_date_day.unique()[0]
            if any(["AST" in i for i in exps]):
                # print(n,exps)
                # AST_days.append(n)
                if n + dt.timedelta(1) in gr_idx.groups.keys():
                    _post = gr_idx.get_group(n + dt.timedelta(1))
                    # print(n + dt.timedelta(1), gr_idx.get_group(n + dt.timedelta(1)))
                    AST_days.append((n, n + dt.timedelta(1)))
                else:
                    # No next-day measurements: fall back to the same day.
                    AST_days.append((n, n))
                    print(n + dt.timedelta(1), "grp missing")
        # (AST_days[-1][0], AST_days[0][1])
        # AST_days.append((dt.date(2019,5,6), dt.date(2019,1,25)))
        # AST_days.append((dt.date(2019,5,6), dt.date(2019,1,26)))
        # Manually curated extra pre/post pairings.
        _extra_AST_days = [
            (dt.date(2019, 5, 6), dt.date(2019, 1, 25)),
            (dt.date(2019, 5, 6), dt.date(2019, 1, 26)),
        ]
        AST_days += _extra_AST_days
        AST_days = pd.DataFrame(
            AST_days, columns=["PAR_date_day_dt_pre", "PAR_date_day_dt_post"]
        )
        AST_days = AST_days.assign(
            **{
                "PAR_date_day_dt_diff": AST_days.PAR_date_day_dt_pre
                - AST_days.PAR_date_day_dt_post
            }
        )
        self.AST_days = AST_days
    # def select_ECexps(EC_folder):
    # LC_idx_fp = list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx')
    # AST_days = EC_PorphSiO2.get_AST_days()
    # if LC_idx_fp.exists():
    # LC_fls = EC_PorphSiO2.EC_idx_PorphSiO2.loc[EC_PorphSiO2.EC_idx_PorphSiO2.PAR_date_day_dt.isin([i for a in AST_days.to_numpy() for i in a])]
    # LC_fls.to_excel(list(EC_folder.rglob('*EC_index*'))[0].parent.joinpath('LC_index.xlsx'))
    # else:
    # try:
    # LC_fls = pd.read_excel(LC_idx_fp,index_col=[0])
    # except Exception as e:
    # print(f'Excel load fail: {e}\n,file: {LC_idx_fp}')
    # LC_fls = pd.DataFrame()
    # return LC_fls, AST_days
    # def repr_index(self):
    # PAR_exp_uniq = {grn : len(grp) for grn,grp in self.index.groupby("PAR_exp")}
    # print(f'Len({len(self.index)},\n{PAR_exp_uniq}')
def _testing_():
    # Ad-hoc manual test driver for EC_prepare_EC_merged; not called
    # automatically — run interactively during development.
    tt = EC_prepare_EC_merged(reload_AST=True, reload_merged=True, reload_pars=True)
    self = tt
    N2CV = self.N2cv(reload=False, use_daily=True)
#%% == EC_prepare_EC_merged == testing
class EC_prepare_EC_merged:
EIS_models = EIS_export.EIS_selection.mod_select
# ['Model(EEC_Randles_RWpCPE)', 'Model(EEC_2CPE)', 'Model(EEC_2CPEpW)',
# 'Model(EEC_RQ_RQ_RW)', 'Model(EEC_RQ_RQ_RQ)', 'Model(Randles_RQRQ)']
ORR_reload = dict(reload=True, use_daily=False)
ORR_no_reload = dict(reload=False, use_daily=True)
use_daily = True
# global ParsColl
# ParsColl = ParsColl
mcols = [i for i in Load_from_Indexes.EC_label_cols if i not in ["PAR_file"]] + [
"Sweep_Type"
]
_pkl_EC_merged = "EC_merged_dict"
def __init__(self, reload_AST=False, reload_merged=False, reload_pars=True):
    """Prepare the merged EC dataset.

    :param reload_AST: stored flag for re-matching pre/post AST pairs.
    :param reload_merged: rebuild the merged dict instead of reading cache.
    :param reload_pars: forwarded as ``reload`` to the per-technique loaders.
    """
    self.reload_AST = reload_AST
    self.reload_merged = reload_merged
    self.reload_pars = reload_pars
    self.set_pars_collection()
    self.reload_pars_kws = dict(reload=reload_pars, use_daily=self.use_daily)
    self.EC_merged_dict = {}
    self.load_EC_PorphSiO2()
    self.load_merged_EC()
def set_pars_collection(self):
    """Bind ``self.ParsColl``: reuse the module-global ``ParsColl`` when it
    already exists, otherwise load a fresh parameter collection."""
    if "ParsColl" in globals().keys():
        self.ParsColl = ParsColl
    else:
        Pars_Collection = CollectLoadPars(load_type="fast")
        # globals()['Pars_Collection'] = Pars_Collection
        ParsColl = Pars_Collection.pars_collection
        self.ParsColl = ParsColl
def load_EC_PorphSiO2(self):
    """Instantiate the sample selector and expose its AST-day table and
    filtered experiment index on this object."""
    selector = EC_PorphSiO2()
    self.EC_PorphSiO2 = selector
    self.AST_days = selector.AST_days
    self.EC_idx_PorphSiO2 = selector.EC_idx_PorphSiO2
def load_merged_EC(self):
    """Populate ``EC_merged_dict``: rebuild when requested, otherwise fall
    back to the pickle cache if the dict is still empty."""
    if self.reload_merged:
        self.reload_merged_EC()
    if self.EC_merged_dict:
        return
    cached = load_dict_pkl(self._pkl_EC_merged)
    if cached:
        self.EC_merged_dict = cached
def reload_merged_EC(self):
    """Rebuild every technique's merged pre/post-AST table and cache the
    result to the pickle file; failures are logged, not raised."""
    try:
        self.load_N2CV()
        self.load_ORR()
        self.load_KL()
        self.load_EIS()
        self.load_HER()
        self.add_filter_selection_of_EC_merged()
        # Persist the rebuilt dict so later runs can skip the rebuild.
        save_dict_pkl(self._pkl_EC_merged, self.EC_merged_dict)
    except Exception as e:
        _logger.warning(f"EC_prepare_EC_merged, reload_merged_EC failure: {e}")
def get_AST_matches(self, DF, _verbose=False):
# LC_fls, AST_days = EC_PorphSiO2.select_ECexps(EC_folder)
# DF = ORR.drop_duplicates()
# DF = N2CV.drop_duplicates()
# DF = EIS.drop_duplicates()
# DF = HER.drop_duplicates()
# DF = ttpars
if "PAR_date_day_dt" not in DF.columns:
DF = DF.assign(
**{
"PAR_date_day_dt": [
dt.date.fromisoformat(
np.datetime_as_string(np.datetime64(i, "D"))
)
for i in DF.PAR_date.to_numpy()
]
}
)
DF.PAR_date_day_dt = pd.to_datetime(DF.PAR_date_day_dt, unit="D")
# list((set(DF.columns).intersection(set(LC_fls.columns))).intersection(set(mcols) ))
# DF = pd.merge(DF,LC_fls,on=)
_compare_cols = [
i for i in ["SampleID", "pH", "Gas", "Loading_cm2"] if i in DF.columns
]
_swp_rpm = [
"Sweep_Type",
"RPM_DAC_uni" if "RPM_DAC_uni" in DF.columns else "RPM_DAC",
]
_coll = []
# AST_days_run_lst = [i for i in AST_days if len(i) == 2][-1:]
for n, r in self.AST_days.iterrows():
# if len(_dates) == 2:
# _pre,_post = _dates
# elif (len_dates) == 1:
_pre, _post = r.PAR_date_day_dt_pre, r.PAR_date_day_dt_post
_preslice = DF.loc[
(DF.PAR_date_day == _pre.strftime("%Y-%m-%d")) & (DF.postAST == "no")
]
pre = _preslice.groupby(_compare_cols)
_postslice = DF.loc[
(DF.PAR_date_day == _post.strftime("%Y-%m-%d")) & (DF.postAST != "no")
]
post = _postslice.groupby(_compare_cols)
_res = {}
_res = {
"pre_PAR_date_day_dt": _pre,
"post_PAR_date_day_dt": _post,
"AST_days_n": n,
}
# print(_res,[_preslice.postAST.unique()[0], _postslice.postAST.unique()[0]])
union = set(pre.groups.keys()).union(set(post.groups.keys()))
matches = set(pre.groups.keys()).intersection(set(post.groups.keys()))
_difference_pre = set(pre.groups.keys()).difference(set(post.groups.keys()))
_difference_post = set(post.groups.keys()).difference(
set(pre.groups.keys())
)
# _diffr.append((_pre,_post,_difference_pre, _difference_post))
if not _preslice.empty and not _postslice.empty:
for match in union:
_res.update(dict(zip(_compare_cols, match)))
_mgrpcols = ["PAR_file", "dupli_num", "postAST"]
if match in matches:
_mpre = pre.get_group(match).groupby(_mgrpcols)
_mpost = post.get_group(match).groupby(_mgrpcols)
elif match in _difference_pre:
_mpre = pre.get_group(match).groupby(_mgrpcols)
_mpost = pre.get_group(match).groupby(_mgrpcols)
elif match in _difference_post:
_mpre = post.get_group(match).groupby(_mgrpcols)
_mpost = post.get_group(match).groupby(_mgrpcols)
# print(_mpost.groups)
for (_prePF, npr, _preAST), prgrp in _mpre:
_res.update(
{
"pre_dupli_num": npr,
"pre_PAR_file": _prePF,
"pre_postAST": _preAST,
}
)
for | |
Threshold level for logging. Available levels: TRACE,
DEBUG, INFO (default), WARN, NONE (no logging). Use
syntax `LOGLEVEL:DEFAULT` to define the default
visible log level in log files.
Examples: --loglevel DEBUG
--loglevel DEBUG:INFO
--suitestatlevel level How many levels to show in `Statistics by Suite`
in log and report. By default all suite levels are
shown. Example: --suitestatlevel 3
--tagstatinclude tag * Include only matching tags in `Statistics by Tag`
and `Test Details` in log and report. By default all
tags set in test cases are shown. Given `tag` can
also be a simple pattern (see e.g. --test).
--tagstatexclude tag * Exclude matching tags from `Statistics by Tag` and
`Test Details`. This option can be used with
--tagstatinclude similarly as --exclude is used with
--include.
--tagstatcombine tags:name * Create combined statistics based on tags.
These statistics are added into `Statistics by Tag`
and matching tests into `Test Details`. If optional
`name` is not given, name of the combined tag is got
from the specified tags. Tags are combined using the
rules explained in --include.
Examples: --tagstatcombine requirement-*
--tagstatcombine tag1ANDtag2:My_name
--tagdoc pattern:doc * Add documentation to tags matching given pattern.
Documentation is shown in `Test Details` and also as
a tooltip in `Statistics by Tag`. Pattern can contain
characters `*` (matches anything) and `?` (matches
any char). Documentation can contain formatting
similarly as with --doc option.
Examples: --tagdoc mytag:My_documentation
--tagdoc regression:*See*_http://info.html
--tagdoc owner-*:Original_author
--tagstatlink pattern:link:title * Add external links into `Statistics by
Tag`. Pattern can contain characters `*` (matches
anything) and `?` (matches any char). Characters
matching to wildcard expressions can be used in link
and title with syntax %N, where N is index of the
match (starting from 1). In title underscores are
automatically converted to spaces.
Examples: --tagstatlink mytag:http://my.domain:Link
--tagstatlink bug-*:http://tracker/id=%1:Bug_Tracker
--removekeywords all|passed|for|wuks|name:<pattern> * Remove keyword data
from the generated log file. Keywords containing
warnings are not removed except in `all` mode.
all: remove data from all keywords
passed: remove data only from keywords in passed
test cases and suites
for: remove passed iterations from for loops
wuks: remove all but the last failing keyword
inside `BuiltIn.Wait Until Keyword Succeeds`
name:<pattern>: remove data from keywords that match
the given pattern. The pattern is matched
against the full name of the keyword (e.g.
'MyLib.Keyword', 'resource.Second Keyword'),
is case, space, and underscore insensitive,
and may contain `*` and `?` as wildcards.
Examples: --removekeywords name:Lib.HugeKw
--removekeywords name:myresource.*
--flattenkeywords for|foritem|name:<pattern> * Flattens matching keywords
in the generated log file. Matching keywords get all
log messages from their child keywords and children
are discarded otherwise.
for: flatten for loops fully
foritem: flatten individual for loop iterations
name:<pattern>: flatten matched keywords using same
matching rules as with
`--removekeywords name:<pattern>`
--listener class * A class for monitoring test execution. Gets
notifications e.g. when a test case starts and ends.
Arguments to listener class can be given after class
name, using colon as separator. For example:
--listener MyListenerClass:arg1:arg2
--warnonskippedfiles If this option is used, skipped test data files will
cause a warning that is visible in the console output
and the log file. By default skipped files only cause
an info level syslog message.
--nostatusrc Sets the return code to zero regardless of failures
in test cases. Error codes are returned normally.
--runemptysuite Executes tests also if the top level test suite is
empty. Useful e.g. with --include/--exclude when it
is not an error that no test matches the condition.
--dryrun Verifies test data and runs tests so that library
keywords are not executed.
--exitonfailure Stops test execution if any critical test fails.
--exitonerror Stops test execution if any error occurs when parsing
test data, importing libraries, and so on.
--skipteardownonexit Causes teardowns to be skipped if test execution is
stopped prematurely.
--randomize all|suites|tests|none Randomizes the test execution order.
all: randomizes both suites and tests
suites: randomizes suites
tests: randomizes tests
none: no randomization (default)
Use syntax `VALUE:SEED` to give a custom random seed.
The seed must be an integer.
Examples: --randomize all
--randomize tests:1234
--runmode mode * Deprecated in version 2.8. Use individual options
--dryrun, --exitonfailure, --skipteardownonexit, or
--randomize instead.
-W --monitorwidth chars Width of the monitor output. Default is 78.
-C --monitorcolors auto|on|ansi|off Use colors on console output or not.
auto: use colors when output not redirected (default)
on: always use colors
ansi: like `on` but use ANSI colors also on Windows
off: disable colors altogether
Note that colors do not work with Jython on Windows.
-K --monitormarkers auto|on|off Show `.` (success) or `F` (failure) on
console when top level keywords in test cases end.
Values have same semantics as with --monitorcolors.
-P --pythonpath path * Additional locations (directories, ZIPs, JARs) where
to search test libraries from when they are imported.
Multiple paths can be given by separating them with a
colon (`:`) or using this option several times. Given
path can also be a glob pattern matching multiple
paths but then it normally must be escaped or quoted.
Examples:
--pythonpath libs/
--pythonpath /opt/testlibs:mylibs.zip:yourlibs
-E star:STAR -P lib/STAR.jar -P mylib.jar
-E --escape what:with * Escape characters which are problematic in console.
`what` is the name of the character to escape and
`with` is the string to escape it with. Note that
all given arguments, incl. data sources, are escaped
so escape characters ought to be selected carefully.
<--------------------ESCAPES------------------------>
Examples:
--escape space:_ --metadata X:Value_with_spaces
-E space:SP -E quot:Q -v var:QhelloSPworldQ
-A --argumentfile path * Text file to read more arguments from. Use special
path `STDIN` to read contents from the standard input
stream. File can have both options and data sources
one per line. Contents do not need to be escaped but
spaces in the beginning and end of lines are removed.
Empty lines and lines starting with a hash character
(#) are ignored.
Example file:
| --include regression
| --name Regression Tests
| # This is a comment line
| my_tests.html
| path/to/test/directory/
Examples:
--argumentfile argfile.txt --argumentfile STDIN
-h -? --help Print usage instructions.
--version Print version information.
Options that are marked with an asterisk (*) can be specified multiple times.
For example, `--test first --test third` selects test cases with name `first`
and `third`. If other options are given multiple times, the last value is used.
Long option format is case-insensitive. For example, --SuiteStatLevel is
equivalent to but easier to read than --suitestatlevel. Long options can
also be shortened as long as they are unique. For example, `--logti Title`
works while `--lo log.html` does not because the former matches only --logtitle
but the latter matches --log, --loglevel and --logtitle.
Environment Variables
=====================
ROBOT_OPTIONS Space separated list of default options to be placed
in front of any explicit options on the command line.
ROBOT_SYSLOG_FILE Path to a file where Robot Framework writes internal
information about parsing test case files and running
tests. Can be useful when debugging problems. If not
set, or set to special value `NONE`, writing to the
syslog file is disabled.
ROBOT_SYSLOG_LEVEL Log level to use when writing to the syslog file.
Available levels are the same as for --loglevel
command line option and the default is INFO.
Examples
========
# Simple test run with `pybot` without options.
$ pybot tests.html
# Using options and running with `jybot`.
$ jybot --include smoke --name Smoke_Tests path/to/tests.txt
# Executing `robot.run` module using Python.
$ python -m robot.run --test test1 --test test2 test_directory
# Running `robot/run.py` script with Jython.
$ jython /path/to/robot/run.py tests.robot
# Executing multiple test case files and using case-insensitive long options.
$ pybot --SuiteStatLevel 2 /my/tests/*.html /your/tests.html
# Setting default options and syslog file before running tests.
$ export ROBOT_OPTIONS="--critical regression --suitestatlevel 2"
$ export ROBOT_SYSLOG_FILE=/tmp/syslog.txt
$ pybot tests.tsv
"""
import sys
# Allows running as a script. __name__ check needed with multiprocessing:
# http://code.google.com/p/robotframework/issues/detail?id=1137
if 'robot' not in sys.modules and __name__ == '__main__':
import pythonpathsetter
from robot.conf import RobotSettings
from robot.output import LOGGER
from robot.reporting import ResultWriter
from robot.running import TestSuiteBuilder
from robot.utils import Application
class RobotFramework(Application):
def __init__(self):
    """Configure the command-line application wrapper.

    Passes the module usage text (USAGE), reads extra default options from
    the ROBOT_OPTIONS environment variable, and wires in the shared LOGGER.
    """
    # arg_limits=(1,): presumably requires at least one datasource argument
    # with no upper bound — confirm against utils.Application semantics.
    Application.__init__(self, USAGE, arg_limits=(1,),
                         env_options='ROBOT_OPTIONS', logger=LOGGER)
def main(self, datasources, **options):
settings = RobotSettings(options)
LOGGER.register_console_logger(**settings.console_logger_config)
LOGGER.info('Settings:\n%s' | |
'2017-08-24'
def __init__(self):
    """Build the YDK entity for the YANG 'node-protocol-identifier' container.

    Auto-generated YDK-Py binding: registers child-class and leaf metadata
    with the Entity base before any leaf values are assigned.
    """
    super(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier, self).__init__()
    # Static YANG metadata for this node.
    self.yang_name = "node-protocol-identifier"
    self.yang_parent_name = "topology-node"
    self.is_top_level_class = False
    self.has_list_ancestor = True
    self.ylist_key_names = []
    # Child YANG nodes: no single containers, two repeated lists.
    self._child_container_classes = OrderedDict([])
    self._child_list_classes = OrderedDict([("igp-information", ("igp_information", PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation)), ("srgb-information", ("srgb_information", PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation))])
    # Leaf descriptors: python attribute name -> (YANG type, YANG leaf name).
    self._leafs = OrderedDict([
        ('node_name', YLeaf(YType.str, 'node-name')),
        ('ipv4_bgp_router_id_set', YLeaf(YType.boolean, 'ipv4-bgp-router-id-set')),
        ('ipv4_bgp_router_id', YLeaf(YType.str, 'ipv4-bgp-router-id')),
        ('ipv4te_router_id_set', YLeaf(YType.boolean, 'ipv4te-router-id-set')),
        ('ipv4te_router_id', YLeaf(YType.str, 'ipv4te-router-id')),
    ])
    # Leaf values start unset; these assignments are routed through the
    # overridden __setattr__ below, so the metadata above must already exist.
    self.node_name = None
    self.ipv4_bgp_router_id_set = None
    self.ipv4_bgp_router_id = None
    self.ipv4te_router_id_set = None
    self.ipv4te_router_id = None
    # YList containers holding the repeated child entries.
    self.igp_information = YList(self)
    self.srgb_information = YList(self)
    # Relative XPath segment of this node within its parent.
    self._segment_path = lambda: "node-protocol-identifier"
def __setattr__(self, name, value):
    """Route every attribute write through the YDK Entity setattr hook,
    identifying this class and its data-leaf attribute names."""
    leaf_names = [
        'node_name',
        'ipv4_bgp_router_id_set',
        'ipv4_bgp_router_id',
        'ipv4te_router_id_set',
        'ipv4te_router_id',
    ]
    self._perform_setattr(
        PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier,
        leaf_names,
        name,
        value)
class IgpInformation(Entity):
    """
    IGP information
    .. attribute:: igp
    IGP\-specific information
    **type**\: :py:class:`Igp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp>`
    .. attribute:: domain_identifier
    Domain identifier
    **type**\: int
    **range:** 0..18446744073709551615
    .. attribute:: autonomous_system_number
    Autonomous System Number
    **type**\: int
    **range:** 0..4294967295
    """

    # YANG module metadata for this auto-generated YDK binding.
    _prefix = 'infra-xtc-oper'
    _revision = '2017-08-24'

    def __init__(self):
        """Build the entity for the repeated 'igp-information' list entry."""
        super(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation, self).__init__()
        self.yang_name = "igp-information"
        self.yang_parent_name = "node-protocol-identifier"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # One child container ('igp'); no repeated child lists.
        self._child_container_classes = OrderedDict([("igp", ("igp", PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp))])
        self._child_list_classes = OrderedDict([])
        # Leaf descriptors: python attribute name -> (YANG type, YANG leaf name).
        self._leafs = OrderedDict([
            ('domain_identifier', YLeaf(YType.uint64, 'domain-identifier')),
            ('autonomous_system_number', YLeaf(YType.uint32, 'autonomous-system-number')),
        ])
        # Leaf values start unset; assignments go through __setattr__ below.
        self.domain_identifier = None
        self.autonomous_system_number = None
        # Instantiate and wire up the 'igp' child container.
        self.igp = PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp()
        self.igp.parent = self
        self._children_name_map["igp"] = "igp"
        self._children_yang_names.add("igp")
        # Relative XPath segment of this node within its parent.
        self._segment_path = lambda: "igp-information"

    def __setattr__(self, name, value):
        # Delegate attribute writes to the YDK Entity hook with this
        # class and its data-leaf attribute names.
        self._perform_setattr(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation, ['domain_identifier', 'autonomous_system_number'], name, value)

    class Igp(Entity):
        """
        IGP\-specific information
        .. attribute:: isis
        ISIS information
        **type**\: :py:class:`Isis <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Isis>`
        .. attribute:: ospf
        OSPF information
        **type**\: :py:class:`Ospf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Ospf>`
        .. attribute:: bgp
        BGP information
        **type**\: :py:class:`Bgp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Bgp>`
        .. attribute:: igp_id
        IGP ID
        **type**\: :py:class:`PceIgpInfoId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceIgpInfoId>`
        """

        _prefix = 'infra-xtc-oper'
        _revision = '2017-08-24'

        def __init__(self):
            """Build the 'igp' container holding protocol-specific children."""
            super(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp, self).__init__()
            self.yang_name = "igp"
            self.yang_parent_name = "igp-information"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            # Three child containers, one per routing protocol.
            self._child_container_classes = OrderedDict([("isis", ("isis", PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Isis)), ("ospf", ("ospf", PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Ospf)), ("bgp", ("bgp", PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Bgp))])
            self._child_list_classes = OrderedDict([])
            # Single enumeration leaf selecting the IGP type.
            self._leafs = OrderedDict([
                ('igp_id', YLeaf(YType.enumeration, 'igp-id')),
            ])
            self.igp_id = None
            # Instantiate and wire up each protocol child container.
            self.isis = PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Isis()
            self.isis.parent = self
            self._children_name_map["isis"] = "isis"
            self._children_yang_names.add("isis")
            self.ospf = PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Ospf()
            self.ospf.parent = self
            self._children_name_map["ospf"] = "ospf"
            self._children_yang_names.add("ospf")
            self.bgp = PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Bgp()
            self.bgp.parent = self
            self._children_name_map["bgp"] = "bgp"
            self._children_yang_names.add("bgp")
            self._segment_path = lambda: "igp"

        def __setattr__(self, name, value):
            # Delegate to the YDK Entity hook; 'igp_id' is the only data leaf.
            self._perform_setattr(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp, ['igp_id'], name, value)

        class Isis(Entity):
            """
            ISIS information
            .. attribute:: system_id
            ISIS system ID
            **type**\: str
            .. attribute:: level
            ISIS level
            **type**\: int
            **range:** 0..4294967295
            """

            _prefix = 'infra-xtc-oper'
            _revision = '2017-08-24'

            def __init__(self):
                """Build the 'isis' leaf container (system-id, level)."""
                super(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Isis, self).__init__()
                self.yang_name = "isis"
                self.yang_parent_name = "igp"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                # Pure leaf container: no children of either kind.
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('system_id', YLeaf(YType.str, 'system-id')),
                    ('level', YLeaf(YType.uint32, 'level')),
                ])
                self.system_id = None
                self.level = None
                self._segment_path = lambda: "isis"

            def __setattr__(self, name, value):
                self._perform_setattr(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Isis, ['system_id', 'level'], name, value)

        class Ospf(Entity):
            """
            OSPF information
            .. attribute:: router_id
            OSPF router ID
            **type**\: str
            **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
            .. attribute:: area
            OSPF area
            **type**\: int
            **range:** 0..4294967295
            """

            _prefix = 'infra-xtc-oper'
            _revision = '2017-08-24'

            def __init__(self):
                """Build the 'ospf' leaf container (router-id, area)."""
                super(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Ospf, self).__init__()
                self.yang_name = "ospf"
                self.yang_parent_name = "igp"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                # Pure leaf container: no children of either kind.
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('router_id', YLeaf(YType.str, 'router-id')),
                    ('area', YLeaf(YType.uint32, 'area')),
                ])
                self.router_id = None
                self.area = None
                self._segment_path = lambda: "ospf"

            def __setattr__(self, name, value):
                self._perform_setattr(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Ospf, ['router_id', 'area'], name, value)

        class Bgp(Entity):
            """
            BGP information
            .. attribute:: router_id
            BGP router ID
            **type**\: str
            **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
            .. attribute:: confed_asn
            Confederation ASN
            **type**\: int
            **range:** 0..4294967295
            """

            _prefix = 'infra-xtc-oper'
            _revision = '2017-08-24'

            def __init__(self):
                """Build the 'bgp' leaf container (router-id, confed-asn)."""
                super(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Bgp, self).__init__()
                self.yang_name = "bgp"
                self.yang_parent_name = "igp"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                # Pure leaf container: no children of either kind.
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('router_id', YLeaf(YType.str, 'router-id')),
                    ('confed_asn', YLeaf(YType.uint32, 'confed-asn')),
                ])
                self.router_id = None
                self.confed_asn = None
                self._segment_path = lambda: "bgp"

            def __setattr__(self, name, value):
                self._perform_setattr(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.IgpInformation.Igp.Bgp, ['router_id', 'confed_asn'], name, value)
class SrgbInformation(Entity):
    """
    SRGB information
    .. attribute:: igp_srgb
    IGP\-specific information
    **type**\: :py:class:`IgpSrgb <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb>`
    .. attribute:: start
    SRGB start
    **type**\: int
    **range:** 0..4294967295
    .. attribute:: size
    SRGB size
    **type**\: int
    **range:** 0..4294967295
    """

    # YANG module metadata for this auto-generated YDK binding.
    _prefix = 'infra-xtc-oper'
    _revision = '2017-08-24'

    def __init__(self):
        """Build the entity for the repeated 'srgb-information' list entry."""
        super(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation, self).__init__()
        self.yang_name = "srgb-information"
        self.yang_parent_name = "node-protocol-identifier"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # One child container ('igp-srgb'); no repeated child lists.
        self._child_container_classes = OrderedDict([("igp-srgb", ("igp_srgb", PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb))])
        self._child_list_classes = OrderedDict([])
        # Leaf descriptors: python attribute name -> (YANG type, YANG leaf name).
        self._leafs = OrderedDict([
            ('start', YLeaf(YType.uint32, 'start')),
            ('size', YLeaf(YType.uint32, 'size')),
        ])
        # Leaf values start unset; assignments go through __setattr__ below.
        self.start = None
        self.size = None
        # Instantiate and wire up the 'igp-srgb' child container.
        self.igp_srgb = PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb()
        self.igp_srgb.parent = self
        self._children_name_map["igp_srgb"] = "igp-srgb"
        self._children_yang_names.add("igp-srgb")
        # Relative XPath segment of this node within its parent.
        self._segment_path = lambda: "srgb-information"

    def __setattr__(self, name, value):
        # Delegate attribute writes to the YDK Entity hook with this
        # class and its data-leaf attribute names.
        self._perform_setattr(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation, ['start', 'size'], name, value)

    class IgpSrgb(Entity):
        """
        IGP\-specific information
        .. attribute:: isis
        ISIS information
        **type**\: :py:class:`Isis <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Isis>`
        .. attribute:: ospf
        OSPF information
        **type**\: :py:class:`Ospf <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Ospf>`
        .. attribute:: bgp
        BGP information
        **type**\: :py:class:`Bgp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Bgp>`
        .. attribute:: igp_id
        IGP ID
        **type**\: :py:class:`PceIgpInfoId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceIgpInfoId>`
        """

        _prefix = 'infra-xtc-oper'
        _revision = '2017-08-24'

        def __init__(self):
            """Build the 'igp-srgb' container holding protocol-specific children."""
            super(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb, self).__init__()
            self.yang_name = "igp-srgb"
            self.yang_parent_name = "srgb-information"
            self.is_top_level_class = False
            self.has_list_ancestor = True
            self.ylist_key_names = []
            # Three child containers, one per routing protocol.
            self._child_container_classes = OrderedDict([("isis", ("isis", PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Isis)), ("ospf", ("ospf", PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Ospf)), ("bgp", ("bgp", PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Bgp))])
            self._child_list_classes = OrderedDict([])
            # Single enumeration leaf selecting the IGP type.
            self._leafs = OrderedDict([
                ('igp_id', YLeaf(YType.enumeration, 'igp-id')),
            ])
            self.igp_id = None
            # Instantiate and wire up each protocol child container.
            self.isis = PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Isis()
            self.isis.parent = self
            self._children_name_map["isis"] = "isis"
            self._children_yang_names.add("isis")
            self.ospf = PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Ospf()
            self.ospf.parent = self
            self._children_name_map["ospf"] = "ospf"
            self._children_yang_names.add("ospf")
            self.bgp = PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Bgp()
            self.bgp.parent = self
            self._children_name_map["bgp"] = "bgp"
            self._children_yang_names.add("bgp")
            self._segment_path = lambda: "igp-srgb"

        def __setattr__(self, name, value):
            # Delegate to the YDK Entity hook; 'igp_id' is the only data leaf.
            self._perform_setattr(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb, ['igp_id'], name, value)

        class Isis(Entity):
            """
            ISIS information
            .. attribute:: system_id
            ISIS system ID
            **type**\: str
            .. attribute:: level
            ISIS level
            **type**\: int
            **range:** 0..4294967295
            """

            _prefix = 'infra-xtc-oper'
            _revision = '2017-08-24'

            def __init__(self):
                """Build the 'isis' leaf container (system-id, level)."""
                super(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Isis, self).__init__()
                self.yang_name = "isis"
                self.yang_parent_name = "igp-srgb"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                # Pure leaf container: no children of either kind.
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('system_id', YLeaf(YType.str, 'system-id')),
                    ('level', YLeaf(YType.uint32, 'level')),
                ])
                self.system_id = None
                self.level = None
                self._segment_path = lambda: "isis"

            def __setattr__(self, name, value):
                self._perform_setattr(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Isis, ['system_id', 'level'], name, value)

        class Ospf(Entity):
            """
            OSPF information
            .. attribute:: router_id
            OSPF router ID
            **type**\: str
            **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
            .. attribute:: area
            OSPF area
            **type**\: int
            **range:** 0..4294967295
            """

            _prefix = 'infra-xtc-oper'
            _revision = '2017-08-24'

            def __init__(self):
                """Build the 'ospf' leaf container (router-id, area)."""
                super(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Ospf, self).__init__()
                self.yang_name = "ospf"
                self.yang_parent_name = "igp-srgb"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                # Pure leaf container: no children of either kind.
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('router_id', YLeaf(YType.str, 'router-id')),
                    ('area', YLeaf(YType.uint32, 'area')),
                ])
                self.router_id = None
                self.area = None
                self._segment_path = lambda: "ospf"

            def __setattr__(self, name, value):
                self._perform_setattr(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Ospf, ['router_id', 'area'], name, value)

        class Bgp(Entity):
            """
            BGP information
            .. attribute:: router_id
            BGP router ID
            **type**\: str
            **pattern:** (([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])(%[\\p{N}\\p{L}]+)?
        .. attribute:: confed_asn
            Confederation ASN
            **type**\: int
            **range:** 0..4294967295
            """

            _prefix = 'infra-xtc-oper'
            _revision = '2017-08-24'

            def __init__(self):
                """Build the 'bgp' leaf container (router-id, confed-asn)."""
                super(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Bgp, self).__init__()
                self.yang_name = "bgp"
                self.yang_parent_name = "igp-srgb"
                self.is_top_level_class = False
                self.has_list_ancestor = True
                self.ylist_key_names = []
                # Pure leaf container: no children of either kind.
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('router_id', YLeaf(YType.str, 'router-id')),
                    ('confed_asn', YLeaf(YType.uint32, 'confed-asn')),
                ])
                self.router_id = None
                self.confed_asn = None
                self._segment_path = lambda: "bgp"

            def __setattr__(self, name, value):
                self._perform_setattr(PceTopology.TopologyNodes.TopologyNode.NodeProtocolIdentifier.SrgbInformation.IgpSrgb.Bgp, ['router_id', 'confed_asn'], name, value)
class PrefixSid(Entity):
"""
Prefix SIDs
.. attribute:: sid_prefix
Prefix
**type**\: :py:class:`SidPrefix <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.PceTopology.TopologyNodes.TopologyNode.PrefixSid.SidPrefix>`
.. attribute:: sid_type
SID Type
**type**\: :py:class:`Sid <ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_xtc_oper.Sid>`
.. attribute:: mpls_label
MPLS Label
**type**\: int
**range:** 0..4294967295
.. attribute:: domain_identifier
Domain identifier
**type**\: int
**range:** 0..18446744073709551615
.. attribute:: rflag
R Flag
**type**\: bool
.. attribute:: nflag
N Flag
**type**\: bool
.. attribute:: pflag
P Flag
**type**\: bool
.. attribute:: eflag
E Flag
**type**\: bool
.. attribute:: vflag
V Flag
**type**\: bool
.. attribute:: lflag
L Flag
**type**\: bool
"""
_prefix = 'infra-xtc-oper'
_revision = '2017-08-24'
def __init__(self):
super(PceTopology.TopologyNodes.TopologyNode.PrefixSid, self).__init__()
self.yang_name = "prefix-sid"
self.yang_parent_name = "topology-node"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([("sid-prefix", ("sid_prefix", | |
import regex as re
from urllib.parse import urljoin, urlparse
import glob
import os
import json
def biography_rename(name):
    """Map a MacTutor biography page name to its apostrophe-free form.

    A handful of biography pages were renamed when the site was
    restructured, dropping apostrophes from the page names.

    Args:
        name: Original biography page name (e.g. "D'Alembert").

    Returns:
        The renamed form for the known special cases, otherwise the
        input name unchanged.
    """
    # Renamed `map` -> `renames`: don't shadow the builtin.
    renames = {
        "Abu'l-Wafa": "Abul-Wafa",
        "D'Adhemar": "DAdhemar",
        "D'Alembert": "DAlembert",
        "D'Ocagne": "DOcagne",
        "D'Ovidio": "DOvidio",
        "De_L'Hopital": "De_LHopital",
        "Krasnosel'skii": "Krasnoselskii",
        "Thompson_D'Arcy": "Thompson_DArcy",
    }
    # Single lookup with a default instead of membership test + index.
    return renames.get(name, name)
def convert(href, url_context):
original_href = href
if href.endswith(' "'):
href = href[:-2]
# generate the current context url
base_url = urljoin('https://www-history.mcs.st-andrews.ac.uk/', url_context)
href = href.strip()
href = href.replace('" target=_blank', '')
href = href.replace('target=_blank', '')
href = href.replace('target=blank_', '')
href = href.replace('" target="_blank', '')
href = href.replace('target="_blank', '')
href = href.replace('height=800', '')
href = href.replace('" class="tippyPic', '')
if href.endswith(' ,'): href = href[:-1]
href = href.strip()
if href.startswith('\\http://'):
href = href[1:]
elif href.startswith('href=http://'):
href = href[5:]
pattern = re.compile(r'^https?://www-history.mcs.st-and(?:rews)?.ac.uk(?P<page>.*)$')
match = pattern.search(href)
if match:
# this is an external link that goes to us! convert to absolute
href = match.group('page')
# if a anchor link, return it
if href.startswith('#'):
return href
# if a email link, return it
if href.startswith('mailto:'):
return href
# win javascript pattern
pattern = re.compile(r'^javascript:win\(\'(?P<href>.*?)\'(?:.*?)\)$')
match = pattern.search(href)
if match:
href = '/Obits/%s.html' % match.group('href')
# win0 javascript pattern
pattern = re.compile(r'^javascript:win0\(\'(?P<href>.*?)\'(?:.*?)\)$')
match = pattern.search(href)
if match:
href = '../' + match.group('href')
# win1 second javascript pattern - no forced line start and end
# this is because when a javascript:win1 and a href are present, the js is
# usually the correct one
pattern = re.compile(r'javascript:win1\(\'(?P<href>.*?)\'(?:.*?)\)')
match = pattern.search(href)
if match:
href = match.group('href')
# showcurve javascript pattern
pattern = re.compile(r'^javascript:showcurve\(\'(?P<curve>.*?)\'(?:.*?)\)$')
match = pattern.search(href)
if match:
curve = match.group('curve')
href = '../Curvepics/' + curve + '.gif'
# if a external url, return it
if href.startswith('http://') or href.startswith('https://') or href.startswith('ftp://') or href.startswith('//'):
return href
# now convert the href to an absolute mactutor link
href_full = urljoin(base_url, href)
# and parse it into path and fragment
parsed = urlparse(href_full)
path = parsed.path
fragment = parsed.fragment
while path.startswith('/history/'):
path = path[8:]
html_directories = ('/Astronomy/','/Biographies/','/Curves/','/Extras/','/HistTopics/','/Honours/','/Quotations/','/Strick/','/Tait/','/Wallace/','/Gaz/','/Ledermann/','/Projects/Daxenberger/','/ICM/')
attachment_directories = ('/Bookpages/','/Publications/','/DNB/','/DSB/','/BSHM/')
# two special cases - need to remove spaces
path = path.replace('LMS FrolichPrize', 'LMSFrolichPrize')
path = path.replace('Atiyah_NY Times', 'Atiyah_NYTimes')
# sometimes we have moved things around, so need to correct this here
with open('moved_array.json', 'r') as f:
moved_array = json.load(f)
for item in moved_array:
move_from = item['from']
move_to = item['to']
if path.startswith(move_from) and path.lower().endswith(('.png', '.jpg', '.jpeg', '.gif')):
path = path.replace(move_from, move_to)
with open('moved_array.txt', 'a') as f:
f.write('%s :: %s :: %s\n' % (path, move_from, move_to))
break
if path == '/Honours/FRSE.html' or path == '/Societies/FRSE.html':
path = '/Societies/RSE/FRSE/'
elif path == '/Honours/FRSEchron.html' or path == '/Societies/FRSEchron.html':
path = '/Societies/RSE/FRSE/chronological/'
if path == '/' or path == '/index.html':
page = '/'
elif path == '/Astronomy/astronomers.html':
page = '/Biographies/@categoryindex/astronomy'
elif path == '/Diagrams/Popular.html':
page = '/Miscellaneous/Popular'
elif path.startswith(html_directories):
if path.endswith('index.html'):
page = path[:-10]
elif path.endswith('.html'):
page = path[:-5]
else:
page = path
elif path.startswith('/BMC/'):
with open('bmcarray.json', 'r') as f:
bmcdata = json.load(f)
# might need to reverse back up a dir
pattern = re.compile(r'/BMC/(?P<year>\d{4})/(?P<file>.+?\.html)')
match = pattern.match(path)
if match:
path = '/BMC/%s' % match.group('file')
# try and match it with a file
found = False
for year, file in bmcdata:
if '/BMC/%s.html' % file == path:
found = True
page = '/BMC/%s/%s' % (year, file)
break
if not found:
# try and match it with a year
pattern = re.compile(r'/BMC/(?P<year>\d{4})\.html')
match = pattern.match(path)
if match:
page = '/BMC/%s/' % match.group('year')
# index page?
elif path == '/BMC/index.html' or path == '/BMC/':
page = '/BMC/'
elif path == '/BMC/plenary.html':
page = '/BMC/speakers-plenary/'
elif path == '/BMC/morning.html':
page = '/BMC/speakers-morning/'
elif path == '/BMC/special.html':
page = '/BMC/speakers-special/'
elif path == '/BMC/full.html':
page = '/BMC/speakers-all/'
else:
# an error, not sure what this page is
with open('bmc-notfound.txt', 'a') as f:
f.write('%s\n' % path)
page = path
elif path.startswith('escherpic('):
pattern = re.compile(r'escherpic\(\'(?P<name>.+?)\',(?P<width>\d+?),(?P<height>\d+?)\)')
match = pattern.search(path)
if match:
name = match.group('name')
width = match.group('width')
height = match.group('height')
page = '/Diagrams/Escher_%s.jpeg' % name
else:
page = path
elif path.startswith('/Darcy/'):
found = False
still_there = ['cordmath','Darling','marshall','neville','Peddie','plateau','tait','transformation','whitehead']
for name in still_there:
name = '/Darcy/%s.html' % name
if path.startswith(name):
page = path[:-5]
found = True
break
if not found:
if path == '/Darcy/index.html':
page = '/Darcy/'
# the ms pages have moved to extras, so they appear as popups
elif path.startswith('/Darcy/ms') and path.endswith('.html'):
page = '/Extras/%s/' % path[7:-5]
elif path == '/Darcy/Overview.html':
page = '/Projects/DickinsonCernokova/chapter-1/'
elif path == '/Darcy/Heilmann_Shufeldt.html':
page = '/Projects/DickinsonCernokova/chatper-2/'
elif path == '/Darcy/Correspondence_I.html':
page = '/Projects/DickinsonCernokova/chatper-3/'
elif path == '/Darcy/Correspondence_II.html':
page = '/Projects/DickinsonCernokova/chatper-4/'
elif path == '/Darcy/Correspondence_II.html':
page = '/Projects/DickinsonCernokova/chatper-5/'
elif path == '/Darcy/Correspondence_VI.html':
page = '/Projects/DickinsonCernokova/chatper-6/'
elif path == '/Darcy/Correspondence_V.html':
page = '/Projects/DickinsonCernokova/chatper-7/'
elif path == '/Darcy/Correspondence_VI.html':
page = '/Projects/DickinsonCernokova/chatper-8/'
elif path == '/Darcy/dwtandmaths.html':
page = '/Projects/GowenlockTuminauskaite/chatper-1/'
elif path == '/Darcy/coordinates.html':
page = '/Projects/GowenlockTuminauskaite/chatper-2/'
elif path == '/Darcy/log.html':
page = '/Projects/GowenlockTuminauskaite/chatper-3/'
elif path == '/Darcy/cells.html':
page = '/Projects/GowenlockTuminauskaite/chatper-4/'
elif path == '/Darcy/fidler.html':
page = '/Projects/GowenlockTuminauskaite/chatper-5/'
elif path.endswith(('.png', '.jpg', '.jpeg', '.gif')):
page = '/Diagrams/darcy-%s' % path[7:]
else:
print('DARCY URL ERROR: %s' % path)
page = path
elif path.startswith('/Societies/'):
if path.endswith('index.html'):
page = path[:-10]
elif path.endswith('.html'):
page = path[:-5]
else:
page = path
if page == '/Societies/alph_list':
page = '/Societies/'
elif page == '/Societies/societies_list/':
page = '/Miscellaneous/other_indexes/'
elif path.startswith(attachment_directories):
if path.endswith('index.html'):
page = path[:-10]
else:
page = path
elif path.startswith('/Diagrams/'):
# need to convert a few
if path.startswith('/Diagrams/braid/'):
path = path.replace('/Diagrams/braid/', '/Diagrams/')
if path.startswith('/Diagrams/fairbook/'):
path = path.replace('/Diagrams/fairbook/', '/Diagrams/')
if path.startswith('/Diagrams/wf/'):
path = path.replace('/Diagrams/wf/', '/Diagrams/')
# special case for escher HTML pages
if path.startswith('/Diagrams/Escher_') and path.endswith('.html'):
page = '/Extras/%s' % path[10:-5]
else:
# see if this matches a diagram
DIAGRAM_DIR = '/Users/david/Documents/MacTutor/actual-work/dev/mathshistory-site/content/Diagrams/'
diagram = path[10:]
if os.path.isfile(os.path.join(DIAGRAM_DIR, diagram)):
page = path
else:
# not a diagram, try and resolve this
matches = glob.glob('%s%s*' % (DIAGRAM_DIR, diagram))
if len(matches) != 1:
with open('diagram-errors.txt', 'a') as f:
f.write('%s :: %s :: %s\n' % (original_href, url_context, diagram))
#page = ''
page = '/Diagrams/%s' % diagram
else:
page = '/Diagrams/%s' % os.path.basename(matches[0])
elif path.startswith('/Obits/'):
page = '/TimesObituaries/' + path[7:]
elif path.startswith('/Glossary/'):
if path.endswith('index.html'):
page = path[:-10]
elif path.endswith('.html'):
entry = path[10:-5]
page = '/Glossary/#%s' % entry
else:
page = path
elif path.startswith('/BigPictures/'):
pattern = re.compile(r'/BigPictures/(?P<image>(?P<name>.+?)(?:_\d+)?\..*)')
match = pattern.search(path)
if match:
name = match.group('name')
image = match.group('image')
page = '/Biographies/%s/%s' % (name, image)
else:
page = path
elif path.startswith('/References/'):
if path.endswith('.html'):
name = path[12:-5]
page = '/Biographies/%s/' % name
else:
page = path
elif path.startswith('/Obits2/'):
page = '/Obituaries/' + path[8:]
if page.endswith('.html'):
page = page[:-5]
elif path.startswith('/ems/'):
page = '/EMS/' + path[5:]
if page.endswith('.html'):
page = page[:-5]
elif path.startswith('/Mathematicians/'):
page = '/Biographies/' + path[16:]
if page.endswith('.html'):
page = page[:-5]
elif path.startswith('/Education/'):
page = path
if page.endswith('.html'):
page = page[:-5]
# fix some links, that are now subdirs
if page == '/Education/Edinburgh_m_exams':
page = '/Education/Edinburgh_maths/Edinburgh_m_exams'
elif page == '/Education/Edinburgh_p_exams':
page = '/Education/Edinburgh_maths/Edinburgh_p_exams'
elif page == '/Education/Glasgow_exams':
page = '/Education/Glasgow_maths/Glasgow_exams'
elif page == '/Education/St_Andrews_m_exams':
page = '/Education/St_Andrews_maths/St_Andrews_m_exams'
elif page == '/Education/St_Andrews_p_exams':
page = '/Education/St_Andrews_maths/St_Andrews_p_exams'
elif path.startswith('/Curvepics/'):
curve = path[11:]
pattern = re.compile(r'(?<=\D+)(\d)(?=.gif)')
match = pattern.search(curve)
if match:
curve = re.sub(pattern, r'0\1', curve)
page = '/Curves/%s' % curve
elif path.startswith('/Search/'):
page = '/Search/'
elif path.startswith('/Davis/'):
# leave it alone for now
page = path
elif path == '/Indexes/African_men_alph.html':
page = '/Biographies/@categoryindex/african-men-alph'
elif path == '/Indexes/African_women_alph.html':
page = '/Biographies/@categoryindex/african-women'
elif path == '/~john/':
page = 'http://www-groups.mcs.st-and.ac.uk/~john/'
elif path == '/~edmund/':
page = 'http://www-groups.mcs.st-and.ac.uk/~edmund/'
elif path == '/PictDisplay/Somerville.html':
page = '/Biographies/Somerville/pictdisplay/'
elif path == '<EMAIL>':
page = 'mailto:<EMAIL>'
elif path == '/Index/Changes.html':
page = '/Miscellaneous/recent_changes'
elif path == '/Miscellaneous/About_us.html':
page = '/Miscellaneous/about_us'
elif path == '/Java/Sources/Wholecode.html':
page = '/Miscellaneous/java_code'
elif path == '/Miscellaneous/FAQ.html':
page = '/Miscellaneous/faq'
elif path == '/Miscellaneous/Copyright.html':
page = '/Miscellaneous/copyright'
elif path == '/Miscellaneous/Copyright0.html':
page = '/Miscellaneous/copyright'
elif path == '/Miscellaneous/Popular.html':
page = '/Miscellaneous/Popular'
elif path == '/Miscellaneous/Popular_2009.html':
page = '/Miscellaneous/Popular_2009'
elif path == '/Miscellaneous/DArcy_Thompson.html':
page = '/Darcy/DArcy_Thompson'
elif path == '/Miscellaneous/darcy.html':
page = '/Darcy/darcy'
elif path == '/Comments/makecomment0.html':
page = '/Miscellaneous/contact_us'
else:
page = path
with open('url-conversion-non.txt', 'a') | |
)
offsetFile = fileName
with open(offsetFile, 'w') as f:
f.write(offsetsPlain)
def meanOffset(offsets):
    """Return [mean range offset, mean azimuth offset] of an offset field.

    offsets: iterable of offset objects exposing .dx (range) and .dy (azimuth).
    Raises ZeroDivisionError for an empty input, matching the historic
    accumulator-based implementation.
    """
    items = list(offsets)
    count = len(items)
    mean_range = sum(o.dx for o in items) / count
    mean_azimuth = sum(o.dy for o in items) / count
    return [mean_range, mean_azimuth]
def cullOffsetbyMean(offsets, azimuthOffsetMean, threshold):
    """Discard offsets whose azimuth offset deviates from the mean by more
    than *threshold*, returning the surviving offsets as a new OffsetField.
    """
    import isce
    import isceobj
    from isceobj.Location.Offset import OffsetField,Offset
    kept = OffsetField()
    numCulled = 0
    for off in offsets:
        # keep offsets within the threshold band around the mean
        if abs(off.dy - azimuthOffsetMean) <= threshold:
            kept.addOffset(off)
        else:
            numCulled += 1
    print("{} offsets culled, with azimuth mean offset: {} and threshold: {}".format(numCulled, azimuthOffsetMean, threshold))
    return kept
def cullOffset(offsets, distances, numCullOffsetsLimits):
    """Iteratively cull outlier offsets with isce's Offoutliers component.

    offsets: offset field from ampcor
    distances: tuple of culling distances, one per pass
    numCullOffsetsLimits: tuple of minimum acceptable offset counts, one per pass
    Returns the refined offset field, or None when culling fails or leaves
    fewer offsets than the per-pass limit.
    """
    import os
    import isce
    import isceobj
    from iscesys.StdOEL.StdOELPy import create_writer
    refinedOffsets = offsets
    for i, (distance, numCullOffsetsLimit) in enumerate(zip(distances, numCullOffsetsLimits)):
        cullOff = isceobj.createOffoutliers()
        cullOff.wireInputPort(name='offsets', object=refinedOffsets)
        cullOff.setSNRThreshold(2.0)
        cullOff.setDistance(distance)
        # set the tag used in the outfile; each message is preceded by this tag.
        # If the writer is not of "file" type the call has no effect.
        logfile = "offoutliers.log"
        stdWriter = create_writer("log", "", True, filename=logfile)
        stdWriter.setFileTag("offoutliers", "log")
        stdWriter.setFileTag("offoutliers", "err")
        stdWriter.setFileTag("offoutliers", "out")
        cullOff.setStdWriter(stdWriter)
        try:
            cullOff.offoutliers()
            refinedOffsets = cullOff.getRefinedOffsetField()
            numLeft = len(refinedOffsets._offsets)
            print('Number of offsets left after %2dth culling: %5d'%(i, numLeft))
            if numLeft < numCullOffsetsLimit:
                print('*******************************************************')
                print('WARNING: Too few points left after culling: {} left'.format(numLeft))
                print('*******************************************************')
                return None
        # was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt;
        # narrow it to Exception so interpreter-level signals still propagate
        except Exception:
            print('*******************************************************')
            print('WARNING: unsuccessful offset culling')
            print('*******************************************************')
            return None
        os.remove(logfile)
    return refinedOffsets
def getOffset(offsets, offsetFile, cullOffsetFile, dumpFile):
    """Dump ampcor offsets, cull outliers with fitoff, and parse the affine fit.

    offsets: offset field from ampcor
    offsetFile: path for the plain-text dump of all offsets
    cullOffsetFile: path where fitoff writes the surviving offsets
    dumpFile: path where fitoff's report (including the affine matrix) is saved
    Returns [m11, m12, m21, m22, t1, t2] of the fitted affine transform, or
    None when too few offsets survive culling or the report cannot be parsed.
    """
    # write the offsets in the fixed-width text format expected by fitoff
    offsetsPlain = ''
    for offsetx in offsets:
        fields = "{}".format(offsetx).split()
        offsetsPlain = offsetsPlain + "{:8d} {:10.3f} {:8d} {:12.3f} {:11.5f} {:11.6f} {:11.6f} {:11.6f}\n".format(
            int(fields[0]),
            float(fields[1]),
            int(fields[2]),
            float(fields[3]),
            float(fields[4]),
            float(fields[5]),
            float(fields[6]),
            float(fields[7])
        )
    with open(offsetFile, 'w') as f:
        f.write(offsetsPlain)

    # progressively relax the culling thresholds until at least 50 offsets survive
    breakFlag = 0
    for maxrms in [0.08, 0.16, 0.24]:
        for nsig in [1.5, 1.4, 1.3, 1.2, 1.1, 1.0, 0.9]:
            # run fitoff. NOTE(review): this is a shell-interpolated command line;
            # the paths come from trusted local config, but keep them shell-safe.
            cmd = '$INSAR_ZERODOP_BIN/fitoff {} {} {} {} 50 > {}'.format(offsetFile, cullOffsetFile, nsig, maxrms, dumpFile)
            runCmd(cmd)
            # check number of matching points left
            with open(cullOffsetFile, 'r') as ff:
                numCullOffsets = sum(1 for linex in ff)
            if numCullOffsets < 50:
                print('offsets culling with nsig {} maxrms {}: {} left after culling, two few points'.format(nsig, maxrms, numCullOffsets))
            else:
                print('offsets culling with nsig {} maxrms {}: {} left after culling, success'.format(nsig, maxrms, numCullOffsets))
                breakFlag = 1
                break
        if breakFlag == 1:
            break
    if numCullOffsets < 50:
        print('*******************************************************')
        print('WARNING: Too few points left after culling: {} left'.format(numCullOffsets))
        print('*******************************************************')
        return None

    # parse the affine matrix and translation vector out of fitoff's report
    with open(dumpFile) as f:
        lines = f.readlines()
    for i, linex in enumerate(lines):
        if 'Affine Matrix ' in linex:
            m11 = float(lines[i + 2].split()[0])
            m12 = float(lines[i + 2].split()[1])
            m21 = float(lines[i + 3].split()[0])
            m22 = float(lines[i + 3].split()[1])
            t1 = float(lines[i + 7].split()[0])
            t2 = float(lines[i + 7].split()[1])
            return [m11, m12, m21, m22, t1, t2]
    # previously this path crashed with an unbound-variable NameError at the
    # return statement; report the parse failure and fail cleanly instead,
    # consistent with the other None-on-failure paths above
    print('*******************************************************')
    print('WARNING: could not find affine matrix in {}'.format(dumpFile))
    print('*******************************************************')
    return None
# def cal_coherence(inf, win=5):
# '''
# Compute coherence using scipy convolve 2D.
# '''
# filt = np.ones((win,win))/ (1.0*win*win)
# cJ = np.complex64(1.0j)
# angle = np.exp(cJ * np.angle(inf))
# cor = ss.convolve2d(angle, filt, mode='same')
# cor[0:win-1,:] = 0.0
# cor[-win+1:,:] = 0.0
# cor[:,0:win-1] = 0.0
# cor[:,-win+1:] = 0.0
# cor = np.absolute(cor)
# print(np.max(cor), np.min(cor))
# #cor.astype(np.float32).tofile(f)
# return cor
#better way to trim edges
def cal_coherence(inf, win=5):
    '''
    Compute coherence using scipy convolve 2D.
    '''
    import numpy as np
    import scipy.signal as ss

    # boxcar averaging kernel
    kernel = np.ones((win, win)) / (1.0 * win * win)
    # fraction of valid (non-zero) samples under each window position;
    # used below to blank out pixels whose window touches edges or holes
    valid = ss.convolve2d((np.absolute(inf) != 0), kernel, mode='same')
    # average the unit phasors of the interferogram, then take the magnitude
    unit_phasors = np.exp(np.complex64(1.0j) * np.angle(inf))
    coh = np.absolute(ss.convolve2d(unit_phasors, kernel, mode='same'))
    # zero out every pixel whose window is not fully covered by valid data
    coh[np.nonzero(valid < 0.999)] = 0.0
    print(np.max(coh), np.min(coh))
    return coh
def overlapFrequency(centerfreq1, bandwidth1, centerfreq2, bandwidth2):
    """Return [start, end] of the band shared by two spectra, or None when
    the two bands do not overlap.
    """
    begin1 = centerfreq1 - bandwidth1 / 2.0
    end1 = centerfreq1 + bandwidth1 / 2.0
    begin2 = centerfreq2 - bandwidth2 / 2.0
    end2 = centerfreq2 + bandwidth2 / 2.0
    # collect every band edge that falls inside the other band
    edges = []
    if begin2 <= begin1 <= end2:
        edges.append(begin1)
    if begin2 <= end1 <= end2:
        edges.append(end1)
    if begin1 < begin2 < end1:
        edges.append(begin2)
    if begin1 < end2 < end1:
        edges.append(end2)
    # a genuine overlap contributes exactly two edges
    if len(edges) != 2:
        return None
    return [min(edges), max(edges)]
def gaussian(size, sigma, scale = 1.0):
    """Return a normalized 2-D Gaussian kernel of shape (size, size).

    size: kernel width/height in samples; must be odd.
    sigma: standard deviation of the Gaussian.
    scale: sample-spacing multiplier applied to the coordinate axis.
    Raises ValueError (a subclass of the Exception previously raised, so
    existing handlers keep working) when size is even.
    """
    import numpy as np
    if size % 2 != 1:
        raise ValueError('size must be odd')
    # integer half-width; (size - 1) is even here, so // is exact and avoids
    # the float half-size the original computed with true division
    hsize = (size - 1) // 2
    x = np.arange(-hsize, hsize + 1) * scale
    f = np.exp(-x**2 / (2.0 * sigma**2)) / (sigma * np.sqrt(2.0 * np.pi))
    # separable kernel: the outer product replaces the two deprecated
    # numpy.matlib.repmat calls and yields the identical matrix
    f2d = np.outer(f, f)
    return f2d / np.sum(f2d)
def create_multi_index(width, rgl):
    """Return the original-image indices of multilooked pixel centres.

    width: size of the original axis (index assumed to start at 0)
    rgl: number of looks; applies to both range and azimuth direction
    """
    import numpy as np
    looked_width = int(width / rgl)
    # centre of the first look window; this formula covers odd and even
    # numbers of looks, including the rgl == 1 and rgl == 2 cases
    first_center = (rgl - 1.0) / 2.0
    return first_center + np.arange(looked_width) * rgl
def create_multi_index2(width2, l1, l2):
    """For data multilooked with l1 and l2 looks, return the index positions
    of the l2-look samples expressed in l1-look coordinates.

    width2: number of samples in the l2-look array.
    Applies to both range and azimuth direction.
    """
    import numpy as np
    # centre shift between the two look windows, rescaled into l1 units
    centre_shift = (l2 - l1) / 2.0
    return (centre_shift + np.arange(width2) * l2) / l1
def fit_surface(x, y, z, wgt, order):
    """Weighted least-squares fit of a 2-D polynomial surface z = f(x, y).

    x, y, z, wgt: column vectors of shape (m, 1) holding the coordinates,
        values and per-point weights of the m data points.
    order: polynomial order, must be >= 1.
    Returns the coefficient vector from numpy.linalg.lstsq; coefficients are
    ordered degree-major with increasing power of y within each degree:
    1, x, y, x**2, x*y, y**2, ...
    """
    import numpy as np
    if order < 1:
        # message corrected: order == 1 is accepted, so "larger than 1" was wrong
        raise ValueError('order must be at least 1.\n')
    # number of data points
    m = x.shape[0]
    # design matrix: constant column, then every monomial x**(i-j) * y**j
    # for each total degree i up to `order`
    a1 = np.ones((m, 1), dtype=np.float64)
    for i in range(1, order+1):
        for j in range(i+1):
            a1 = np.concatenate((a1, x**(i-j)*y**(j)), axis=1)
    # weighted least squares: scale each row by sqrt(weight). The (m, 1)
    # weight column broadcasts across all columns, replacing the deprecated
    # numpy.matlib.repmat tiling
    sqrt_wgt = np.sqrt(wgt)
    a = a1 * sqrt_wgt
    b = z * sqrt_wgt
    # rcond=None selects the documented machine-precision cutoff and silences
    # the FutureWarning raised by the legacy default
    c = np.linalg.lstsq(a, b, rcond=None)[0]
    return c
def cal_surface(x, y, c, order):
    """Evaluate on a grid the polynomial surface fitted by fit_surface.

    x: x coordinates, a row vector of shape (width,)
    y: y coordinates, a column vector of shape (length, 1)
    c: coefficient vector returned by fit_surface
    order: polynomial order used in the fit, must be >= 1
    Returns a (length, width) array of surface values.
    """
    import numpy as np
    if order < 1:
        # message corrected: order == 1 is accepted, so "larger than 1" was wrong
        raise ValueError('order must be at least 1.\n')
    # number of lines
    length = y.shape[0]
    # number of columns; x is a row vector, so its only dimension is the width
    width = x.shape[0]
    # expand both axes to the full grid; np.tile replaces the deprecated
    # numpy.matlib.repmat with identical results
    xg = np.tile(x, (length, 1))
    yg = np.tile(y, (1, width))
    z = c[0] * np.ones((length, width), dtype=np.float64)
    index = 0
    for i in range(1, order+1):
        for j in range(i+1):
            index += 1
            z += c[index] * xg**(i-j) * yg**(j)
    return z
def read_param_for_checking_overlap(leader_file, image_file):
    """Read range geometry parameters from ALOS-2 CEOS leader and image files.

    Returns [rangeSamplingRate (Hz), width (pixels per line), near_range].
    """
    import os
    import isce
    from isceobj.Sensor import xmlPrefix
    import isceobj.Sensor.CEOS as CEOS
    # ALOS-2 range sampling frequencies in Hz, keyed by the MHz value stored
    # in the scene header record
    fsampConst = { 104: 1.047915957140240E+08,
                    52: 5.239579785701190E+07,
                    34: 3.493053190467460E+07,
                    17: 1.746526595233730E+07 }
    # read from leader file; `with` guarantees the handle is closed even when
    # parsing raises (the original manual open/close leaked it in that case)
    with open(leader_file, 'rb') as fp:
        leaderFDR = CEOS.CEOSDB(xml=os.path.join(xmlPrefix,'alos2_slc/leader_file.xml'),dataFile=fp)
        leaderFDR.parse()
        fp.seek(leaderFDR.getEndOfRecordPosition())
        sceneHeaderRecord = CEOS.CEOSDB(xml=os.path.join(xmlPrefix,'alos2_slc/scene_record.xml'),dataFile=fp)
        sceneHeaderRecord.parse()
        fp.seek(sceneHeaderRecord.getEndOfRecordPosition())
        fsamplookup = int(sceneHeaderRecord.metadata['Range sampling rate in MHz'])
        rangeSamplingRate = fsampConst[fsamplookup]
    # read from image file
    with open(image_file, 'rb') as fp:
        imageFDR = CEOS.CEOSDB(xml=os.path.join(xmlPrefix,'alos2_slc/image_file.xml'), dataFile=fp)
        imageFDR.parse()
        fp.seek(imageFDR.getEndOfRecordPosition())
        imageData = CEOS.CEOSDB(xml=os.path.join(xmlPrefix,'alos2_slc/image_record.xml'), dataFile=fp)
        imageData.parseFast()
        width = imageFDR.metadata['Number of pixels per line per SAR channel']
        near_range = imageData.metadata['Slant range to 1st data sample']
    return [rangeSamplingRate, width, near_range]
def check_overlap(ldr_m, img_m, ldr_s, img_s):
import isce
from isceobj.Constants import SPEED_OF_LIGHT
#0 1 2
#[rangeSamplingRate, width, near_range]
mparam = read_param_for_checking_overlap(ldr_m, img_m)
sparam = read_param_for_checking_overlap(ldr_s, img_s)
mcenter = mparam[2] + (mparam[1] - 1) / 2.0 * 0.5 * SPEED_OF_LIGHT / mparam[0]
mwidth = (mparam[1] - 1) * 0.5 * SPEED_OF_LIGHT / mparam[0]
scenter | |
<reponame>oddlama/autokernel<gh_stars>10-100
import autokernel.kconfig
import autokernel.config
import autokernel.lkddb
import autokernel.node_detector
import autokernel.symbol_tracking
from autokernel import __version__
from autokernel import log
from autokernel import util
from autokernel.symbol_tracking import set_value_detect_conflicts
import argparse
import glob
import grp
import gzip
import kconfiglib
import os
import pwd
import re
import shutil
import stat
import subprocess
import sys
import tempfile
from datetime import datetime, timezone
from pathlib import Path
def check_program_exists(exe):
    """Abort with a fatal error unless *exe* can be found via PATH lookup."""
    if shutil.which(exe) is not None:
        return
    log.die("Missing program '{}'. Please ensure that it is installed.".format(exe))
def check_execution_environment(args):
    """
    Checks that some required external programs exist, and some miscellaneous things.

    Security: refuses to run when any path component of the autokernel
    configuration file is writable by someone other than the invoking user or
    root, since the config can cause command execution.
    """
    check_program_exists('uname')
    check_program_exists('mount')
    check_program_exists('umount')
    check_program_exists('make')
    cur_uid = os.geteuid()
    with autokernel.config.config_file_path(args.autokernel_config, warn=True) as config_file:
        # Helper: die with a message naming the offending path component and
        # the principal (user/group/others) that could tamper with it.
        def _die_writable_config_by(component, name):
            log.die("Refusing to run, because the path '{0}' is writable by {1}. This allows {1} to replace the configuration '{2}' and thus inject commands.".format(component, name, config_file))
        if not config_file.exists():
            log.die("Configuration file '{}' does not exist!".format(config_file))
        # Ensure that the config file has the correct mode, to prevent command-injection by other users.
        # No component of the path may be modifiable by anyone else but the current user (or root).
        config_path = config_file.resolve()
        # Walk the resolved file itself plus every ancestor directory up to /.
        for component in [config_path] + [p for p in config_path.parents]:
            st = component.stat()
            # Owner-writable is only a problem when the owner is neither the
            # invoking user nor root.
            if st.st_uid != cur_uid and st.st_uid != 0 and st.st_mode & stat.S_IWUSR:
                _die_writable_config_by(component, 'user {} ({})'.format(st.st_uid, pwd.getpwuid(st.st_uid).pw_name))
            # Group-writable is only tolerated for the root group.
            if st.st_gid != 0 and st.st_mode & stat.S_IWGRP:
                _die_writable_config_by(component, 'group {} ({})'.format(st.st_gid, grp.getgrgid(st.st_gid).gr_name))
            # World-writable is never acceptable.
            if st.st_mode & stat.S_IWOTH:
                _die_writable_config_by(component, 'others')
def replace_common_vars(args, p):
    """Expand the common placeholders ({KERNEL_DIR}, {KERNEL_VERSION},
    {UNAME_ARCH}, {ARCH}) in *p* and return the resulting string.
    """
    # the dict literal evaluates all substitution values eagerly, matching the
    # original's unconditional helper calls
    substitutions = {
        '{KERNEL_DIR}': args.kernel_dir,
        '{KERNEL_VERSION}': autokernel.kconfig.get_kernel_version(args.kernel_dir),
        '{UNAME_ARCH}': autokernel.kconfig.get_uname_arch(),
        '{ARCH}': autokernel.kconfig.get_arch(),
    }
    result = str(p)
    for placeholder, value in substitutions.items():
        result = result.replace(placeholder, value)
    return result
def has_proc_config_gz():
    """Return True when the running kernel exposes its configuration at
    /proc/config.gz.
    """
    return Path("/proc/config.gz").is_file()
def unpack_proc_config_gz():
    """Decompress /proc/config.gz into a NamedTemporaryFile and return the
    open handle; the backing file disappears when the handle is closed.
    """
    unpacked = tempfile.NamedTemporaryFile()
    with gzip.open("/proc/config.gz", "rb") as packed:
        shutil.copyfileobj(packed, unpacked)
    return unpacked
def kconfig_load_file_or_current_config(kconfig, config_file):
    """
    Applies the given kernel config file to kconfig, or uses /proc/config.gz if config_file is None.
    """
    if not config_file:
        log.info("Applying kernel config from '/proc/config.gz'")
        with unpack_proc_config_gz() as tmp:
            kconfig.load_config(os.path.realpath(tmp.name))
        return
    log.info("Applying kernel config from '{}'".format(config_file))
    kconfig.load_config(os.path.realpath(config_file))
def generated_by_autokernel_header():
    """Return the '# Generated by autokernel …' comment line carrying the
    current UTC timestamp.
    """
    timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")
    return "# Generated by autokernel on {}\n".format(timestamp)
def vim_config_modeline_header():
    """Return a vim modeline comment configuring editors for autokernel's
    config-file syntax.
    """
    return "# vim: set ft=ruby ts=4 sw=4 sts=-1 noet:\n"
def apply_autokernel_config(args, kconfig, config):
    """
    Applies the given autokernel configuration to a freshly loaded kconfig object,
    and returns gathered extra information such as the resulting kernel cmdline
    (a list of parameter strings gathered from add_cmdline statements).
    """
    log.info("Applying autokernel configuration")
    # Build cmdline on demand
    kernel_cmdline = []
    # Reset symbol_changes
    autokernel.symbol_tracking.symbol_changes.clear()
    # Looks up the kconfig symbol named by the statement; dies when it doesn't exist
    def get_sym(stmt):
        try:
            return kconfig.syms[stmt.sym_name]
        except KeyError:
            log.die_print_error_at(stmt.at, "symbol '{}' does not exist".format(stmt.sym_name))
    # Asserts that the statement's condition holds against the current kconfig
    def assert_symbol(stmt):
        if not stmt.assert_condition.evaluate(kconfig):
            if stmt.message:
                log.die_print_error_at(stmt.at, "assertion failed: {}".format(stmt.message))
            else:
                log.die_print_error_at(stmt.at, "assertion failed")
    # Sets a symbols value if and asserts that there are no conflicting double assignments
    def set_symbol(stmt):
        # Get the kconfig symbol, and change the value
        sym = get_sym(stmt)
        value = stmt.value
        if not autokernel.kconfig.symbol_can_be_user_assigned(sym):
            log.die_print_error_at(stmt.at, "symbol {} can't be user-assigned".format(sym.name))
        # Skip assignment if value is already pinned and the statement is in try mode.
        if stmt.has_try and sym in autokernel.symbol_tracking.symbol_changes:
            log.verbose("skipping {} {}".format(autokernel.kconfig.value_to_str(value), sym.name))
            return
        # Values of the form $ENV{...} are resolved from the environment first
        if util.is_env_var(value):
            value = util.resolve_env_variable(stmt.at, value)
        if not set_value_detect_conflicts(sym, value, stmt.at):
            log.die_print_error_at(stmt.at, "invalid value {} for symbol {}".format(autokernel.kconfig.value_to_str(value), sym.name))
        # The assignment can silently fail when dependencies forbid the value;
        # detect that by re-reading the symbol afterwards
        if sym.str_value != value:
            if not stmt.has_try:
                # Only throw an error if it wasn't a try
                log.die_print_error_at(stmt.at, "symbol assignment failed: {} from {} → {}".format(
                    sym.name,
                    autokernel.kconfig.value_to_str(sym.str_value),
                    autokernel.kconfig.value_to_str(value)))
            else:
                log.verbose("failed try set {} {} (symbol is currently not assignable to the chosen value)".format(autokernel.kconfig.value_to_str(stmt.value), sym.name))
    # Visit all module nodes and apply configuration changes
    visited = set()
    def visit(module):
        # Ensure we visit only once
        if module.name in visited:
            return
        visited.add(module.name)
        # `use` recurses into the referenced module
        def stmt_use(stmt):
            visit(stmt.module)
        # `merge` loads an external kconf fragment on top of the current state
        def stmt_merge(stmt):
            filename = replace_common_vars(args, stmt.filename)
            log.verbose("Merging external kconf '{}'".format(filename))
            kconfig.load_config(os.path.realpath(filename), replace=False)
            # Assert that there are no conflicts with previously pinned symbols
            for sym in autokernel.symbol_tracking.symbol_changes:
                sc = autokernel.symbol_tracking.symbol_changes[sym]
                if sym.str_value != sc.value:
                    autokernel.symbol_tracking.die_print_conflict(stmt.at, 'merge', sym, sym.str_value, sc)
        def stmt_assert(stmt):
            assert_symbol(stmt)
        def stmt_set(stmt):
            set_symbol(stmt)
        def stmt_add_cmdline(stmt):
            kernel_cmdline.append(stmt.param)
        # Dispatch table keyed on the statement's concrete class
        dispatch_stmt = {
            autokernel.config.ConfigModule.StmtUse: stmt_use,
            autokernel.config.ConfigModule.StmtMerge: stmt_merge,
            autokernel.config.ConfigModule.StmtAssert: stmt_assert,
            autokernel.config.ConfigModule.StmtSet: stmt_set,
            autokernel.config.ConfigModule.StmtAddCmdline: stmt_add_cmdline,
        }
        # True when every condition attached to the statement evaluates truthy
        def conditions_met(stmt):
            for condition in stmt.conditions:
                if not condition.evaluate(kconfig):
                    return False
            return True
        for stmt in module.all_statements_in_order:
            # Ensure all attached conditions are met for the statement.
            if conditions_met(stmt):
                dispatch_stmt[stmt.__class__](stmt)
    # Visit the root node and apply all symbol changes
    visit(config.kernel.module)
    log.verbose(" Changed {} symbols".format(len(autokernel.symbol_tracking.symbol_changes)))
    # Lastly, invalidate all non-assigned symbols to process new default value conditions
    for sym in kconfig.unique_defined_syms:
        if sym.user_value is None:
            sym._invalidate() # pylint: disable=protected-access
    return kernel_cmdline
def execute_command(args, name, cmd, _replace_vars):
    """Run the configured external command *cmd* after expanding variables in
    each argument via *_replace_vars*; die when the command fails. An empty
    command list is a no-op.
    """
    if not cmd.value:
        return
    # Replace variables in command and run it
    command = [_replace_vars(args, part) for part in cmd.value]
    log.info("Executing {}: [{}]".format(name, ', '.join(["'{}'".format(i) for i in command])))
    try:
        subprocess.run(command, check=True)
    except subprocess.CalledProcessError as e:
        log.die("{} failed with code {}. Aborting.".format(name, e.returncode))
def main_setup(args):
    """
    Main function for the 'setup' command.
    """
    log.info("Setting up autokernel configuration at '{}'".format(args.setup_dir))
    setup_dir = Path(args.setup_dir)
    if setup_dir.exists():
        log.die("Refusing to setup: directory '{}' exists".format(args.setup_dir))
    # create the new tree with owner-only permissions
    saved_umask = os.umask(0o077)
    setup_dir.mkdir()
    modules_d_dir = setup_dir / 'modules.d'
    modules_d_dir.mkdir()
    import autokernel.contrib.etc as etc
    import autokernel.contrib.etc.modules_d as modules_d
    # install every packaged *.conf template into its target directory
    for resource_pkg, target_dir in ((etc, setup_dir), (modules_d, modules_d_dir)):
        for entry in util.resource_contents(resource_pkg):
            if entry.endswith('.conf'):
                with (target_dir / entry).open('w') as out:
                    out.write(util.read_resource(entry, pkg=resource_pkg))
    os.umask(saved_umask)
    log.info("A default configuration has been installed")
    log.info("You might want to edit it now.")
def main_check_config(args):
    """
    Main function for the 'check' command.

    Compares an existing kernel configuration (a given file, or the running
    kernel's /proc/config.gz) against the configuration autokernel would
    generate, and prints added/removed/changed symbols.
    """
    if args.compare_config:
        if not args.compare_kernel_dir:
            args.compare_kernel_dir = args.kernel_dir
        kname_cmp = "'{}'".format(args.compare_config)
    else:
        if not has_proc_config_gz():
            log.die("This kernel does not expose /proc/config.gz. Please provide the path to a valid config file manually.")
        # Determine the running kernel version up front so the error message in
        # the except branch below can always reference it. Previously this was
        # only assigned when --compare-kernel-dir was NOT given, causing a
        # NameError in the failure path when it was.
        running_kver = subprocess.run(['uname', '-r'], check=True, stdout=subprocess.PIPE).stdout.decode().strip().splitlines()[0]
        if not args.compare_kernel_dir:
            # Use /usr/src/linux-{kernel_version} as the directory.
            args.compare_kernel_dir = os.path.join('/usr/src/linux-{}'.format(running_kver))
        try:
            check_kernel_dir(args.compare_kernel_dir)
        except argparse.ArgumentTypeError:
            log.die("Could not find sources for running kernel (version {}) in '{}', use --check_kernel_dir to specify it manually.".format(running_kver, args.compare_kernel_dir))
        kname_cmp = 'running kernel'
    log.info("Comparing {} against generated config".format(kname_cmp))
    # Load configuration file
    config = autokernel.config.load_config(args.autokernel_config)
    # Generate the target configuration from the autokernel config
    kconfig_gen = autokernel.kconfig.load_kconfig(args.kernel_dir)
    apply_autokernel_config(args, kconfig_gen, config)
    # Load the comparison kconfig and apply the existing config to it
    kconfig_cmp = autokernel.kconfig.load_kconfig(args.compare_kernel_dir)
    kconfig_load_file_or_current_config(kconfig_cmp, args.compare_config)
    indicator_del = log.color("[31m-[m", "-")
    indicator_add = log.color("[32m+[m", "+")
    indicator_mod = log.color("[33m~[m", "~")
    log.info("Comparing existing config (left) against generated config (right)")
    log.info(" ({}) symbol was removed".format(indicator_del))
    log.info(" ({}) symbol is new".format(indicator_add))
    log.info(" ({}) symbol value changed".format(indicator_mod))
    gen_syms = [s.name for s in kconfig_gen.unique_defined_syms]
    cmp_syms = [s.name for s in kconfig_cmp.unique_defined_syms]
    # order-preserving set intersection / difference helpers
    def intersection(a, b):
        return [i for i in a if i in b]
    def difference(a, b):
        return [i for i in a if i not in b]
    common_syms = intersection(gen_syms, set(cmp_syms))
    common_syms_set = set(common_syms)
    only_gen_syms = difference(gen_syms, common_syms_set)
    only_cmp_syms = difference(cmp_syms, common_syms_set)
    # typo fixed in local names: supress_* -> suppress_*
    suppress_new, suppress_del, suppress_chg = (args.suppress_columns or (False, False, False))
    if not suppress_new:
        for sym in only_gen_syms:
            sym_gen = kconfig_gen.syms[sym]
            print(indicator_add + " {} {}".format(
                autokernel.kconfig.value_to_str(sym_gen.str_value),
                sym))
    if not suppress_del:
        for sym in only_cmp_syms:
            sym_cmp = kconfig_cmp.syms[sym]
            print(indicator_del + " {} {}".format(
                autokernel.kconfig.value_to_str(sym_cmp.str_value),
                sym))
    if not suppress_chg:
        for sym in common_syms:
            sym_gen = kconfig_gen.syms[sym]
            sym_cmp = kconfig_cmp.syms[sym]
            if sym_gen.str_value != sym_cmp.str_value:
                print(indicator_mod + " {} → {} {}".format(
                    autokernel.kconfig.value_to_str(sym_cmp.str_value),
                    autokernel.kconfig.value_to_str(sym_gen.str_value),
                    sym))
def main_generate_config(args, config=None):
    """
    Main function for the 'generate_config' command.
    """
    log.info("Generating kernel configuration")
    # Load the autokernel configuration unless the caller supplied one
    if not config:
        config = autokernel.config.load_config(args.autokernel_config)
    # Default the output location to <kernel_dir>/.config
    if not getattr(args, 'output', None):
        args.output = os.path.join(args.kernel_dir, '.config')
    # Load symbols from Kconfig and apply the autokernel configuration
    kconfig = autokernel.kconfig.load_kconfig(args.kernel_dir)
    apply_autokernel_config(args, kconfig, config)
    # Write configuration to file
    kconfig.write_config(
        filename=args.output,
        header=generated_by_autokernel_header(),
        save_old=False)
    log.info("Configuration written to '{}'".format(args.output))
def clean_kernel_dir(args):
    """
    Clean the kernel tree (call make distclean)
    """
    result = subprocess.run(['make', 'distclean'], cwd=args.kernel_dir)
    if result.returncode != 0:
        log.die("'make distclean' failed in {} with code {}".format(args.kernel_dir, result.returncode))
def build_kernel(args):
    """
    Build the kernel (call make)
    """
    result = subprocess.run(['make'], cwd=args.kernel_dir)
    if result.returncode != 0:
        log.die("'make' failed in {} with code {}".format(args.kernel_dir, result.returncode))
def build_initramfs(args, config, modules_prefix, initramfs_output):
log.info("Building initramfs")
def _replace_vars(args, p):
p = replace_common_vars(args, p)
if '{MODULES_PREFIX}' in p:
if modules_prefix is None:
log.die(f"A variable used {{MODULES_PREFIX}}, but kernel module support is disabled!")
p = p.replace('{MODULES_PREFIX}', modules_prefix)
p = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.